Posted to common-commits@hadoop.apache.org by ec...@apache.org on 2015/09/15 21:11:46 UTC

[01/50] [abbrv] hadoop git commit: HDFS-9019. Adding informative message to sticky bit permission denied exception. Contributed by Xiaoyu Yao.

Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-11890 53c38cc89 -> a3d952b4d


HDFS-9019. Adding informative message to sticky bit permission denied exception. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/970daaa5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/970daaa5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/970daaa5

Branch: refs/heads/HADOOP-11890
Commit: 970daaa5e44d3c09afd46d1c8e923a5096708c44
Parents: 090d266
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue Sep 8 09:57:36 2015 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue Sep 8 09:57:36 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt         |  3 +++
 .../hdfs/server/namenode/FSPermissionChecker.java   | 16 +++++++++++-----
 .../apache/hadoop/fs/permission/TestStickyBit.java  |  3 +++
 3 files changed, 17 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/970daaa5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index de44324..8b50065 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -900,6 +900,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8984. Move replication queues related methods in FSNamesystem to
     BlockManager. (wheat9)
 
+    HDFS-9019. Adding informative message to sticky bit permission denied
+    exception. (xyao)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/970daaa5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index e6570f5..041ce0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -207,7 +207,7 @@ class FSPermissionChecker implements AccessControlEnforcer {
     final INodeAttributes last = inodeAttrs[inodeAttrs.length - 1];
     if (parentAccess != null && parentAccess.implies(FsAction.WRITE)
         && inodeAttrs.length > 1 && last != null) {
-      checkStickyBit(inodeAttrs[inodeAttrs.length - 2], last);
+      checkStickyBit(inodeAttrs[inodeAttrs.length - 2], last, path);
     }
     if (ancestorAccess != null && inodeAttrs.length > 1) {
       check(inodeAttrs, path, ancestorIndex, ancestorAccess);
@@ -405,8 +405,8 @@ class FSPermissionChecker implements AccessControlEnforcer {
   }
 
   /** Guarded by {@link FSNamesystem#readLock()} */
-  private void checkStickyBit(INodeAttributes parent, INodeAttributes inode
-      ) throws AccessControlException {
+  private void checkStickyBit(INodeAttributes parent, INodeAttributes inode,
+      String path) throws AccessControlException {
     if (!parent.getFsPermission().getStickyBit()) {
       return;
     }
@@ -421,8 +421,14 @@ class FSPermissionChecker implements AccessControlEnforcer {
       return;
     }
 
-    throw new AccessControlException("Permission denied by sticky bit setting:" +
-      " user=" + getUser() + ", inode=" + inode);
+    throw new AccessControlException(String.format(
+        "Permission denied by sticky bit: user=%s, path=\"%s\":%s:%s:%s%s, " +
+        "parent=\"%s\":%s:%s:%s%s", user,
+        path, inode.getUserName(), inode.getGroupName(),
+        inode.isDirectory() ? "d" : "-", inode.getFsPermission().toString(),
+        path.substring(0, path.length() - inode.toString().length() - 1 ),
+        parent.getUserName(), parent.getGroupName(),
+        parent.isDirectory() ? "d" : "-", parent.getFsPermission().toString()));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/970daaa5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
index 9d0e31b..d5cece4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
@@ -140,6 +140,9 @@ public class TestStickyBit {
     } catch (IOException ioe) {
       assertTrue(ioe instanceof AccessControlException);
       assertTrue(ioe.getMessage().contains("sticky bit"));
+      assertTrue(ioe.getMessage().contains("user="+user2.getUserName()));
+      assertTrue(ioe.getMessage().contains("path=\"" + file + "\""));
+      assertTrue(ioe.getMessage().contains("parent=\"" + file.getParent() + "\""));
     }
   }
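
For reference, the rule checkStickyBit enforces is the POSIX one: in a
sticky directory such as /tmp (mode 1777), only the directory's owner or
the entry's owner may delete or rename an entry. A condensed sketch of
that predicate, not the exact source (the superuser bypass happens
earlier in the checker):

    // Illustrative restatement of the sticky-bit check; the
    // INodeAttributes accessors are the ones used in the patch above.
    static boolean stickyDenied(String user, INodeAttributes parent,
        INodeAttributes inode) {
      return parent.getFsPermission().getStickyBit()
          && !user.equals(parent.getUserName())
          && !user.equals(inode.getUserName());
    }

With the patch applied, a denied operation reports both the target and
its parent. Built from the format string above, a message might read
like this (user names, path, and modes invented for illustration, and
wrapped here for readability):

    Permission denied by sticky bit: user=bob,
    path="/tmp/file1":alice:supergroup:-rw-r--r--,
    parent="/tmp":hdfs:supergroup:drwxrwxrwt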
 


[39/50] [abbrv] hadoop git commit: YARN-4151. Fix findbugs errors in hadoop-yarn-server-common module. (Meng Ding via wangda)

Posted by ec...@apache.org.
YARN-4151. Fix findbugs errors in hadoop-yarn-server-common module. (Meng Ding via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2a02702
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2a02702
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2a02702

Branch: refs/heads/HADOOP-11890
Commit: e2a02702178db60150cc0c2253d48b8532a474c2
Parents: 53bad4e
Author: Wangda Tan <wa...@apache.org>
Authored: Mon Sep 14 16:00:35 2015 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Mon Sep 14 16:00:43 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                             | 3 +++
 .../yarn/server/api/records/impl/pb/NodeStatusPBImpl.java   | 8 ++++----
 .../org/apache/hadoop/yarn/server/webapp/WebPageUtils.java  | 3 +--
 .../org/apache/hadoop/yarn/server/webapp/WebServices.java   | 9 +++++++++
 4 files changed, 17 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a02702/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7a9d156..766d4ef 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -826,6 +826,9 @@ Release 2.8.0 - UNRELEASED
     YARN-4115. Reduce loglevel of ContainerManagementProtocolProxy to Debug
     (adhoot via rkanter)
 
+    YARN-4151. Fix findbugs errors in hadoop-yarn-server-common module.
+    (Meng Ding via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a02702/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java
index 2d139fe..7d4e83f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeStatusPBImpl.java
@@ -293,7 +293,7 @@ public class NodeStatusPBImpl extends NodeStatus {
   }
 
   @Override
-  public ResourceUtilization getContainersUtilization() {
+  public synchronized ResourceUtilization getContainersUtilization() {
     NodeStatusProtoOrBuilder p =
         this.viaProto ? this.proto : this.builder;
     if (!p.hasContainersUtilization()) {
@@ -303,7 +303,7 @@ public class NodeStatusPBImpl extends NodeStatus {
   }
 
   @Override
-  public void setContainersUtilization(
+  public synchronized void setContainersUtilization(
       ResourceUtilization containersUtilization) {
     maybeInitBuilder();
     if (containersUtilization == null) {
@@ -315,7 +315,7 @@ public class NodeStatusPBImpl extends NodeStatus {
   }
 
   @Override
-  public ResourceUtilization getNodeUtilization() {
+  public synchronized ResourceUtilization getNodeUtilization() {
     NodeStatusProtoOrBuilder p =
         this.viaProto ? this.proto : this.builder;
     if (!p.hasNodeUtilization()) {
@@ -325,7 +325,7 @@ public class NodeStatusPBImpl extends NodeStatus {
   }
 
   @Override
-  public void setNodeUtilization(
+  public synchronized void setNodeUtilization(
       ResourceUtilization nodeUtilization) {
     maybeInitBuilder();
     if (nodeUtilization == null) {
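
The synchronized additions above target findbugs' inconsistent
synchronization warning: most of NodeStatusPBImpl's accessors already
lock the instance while swapping the shared proto/builder pair, so these
four must take the same monitor or a reader can observe a half-built
record. A minimal, self-contained illustration of the pattern (the class
is invented for this sketch):

    class SharedRecord {
      private long proto;      // stands in for the built protobuf
      private long builder;    // stands in for the mutable builder
      private boolean viaProto;

      // Reader and writer use one monitor, so no thread sees viaProto
      // flipped while the builder value is still being written.
      synchronized long get() {
        return viaProto ? proto : builder;
      }

      synchronized void set(long v) {
        builder = v;
        viaProto = false;
      }
    }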

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a02702/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
index ffc5637..f32dd0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
@@ -51,8 +51,7 @@ public class WebPageUtils {
     sb.append("[\n")
       .append("{'sType':'string', 'aTargets': [0]")
       .append(", 'mRender': parseHadoopID }")
-      .append("\n, {'sType':'numeric', 'aTargets': " +
-          (isFairSchedulerPage ? "[6, 7]": "[6, 7]"))
+      .append("\n, {'sType':'numeric', 'aTargets': [6, 7]")
       .append(", 'mRender': renderHadoopDate }")
       .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':");
     if (isFairSchedulerPage) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2a02702/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
index 8c28263..40e40c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebServices.java
@@ -167,6 +167,9 @@ public class WebServices {
     } catch (Exception e) {
       rewrapAndThrowException(e);
     }
+    if (appReports == null) {
+      return allApps;
+    }
     for (ApplicationReport appReport : appReports) {
 
       if (checkAppStates &&
@@ -266,6 +269,9 @@ public class WebServices {
       rewrapAndThrowException(e);
     }
     AppAttemptsInfo appAttemptsInfo = new AppAttemptsInfo();
+    if (appAttemptReports == null) {
+      return appAttemptsInfo;
+    }
     for (ApplicationAttemptReport appAttemptReport : appAttemptReports) {
       AppAttemptInfo appAttemptInfo = new AppAttemptInfo(appAttemptReport);
       appAttemptsInfo.add(appAttemptInfo);
@@ -336,6 +342,9 @@ public class WebServices {
       rewrapAndThrowException(e);
     }
     ContainersInfo containersInfo = new ContainersInfo();
+    if (containerReports == null) {
+      return containersInfo;
+    }
     for (ContainerReport containerReport : containerReports) {
       ContainerInfo containerInfo = new ContainerInfo(containerReport);
       containersInfo.add(containerInfo);
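
The three null guards above share one shape: each report list is filled
inside a try whose catch delegates to rewrapAndThrowException, and
findbugs cannot prove that helper always throws, so on the path it
models the list may still be null when the for-each starts. Returning
the empty result object first silences the warning and stays safe if the
helper's behavior ever changes. A standalone sketch of the shape (types
are stand-ins, not the YARN classes):

    import java.util.ArrayList;
    import java.util.List;

    class NullGuardShape {
      static List<String> fetchReports() { return null; } // failed fetch

      static List<String> collect() {
        List<String> out = new ArrayList<>();
        List<String> reports = fetchReports();
        if (reports == null) {   // the guard this patch adds
          return out;
        }
        for (String r : reports) {
          out.add(r);
        }
        return out;
      }
    }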


[33/50] [abbrv] hadoop git commit: HADOOP-12087. [JDK8] Fix javadoc errors caused by incorrect or illegal tags. (Akira AJISAKA via stevel).

Posted by ec...@apache.org.
HADOOP-12087. [JDK8] Fix javadoc errors caused by incorrect or illegal tags. (Akira AJISAKA via stevel).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72699062
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72699062
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72699062

Branch: refs/heads/HADOOP-11890
Commit: 7269906254afe59ea464ed674d8218f82eeaec26
Parents: 0c7d3f4
Author: Steve Loughran <st...@apache.org>
Authored: Sun Sep 13 14:25:06 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Sun Sep 13 14:25:26 2015 +0100

----------------------------------------------------------------------
 .../server/AuthenticationToken.java              |  3 ++-
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 .../hadoop/tools/util/ProducerConsumer.java      | 19 ++++++++++---------
 .../org/apache/hadoop/tools/util/WorkReport.java |  2 +-
 .../apache/hadoop/tools/util/WorkRequest.java    |  2 +-
 .../yarn/api/records/ApplicationReport.java      |  2 +-
 .../hadoop/yarn/server/api/ContainerType.java    |  1 -
 .../yarn/nodelabels/CommonNodeLabelsManager.java |  2 +-
 .../yarn/server/api/records/NodeStatus.java      |  2 +-
 .../scheduler/policy/OrderingPolicy.java         |  2 +-
 10 files changed, 21 insertions(+), 17 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72699062/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
index 6303c95..8295fe1 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
@@ -62,7 +62,8 @@ public class AuthenticationToken extends AuthToken {
   /**
    * Sets the max inactive time of the token.
    *
-   * @param max inactive time of the token in milliseconds since the epoch.
+   * @param maxInactives inactive time of the token in milliseconds
+   *                     since the epoch.
    */
   public void setMaxInactives(long maxInactives) {
     if (this != AuthenticationToken.ANONYMOUS) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72699062/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index db671ae..a7ea0aa 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1060,6 +1060,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12348. MetricsSystemImpl creates MetricsSourceAdapter with wrong
     time unit parameter. (zxu via rkanter)
 
+    HADOOP-12087. [JDK8] Fix javadoc errors caused by incorrect or illegal
+    tags. (Akira AJISAKA via stevel).
+
   OPTIMIZATIONS
 
     HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72699062/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ProducerConsumer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ProducerConsumer.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ProducerConsumer.java
index 3dad4e3..bf72bb8 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ProducerConsumer.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/ProducerConsumer.java
@@ -32,9 +32,10 @@ import java.util.concurrent.LinkedBlockingQueue;
 
 /**
  * ProducerConsumer class encapsulates input and output queues and a
- * thread-pool of Workers that loop on WorkRequest<T> inputQueue and for each
- * consumed WorkRequest Workers invoke WorkRequestProcessor.processItem()
- * and output resulting WorkReport<R> to the outputQueue.
+ * thread-pool of Workers that loop on WorkRequest{@literal <T>} inputQueue
+ * and for each consumed WorkRequest Workers invoke
+ * WorkRequestProcessor.processItem() and output resulting
+ * WorkReport{@literal <R>} to the outputQueue.
  */
 public class ProducerConsumer<T, R> {
   private Log LOG = LogFactory.getLog(ProducerConsumer.class);
@@ -57,9 +58,9 @@ public class ProducerConsumer<T, R> {
   }
 
   /**
-   *  Add another worker that will consume WorkRequest<T> items from input
-   *  queue, process each item using supplied processor, and for every
-   *  processed item output WorkReport<R> to output queue.
+   *  Add another worker that will consume WorkRequest{@literal <T>} items
+   *  from input queue, process each item using supplied processor, and for
+   *  every processed item output WorkReport{@literal <R>} to output queue.
    *
    *  @param processor  Processor implementing WorkRequestProcessor interface.
    *
@@ -102,7 +103,7 @@ public class ProducerConsumer<T, R> {
   /**
    *  Blocking put workRequest to ProducerConsumer input queue.
    *
-   *  @param  WorkRequest<T> item to be processed.
+   *  @param  workRequest item to be processed.
    */
   public void put(WorkRequest<T> workRequest) {
     boolean isDone = false;
@@ -120,7 +121,7 @@ public class ProducerConsumer<T, R> {
   /**
    *  Blocking take from ProducerConsumer output queue that can be interrupted.
    *
-   *  @return  WorkReport<R> item returned by processor's processItem().
+   *  @return  item returned by processor's processItem().
    */
   public WorkReport<R> take() throws InterruptedException {
     WorkReport<R> report = outputQueue.take();
@@ -132,7 +133,7 @@ public class ProducerConsumer<T, R> {
    *  Blocking take from ProducerConsumer output queue (catches exceptions and
    *  retries forever).
    *
-   *  @return  WorkReport<R> item returned by processor's processItem().
+   *  @return  item returned by processor's processItem().
    */
   public WorkReport<R> blockingTake() {
     while (true) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72699062/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkReport.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkReport.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkReport.java
index 91c9805..43ce212 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkReport.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkReport.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.tools.util;
 
 /**
- *  WorkReport<T> is a simple container for items of class T and its
+ *  WorkReport{@literal <T>} is a simple container for items of class T and its
  *  corresponding retry counter that indicates how many times this item
  *  was previously attempted to be processed.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72699062/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkRequest.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkRequest.java
index 339a3ab..597dd8a 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkRequest.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/WorkRequest.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.tools.util;
 
 /**
- *  WorkRequest<T> is a simple container for items of class T and its
+ *  WorkRequest{@literal <T>} is a simple container for items of class T and its
  *  corresponding retry counter that indicates how many times this item
  *  was previously attempted to be processed.
  */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72699062/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
index fa3b1e5..5de7858 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
@@ -404,7 +404,7 @@ public abstract class ApplicationReport {
   public abstract boolean isUnmanagedApp();
 
   /**
-   * @param value true if RM should not manage the AM
+   * @param unmanagedApplication true if RM should not manage the AM
    */
   @Public
   @Unstable

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72699062/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerType.java
index ffae811..df8a4c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerType.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerType.java
@@ -27,7 +27,6 @@ package org.apache.hadoop.yarn.server.api;
  * <li>{@link #APPLICATION_MASTER}
  * <li>{@link #TASK}
  * </ul>
- * </p>
  */
 public enum ContainerType {
   APPLICATION_MASTER, TASK

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72699062/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index 8cc3770..deec6ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -827,7 +827,7 @@ public class CommonNodeLabelsManager extends AbstractService {
   /**
    * Get mapping of labels info to nodes for specified set of labels.
    *
-   * @param nodelabels
+   * @param labels
    *          set of nodelabels for which labels to nodes mapping will be
    *          returned.
    * @return labels to nodes map

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72699062/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
index 24391bf..7b8262f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/NodeStatus.java
@@ -46,7 +46,7 @@ public abstract class NodeStatus {
    * @param containerStatuses Status of the containers running in this node.
    * @param keepAliveApplications Applications to keep alive.
    * @param nodeHealthStatus Health status of the node.
-   * @param containersUtilizations Utilization of the containers in this node.
+   * @param containersUtilization Utilization of the containers in this node.
    * @param nodeUtilization Utilization of the node.
    * @return New {@code NodeStatus} with the provided information.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/72699062/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/OrderingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/OrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/OrderingPolicy.java
index e3f67ce..1616bb1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/OrderingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/OrderingPolicy.java
@@ -107,7 +107,7 @@ public interface OrderingPolicy<S extends SchedulableEntity> {
   void demandUpdated(S schedulableEntity);
 
   /**
-   * Display information regarding configuration & status
+   * Display information regarding configuration and status
    */
   public String getInfo();
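
Two JDK8 doclint rules drive most of the hunks above: a @param tag must
name an actual method parameter (hence maxInactives, unmanagedApplication,
labels, and containersUtilization), and a bare generic such as
WorkReport<T> is parsed as a malformed HTML tag, so it is escaped with
{@literal ...} or wrapped in {@code ...}. A small class that passes
doclint, written for illustration:

    /**
     * A holder in the style of WorkReport{@literal <T>}; the escape keeps
     * doclint from reading the angle brackets as an unclosed HTML tag.
     *
     * @param <T> type of the held item
     */
    class Holder<T> {
      private final T item;

      /** @param item the value to hold (the tag must match the name) */
      Holder(T item) {
        this.item = item;
      }

      /** @return the held item */
      T get() {
        return item;
      }
    }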
   


[07/50] [abbrv] hadoop git commit: HDFS-8716. Introduce a new config specifically for safe mode block count. Contributed by Chang Li.

Posted by ec...@apache.org.
HDFS-8716. Introduce a new config specifically for safe mode block count. Contributed by Chang Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d13335f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d13335f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d13335f

Branch: refs/heads/HADOOP-11890
Commit: 4d13335fc93780126bab35de92a640fa31b204d9
Parents: 0113e45
Author: Kihwal Lee <ki...@apache.org>
Authored: Wed Sep 9 11:08:24 2015 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Wed Sep 9 11:08:24 2015 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt         |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java  |  2 ++
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   | 16 +++++++++++-----
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml | 12 ++++++++++++
 .../hdfs/server/namenode/TestFSNamesystem.java      | 14 ++++++++++++++
 5 files changed, 42 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d13335f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a07fca2..77f3b3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -905,6 +905,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8860. Remove unused Replica copyOnWrite code (Lei (Eddy) Xu via Colin P. McCabe)
 
+    HDFS-8716. Introduce a new config specifically for safe mode block count
+    (Chang Li via kihwal)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d13335f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 84858f6..62abc35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -170,6 +170,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;
   public static final String  DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";
   public static final int     DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
+  public static final String  DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY =
+      "dfs.namenode.safemode.replication.min";
   public static final String  DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY = "dfs.namenode.replication.pending.timeout-sec";
   public static final int     DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY = "dfs.namenode.replication.max-streams";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d13335f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 1b770b0..7d766b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -64,8 +64,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY;
@@ -4151,9 +4149,17 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,
         DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT);
       this.extension = conf.getInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
-      this.safeReplication = conf.getInt(DFS_NAMENODE_REPLICATION_MIN_KEY, 
-                                         DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
-      
+      int minReplication =
+          conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
+              DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
+      // DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY is an expert-level
+      // setting; setting it lower than the min replication is not
+      // recommended and can be dangerous for production setups.
+      // When it is unset, safeReplication falls back to
+      // dfs.namenode.replication.min.
+      this.safeReplication =
+          conf.getInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY,
+              minReplication);
+
       LOG.info(DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY + " = " + threshold);
       LOG.info(DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY + " = " + datanodeThreshold);
       LOG.info(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + "     = " + extension);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d13335f/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 96776e4..62665fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -470,6 +470,18 @@
 </property>
 
 <property>
+  <name>dfs.namenode.safemode.replication.min</name>
+  <value></value>
+  <description>
+      A separate minimum replication factor for calculating safe block count.
+      This is an expert-level setting.
+      Setting this lower than dfs.namenode.replication.min
+      is not recommended and can be dangerous for production setups.
+      When it is not set, it takes its value from dfs.namenode.replication.min.
+  </description>
+</property>
+
+<property>
   <name>dfs.blocksize</name>
   <value>134217728</value>
   <description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d13335f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index c2d8c57..af9a6f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -29,11 +29,13 @@ import java.util.Collection;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -264,4 +266,16 @@ public class TestFSNamesystem {
     Assert.assertEquals("Expected number of blocked thread not found",
                         threadCount, rwLock.getQueueLength());
   }
+
+  @Test
+  public void testSafemodeReplicationConf() throws IOException {
+    Configuration conf = new Configuration();
+    FSImage fsImage = Mockito.mock(FSImage.class);
+    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
+    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 2);
+    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
+    SafeModeInfo safemodeInfo = fsn.getSafeModeInfoForTests();
+    assertTrue(safemodeInfo.toString().contains("Minimal replication = 2"));
+  }
 }
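
As a usage sketch, the new key is read through the DFSConfigKeys
constant added above; the surrounding setup is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    class SafemodeConfSketch {
      static Configuration build() {
        Configuration conf = new Configuration();
        // Regular write-path minimum replication.
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 2);
        // HDFS-8716: safe mode counts a block as safe at this replica
        // count instead. Left unset, it falls back to the key above,
        // as testSafemodeReplicationConf verifies. Note the description
        // in hdfs-default.xml flags values below the regular minimum
        // as risky for production.
        conf.setInt(
            DFSConfigKeys.DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY, 1);
        return conf;
      }
    }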


[11/50] [abbrv] hadoop git commit: MAPREDUCE-6415. Create a tool to combine aggregated logs into HAR files. (Robert Kanter via kasha)

Posted by ec...@apache.org.
MAPREDUCE-6415. Create a tool to combine aggregated logs into HAR files. (Robert Kanter via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/119cc75e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/119cc75e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/119cc75e

Branch: refs/heads/HADOOP-11890
Commit: 119cc75e7ebd723790f6326498383304aba384a2
Parents: 4014ce5
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Sep 9 17:41:27 2015 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Wed Sep 9 17:45:19 2015 -0700

----------------------------------------------------------------------
 MAPREDUCE-6415.003.patch                        | 1308 ++++++++++++++++++
 .../main/resources/assemblies/hadoop-tools.xml  |    7 +
 hadoop-mapreduce-project/CHANGES.txt            |    3 +
 hadoop-mapreduce-project/bin/mapred             |    8 +
 hadoop-project/pom.xml                          |    5 +
 hadoop-tools/hadoop-archive-logs/pom.xml        |  171 +++
 .../apache/hadoop/tools/HadoopArchiveLogs.java  |  403 ++++++
 .../hadoop/tools/HadoopArchiveLogsRunner.java   |  180 +++
 .../hadoop/tools/TestHadoopArchiveLogs.java     |  293 ++++
 .../tools/TestHadoopArchiveLogsRunner.java      |  143 ++
 hadoop-tools/hadoop-tools-dist/pom.xml          |    5 +
 hadoop-tools/pom.xml                            |    1 +
 12 files changed, 2527 insertions(+)
----------------------------------------------------------------------
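
Before the patch body: HadoopArchiveLogs implements Tool, so besides the
new "mapred archive-logs" subcommand it can be driven programmatically
via ToolRunner. Option names come from handleOpts in the patch; the
wrapper class and the chosen values are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.HadoopArchiveLogs;
    import org.apache.hadoop.util.ToolRunner;

    class ArchiveLogsDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Archive at most 50 eligible apps, each with at least 5
        // aggregated log files.
        int exit = ToolRunner.run(conf, new HadoopArchiveLogs(conf),
            new String[] {"-maxEligibleApps", "50",
                          "-minNumberLogFiles", "5"});
        System.exit(exit);
      }
    }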


http://git-wip-us.apache.org/repos/asf/hadoop/blob/119cc75e/MAPREDUCE-6415.003.patch
----------------------------------------------------------------------
diff --git a/MAPREDUCE-6415.003.patch b/MAPREDUCE-6415.003.patch
new file mode 100644
index 0000000..7c14341
--- /dev/null
+++ b/MAPREDUCE-6415.003.patch
@@ -0,0 +1,1308 @@
+diff --git hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
+index fa55703..3f646e6 100644
+--- hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
++++ hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
+@@ -52,6 +52,13 @@
+       </includes>
+     </fileSet>
+     <fileSet>
++      <directory>../hadoop-archive-logs/target</directory>
++      <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
++      <includes>
++        <include>*-sources.jar</include>
++      </includes>
++    </fileSet>
++    <fileSet>
+       <directory>../hadoop-datajoin/target</directory>
+       <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
+       <includes>
+diff --git hadoop-mapreduce-project/bin/mapred hadoop-mapreduce-project/bin/mapred
+index 426af80..2d56a8d 100755
+--- hadoop-mapreduce-project/bin/mapred
++++ hadoop-mapreduce-project/bin/mapred
+@@ -20,6 +20,7 @@ MYNAME="${BASH_SOURCE-$0}"
+ function hadoop_usage
+ {
+   hadoop_add_subcommand "archive" "create a hadoop archive"
++  hadoop_add_subcommand "archive-logs" "combine aggregated logs into hadoop archives"
+   hadoop_add_subcommand "classpath" "prints the class path needed for running mapreduce subcommands"
+   hadoop_add_subcommand "distcp" "copy file or directories recursively"
+   hadoop_add_subcommand "historyserver" "run job history servers as a standalone daemon"
+@@ -72,6 +73,13 @@ case ${COMMAND} in
+     hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+     HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+   ;;
++  archive-logs)
++    CLASS=org.apache.hadoop.tools.HadoopArchiveLogs
++    hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
++    hadoop_add_classpath "${TOOL_PATH}"
++    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
++    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
++  ;;
+   classpath)
+     hadoop_do_classpath_subcommand CLASS "$@"
+   ;;
+diff --git hadoop-project/pom.xml hadoop-project/pom.xml
+index 9863475..636e063 100644
+--- hadoop-project/pom.xml
++++ hadoop-project/pom.xml
+@@ -324,6 +324,11 @@
+       </dependency>
+       <dependency>
+         <groupId>org.apache.hadoop</groupId>
++        <artifactId>hadoop-archive-logs</artifactId>
++        <version>${project.version}</version>
++      </dependency>
++      <dependency>
++        <groupId>org.apache.hadoop</groupId>
+         <artifactId>hadoop-distcp</artifactId>
+         <version>${project.version}</version>
+       </dependency>
+diff --git hadoop-tools/hadoop-archive-logs/pom.xml hadoop-tools/hadoop-archive-logs/pom.xml
+new file mode 100644
+index 0000000..2a480a8
+--- /dev/null
++++ hadoop-tools/hadoop-archive-logs/pom.xml
+@@ -0,0 +1,171 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!--
++  Licensed under the Apache License, Version 2.0 (the "License");
++  you may not use this file except in compliance with the License.
++  You may obtain a copy of the License at
++
++    http://www.apache.org/licenses/LICENSE-2.0
++
++  Unless required by applicable law or agreed to in writing, software
++  distributed under the License is distributed on an "AS IS" BASIS,
++  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++  See the License for the specific language governing permissions and
++  limitations under the License. See accompanying LICENSE file.
++-->
++<project xmlns="http://maven.apache.org/POM/4.0.0"
++  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
++  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
++                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
++  <modelVersion>4.0.0</modelVersion>
++  <parent>
++    <groupId>org.apache.hadoop</groupId>
++    <artifactId>hadoop-project</artifactId>
++    <version>3.0.0-SNAPSHOT</version>
++    <relativePath>../../hadoop-project</relativePath>
++  </parent>
++  <groupId>org.apache.hadoop</groupId>
++  <artifactId>hadoop-archive-logs</artifactId>
++  <version>3.0.0-SNAPSHOT</version>
++  <description>Apache Hadoop Archive Logs</description>
++  <name>Apache Hadoop Archive Logs</name>
++  <packaging>jar</packaging>
++
++  <properties>
++    <hadoop.log.dir>${project.build.directory}/log</hadoop.log.dir>
++  </properties>
++
++  <dependencies>
++    <dependency>
++      <groupId>junit</groupId>
++      <artifactId>junit</artifactId>
++      <scope>test</scope>
++    </dependency>
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-mapreduce-client-core</artifactId>
++      <scope>provided</scope>
++    </dependency>
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-yarn-applications-distributedshell</artifactId>
++      <scope>provided</scope>
++    </dependency>
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-common</artifactId>
++      <scope>provided</scope>
++    </dependency>
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-hdfs</artifactId>
++      <scope>test</scope>
++      <type>test-jar</type>
++    </dependency>
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-yarn-server-tests</artifactId>
++      <type>test-jar</type>
++      <scope>test</scope>
++    </dependency>
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-archives</artifactId>
++    </dependency>
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-yarn-common</artifactId>
++      <scope>provided</scope>
++    </dependency>
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-yarn-api</artifactId>
++      <scope>provided</scope>
++    </dependency>
++    <dependency>
++      <groupId>com.google.guava</groupId>
++      <artifactId>guava</artifactId>
++      <scope>provided</scope>
++    </dependency>
++    <dependency>
++      <groupId>commons-io</groupId>
++      <artifactId>commons-io</artifactId>
++      <scope>provided</scope>
++    </dependency>
++    <dependency>
++      <groupId>commons-logging</groupId>
++      <artifactId>commons-logging</artifactId>
++      <scope>provided</scope>
++    </dependency>
++    <dependency>
++      <groupId>commons-cli</groupId>
++      <artifactId>commons-cli</artifactId>
++      <scope>provided</scope>
++    </dependency>
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-yarn-client</artifactId>
++      <scope>provided</scope>
++    </dependency>
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
++      <scope>provided</scope>
++    </dependency>
++    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-hdfs</artifactId>
++      <scope>test</scope>
++    </dependency>
++    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-common</artifactId>
++      <scope>test</scope>
++      <type>test-jar</type>
++    </dependency>
++    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
++      <scope>test</scope>
++      <type>test-jar</type>
++    </dependency>
++  </dependencies>
++
++  <build>
++    <plugins>
++      <plugin>
++        <groupId>org.apache.maven.plugins</groupId>
++        <artifactId>maven-antrun-plugin</artifactId>
++        <executions>
++          <execution>
++            <id>create-log-dir</id>
++            <phase>process-test-resources</phase>
++            <goals>
++              <goal>run</goal>
++            </goals>
++            <configuration>
++              <target>
++                <delete dir="${test.build.data}"/>
++                <mkdir dir="${test.build.data}"/>
++                <mkdir dir="${hadoop.log.dir}"/>
++              </target>
++            </configuration>
++          </execution>
++        </executions>
++      </plugin>
++      <plugin>
++        <groupId>org.apache.maven.plugins</groupId>
++        <artifactId>maven-jar-plugin</artifactId>
++         <configuration>
++          <archive>
++           <manifest>
++            <mainClass>org.apache.hadoop.tools.HadoopArchiveLogs</mainClass>
++           </manifest>
++         </archive>
++        </configuration>
++       </plugin>
++    </plugins>
++  </build>
++</project>
+diff --git hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
+new file mode 100644
+index 0000000..4778dcb
+--- /dev/null
++++ hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
+@@ -0,0 +1,403 @@
++/**
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.hadoop.tools;
++
++import com.google.common.annotations.VisibleForTesting;
++import org.apache.commons.cli.CommandLine;
++import org.apache.commons.cli.CommandLineParser;
++import org.apache.commons.cli.GnuParser;
++import org.apache.commons.cli.HelpFormatter;
++import org.apache.commons.cli.Option;
++import org.apache.commons.cli.Options;
++import org.apache.commons.cli.ParseException;
++import org.apache.commons.logging.Log;
++import org.apache.commons.logging.LogFactory;
++import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.fs.FileStatus;
++import org.apache.hadoop.fs.FileSystem;
++import org.apache.hadoop.fs.Path;
++import org.apache.hadoop.fs.permission.FsAction;
++import org.apache.hadoop.fs.permission.FsPermission;
++import org.apache.hadoop.mapred.JobConf;
++import org.apache.hadoop.util.Tool;
++import org.apache.hadoop.util.ToolRunner;
++import org.apache.hadoop.yarn.api.records.ApplicationReport;
++import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
++import org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster;
++import org.apache.hadoop.yarn.applications.distributedshell.Client;
++import org.apache.hadoop.yarn.client.api.YarnClient;
++import org.apache.hadoop.yarn.conf.YarnConfiguration;
++import org.apache.hadoop.yarn.exceptions.YarnException;
++import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
++
++import java.io.File;
++import java.io.FileWriter;
++import java.io.IOException;
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.Comparator;
++import java.util.HashSet;
++import java.util.Iterator;
++import java.util.List;
++import java.util.Set;
++
++
++/**
++ * This tool moves Aggregated Log files into HAR archives using the
++ * {@link HadoopArchives} tool and the Distributed Shell via the
++ * {@link HadoopArchiveLogsRunner}.
++ */
++public class HadoopArchiveLogs implements Tool {
++  private static final Log LOG = LogFactory.getLog(HadoopArchiveLogs.class);
++
++  private static final String HELP_OPTION = "help";
++  private static final String MAX_ELIGIBLE_APPS_OPTION = "maxEligibleApps";
++  private static final String MIN_NUM_LOG_FILES_OPTION = "minNumberLogFiles";
++  private static final String MAX_TOTAL_LOGS_SIZE_OPTION = "maxTotalLogsSize";
++  private static final String MEMORY_OPTION = "memory";
++
++  private static final int DEFAULT_MAX_ELIGIBLE = -1;
++  private static final int DEFAULT_MIN_NUM_LOG_FILES = 20;
++  private static final long DEFAULT_MAX_TOTAL_LOGS_SIZE = 1024L;
++  private static final long DEFAULT_MEMORY = 1024L;
++
++  @VisibleForTesting
++  int maxEligible = DEFAULT_MAX_ELIGIBLE;
++  @VisibleForTesting
++  int minNumLogFiles = DEFAULT_MIN_NUM_LOG_FILES;
++  @VisibleForTesting
++  long maxTotalLogsSize = DEFAULT_MAX_TOTAL_LOGS_SIZE * 1024L * 1024L;
++  @VisibleForTesting
++  long memory = DEFAULT_MEMORY;
++
++  @VisibleForTesting
++  Set<ApplicationReport> eligibleApplications;
++
++  private JobConf conf;
++
++  public HadoopArchiveLogs(Configuration conf) {
++    setConf(conf);
++    eligibleApplications = new HashSet<>();
++  }
++
++  public static void main(String[] args) {
++    JobConf job = new JobConf(HadoopArchiveLogs.class);
++
++    HadoopArchiveLogs hal = new HadoopArchiveLogs(job);
++    int ret = 0;
++
++    try{
++      ret = ToolRunner.run(hal, args);
++    } catch(Exception e) {
++      LOG.debug("Exception", e);
++      System.err.println(e.getClass().getSimpleName());
++      final String s = e.getLocalizedMessage();
++      if (s != null) {
++        System.err.println(s);
++      } else {
++        e.printStackTrace(System.err);
++      }
++      System.exit(1);
++    }
++    System.exit(ret);
++  }
++
++  @Override
++  public int run(String[] args) throws Exception {
++    handleOpts(args);
++
++    findAggregatedApps();
++
++    FileSystem fs = null;
++    Path remoteRootLogDir = new Path(conf.get(
++        YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
++        YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
++    String suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(conf);
++    Path workingDir = new Path(remoteRootLogDir, "archive-logs-work");
++    try {
++      fs = FileSystem.get(conf);
++      checkFiles(fs, remoteRootLogDir, suffix);
++
++      // Prepare working directory
++      if (fs.exists(workingDir)) {
++        fs.delete(workingDir, true);
++      }
++      fs.mkdirs(workingDir);
++      fs.setPermission(workingDir,
++          new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));
++    } finally {
++      if (fs != null) {
++        fs.close();
++      }
++    }
++
++    checkMaxEligible();
++
++    if (eligibleApplications.isEmpty()) {
++      LOG.info("No eligible applications to process");
++      System.exit(0);
++    }
++
++    StringBuilder sb =
++        new StringBuilder("Will process the following applications:");
++    for (ApplicationReport report : eligibleApplications) {
++      sb.append("\n\t").append(report.getApplicationId());
++    }
++    LOG.info(sb.toString());
++
++    File localScript = File.createTempFile("hadoop-archive-logs-", ".sh");
++    generateScript(localScript, workingDir, remoteRootLogDir, suffix);
++
++    if (runDistributedShell(localScript)) {
++      return 0;
++    }
++    return -1;
++  }
++
++  private void handleOpts(String[] args) throws ParseException {
++    Options opts = new Options();
++    Option helpOpt = new Option(HELP_OPTION, false, "Prints this message");
++    Option maxEligibleOpt = new Option(MAX_ELIGIBLE_APPS_OPTION, true,
++        "The maximum number of eligible apps to process (default: "
++            + DEFAULT_MAX_ELIGIBLE + " (all))");
++    maxEligibleOpt.setArgName("n");
++    Option minNumLogFilesOpt = new Option(MIN_NUM_LOG_FILES_OPTION, true,
++        "The minimum number of log files required to be eligible (default: "
++            + DEFAULT_MIN_NUM_LOG_FILES + ")");
++    minNumLogFilesOpt.setArgName("n");
++    Option maxTotalLogsSizeOpt = new Option(MAX_TOTAL_LOGS_SIZE_OPTION, true,
++        "The maximum total logs size (in megabytes) required to be eligible" +
++            " (default: " + DEFAULT_MAX_TOTAL_LOGS_SIZE + ")");
++    maxTotalLogsSizeOpt.setArgName("megabytes");
++    Option memoryOpt = new Option(MEMORY_OPTION, true,
++        "The amount of memory (in megabytes) for each container (default: "
++            + DEFAULT_MEMORY + ")");
++    memoryOpt.setArgName("megabytes");
++    opts.addOption(helpOpt);
++    opts.addOption(maxEligibleOpt);
++    opts.addOption(minNumLogFilesOpt);
++    opts.addOption(maxTotalLogsSizeOpt);
++    opts.addOption(memoryOpt);
++
++    try {
++      CommandLineParser parser = new GnuParser();
++      CommandLine commandLine = parser.parse(opts, args);
++      if (commandLine.hasOption(HELP_OPTION)) {
++        HelpFormatter formatter = new HelpFormatter();
++        formatter.printHelp("yarn archive-logs", opts);
++        System.exit(0);
++      }
++      if (commandLine.hasOption(MAX_ELIGIBLE_APPS_OPTION)) {
++        maxEligible = Integer.parseInt(
++            commandLine.getOptionValue(MAX_ELIGIBLE_APPS_OPTION));
++        if (maxEligible == 0) {
++          LOG.info("Setting " + MAX_ELIGIBLE_APPS_OPTION + " to 0 accomplishes "
++              + "nothing. Please either set it to a negative value "
++              + "(default, all) or a more reasonable value.");
++          System.exit(0);
++        }
++      }
++      if (commandLine.hasOption(MIN_NUM_LOG_FILES_OPTION)) {
++        minNumLogFiles = Integer.parseInt(
++            commandLine.getOptionValue(MIN_NUM_LOG_FILES_OPTION));
++      }
++      if (commandLine.hasOption(MAX_TOTAL_LOGS_SIZE_OPTION)) {
++        maxTotalLogsSize = Long.parseLong(
++            commandLine.getOptionValue(MAX_TOTAL_LOGS_SIZE_OPTION));
++        maxTotalLogsSize *= 1024L * 1024L;
++      }
++      if (commandLine.hasOption(MEMORY_OPTION)) {
++        memory = Long.parseLong(commandLine.getOptionValue(MEMORY_OPTION));
++      }
++    } catch (ParseException pe) {
++      HelpFormatter formatter = new HelpFormatter();
++      formatter.printHelp("yarn archive-logs", opts);
++      throw pe;
++    }
++  }
++
++  @VisibleForTesting
++  void findAggregatedApps() throws IOException, YarnException {
++    YarnClient client = YarnClient.createYarnClient();
++    try {
++      client.init(getConf());
++      client.start();
++      List<ApplicationReport> reports = client.getApplications();
++      for (ApplicationReport report : reports) {
++        LogAggregationStatus aggStatus = report.getLogAggregationStatus();
++        // Compare enums with ==, which is also null-safe, rather than equals()
++        if (aggStatus == LogAggregationStatus.SUCCEEDED ||
++            aggStatus == LogAggregationStatus.FAILED) {
++          eligibleApplications.add(report);
++        }
++      }
++    } finally {
++      if (client != null) {
++        client.stop();
++      }
++    }
++  }
++
++  @VisibleForTesting
++  void checkFiles(FileSystem fs, Path remoteRootLogDir, String suffix) {
++    for (Iterator<ApplicationReport> reportIt = eligibleApplications.iterator();
++         reportIt.hasNext(); ) {
++      ApplicationReport report = reportIt.next();
++      long totalFileSize = 0L;
++      try {
++        FileStatus[] files = fs.listStatus(
++            LogAggregationUtils.getRemoteAppLogDir(remoteRootLogDir,
++                report.getApplicationId(), report.getUser(), suffix));
++        if (files.length < minNumLogFiles) {
++          reportIt.remove();
++        } else {
++          for (FileStatus file : files) {
++            if (file.getPath().getName().equals(report.getApplicationId()
++                + ".har")) {
++              reportIt.remove();
++              break;
++            }
++            totalFileSize += file.getLen();
++          }
++          if (totalFileSize > maxTotalLogsSize) {
++            reportIt.remove();
++          }
++        }
++      } catch (IOException ioe) {
++        // If the user doesn't have permission or it doesn't exist, then skip it
++        reportIt.remove();
++      }
++    }
++  }
++
++  @VisibleForTesting
++  void checkMaxEligible() {
++    // If we have too many eligible apps, remove the newest ones first
++    if (maxEligible > 0 && eligibleApplications.size() > maxEligible) {
++      List<ApplicationReport> sortedApplications =
++          new ArrayList<ApplicationReport>(eligibleApplications);
++      Collections.sort(sortedApplications, new Comparator<ApplicationReport>() {
++        @Override
++        public int compare(ApplicationReport o1, ApplicationReport o2) {
++          return Long.compare(o1.getFinishTime(), o2.getFinishTime());
++        }
++      });
++      for (int i = maxEligible; i < sortedApplications.size(); i++) {
++        eligibleApplications.remove(sortedApplications.get(i));
++      }
++    }
++  }
++
++  /*
++  The generated script looks like this:
++  #!/bin/bash
++  set -e
++  set -x
++  if [ "$YARN_SHELL_ID" == "1" ]; then
++        appId="application_1440448768987_0001"
++        user="rkanter"
++  elif [ "$YARN_SHELL_ID" == "2" ]; then
++        appId="application_1440448768987_0002"
++        user="rkanter"
++  else
++        echo "Unknown Mapping!"
++        exit 1
++  fi
++  export HADOOP_CLIENT_OPTS="-Xmx1024m"
++  export HADOOP_CLASSPATH=/dist/share/hadoop/tools/lib/hadoop-archive-logs-2.8.0-SNAPSHOT.jar:/dist/share/hadoop/tools/lib/hadoop-archives-2.8.0-SNAPSHOT.jar
++  "$HADOOP_HOME"/bin/hadoop org.apache.hadoop.tools.HadoopArchiveLogsRunner -appId "$appId" -user "$user" -workingDir /tmp/logs/archive-logs-work -remoteRootLogDir /tmp/logs -suffix logs
++   */
++  @VisibleForTesting
++  void generateScript(File localScript, Path workingDir,
++        Path remoteRootLogDir, String suffix) throws IOException {
++    LOG.info("Generating script at: " + localScript.getAbsolutePath());
++    String halrJarPath = HadoopArchiveLogsRunner.class.getProtectionDomain()
++        .getCodeSource().getLocation().getPath();
++    String harJarPath = HadoopArchives.class.getProtectionDomain()
++        .getCodeSource().getLocation().getPath();
++    String classpath = halrJarPath + File.pathSeparator + harJarPath;
++    FileWriter fw = null;
++    try {
++      fw = new FileWriter(localScript);
++      fw.write("#!/bin/bash\nset -e\nset -x\n");
++      int containerCount = 1;
++      for (ApplicationReport report : eligibleApplications) {
++        fw.write("if [ \"$YARN_SHELL_ID\" == \"");
++        fw.write(Integer.toString(containerCount));
++        fw.write("\" ]; then\n\tappId=\"");
++        fw.write(report.getApplicationId().toString());
++        fw.write("\"\n\tuser=\"");
++        fw.write(report.getUser());
++        fw.write("\"\nel");
++        containerCount++;
++      }
++      fw.write("se\n\techo \"Unknown Mapping!\"\n\texit 1\nfi\n");
++      fw.write("export HADOOP_CLIENT_OPTS=\"-Xmx");
++      fw.write(Long.toString(memory));
++      fw.write("m\"\n");
++      fw.write("export HADOOP_CLASSPATH=");
++      fw.write(classpath);
++      fw.write("\n\"$HADOOP_HOME\"/bin/hadoop ");
++      fw.write(HadoopArchiveLogsRunner.class.getName());
++      fw.write(" -appId \"$appId\" -user \"$user\" -workingDir ");
++      fw.write(workingDir.toString());
++      fw.write(" -remoteRootLogDir ");
++      fw.write(remoteRootLogDir.toString());
++      fw.write(" -suffix ");
++      fw.write(suffix);
++      fw.write("\n");
++    } finally {
++      if (fw != null) {
++        fw.close();
++      }
++    }
++  }
++
++  private boolean runDistributedShell(File localScript) throws Exception {
++    String[] dsArgs = {
++        "--appname",
++        "ArchiveLogs",
++        "--jar",
++        ApplicationMaster.class.getProtectionDomain().getCodeSource()
++            .getLocation().getPath(),
++        "--num_containers",
++        Integer.toString(eligibleApplications.size()),
++        "--container_memory",
++        Long.toString(memory),
++        "--shell_script",
++        localScript.getAbsolutePath()
++    };
++    final Client dsClient = new Client(new Configuration(conf));
++    dsClient.init(dsArgs);
++    return dsClient.run();
++  }
++
++  @Override
++  public void setConf(Configuration conf) {
++    if (conf instanceof JobConf) {
++      this.conf = (JobConf) conf;
++    } else {
++      this.conf = new JobConf(conf, HadoopArchiveLogs.class);
++    }
++  }
++
++  @Override
++  public Configuration getConf() {
++    return this.conf;
++  }
++}
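For reference, here is a minimal sketch (not part of the patch) of driving the
tool in-process instead of through the new "mapred archive-logs" subcommand
added further down; it mirrors what main() above does with ToolRunner, and the
hypothetical driver class and option values are illustrative only:

  // Hypothetical driver, assuming the hadoop-archive-logs and hadoop-archives
  // jars are on the classpath. Option names match the constants defined in
  // HadoopArchiveLogs; the numeric values are examples.
  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.tools.HadoopArchiveLogs;
  import org.apache.hadoop.util.ToolRunner;

  public class ArchiveLogsDriver {
    public static void main(String[] args) throws Exception {
      JobConf conf = new JobConf(HadoopArchiveLogs.class);
      String[] toolArgs = {
          "-maxEligibleApps", "25",     // cap apps processed per run
          "-minNumberLogFiles", "20",   // skip apps with fewer log files
          "-maxTotalLogsSize", "1024",  // megabytes
          "-memory", "1024"             // container memory, in megabytes
      };
      System.exit(ToolRunner.run(new HadoopArchiveLogs(conf), toolArgs));
    }
  }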
+diff --git hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
+new file mode 100644
+index 0000000..347e5fb
+--- /dev/null
++++ hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
+@@ -0,0 +1,180 @@
++/**
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.hadoop.tools;
++
++import org.apache.commons.cli.CommandLine;
++import org.apache.commons.cli.CommandLineParser;
++import org.apache.commons.cli.GnuParser;
++import org.apache.commons.cli.Option;
++import org.apache.commons.cli.Options;
++import org.apache.commons.cli.ParseException;
++import org.apache.commons.logging.Log;
++import org.apache.commons.logging.LogFactory;
++import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.fs.FileStatus;
++import org.apache.hadoop.fs.FileSystem;
++import org.apache.hadoop.fs.Path;
++import org.apache.hadoop.fs.PathFilter;
++import org.apache.hadoop.mapred.JobConf;
++import org.apache.hadoop.util.Tool;
++import org.apache.hadoop.util.ToolRunner;
++
++
++/**
++ * This is a child program designed to be used by the {@link HadoopArchiveLogs}
++ * tool via the Distributed Shell.  It's not meant to be run directly.
++ */
++public class HadoopArchiveLogsRunner implements Tool {
++  private static final Log LOG =
++      LogFactory.getLog(HadoopArchiveLogsRunner.class);
++
++  private static final String APP_ID_OPTION = "appId";
++  private static final String USER_OPTION = "user";
++  private static final String WORKING_DIR_OPTION = "workingDir";
++  private static final String REMOTE_ROOT_LOG_DIR = "remoteRootLogDir";
++  private static final String SUFFIX_OPTION = "suffix";
++
++  private String appId;
++  private String user;
++  private String workingDir;
++  private String remoteLogDir;
++  private String suffix;
++
++  private JobConf conf;
++
++  public HadoopArchiveLogsRunner(Configuration conf) {
++    setConf(conf);
++  }
++
++  public static void main(String[] args) {
++    JobConf job = new JobConf(HadoopArchiveLogsRunner.class);
++
++    HadoopArchiveLogsRunner halr = new HadoopArchiveLogsRunner(job);
++    int ret = 0;
++
++    try {
++      ret = ToolRunner.run(halr, args);
++    } catch (Exception e) {
++      LOG.debug("Exception", e);
++      System.err.println(e.getClass().getSimpleName());
++      final String s = e.getLocalizedMessage();
++      if (s != null) {
++        System.err.println(s);
++      } else {
++        e.printStackTrace(System.err);
++      }
++      System.exit(1);
++    }
++    System.exit(ret);
++  }
++
++  @Override
++  public int run(String[] args) throws Exception {
++    handleOpts(args);
++    // Build the HDFS path with Path.SEPARATOR; File.separator is the local
++    // filesystem separator and need not be "/" on every platform.
++    String remoteAppLogDir = remoteLogDir + Path.SEPARATOR + user
++        + Path.SEPARATOR + suffix + Path.SEPARATOR + appId;
++
++    // Run 'hadoop archives' command in local mode
++    Configuration haConf = new Configuration(getConf());
++    haConf.set("mapreduce.framework.name", "local");
++    HadoopArchives ha = new HadoopArchives(haConf);
++    String[] haArgs = {
++        "-archiveName",
++        appId + ".har",
++        "-p",
++        remoteAppLogDir,
++        "*",
++        workingDir
++    };
++    StringBuilder sb = new StringBuilder("Executing 'hadoop archives'");
++    for (String haArg : haArgs) {
++      sb.append("\n\t").append(haArg);
++    }
++    LOG.info(sb.toString());
++    int harExitCode = ha.run(haArgs);
++    if (harExitCode != 0) {
++      LOG.error("'hadoop archives' exited with code " + harExitCode);
++      return harExitCode;
++    }
++
++    FileSystem fs = null;
++    // Move har file to correct location and delete original logs
++    try {
++      fs = FileSystem.get(conf);
++      LOG.info("Moving har to original location");
++      fs.rename(new Path(workingDir, appId + ".har"),
++          new Path(remoteAppLogDir, appId + ".har"));
++      LOG.info("Deleting original logs");
++      for (FileStatus original : fs.listStatus(new Path(remoteAppLogDir),
++          new PathFilter() {
++            @Override
++            public boolean accept(Path path) {
++              return !path.getName().endsWith(".har");
++            }
++          })) {
++        fs.delete(original.getPath(), false);
++      }
++    } finally {
++      if (fs != null) {
++        fs.close();
++      }
++    }
++
++    return 0;
++  }
++
++  private void handleOpts(String[] args) throws ParseException {
++    Options opts = new Options();
++    Option appIdOpt = new Option(APP_ID_OPTION, true, "Application ID");
++    appIdOpt.setRequired(true);
++    Option userOpt = new Option(USER_OPTION, true, "User");
++    userOpt.setRequired(true);
++    Option workingDirOpt = new Option(WORKING_DIR_OPTION, true,
++        "Working Directory");
++    workingDirOpt.setRequired(true);
++    Option remoteLogDirOpt = new Option(REMOTE_ROOT_LOG_DIR, true,
++        "Remote Root Log Directory");
++    remoteLogDirOpt.setRequired(true);
++    Option suffixOpt = new Option(SUFFIX_OPTION, true, "Suffix");
++    suffixOpt.setRequired(true);
++    opts.addOption(appIdOpt);
++    opts.addOption(userOpt);
++    opts.addOption(workingDirOpt);
++    opts.addOption(remoteLogDirOpt);
++    opts.addOption(suffixOpt);
++
++    CommandLineParser parser = new GnuParser();
++    CommandLine commandLine = parser.parse(opts, args);
++    appId = commandLine.getOptionValue(APP_ID_OPTION);
++    user = commandLine.getOptionValue(USER_OPTION);
++    workingDir = commandLine.getOptionValue(WORKING_DIR_OPTION);
++    remoteLogDir = commandLine.getOptionValue(REMOTE_ROOT_LOG_DIR);
++    suffix = commandLine.getOptionValue(SUFFIX_OPTION);
++  }
++
++  @Override
++  public void setConf(Configuration conf) {
++    if (conf instanceof JobConf) {
++      this.conf = (JobConf) conf;
++    } else {
++      this.conf = new JobConf(conf, HadoopArchiveLogsRunner.class);
++    }
++  }
++
++  @Override
++  public Configuration getConf() {
++    return this.conf;
++  }
++}
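Although this class is launched by the generated script in normal operation, a
minimal sketch (not part of the patch) of exercising it directly, e.g. when
debugging against a test cluster, could look like the following; the argument
values are the same illustrative ones used in the generated-script comment in
HadoopArchiveLogs above, and the driver class is hypothetical:

  // Hypothetical direct invocation; all five options are required. The
  // Configuration must point at the cluster holding the aggregated logs.
  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.tools.HadoopArchiveLogsRunner;
  import org.apache.hadoop.util.ToolRunner;

  public class RunnerDebugDriver {
    public static void main(String[] args) throws Exception {
      String[] runnerArgs = {
          "-appId", "application_1440448768987_0001",
          "-user", "rkanter",
          "-workingDir", "/tmp/logs/archive-logs-work",
          "-remoteRootLogDir", "/tmp/logs",
          "-suffix", "logs"
      };
      JobConf conf = new JobConf(HadoopArchiveLogsRunner.class);
      System.exit(ToolRunner.run(new HadoopArchiveLogsRunner(conf), runnerArgs));
    }
  }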
+diff --git hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
+new file mode 100644
+index 0000000..c8ff201
+--- /dev/null
++++ hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
+@@ -0,0 +1,293 @@
++/**
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.hadoop.tools;
++
++import org.apache.commons.io.IOUtils;
++import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.fs.FSDataOutputStream;
++import org.apache.hadoop.fs.FileSystem;
++import org.apache.hadoop.fs.Path;
++import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
++import org.apache.hadoop.yarn.api.records.ApplicationId;
++import org.apache.hadoop.yarn.api.records.ApplicationReport;
++import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
++import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
++import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
++import org.apache.hadoop.yarn.api.records.Priority;
++import org.apache.hadoop.yarn.api.records.Resource;
++import org.apache.hadoop.yarn.api.records.YarnApplicationState;
++import org.apache.hadoop.yarn.conf.YarnConfiguration;
++import org.apache.hadoop.yarn.server.MiniYARNCluster;
++import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
++import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
++import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
++import org.junit.Assert;
++import org.junit.Test;
++
++import java.io.File;
++import java.io.IOException;
++import java.util.Random;
++
++public class TestHadoopArchiveLogs {
++
++  private static final long CLUSTER_TIMESTAMP = System.currentTimeMillis();
++  private static final int FILE_SIZE_INCREMENT = 4096;
++  private static final byte[] DUMMY_DATA = new byte[FILE_SIZE_INCREMENT];
++  static {
++    new Random().nextBytes(DUMMY_DATA);
++  }
++
++  @Test(timeout = 10000)
++  public void testCheckFiles() throws Exception {
++    Configuration conf = new Configuration();
++    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
++    FileSystem fs = FileSystem.getLocal(conf);
++    Path rootLogDir = new Path("target", "logs");
++    String suffix = "logs";
++    Path logDir = new Path(rootLogDir,
++        new Path(System.getProperty("user.name"), suffix));
++    fs.mkdirs(logDir);
++
++    Assert.assertEquals(0, hal.eligibleApplications.size());
++    ApplicationReport app1 = createAppReport(1);  // no files found
++    ApplicationReport app2 = createAppReport(2);  // too few files
++    Path app2Path = new Path(logDir, app2.getApplicationId().toString());
++    fs.mkdirs(app2Path);
++    createFile(fs, new Path(app2Path, "file1"), 1);
++    hal.minNumLogFiles = 2;
++    ApplicationReport app3 = createAppReport(3);  // too large
++    Path app3Path = new Path(logDir, app3.getApplicationId().toString());
++    fs.mkdirs(app3Path);
++    createFile(fs, new Path(app3Path, "file1"), 2);
++    createFile(fs, new Path(app3Path, "file2"), 5);
++    hal.maxTotalLogsSize = FILE_SIZE_INCREMENT * 6;
++    ApplicationReport app4 = createAppReport(4);  // has har already
++    Path app4Path = new Path(logDir, app4.getApplicationId().toString());
++    fs.mkdirs(app4Path);
++    createFile(fs, new Path(app4Path, app4.getApplicationId() + ".har"), 1);
++    ApplicationReport app5 = createAppReport(5);  // just right
++    Path app5Path = new Path(logDir, app5.getApplicationId().toString());
++    fs.mkdirs(app5Path);
++    createFile(fs, new Path(app5Path, "file1"), 2);
++    createFile(fs, new Path(app5Path, "file2"), 3);
++    hal.eligibleApplications.add(app1);
++    hal.eligibleApplications.add(app2);
++    hal.eligibleApplications.add(app3);
++    hal.eligibleApplications.add(app4);
++    hal.eligibleApplications.add(app5);
++
++    hal.checkFiles(fs, rootLogDir, suffix);
++    Assert.assertEquals(1, hal.eligibleApplications.size());
++    Assert.assertEquals(app5, hal.eligibleApplications.iterator().next());
++  }
++
++  @Test(timeout = 10000)
++  public void testCheckMaxEligible() throws Exception {
++    Configuration conf = new Configuration();
++    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
++    ApplicationReport app1 = createAppReport(1);
++    app1.setFinishTime(CLUSTER_TIMESTAMP - 5);
++    ApplicationReport app2 = createAppReport(2);
++    app2.setFinishTime(CLUSTER_TIMESTAMP - 10);
++    ApplicationReport app3 = createAppReport(3);
++    app3.setFinishTime(CLUSTER_TIMESTAMP + 5);
++    ApplicationReport app4 = createAppReport(4);
++    app4.setFinishTime(CLUSTER_TIMESTAMP + 10);
++    ApplicationReport app5 = createAppReport(5);
++    app5.setFinishTime(CLUSTER_TIMESTAMP);
++    Assert.assertEquals(0, hal.eligibleApplications.size());
++    hal.eligibleApplications.add(app1);
++    hal.eligibleApplications.add(app2);
++    hal.eligibleApplications.add(app3);
++    hal.eligibleApplications.add(app4);
++    hal.eligibleApplications.add(app5);
++    hal.maxEligible = -1;
++    hal.checkMaxEligible();
++    Assert.assertEquals(5, hal.eligibleApplications.size());
++
++    hal.maxEligible = 4;
++    hal.checkMaxEligible();
++    Assert.assertEquals(4, hal.eligibleApplications.size());
++    Assert.assertFalse(hal.eligibleApplications.contains(app4));
++
++    hal.maxEligible = 3;
++    hal.checkMaxEligible();
++    Assert.assertEquals(3, hal.eligibleApplications.size());
++    Assert.assertFalse(hal.eligibleApplications.contains(app3));
++
++    hal.maxEligible = 2;
++    hal.checkMaxEligible();
++    Assert.assertEquals(2, hal.eligibleApplications.size());
++    Assert.assertFalse(hal.eligibleApplications.contains(app5));
++
++    hal.maxEligible = 1;
++    hal.checkMaxEligible();
++    Assert.assertEquals(1, hal.eligibleApplications.size());
++    Assert.assertFalse(hal.eligibleApplications.contains(app1));
++  }
++
++  @Test(timeout = 10000)
++  public void testFindAggregatedApps() throws Exception {
++    MiniYARNCluster yarnCluster = null;
++    try {
++      Configuration conf = new Configuration();
++      conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
++      yarnCluster =
++          new MiniYARNCluster(TestHadoopArchiveLogs.class.getSimpleName(), 1,
++              1, 1, 1);
++      yarnCluster.init(conf);
++      yarnCluster.start();
++      conf = yarnCluster.getConfig();
++
++      RMContext rmContext = yarnCluster.getResourceManager().getRMContext();
++      RMAppImpl app1 = (RMAppImpl)createRMApp(1, conf, rmContext,
++          LogAggregationStatus.DISABLED);
++      RMAppImpl app2 = (RMAppImpl)createRMApp(2, conf, rmContext,
++          LogAggregationStatus.FAILED);
++      RMAppImpl app3 = (RMAppImpl)createRMApp(3, conf, rmContext,
++          LogAggregationStatus.NOT_START);
++      RMAppImpl app4 = (RMAppImpl)createRMApp(4, conf, rmContext,
++          LogAggregationStatus.SUCCEEDED);
++      RMAppImpl app5 = (RMAppImpl)createRMApp(5, conf, rmContext,
++          LogAggregationStatus.RUNNING);
++      RMAppImpl app6 = (RMAppImpl)createRMApp(6, conf, rmContext,
++          LogAggregationStatus.RUNNING_WITH_FAILURE);
++      RMAppImpl app7 = (RMAppImpl)createRMApp(7, conf, rmContext,
++          LogAggregationStatus.TIME_OUT);
++      rmContext.getRMApps().put(app1.getApplicationId(), app1);
++      rmContext.getRMApps().put(app2.getApplicationId(), app2);
++      rmContext.getRMApps().put(app3.getApplicationId(), app3);
++      rmContext.getRMApps().put(app4.getApplicationId(), app4);
++      rmContext.getRMApps().put(app5.getApplicationId(), app5);
++      rmContext.getRMApps().put(app6.getApplicationId(), app6);
++      rmContext.getRMApps().put(app7.getApplicationId(), app7);
++
++      HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
++      Assert.assertEquals(0, hal.eligibleApplications.size());
++      hal.findAggregatedApps();
++      Assert.assertEquals(2, hal.eligibleApplications.size());
++    } finally {
++      if (yarnCluster != null) {
++        yarnCluster.stop();
++      }
++    }
++  }
++
++  @Test(timeout = 10000)
++  public void testGenerateScript() throws Exception {
++    Configuration conf = new Configuration();
++    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
++    ApplicationReport app1 = createAppReport(1);
++    ApplicationReport app2 = createAppReport(2);
++    hal.eligibleApplications.add(app1);
++    hal.eligibleApplications.add(app2);
++
++    File localScript = new File("target", "script.sh");
++    Path workingDir = new Path("/tmp", "working");
++    Path remoteRootLogDir = new Path("/tmp", "logs");
++    String suffix = "logs";
++    localScript.delete();
++    Assert.assertFalse(localScript.exists());
++    hal.generateScript(localScript, workingDir, remoteRootLogDir, suffix);
++    Assert.assertTrue(localScript.exists());
++    String script = IOUtils.toString(localScript.toURI());
++    String[] lines = script.split(System.lineSeparator());
++    Assert.assertEquals(16, lines.length);
++    Assert.assertEquals("#!/bin/bash", lines[0]);
++    Assert.assertEquals("set -e", lines[1]);
++    Assert.assertEquals("set -x", lines[2]);
++    Assert.assertEquals("if [ \"$YARN_SHELL_ID\" == \"1\" ]; then", lines[3]);
++    if (lines[4].contains(app1.getApplicationId().toString())) {
++      Assert.assertEquals("\tappId=\"" + app1.getApplicationId().toString()
++          + "\"", lines[4]);
++      Assert.assertEquals("\tappId=\"" + app2.getApplicationId().toString()
++          + "\"", lines[7]);
++    } else {
++      Assert.assertEquals("\tappId=\"" + app2.getApplicationId().toString()
++          + "\"", lines[4]);
++      Assert.assertEquals("\tappId=\"" + app1.getApplicationId().toString()
++          + "\"", lines[7]);
++    }
++    Assert.assertEquals("\tuser=\"" + System.getProperty("user.name") + "\"",
++        lines[5]);
++    Assert.assertEquals("elif [ \"$YARN_SHELL_ID\" == \"2\" ]; then", lines[6]);
++    Assert.assertEquals("\tuser=\"" + System.getProperty("user.name") + "\"",
++        lines[8]);
++    Assert.assertEquals("else", lines[9]);
++    Assert.assertEquals("\techo \"Unknown Mapping!\"", lines[10]);
++    Assert.assertEquals("\texit 1", lines[11]);
++    Assert.assertEquals("fi", lines[12]);
++    Assert.assertEquals("export HADOOP_CLIENT_OPTS=\"-Xmx1024m\"", lines[13]);
++    Assert.assertTrue(lines[14].startsWith("export HADOOP_CLASSPATH="));
++    Assert.assertEquals("\"$HADOOP_HOME\"/bin/hadoop org.apache.hadoop.tools." +
++        "HadoopArchiveLogsRunner -appId \"$appId\" -user \"$user\" -workingDir "
++        + workingDir.toString() + " -remoteRootLogDir " +
++        remoteRootLogDir.toString() + " -suffix " + suffix, lines[15]);
++  }
++
++  private static ApplicationReport createAppReport(int id) {
++    ApplicationId appId = ApplicationId.newInstance(CLUSTER_TIMESTAMP, id);
++    return ApplicationReport.newInstance(
++        appId,
++        ApplicationAttemptId.newInstance(appId, 1),
++        System.getProperty("user.name"),
++        null, null, null, 0, null, YarnApplicationState.FINISHED, null,
++        null, 0L, 0L, FinalApplicationStatus.SUCCEEDED, null, null, 100f,
++        null, null);
++  }
++
++  private static void createFile(FileSystem fs, Path p, long sizeMultiple)
++      throws IOException {
++    FSDataOutputStream out = null;
++    try {
++      out = fs.create(p);
++      for (int i = 0; i < sizeMultiple; i++) {
++        out.write(DUMMY_DATA);
++      }
++    } finally {
++      if (out != null) {
++        out.close();
++      }
++    }
++  }
++
++  private static RMApp createRMApp(int id, Configuration conf, RMContext rmContext,
++       final LogAggregationStatus aggStatus) {
++    ApplicationId appId = ApplicationId.newInstance(CLUSTER_TIMESTAMP, id);
++    ApplicationSubmissionContext submissionContext =
++        ApplicationSubmissionContext.newInstance(appId, "test", "default",
++            Priority.newInstance(0), null, false, true,
++            2, Resource.newInstance(10, 2), "test");
++    return new RMAppImpl(appId, rmContext, conf, "test",
++        System.getProperty("user.name"), "default", submissionContext,
++        rmContext.getScheduler(),
++        rmContext.getApplicationMasterService(),
++        System.currentTimeMillis(), "test",
++        null, null) {
++      @Override
++      public ApplicationReport createAndGetApplicationReport(
++          String clientUserName, boolean allowAccess) {
++        ApplicationReport report =
++            super.createAndGetApplicationReport(clientUserName, allowAccess);
++        report.setLogAggregationStatus(aggStatus);
++        return report;
++      }
++    };
++  }
++}
+diff --git hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java
+new file mode 100644
+index 0000000..af66f14
+--- /dev/null
++++ hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java
+@@ -0,0 +1,143 @@
++/**
++ * Licensed to the Apache Software Foundation (ASF) under one
++ * or more contributor license agreements.  See the NOTICE file
++ * distributed with this work for additional information
++ * regarding copyright ownership.  The ASF licenses this file
++ * to you under the Apache License, Version 2.0 (the
++ * "License"); you may not use this file except in compliance
++ * with the License.  You may obtain a copy of the License at
++ *
++ *     http://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS,
++ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ * See the License for the specific language governing permissions and
++ * limitations under the License.
++ */
++
++package org.apache.hadoop.tools;
++
++import org.apache.hadoop.conf.Configuration;
++import org.apache.hadoop.fs.FSDataOutputStream;
++import org.apache.hadoop.fs.FileStatus;
++import org.apache.hadoop.fs.FileSystem;
++import org.apache.hadoop.fs.HarFs;
++import org.apache.hadoop.fs.Path;
++import org.apache.hadoop.hdfs.MiniDFSCluster;
++import org.apache.hadoop.util.ToolRunner;
++import org.apache.hadoop.yarn.api.records.ApplicationId;
++import org.apache.hadoop.yarn.conf.YarnConfiguration;
++import org.apache.hadoop.yarn.server.MiniYARNCluster;
++import org.junit.Assert;
++import org.junit.Test;
++
++import java.io.IOException;
++import java.util.Arrays;
++import java.util.Comparator;
++import java.util.Random;
++
++import static org.junit.Assert.assertEquals;
++
++public class TestHadoopArchiveLogsRunner {
++
++  private static final int FILE_SIZE_INCREMENT = 4096;
++  private static final byte[] DUMMY_DATA = new byte[FILE_SIZE_INCREMENT];
++  static {
++    new Random().nextBytes(DUMMY_DATA);
++  }
++
++  @Test(timeout = 30000)
++  public void testHadoopArchiveLogs() throws Exception {
++    MiniYARNCluster yarnCluster = null;
++    MiniDFSCluster dfsCluster = null;
++    FileSystem fs = null;
++    try {
++      Configuration conf = new YarnConfiguration();
++      conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
++      conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
++      yarnCluster =
++          new MiniYARNCluster(TestHadoopArchiveLogsRunner.class.getSimpleName(),
++              1, 2, 1, 1);
++      yarnCluster.init(conf);
++      yarnCluster.start();
++      conf = yarnCluster.getConfig();
++      dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
++
++      ApplicationId app1 =
++          ApplicationId.newInstance(System.currentTimeMillis(), 1);
++      fs = FileSystem.get(conf);
++      Path remoteRootLogDir = new Path(conf.get(
++          YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
++          YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
++      Path workingDir = new Path(remoteRootLogDir, "archive-logs-work");
++      String suffix = "logs";
++      Path logDir = new Path(remoteRootLogDir,
++          new Path(System.getProperty("user.name"), suffix));
++      fs.mkdirs(logDir);
++      Path app1Path = new Path(logDir, app1.toString());
++      fs.mkdirs(app1Path);
++      createFile(fs, new Path(app1Path, "log1"), 3);
++      createFile(fs, new Path(app1Path, "log2"), 4);
++      createFile(fs, new Path(app1Path, "log3"), 2);
++      FileStatus[] app1Files = fs.listStatus(app1Path);
++      Assert.assertEquals(3, app1Files.length);
++
++      String[] args = new String[]{
++          "-appId", app1.toString(),
++          "-user", System.getProperty("user.name"),
++          "-workingDir", workingDir.toString(),
++          "-remoteRootLogDir", remoteRootLogDir.toString(),
++          "-suffix", suffix};
++      final HadoopArchiveLogsRunner halr = new HadoopArchiveLogsRunner(conf);
++      assertEquals(0, ToolRunner.run(halr, args));
++
++      fs = FileSystem.get(conf);
++      app1Files = fs.listStatus(app1Path);
++      Assert.assertEquals(1, app1Files.length);
++      FileStatus harFile = app1Files[0];
++      Assert.assertEquals(app1.toString() + ".har", harFile.getPath().getName());
++      Path harPath = new Path("har:///" + harFile.getPath().toUri().getRawPath());
++      FileStatus[] harLogs = HarFs.get(harPath.toUri(), conf).listStatus(harPath);
++      Assert.assertEquals(3, harLogs.length);
++      Arrays.sort(harLogs, new Comparator<FileStatus>() {
++        @Override
++        public int compare(FileStatus o1, FileStatus o2) {
++          return o1.getPath().getName().compareTo(o2.getPath().getName());
++        }
++      });
++      Assert.assertEquals("log1", harLogs[0].getPath().getName());
++      Assert.assertEquals(3 * FILE_SIZE_INCREMENT, harLogs[0].getLen());
++      Assert.assertEquals("log2", harLogs[1].getPath().getName());
++      Assert.assertEquals(4 * FILE_SIZE_INCREMENT, harLogs[1].getLen());
++      Assert.assertEquals("log3", harLogs[2].getPath().getName());
++      Assert.assertEquals(2 * FILE_SIZE_INCREMENT, harLogs[2].getLen());
++      Assert.assertEquals(0, fs.listStatus(workingDir).length);
++    } finally {
++      if (yarnCluster != null) {
++        yarnCluster.stop();
++      }
++      if (fs != null) {
++        fs.close();
++      }
++      if (dfsCluster != null) {
++        dfsCluster.shutdown();
++      }
++    }
++  }
++
++  private static void createFile(FileSystem fs, Path p, long sizeMultiple)
++      throws IOException {
++    FSDataOutputStream out = null;
++    try {
++      out = fs.create(p);
++      for (int i = 0; i < sizeMultiple; i++) {
++        out.write(DUMMY_DATA);
++      }
++    } finally {
++      if (out != null) {
++        out.close();
++      }
++    }
++  }
++}
+diff --git hadoop-tools/hadoop-tools-dist/pom.xml hadoop-tools/hadoop-tools-dist/pom.xml
+index 540401d..e6c458f 100644
+--- hadoop-tools/hadoop-tools-dist/pom.xml
++++ hadoop-tools/hadoop-tools-dist/pom.xml
+@@ -52,6 +52,11 @@
+     </dependency>
+     <dependency>
+       <groupId>org.apache.hadoop</groupId>
++      <artifactId>hadoop-archive-logs</artifactId>
++      <scope>compile</scope>
++    </dependency>
++    <dependency>
++      <groupId>org.apache.hadoop</groupId>
+       <artifactId>hadoop-rumen</artifactId>
+       <scope>compile</scope>
+     </dependency>
+diff --git hadoop-tools/pom.xml hadoop-tools/pom.xml
+index 5b35f46..0061bf0 100644
+--- hadoop-tools/pom.xml
++++ hadoop-tools/pom.xml
+@@ -34,6 +34,7 @@
+     <module>hadoop-streaming</module>
+     <module>hadoop-distcp</module>
+     <module>hadoop-archives</module>
++    <module>hadoop-archive-logs</module>
+     <module>hadoop-rumen</module>
+     <module>hadoop-gridmix</module>
+     <module>hadoop-datajoin</module>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/119cc75e/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
----------------------------------------------------------------------
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
index fa55703..3f646e6 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
@@ -52,6 +52,13 @@
       </includes>
     </fileSet>
     <fileSet>
+      <directory>../hadoop-archive-logs/target</directory>
+      <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
+      <includes>
+        <include>*-sources.jar</include>
+      </includes>
+    </fileSet>
+    <fileSet>
       <directory>../hadoop-datajoin/target</directory>
       <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
       <includes>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/119cc75e/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 5b5724b..428d37e 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -290,6 +290,9 @@ Release 2.8.0 - UNRELEASED
    MAPREDUCE-6304. Specifying node labels when submitting MR jobs.
    (Naganarasimha G R via wangda)
 
+   MAPREDUCE-6415. Create a tool to combine aggregated logs into HAR files. 
+   (Robert Kanter via kasha)
+
   IMPROVEMENTS
 
     MAPREDUCE-6291. Correct mapred queue usage command.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/119cc75e/hadoop-mapreduce-project/bin/mapred
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index 426af80..2d56a8d 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -20,6 +20,7 @@ MYNAME="${BASH_SOURCE-$0}"
 function hadoop_usage
 {
   hadoop_add_subcommand "archive" "create a hadoop archive"
+  hadoop_add_subcommand "archive-logs" "combine aggregated logs into hadoop archives"
   hadoop_add_subcommand "classpath" "prints the class path needed for running mapreduce subcommands"
   hadoop_add_subcommand "distcp" "copy file or directories recursively"
   hadoop_add_subcommand "historyserver" "run job history servers as a standalone daemon"
@@ -72,6 +73,13 @@ case ${COMMAND} in
     hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
     HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
   ;;
+  archive-logs)
+    CLASS=org.apache.hadoop.tools.HadoopArchiveLogs
+    hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
+    hadoop_add_classpath "${TOOL_PATH}"
+    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+  ;;
   classpath)
     hadoop_do_classpath_subcommand CLASS "$@"
   ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/119cc75e/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 9863475..636e063 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -324,6 +324,11 @@
       </dependency>
       <dependency>
         <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-archive-logs</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-distcp</artifactId>
         <version>${project.version}</version>
       </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/119cc75e/hadoop-tools/hadoop-archive-logs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archive-logs/pom.xml b/hadoop-tools/hadoop-archive-logs/pom.xml
new file mode 100644
index 0000000..2a480a8
--- /dev/null
+++ b/hadoop-tools/hadoop-archive-logs/pom.xml
@@ -0,0 +1,171 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-archive-logs</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <description>Apache Hadoop Archive Logs</description>
+  <name>Apache Hadoop Archive Logs</name>
+  <packaging>jar</packaging>
+
+  <properties>
+    <hadoop.log.dir>${project.build.directory}/log</hadoop.log.dir>
+  </properties>
+
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-core</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-applications-distributedshell</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-tests</artifactId>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-archives</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>commons-cli</groupId>
+      <artifactId>commons-cli</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-client</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>create-log-dir</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <delete dir="${test.build.data}"/>
+                <mkdir dir="${test.build.data}"/>
+                <mkdir dir="${hadoop.log.dir}"/>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <configuration>
+          <archive>
+            <manifest>
+              <mainClass>org.apache.hadoop.tools.HadoopArchiveLogs</mainClass>
+            </manifest>
+          </archive>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/119cc75e/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
new file mode 100644
index 0000000..4778dcb
--- /dev/null
+++ b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
@@ -0,0 +1,403 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
+import org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster;
+import org.apache.hadoop.yarn.applications.distributedshell.Client;
+import org.apache.hadoop.yarn.client.api.YarnClient;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+
+
+/**
+ * This tool moves Aggregated Log files into HAR archives using the
+ * {@link HadoopArchives} tool and the Distributed Shell via the
+ * {@link HadoopArchiveLogsRunner}.
+ */
+public class HadoopArchiveLogs implements Tool {
+  private static final Log LOG = LogFactory.getLog(HadoopArchiveLogs.class);
+
+  private static final String HELP_OPTION = "help";
+  private static final String MAX_ELIGIBLE_APPS_OPTION = "maxEligibleApps";
+  private static final String MIN_NUM_LOG_FILES_OPTION = "minNumberLogFiles";
+  private static final String MAX_TOTAL_LOGS_SIZE_OPTION = "maxTotalLogsSize";
+  private static final String MEMORY_OPTION = "memory";
+
+  private static final int DEFAULT_MAX_ELIGIBLE = -1;
+  private static final int DEFAULT_MIN_NUM_LOG_FILES = 20;
+  private static final long DEFAULT_MAX_TOTAL_LOGS_SIZE = 1024L;
+  private static final long DEFAULT_MEMORY = 1024L;
+
+  @VisibleForTesting
+  int maxEligible = DEFAULT_MAX_ELIGIBLE;
+  @VisibleForTesting
+  int minNumLogFiles = DEFAULT_MIN_NUM_LOG_FILES;
+  @VisibleForTesting
+  long maxTotalLogsSize = DEFAULT_MAX_TOTAL_LOGS_SIZE * 1024L * 1024L;
+  @VisibleForTesting
+  long memory = DEFAULT_MEMORY;
+
+  @VisibleForTesting
+  Set<ApplicationReport> eligibleApplications;
+
+  private JobConf conf;
+
+  public HadoopArchiveLogs(Configuration conf) {
+    setConf(conf);
+    eligibleApplications = new HashSet<>();
+  }
+
+  public static void main(String[] args) {
+    JobConf job = new JobConf(HadoopArchiveLogs.class);
+
+    HadoopArchiveLogs hal = new HadoopArchiveLogs(job);
+    int ret = 0;
+
+    try{
+      ret = ToolRunner.run(hal, args);
+    } catch(Exception e) {
+      LOG.debug("Exception", e);
+      System.err.println(e.getClass().getSimpleName());
+      final String s = e.getLocalizedMessage();
+      if (s != null) {
+        System.err.println(s);
+      } else {
+        e.printStackTrace(System.err);
+      }
+      System.exit(1);
+    }
+    System.exit(ret);
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+    handleOpts(args);
+
+    findAggregatedApps();
+
+    FileSystem fs = null;
+    Path remoteRootLogDir = new Path(conf.get(
+        YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
+        YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
+    String suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(conf);
+    Path workingDir = new Path(remoteRootLogDir, "archive-logs-work");
+    try {
+      fs = FileSystem.get(conf);
+      checkFiles(fs, remoteRootLogDir, suffix);
+
+      // Prepare working directory
+      if (fs.exists(workingDir)) {
+        fs.delete(workingDir, true);
+      }
+      fs.mkdirs(workingDir);
+      fs.setPermission(workingDir,
+          new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));
+    } finally {
+      if (fs != null) {
+        fs.close();
+      }
+    }
+
+    checkMaxEligible();
+
+    if (eligibleApplications.isEmpty()) {
+      LOG.info("No eligible applications to process");
+      System.exit(0);
+    }
+
+    StringBuilder sb =
+        new StringBuilder("Will process the following applications:");
+    for (ApplicationReport report : eligibleApplications) {
+      sb.append("\n\t").append(report.getApplicationId());
+    }
+    LOG.info(sb.toString());
+
+    File localScript = File.createTempFile("hadoop-archive-logs-", ".sh");
+    generateScript(localScript, workingDir, remoteRootLogDir, suffix);
+
+    if (runDistributedShell(localScript)) {
+      return 0;
+    }
+    return -1;
+  }
+
+  private void handleOpts(String[] args) throws ParseException {
+    Options opts = new Options();
+    Option helpOpt = new Option(HELP_OPTION, false, "Prints this message");
+    Option maxEligibleOpt = new Option(MAX_ELIGIBLE_APPS_OPTION, true,
+        "The maximum number of eligible apps to process (default: "
+            + DEFAULT_MAX_ELIGIBLE + " (all))");
+    maxEligibleOpt.setArgName("n");
+    Option minNumLogFilesOpt = new Option(MIN_NUM_LOG_FILES_OPTION, true,
+        "The minimum number of log files required to be eligible (default: "
+            + DEFAULT_MIN_NUM_LOG_FILES + ")");
+    minNumLogFilesOpt.setArgName("n");
+    Option maxTotalLogsSizeOpt = new Option(MAX_TOTAL_LOGS_SIZE_OPTION, true,
+        "The maximum total logs size (in megabytes) required to be eligible" +
+            " (default: " + DEFAULT_MAX_TOTAL_LOGS_SIZE + ")");
+    maxTotalLogsSizeOpt.setArgName("megabytes");
+    Option memoryOpt = new Option(MEMORY_OPTION, true,
+        "The amount of memory (in megabytes) for each container (default: "
+            + DEFAULT_MEMORY + ")");
+    memoryOpt.setArgName("megabytes");
+    opts.addOption(helpOpt);
+    opts.addOption(maxEligibleOpt);
+    opts.addOption(minNumLogFilesOpt);
+    opts.addOption(maxTotalLogsSizeOpt);
+    opts.addOption(memoryOpt);
+
+    try {
+      CommandLineParser parser = new GnuParser();
+      CommandLine commandLine = parser.parse(opts, args);
+      if (commandLine.hasOption(HELP_OPTION)) {
+        HelpFormatter formatter = new HelpFormatter();
+        formatter.printHelp("yarn archive-logs", opts);
+        System.exit(0);
+      }
+      if (commandLine.hasOption(MAX_ELIGIBLE_APPS_OPTION)) {
+        maxEligible = Integer.parseInt(
+            commandLine.getOptionValue(MAX_ELIGIBLE_APPS_OPTION));
+        if (maxEligible == 0) {
+          LOG.info("Setting " + MAX_ELIGIBLE_APPS_OPTION + " to 0 accomplishes "
+              + "nothing. Please either set it to a negative value "
+              + "(default, all) or a more reasonable value.");
+          System.exit(0);
+        }
+      }
+      if (commandLine.hasOption(MIN_NUM_LOG_FILES_OPTION)) {
+        minNumLogFiles = Integer.parseInt(
+            commandLine.getOptionValue(MIN_NUM_LOG_FILES_OPTION));
+      }
+      if (commandLine.hasOption(MAX_TOTAL_LOGS_SIZE_OPTION)) {
+        maxTotalLogsSize = Long.parseLong(
+            commandLine.getOptionValue(MAX_TOTAL_LOGS_SIZE_OPTION));
+        maxTotalLogsSize *= 1024L * 1024L;
+      }
+      if (commandLine.hasOption(MEMORY_OPTION)) {
+        memory = Long.parseLong(commandLine.getOptionValue(MEMORY_OPTION));
+      }
+    } catch (ParseException pe) {
+      HelpFormatter formatter = new HelpFormatter();
+      formatter.printHelp("yarn archive-logs", opts);
+      throw pe;
+    }
+  }
+
+  @VisibleForTesting
+  void findAggregatedApps() throws IOException, YarnException {
+    YarnClient client = YarnClient.createYarnClient();
+    try {
+      client.init(getConf());
+      client.start();
+      List<ApplicationReport> reports = client.getApplications();
+      for (ApplicationReport report : reports) {
+        LogAggregationStatus aggStatus = report.getLogAggregationStatus();
+        if (aggStatus.equals(LogAggregationStatus.SUCCEEDED) ||
+            aggStatus.equals(LogAggregationStatus.FAILED)) {
+          eligibleApplications.add(report);
+        }
+      }
+    } finally {
+      if (client != null) {
+        client.stop();
+      }
+    }
+  }
+
+  @VisibleForTesting
+  void checkFiles(FileSystem fs, Path remoteRootLogDir, String suffix) {
+    for (Iterator<ApplicationReport> reportIt = eligibleApplications.iterator();
+         reportIt.hasNext(); ) {
+      ApplicationReport report = reportIt.next();
+      long totalFileSize = 0L;
+      try {
+        FileStatus[] files = fs.listStatus(
+            LogAggregationUtils.getRemoteAppLogDir(remoteRootLogDir,
+                report.getApplicationId(), report.getUser(), suffix));
+        if (files.length < minNumLogFiles) {
+          reportIt.remove();
+        } else {
+          for (FileStatus file : files) {
+            if (file.getPath().getName().equals(report.getApplicationId()
+                + ".har")) {
+              reportIt.remove();
+              break;
+            }
+            totalFileSize += file.getLen();
+          }
+          if (totalFileSize > maxTotalLogsSize) {
+            reportIt.remove();
+          }
+        }
+      } catch (IOException ioe) {
+        // If the user doesn't have permission or it doesn't exist, then skip it
+        reportIt.remove();
+      }
+    }
+  }
+
+  @VisibleForTesting
+  void checkMaxEligible() {
+    // If we have too many eligible apps, remove the newest ones first
+    if (maxEligible > 0 && eligibleApplications.size() > maxEligible) {
+      List<ApplicationReport> sortedApplications =
+          new ArrayList<ApplicationReport>(eligibleApplications);
+      Collections.sort(sortedApplications, new Comparator<ApplicationReport>() {
+        @Override
+        public int compare(ApplicationReport o1, ApplicationReport o2) {
+          return Long.compare(o1.getFinishTime(), o2.getFinishTime());
+        }
+      });
+      for (int i = maxEligible; i < sortedApplications.size(); i++) {
+        eligibleApplications.remove(sortedApplications.get(i));
+      }
+    }
+  }
+
+  /*
+  The generated script looks like this:
+  #!/bin/bash
+  set -e
+  set -x
+  if [ "$YARN_SHELL_ID" == "1" ]; then
+        appId="application_1440448768987_0001"
+        user="rkanter"
+  elif [ "$YARN_SHELL_ID" == "2" ]; then
+        appId="application_1440448768987_0002"
+        user="rkanter"
+  else
+        echo "Unknown Mapping!"
+        exit 1
+  fi
+  export HADOOP_CLIENT_OPTS="-Xmx1024m"
+  export HADOOP_CLASSPATH=/dist/share/hadoop/tools/lib/hadoop-archive-logs-2.8.0-SNAPSHOT.jar:/dist/share/hadoop/tools/lib/hadoop-archives-2.8.0-SNAPSHOT.jar
+  "$HADOOP_HOME"/bin/hadoop org.apache.hadoop.tools.HadoopArchiveLogsRunner -appId "$appId" -user "$user" -workingDir /tmp/logs/archive-logs-work -remoteRootLogDir /tmp/logs -suffix logs
+   */
+  @VisibleForTesting
+  void generateScript(File localScript, Path workingDir,
+        Path remoteRootLogDir, String suffix) throws IOException {
+    LOG.info("Generating script at: " + localScript.getAbsolutePath());
+    String halrJarPath = HadoopArchiveLogsRunner.class.getProtectionDomain()
+        .getCodeSource().getLocation().getPath();
+    String harJarPath = HadoopArchives.class.getProtectionDomain()
+        .getCodeSource().getLocation().getPath();
+    String classpath = halrJarPath + File.pathSeparator + harJarPath;
+    FileWriter fw = null;
+    try {
+      fw = new FileWriter(localScript);
+      fw.write("#!/bin/bash\nset -e\nset -x\n");
+      int containerCount = 1;
+      for (ApplicationReport report : eligibleApplications) {
+        fw.write("if [ \"$YARN_SHELL_ID\" == \"");
+        fw.write(Integer.toString(containerCount));
+        fw.write("\" ]; then\n\tappId=\"");
+        fw.write(report.getApplicationId().toString());
+        fw.write("\"\n\tuser=\"");
+        fw.write(report.getUser());
+        fw.write("\"\nel");
+        containerCount++;
+      }
+      fw.write("se\n\techo \"Unknown Mapping!\"\n\texit 1\nfi\n");
+      fw.write("export HADOOP_CLIENT_OPTS=\"-Xmx");
+      fw.write(Long.toString(memory));
+      fw.write("m\"\n");
+      fw.write("export HADOOP_CLASSPATH=");
+      fw.write(classpath);
+      fw.write("\n\"$HADOOP_HOME\"/bin/hadoop ");
+      fw.write(HadoopArchiveLogsRunner.class.getName());
+      fw.write(" -appId \"$appId\" -user \"$user\" -workingDir ");
+      fw.write(workingDir.toString());
+      fw.write(" -remoteRootLogDir ");
+      fw.write(remoteRootLogDir.toString());
+      fw.write(" -suffix ");
+      fw.write(suffix);
+      fw.write("\n");
+    } finally {
+      if (fw != null) {
+        fw.close();
+      }
+    }
+  }
+
+  private boolean runDistributedShell(File localScript) throws Exception {
+    String[] dsArgs = {
+        "--appname",
+        "ArchiveLogs",
+        "--jar",
+        ApplicationMaster.class.getProtectionDomain().getCodeSource()
+            .getLocation().getPath(),
+        "--num_containers",
+        Integer.toString(eligibleApplications.size()),
+        "--container_memory",
+        Long.toString(memory),
+        "--shell_script",
+        localScript.getAbsolutePath()
+    };
+    final Client dsClient = new Client(new Configuration(conf));
+    dsClient.init(dsArgs);
+    return dsClient.run();
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    if (conf instanceof JobConf) {
+      this.conf = (JobConf) conf;
+    } else {
+      this.conf = new JobConf(conf, HadoopArchiveLogs.class);
+    }
+  }
+
+  @Override
+  public Configuration getConf() {
+    return this.conf;
+  }
+}
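
To make the control flow above concrete: runDistributedShell() launches one YARN
container per eligible application, and the Distributed Shell sets YARN_SHELL_ID
in each container so the generated script selects exactly one appId/user pair.
Using the sample values from the comment above generateScript(), each container
effectively ends up running:

    export HADOOP_CLIENT_OPTS="-Xmx1024m"
    "$HADOOP_HOME"/bin/hadoop org.apache.hadoop.tools.HadoopArchiveLogsRunner \
        -appId application_1440448768987_0001 -user rkanter \
        -workingDir /tmp/logs/archive-logs-work -remoteRootLogDir /tmp/logs \
        -suffix logs

(a sketch; real runs substitute each application's ID and owner, and the -Xmx
value comes from the memory option parsed earlier).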

http://git-wip-us.apache.org/repos/asf/hadoop/blob/119cc75e/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
new file mode 100644
index 0000000..347e5fb
--- /dev/null
+++ b/hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
@@ -0,0 +1,180 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools;
+
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.File;
+
+/**
+ * This is a child program designed to be used by the {@link HadoopArchiveLogs}
+ * tool via the Distributed Shell.  It's not meant to be run directly.
+ */
+public class HadoopArchiveLogsRunner implements Tool {
+  private static final Log LOG = LogFactory.getLog(HadoopArchiveLogsRunner.class);
+
+  private static final String APP_ID_OPTION = "appId";
+  private static final String USER_OPTION = "user";
+  private static final String WORKING_DIR_OPTION = "workingDir";
+  private static final String REMOTE_ROOT_LOG_DIR = "remoteRootLogDir";
+  private static final String SUFFIX_OPTION = "suffix";
+
+  private String appId;
+  private String user;
+  private String workingDir;
+  private String remoteLogDir;
+  private String suffix;
+
+  private JobConf conf;
+
+  public HadoopArchiveLogsRunner(Configuration conf) {
+    setConf(conf);
+  }
+
+  public static void main(String[] args) {
+    JobConf job = new JobConf(HadoopArchiveLogsRunner.class);
+
+    HadoopArchiveLogsRunner halr = new HadoopArchiveLogsRunner(job);
+    int ret = 0;
+
+    try {
+      ret = ToolRunner.run(halr, args);
+    } catch (Exception e) {
+      LOG.debug("Exception", e);
+      System.err.println(e.getClass().getSimpleName());
+      final String s = e.getLocalizedMessage();
+      if (s != null) {
+        System.err.println(s);
+      } else {
+        e.printStackTrace(System.err);
+      }
+      System.exit(1);
+    }
+    System.exit(ret);
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+    handleOpts(args);
+    String remoteAppLogDir = remoteLogDir + File.separator + user
+        + File.separator + suffix + File.separator + appId;
+
+    // Run 'hadoop archives' command in local mode
+    Configuration haConf = new Configuration(getConf());
+    haConf.set("mapreduce.framework.name", "local");
+    HadoopArchives ha = new HadoopArchives(haConf);
+    String[] haArgs = {
+        "-archiveName",
+        appId + ".har",
+        "-p",
+        remoteAppLogDir,
+        "*",
+        workingDir
+    };
+    StringBuilder sb = new StringBuilder("Executing 'hadoop archives'");
+    for (String haArg : haArgs) {
+      sb.append("\n\t").append(haArg);
+    }
+    LOG.info(sb.toString());
+    ha.run(haArgs);
+
+    FileSystem fs = null;
+    // Move har file to correct location and delete original logs
+    try {
+      fs = FileSystem.get(conf);
+      LOG.info("Moving har to original location");
+      fs.rename(new Path(workingDir, appId + ".har"),
+          new Path(remoteAppLogDir, appId + ".har"));
+      LOG.info("Deleting original logs");
+      for (FileStatus original : fs.listStatus(new Path(remoteAppLogDir),
+          new PathFilter() {
+            @Override
+            public boolean accept(Path path) {
+              return !path.getName().endsWith(".har");
+            }
+          })) {
+        fs.delete(original.getPath(), false);
+      }
+    } finally {
+      if (fs != null) {
+        fs.close();
+      }
+    }
+
+    return 0;
+  }
+
+  private void handleOpts(String[] args) throws ParseException {
+    Options opts = new Options();
+    Option appIdOpt = new Option(APP_ID_OPTION, true, "Application ID");
+    appIdOpt.setRequired(true);
+    Option userOpt = new Option(USER_OPTION, true, "User");
+    userOpt.setRequired(true);
+    Option workingDirOpt = new Option(WORKING_DIR_OPTION, true,
+        "Working Directory");
+    workingDirOpt.setRequired(true);
+    Option remoteLogDirOpt = new Option(REMOTE_ROOT_LOG_DIR, true,
+        "Remote Root Log Directory");
+    remoteLogDirOpt.setRequired(true);
+    Option suffixOpt = new Option(SUFFIX_OPTION, true, "Suffix");
+    suffixOpt.setRequired(true);
+    opts.addOption(appIdOpt);
+    opts.addOption(userOpt);
+    opts.addOption(workingDirOpt);
+    opts.addOption(remoteLogDirOpt);
+    opts.addOption(suffixOpt);
+
+    CommandLineParser parser = new GnuParser();
+    CommandLine commandLine = parser.parse(opts, args);
+    appId = commandLine.getOptionValue(APP_ID_OPTION);
+    user = commandLine.getOptionValue(USER_OPTION);
+    workingDir = commandLine.getOptionValue(WORKING_DIR_OPTION);
+    remoteLogDir = commandLine.getOptionValue(REMOTE_ROOT_LOG_DIR);
+    suffix = commandLine.getOptionValue(SUFFIX_OPTION);
+  }
+
+  @Override
+  public void setConf(Configuration conf) {
+    if (conf instanceof JobConf) {
+      this.conf = (JobConf) conf;
+    } else {
+      this.conf = new JobConf(conf, HadoopArchiveLogsRunner.class);
+    }
+  }
+
+  @Override
+  public Configuration getConf() {
+    return this.conf;
+  }
+}
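
Under the hood the runner drives the stock HadoopArchives tool in local
MapReduce mode, then moves the resulting archive back and deletes the source
files. The first step is roughly equivalent to running (a sketch, reusing the
sample application and default /tmp/logs layout from the previous file's
comment):

    hadoop archive -Dmapreduce.framework.name=local \
        -archiveName application_1440448768987_0001.har \
        -p /tmp/logs/rkanter/logs/application_1440448768987_0001 \
        '*' /tmp/logs/archive-logs-work

followed by renaming the .har into the application's log directory and deleting
every non-.har file there.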


[24/50] [abbrv] hadoop git commit: YARN-4115. Reduce loglevel of ContainerManagementProtocolProxy to Debug (adhoot via rkanter)

Posted by ec...@apache.org.
YARN-4115. Reduce loglevel of ContainerManagementProtocolProxy to Debug (adhoot via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b84fb41b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b84fb41b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b84fb41b

Branch: refs/heads/HADOOP-11890
Commit: b84fb41bb6ca2d69153cf5bd61f88492538ee713
Parents: 15a557f
Author: Robert Kanter <rk...@apache.org>
Authored: Fri Sep 11 11:41:39 2015 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Fri Sep 11 11:46:10 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                       |  3 +++
 .../api/impl/ContainerManagementProtocolProxy.java    | 14 ++++++++++----
 2 files changed, 13 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b84fb41b/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cc833e2..bf753f1 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -818,6 +818,9 @@ Release 2.8.0 - UNRELEASED
     YARN-4106. NodeLabels for NM in distributed mode is not updated even after
     clusterNodelabel addition in RM. (Bibin A Chundatt via wangda)
 
+    YARN-4115. Reduce loglevel of ContainerManagementProtocolProxy to Debug
+    (adhoot via rkanter)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b84fb41b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
index 94ebf0d..b2bce22 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
@@ -106,8 +106,10 @@ public class ContainerManagementProtocolProxy {
     while (proxy != null
         && !proxy.token.getIdentifier().equals(
             nmTokenCache.getToken(containerManagerBindAddr).getIdentifier())) {
-      LOG.info("Refreshing proxy as NMToken got updated for node : "
-          + containerManagerBindAddr);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Refreshing proxy as NMToken got updated for node : "
+            + containerManagerBindAddr);
+      }
       // Token is updated. check if anyone has already tried closing it.
       if (!proxy.scheduledForClose) {
         // try closing the proxy. Here if someone is already using it
@@ -187,7 +189,9 @@ public class ContainerManagementProtocolProxy {
       ContainerManagementProtocolProxyData proxy) {
     proxy.activeCallers--;
     if (proxy.scheduledForClose && proxy.activeCallers < 0) {
-      LOG.info("Closing proxy : " + proxy.containerManagerBindAddr);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Closing proxy : " + proxy.containerManagerBindAddr);
+      }
       cmProxy.remove(proxy.containerManagerBindAddr);
       try {
         rpc.stopProxy(proxy.getContainerManagementProtocol(), conf);
@@ -257,7 +261,9 @@ public class ContainerManagementProtocolProxy {
       
       final InetSocketAddress cmAddr =
           NetUtils.createSocketAddr(containerManagerBindAddr);
-      LOG.info("Opening proxy : " + containerManagerBindAddr);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Opening proxy : " + containerManagerBindAddr);
+      }
       // the user in createRemoteUser in this context has to be ContainerID
       UserGroupInformation user =
           UserGroupInformation.createRemoteUser(containerId
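
Worth noting for reviewers: these messages are built by string concatenation on
a per-proxy hot path (a proxy is opened, refreshed, or closed for each NM), so
the isDebugEnabled() guard avoids the concatenation cost entirely when debug
logging is off, in addition to demoting the level. The pattern in isolation:

    if (LOG.isDebugEnabled()) {
      // The message string is only assembled when debug output is enabled.
      LOG.debug("Opening proxy : " + containerManagerBindAddr);
    }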


[29/50] [abbrv] hadoop git commit: fix trunk/hadoop-common CHANGES.TXT to be the reference across trunk & branch-2

Posted by ec...@apache.org.
fix trunk/hadoop-common CHANGES.TXT to be the reference across trunk & branch-2


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c054412
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c054412
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c054412

Branch: refs/heads/HADOOP-11890
Commit: 8c054412e1ed5fcdd614a6c7712afee1f940f727
Parents: c715650
Author: Steve Loughran <st...@apache.org>
Authored: Sat Sep 12 18:13:54 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Sat Sep 12 18:13:54 2015 +0100

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 23 +++++++++++++-------
 1 file changed, 15 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c054412/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 37c1cc8..d695c53 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -29,6 +29,9 @@ Trunk (Unreleased)
     HADOOP-11698. Remove DistCpV1 and Logalyzer.
     (Brahma Reddy Battula via aajisaka)
 
+    HADOOP-11356. Removed deprecated o.a.h.fs.permission.AccessControlException.
+    (Li Lu via wheat9)
+
   NEW FEATURES
 
     HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via
@@ -503,6 +506,9 @@ Trunk (Unreleased)
 
     HADOOP-12244. recover broken rebase during precommit (aw)
 
+    HADOOP-11942. Add links to SLGUserGuide to site index.
+    (Masatake Iwasaki via xyao)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -605,8 +611,7 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11917. test-patch.sh should work with ${BASEDIR}/patchprocess
     setups (aw)
 
-    HADOOP-11942. Add links to SLGUserGuide to site index.
-    (Masatake Iwasaki via xyao)
+    HADOOP-11925. backport trunk's smart-apply-patch.sh to branch-2 (aw)
 
     HADOOP-11906. test-patch.sh should use 'file' command for patch
     determinism (Sean Busbey via aw)
@@ -742,6 +747,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12318. Expose underlying LDAP exceptions in SaslPlainServer. (Mike
     Yoder via atm)
 
+    HADOOP-9891. CLIMiniCluster instructions fail with MiniYarnCluster
+    ClassNotFoundException (Darrell Taylor via aw)
+
     HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)
 
     HADOOP-12050. Enable MaxInactiveInterval for hadoop http auth token
@@ -808,6 +816,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals
     before deleting (Casey Brotherton via harsh)
 
+    HADOOP-11568. Description on usage of classpath in hadoop command is
+    incomplete. (Archana T via vinayakumarb)
+
     HADOOP-10027. *Compressor_deflateBytesDirect passes instance instead of
     jclass to GetStaticObjectField. (Hui Zheng via cnauroth)
 
@@ -832,8 +843,8 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11859. PseudoAuthenticationHandler fails with httpcomponents v4.4.
     (Eugene Koifman via jitendra)
 
-    HADOOP-11848. Incorrect arguments to sizeof in DomainSocket.c (Malcolm
-    Kavalsky via Colin P. McCabe)
+    HADOOP-11848. Incorrect arguments to sizeof in DomainSocket.c
+    (Malcolm Kavalsky via Colin P. McCabe)
 
     HADOOP-11861. test-patch.sh rewrite addendum patch.
     (Allen Wittenauer via cnauroth)
@@ -1240,9 +1251,6 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11301. [optionally] update jmx cache to drop old metrics
     (Maysam Yabandeh via stack)
 
-    HADOOP-11356. Removed deprecated o.a.h.fs.permission.AccessControlException.
-    (Li Lu via wheat9)
-
     HADOOP-11313. Adding a document about NativeLibraryChecker.
     (Tsuyoshi OZAWA via cnauroth)
 
@@ -5993,7 +6001,6 @@ Release 0.23.1 - 2012-02-17
     HADOOP-7792. Add verifyToken method to AbstractDelegationTokenSecretManager.
     (jitendra)
 
-
   OPTIMIZATIONS
 
   BUG FIXES


[49/50] [abbrv] hadoop git commit: HDFS-8953. DataNode Metrics logging (Contributed by Kanaka Kumar Avvaru)

Posted by ec...@apache.org.
HDFS-8953. DataNode Metrics logging (Contributed by Kanaka Kumar Avvaru)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce69c9b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce69c9b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce69c9b5

Branch: refs/heads/HADOOP-11890
Commit: ce69c9b54c642cfbe789fc661cfc7dcbb07b4ac5
Parents: ae5308f
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Sep 16 00:18:29 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Wed Sep 16 00:18:29 2015 +0530

----------------------------------------------------------------------
 .../src/main/conf/log4j.properties              |  14 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   4 +
 .../hdfs/server/common/MetricsLoggerTask.java   | 174 ++++++++++++++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  50 ++++-
 .../hadoop/hdfs/server/namenode/NameNode.java   | 134 +----------
 .../src/main/resources/hdfs-default.xml         |  12 +
 .../hdfs/server/datanode/DataNodeTestUtils.java |  81 ++++++-
 .../datanode/TestDataNodeMetricsLogger.java     | 224 +++++++++++++++++++
 .../src/test/resources/log4j.properties         |  13 ++
 10 files changed, 575 insertions(+), 133 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce69c9b5/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index c26fed4..299caa8 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -163,6 +163,20 @@ log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
 log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
 
 #
+# DataNode metrics logging.
+# The default is to retain two datanode-metrics.log files up to 64MB each.
+#
+datanode.metrics.logger=INFO,NullAppender
+log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger}
+log4j.additivity.DataNodeMetricsLog=false
+log4j.appender.DNMETRICSRFA=org.apache.log4j.RollingFileAppender
+log4j.appender.DNMETRICSRFA.File=${hadoop.log.dir}/datanode-metrics.log
+log4j.appender.DNMETRICSRFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.DNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+log4j.appender.DNMETRICSRFA.MaxBackupIndex=1
+log4j.appender.DNMETRICSRFA.MaxFileSize=64MB
+
+#
 # mapred audit logging
 #
 mapred.audit.logger=INFO,NullAppender

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce69c9b5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fef8ee5..6da3cff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -918,6 +918,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9008. Balancer#Parameters class could use a builder pattern.
     (Chris Trezzo via mingma)
 
+    HDFS-8953. DataNode Metrics logging (Kanaka Kumar Avvaru via vinayakumarb)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce69c9b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0498450..28ea866 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -364,6 +364,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.namenode.metrics.logger.period.seconds";
   public static final int     DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT =
       600;
+  public static final String DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY =
+      "dfs.datanode.metrics.logger.period.seconds";
+  public static final int DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT =
+      600;
 
   public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = "dfs.balancer.movedWinWidth";
   public static final long    DFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce69c9b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
new file mode 100644
index 0000000..40c048c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java
@@ -0,0 +1,174 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.common;
+
+import java.lang.management.ManagementFactory;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import javax.management.Attribute;
+import javax.management.AttributeList;
+import javax.management.MBeanAttributeInfo;
+import javax.management.MBeanInfo;
+import javax.management.MBeanServer;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AsyncAppender;
+
+/**
+ * MetricsLoggerTask can be used as a utility to dump metrics to a log.
+ */
+public class MetricsLoggerTask implements Runnable {
+
+  public static final Log LOG = LogFactory.getLog(MetricsLoggerTask.class);
+
+  private static ObjectName objectName = null;
+
+  static {
+    try {
+      objectName = new ObjectName("Hadoop:*");
+    } catch (MalformedObjectNameException m) {
+      // This should not occur in practice since we pass
+      // a valid pattern to the constructor above.
+    }
+  }
+
+  private Log metricsLog;
+  private String nodeName;
+  private short maxLogLineLength;
+
+  public MetricsLoggerTask(Log metricsLog, String nodeName,
+      short maxLogLineLength) {
+    this.metricsLog = metricsLog;
+    this.nodeName = nodeName;
+    this.maxLogLineLength = maxLogLineLength;
+  }
+
+  /**
+   * Write metrics to the metrics appender when invoked.
+   */
+  @Override
+  public void run() {
+    // Skip querying metrics if there are no known appenders.
+    if (!metricsLog.isInfoEnabled() || !hasAppenders(metricsLog)
+        || objectName == null) {
+      return;
+    }
+
+    metricsLog.info(" >> Begin " + nodeName + " metrics dump");
+    final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
+
+    // Iterate over each MBean.
+    for (final ObjectName mbeanName : server.queryNames(objectName, null)) {
+      try {
+        MBeanInfo mBeanInfo = server.getMBeanInfo(mbeanName);
+        final String mBeanNameName = MBeans.getMbeanNameName(mbeanName);
+        final Set<String> attributeNames = getFilteredAttributes(mBeanInfo);
+
+        final AttributeList attributes = server.getAttributes(mbeanName,
+            attributeNames.toArray(new String[attributeNames.size()]));
+
+        for (Object o : attributes) {
+          final Attribute attribute = (Attribute) o;
+          final Object value = attribute.getValue();
+          final String valueStr = (value != null) ? value.toString() : "null";
+          // Truncate the value if it is too long
+          metricsLog.info(mBeanNameName + ":" + attribute.getName() + "="
+              + trimLine(valueStr));
+        }
+      } catch (Exception e) {
+        metricsLog.error("Failed to get " + nodeName + " metrics for mbean "
+            + mbeanName.toString(), e);
+      }
+    }
+    metricsLog.info(" << End " + nodeName + " metrics dump");
+  }
+
+  private String trimLine(String valueStr) {
+    if (maxLogLineLength <= 0) {
+      return valueStr;
+    }
+
+    return (valueStr.length() < maxLogLineLength ? valueStr : valueStr
+        .substring(0, maxLogLineLength) + "...");
+  }
+
+  private static boolean hasAppenders(Log logger) {
+    if (!(logger instanceof Log4JLogger)) {
+      // Don't bother trying to determine the presence of appenders.
+      return true;
+    }
+    Log4JLogger log4JLogger = ((Log4JLogger) logger);
+    return log4JLogger.getLogger().getAllAppenders().hasMoreElements();
+  }
+
+  /**
+   * Get the list of attributes for the MBean, filtering out a few attribute
+   * types.
+   */
+  private static Set<String> getFilteredAttributes(MBeanInfo mBeanInfo) {
+    Set<String> attributeNames = new HashSet<>();
+    for (MBeanAttributeInfo attributeInfo : mBeanInfo.getAttributes()) {
+      if (!attributeInfo.getType().equals(
+          "javax.management.openmbean.TabularData")
+          && !attributeInfo.getType().equals(
+              "javax.management.openmbean.CompositeData")
+          && !attributeInfo.getType().equals(
+              "[Ljavax.management.openmbean.CompositeData;")) {
+        attributeNames.add(attributeInfo.getName());
+      }
+    }
+    return attributeNames;
+  }
+
+  /**
+   * Make the metrics logger async and add all pre-existing appenders to the
+   * async appender.
+   */
+  public static void makeMetricsLoggerAsync(Log metricsLog) {
+    if (!(metricsLog instanceof Log4JLogger)) {
+      LOG.warn("Metrics logging will not be async since "
+          + "the logger is not log4j");
+      return;
+    }
+    org.apache.log4j.Logger logger = ((Log4JLogger) metricsLog).getLogger();
+    logger.setAdditivity(false); // Don't pollute actual logs with metrics dump
+
+    @SuppressWarnings("unchecked")
+    List<Appender> appenders = Collections.list(logger.getAllAppenders());
+    // failsafe against trying to async it more than once
+    if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) {
+      AsyncAppender asyncAppender = new AsyncAppender();
+      // change logger to have an async appender containing all the
+      // previously configured appenders
+      for (Appender appender : appenders) {
+        logger.removeAppender(appender);
+        asyncAppender.addAppender(appender);
+      }
+      logger.addAppender(asyncAppender);
+    }
+  }
+}
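
To see how the refactored class is meant to be consumed (the DataNode.java hunk
below does exactly this, and NameNode.java is updated the same way), a minimal
sketch:

    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.hdfs.server.common.MetricsLoggerTask;

    Log metricsLog = LogFactory.getLog("DataNodeMetricsLog");
    // Wrap existing appenders in an AsyncAppender so metrics dumps don't block.
    MetricsLoggerTask.makeMetricsLoggerAsync(metricsLog);
    ScheduledThreadPoolExecutor timer = new ScheduledThreadPoolExecutor(1);
    // Dump every "Hadoop:*" MBean each period; maxLogLineLength of 0 disables
    // value truncation.
    timer.scheduleWithFixedDelay(
        new MetricsLoggerTask(metricsLog, "DataNode", (short) 0),
        600, 600, TimeUnit.SECONDS);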

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce69c9b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index d51d0a5..2aad83d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -46,6 +46,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.BufferedOutputStream;
@@ -85,6 +87,8 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import javax.management.ObjectName;
@@ -148,6 +152,7 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.MetricsLoggerTask;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
@@ -277,6 +282,8 @@ public class DataNode extends ReconfigurableBase
       Collections.unmodifiableList(
           Arrays.asList(DFS_DATANODE_DATA_DIR_KEY));
 
+  public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog");
+
   /**
    * Use {@link NetUtils#createSocketAddr(String)} instead.
    */
@@ -363,6 +370,8 @@ public class DataNode extends ReconfigurableBase
 
   private long[] oobTimeouts; /** timeout value of each OOB type */
 
+  private ScheduledThreadPoolExecutor metricsLoggerTimer;
+
   /**
    * Creates a dummy DataNode for testing purpose.
    */
@@ -382,7 +391,7 @@ public class DataNode extends ReconfigurableBase
 
   /**
    * Create the DataNode given a configuration, an array of dataDirs,
-   * and a namenode proxy
+   * and a namenode proxy.
    */
   DataNode(final Configuration conf,
            final List<StorageLocation> dataDirs,
@@ -1166,6 +1175,7 @@ public class DataNode extends ReconfigurableBase
     saslClient = new SaslDataTransferClient(dnConf.conf, 
         dnConf.saslPropsResolver, dnConf.trustedChannelResolver);
     saslServer = new SaslDataTransferServer(dnConf, blockPoolTokenSecretManager);
+    startMetricsLogger(conf);
   }
 
   /**
@@ -1649,6 +1659,7 @@ public class DataNode extends ReconfigurableBase
    * Otherwise, deadlock might occur.
    */
   public void shutdown() {
+    stopMetricsLogger();
     if (plugins != null) {
       for (ServicePlugin p : plugins) {
         try {
@@ -3276,4 +3287,41 @@ public class DataNode extends ReconfigurableBase
 
     return oobTimeouts[status.getNumber() - Status.OOB_RESTART_VALUE];
   }
+
+  /**
+   * Start a timer to periodically write DataNode metrics to the log file. This
+   * behavior can be disabled by configuration.
+   *
+   * @param metricConf configuration used to read the logging period from
+   */
+  protected void startMetricsLogger(Configuration metricConf) {
+    long metricsLoggerPeriodSec = metricConf.getInt(
+        DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY,
+        DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT);
+
+    if (metricsLoggerPeriodSec <= 0) {
+      return;
+    }
+
+    MetricsLoggerTask.makeMetricsLoggerAsync(METRICS_LOG);
+
+    // Schedule the periodic logging.
+    metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
+    metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
+    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(METRICS_LOG,
+        "DataNode", (short) 0), metricsLoggerPeriodSec, metricsLoggerPeriodSec,
+        TimeUnit.SECONDS);
+  }
+
+  protected void stopMetricsLogger() {
+    if (metricsLoggerTimer != null) {
+      metricsLoggerTimer.shutdown();
+      metricsLoggerTimer = null;
+    }
+  }
+
+  @VisibleForTesting
+  ScheduledThreadPoolExecutor getMetricsLoggerTimer() {
+    return metricsLoggerTimer;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce69c9b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 683112b..df25d59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -23,7 +23,6 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -45,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.MetricsLoggerTask;
 import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
@@ -78,32 +78,20 @@ import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.JvmPauseMonitor;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.log4j.Appender;
-import org.apache.log4j.AsyncAppender;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.management.Attribute;
-import javax.management.AttributeList;
-import javax.management.MBeanAttributeInfo;
-import javax.management.MBeanInfo;
-import javax.management.MBeanServer;
-import javax.management.MalformedObjectNameException;
 import javax.management.ObjectName;
 
 import java.io.IOException;
 import java.io.PrintStream;
-import java.lang.management.ManagementFactory;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Set;
 import java.util.concurrent.ScheduledThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -711,46 +699,19 @@ public class NameNode implements NameNodeStatusMXBean {
       return;
     }
 
-    makeMetricsLoggerAsync();
+    MetricsLoggerTask.makeMetricsLoggerAsync(MetricsLog);
 
     // Schedule the periodic logging.
     metricsLoggerTimer = new ScheduledThreadPoolExecutor(1);
     metricsLoggerTimer.setExecuteExistingDelayedTasksAfterShutdownPolicy(
         false);
-    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(),
+    metricsLoggerTimer.scheduleWithFixedDelay(new MetricsLoggerTask(MetricsLog,
+        "NameNode", (short) 128),
         metricsLoggerPeriodSec,
         metricsLoggerPeriodSec,
         TimeUnit.SECONDS);
   }
 
-  /**
-   * Make the metrics logger async and add all pre-existing appenders
-   * to the async appender.
-   */
-  private static void makeMetricsLoggerAsync() {
-    if (!(MetricsLog instanceof Log4JLogger)) {
-      LOG.warn(
-          "Metrics logging will not be async since the logger is not log4j");
-      return;
-    }
-    org.apache.log4j.Logger logger = ((Log4JLogger) MetricsLog).getLogger();
-    logger.setAdditivity(false);  // Don't pollute NN logs with metrics dump
-
-    @SuppressWarnings("unchecked")
-    List<Appender> appenders = Collections.list(logger.getAllAppenders());
-    // failsafe against trying to async it more than once
-    if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) {
-      AsyncAppender asyncAppender = new AsyncAppender();
-      // change logger to have an async appender containing all the
-      // previously configured appenders
-      for (Appender appender : appenders) {
-        logger.removeAppender(appender);
-        asyncAppender.addAppender(appender);
-      }
-      logger.addAppender(asyncAppender);
-    }
-  }
-
   protected void stopMetricsLogger() {
     if (metricsLoggerTimer != null) {
       metricsLoggerTimer.shutdown();
@@ -1925,91 +1886,4 @@ public class NameNode implements NameNodeStatusMXBean {
       break;
     }
   }
-
-  private static class MetricsLoggerTask implements Runnable {
-    private static final int MAX_LOGGED_VALUE_LEN = 128;
-    private static ObjectName OBJECT_NAME = null;
-
-    static {
-      try {
-        OBJECT_NAME = new ObjectName("Hadoop:*");
-      } catch (MalformedObjectNameException m) {
-        // This should not occur in practice since we pass
-        // a valid pattern to the constructor above.
-      }
-    }
-
-    /**
-     * Write NameNode metrics to the metrics appender when invoked.
-     */
-    @Override
-    public void run() {
-      // Skip querying metrics if there are no known appenders.
-      if (!MetricsLog.isInfoEnabled() ||
-          !hasAppenders(MetricsLog) ||
-          OBJECT_NAME == null) {
-        return;
-      }
-
-      MetricsLog.info(" >> Begin NameNode metrics dump");
-      final MBeanServer server = ManagementFactory.getPlatformMBeanServer();
-
-      // Iterate over each MBean.
-      for (final ObjectName mbeanName : server.queryNames(OBJECT_NAME, null)) {
-        try {
-          MBeanInfo mBeanInfo = server.getMBeanInfo(mbeanName);
-          final String mBeanNameName = MBeans.getMbeanNameName(mbeanName);
-          final Set<String> attributeNames = getFilteredAttributes(mBeanInfo);
-
-          final AttributeList attributes =
-              server.getAttributes(mbeanName,
-                  attributeNames.toArray(new String[attributeNames.size()]));
-
-          for (Object o : attributes) {
-            final Attribute attribute = (Attribute) o;
-            final Object value = attribute.getValue();
-            final String valueStr =
-                (value != null) ? value.toString() : "null";
-            // Truncate the value if it is too long
-            MetricsLog.info(mBeanNameName + ":" + attribute.getName() + "=" +
-                (valueStr.length() < MAX_LOGGED_VALUE_LEN ? valueStr :
-                    valueStr.substring(0, MAX_LOGGED_VALUE_LEN) + "..."));
-          }
-        } catch (Exception e) {
-          MetricsLog.error("Failed to get NameNode metrics for mbean " +
-              mbeanName.toString(), e);
-        }
-      }
-      MetricsLog.info(" << End NameNode metrics dump");
-    }
-
-    private static boolean hasAppenders(Log logger) {
-      if (!(logger instanceof Log4JLogger)) {
-        // Don't bother trying to determine the presence of appenders.
-        return true;
-      }
-      Log4JLogger log4JLogger = ((Log4JLogger) MetricsLog);
-      return log4JLogger.getLogger().getAllAppenders().hasMoreElements();
-    }
-
-    /**
-     * Get the list of attributes for the MBean, filtering out a few
-     * attribute types.
-     */
-    private static Set<String> getFilteredAttributes(
-        MBeanInfo mBeanInfo) {
-      Set<String> attributeNames = new HashSet<>();
-      for (MBeanAttributeInfo attributeInfo : mBeanInfo.getAttributes()) {
-        if (!attributeInfo.getType().equals(
-                "javax.management.openmbean.TabularData") &&
-            !attributeInfo.getType().equals(
-                "javax.management.openmbean.CompositeData") &&
-            !attributeInfo.getType().equals(
-                "[Ljavax.management.openmbean.CompositeData;")) {
-          attributeNames.add(attributeInfo.getName());
-        }
-      }
-      return attributeNames;
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce69c9b5/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index e9b62c7..072b7a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1600,6 +1600,18 @@
 </property>
 
 <property>
+  <name>dfs.datanode.metrics.logger.period.seconds</name>
+  <value>600</value>
+  <description>
+    This setting controls how frequently the DataNode logs its metrics. The
+    logging configuration must also define one or more appenders for
+    DataNodeMetricsLog for the metrics to be logged.
+    DataNode metrics logging is disabled if this value is set to zero or
+    a negative value.
+  </description>
+</property>
+
+<property>
   <name>dfs.metrics.percentiles.intervals</name>
   <value></value>
   <description>
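
For operators: the new period defaults to 600 seconds, so on a stock install the
only remaining step to start collecting the log is routing the
DataNodeMetricsLog logger to the rolling file appender added to log4j.properties
earlier in this commit (a sketch; DNMETRICSRFA is the appender name shipped
above):

    # log4j.properties
    datanode.metrics.logger=INFO,DNMETRICSRFA

With that set, datanode-metrics.log rolls at 64MB with one backup, mirroring the
existing NameNode metrics log setup.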

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce69c9b5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index b4071de..8b43787 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -19,21 +19,38 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 import java.io.File;
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
+import org.junit.Assert;
 import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 import com.google.common.base.Preconditions;
 
@@ -44,7 +61,10 @@ import com.google.common.base.Preconditions;
 public class DataNodeTestUtils {
   private static final String DIR_FAILURE_SUFFIX = ".origin";
 
-  public static DatanodeRegistration 
+  public final static String TEST_CLUSTER_ID = "testClusterID";
+  public final static String TEST_POOL_ID = "BP-TEST";
+
+  public static DatanodeRegistration
   getDNRegistrationForBP(DataNode dn, String bpid) throws IOException {
     return dn.getDNRegistrationForBP(bpid);
   }
@@ -231,4 +251,61 @@ public class DataNodeTestUtils {
       dn.getDirectoryScanner().reconcile();
     }
   }
+
+  /**
+   * Starts an instance of DataNode with the NN mocked. The caller should
+   * ensure to shut down the DN afterwards.
+   *
+   * @throws IOException
+   */
+  public static DataNode startDNWithMockNN(Configuration conf,
+      final InetSocketAddress nnSocketAddr, final String dnDataDir)
+      throws IOException {
+
+    FileSystem.setDefaultUri(conf, "hdfs://" + nnSocketAddr.getHostName() + ":"
+        + nnSocketAddr.getPort());
+    ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
+    File dataDir = new File(dnDataDir);
+    FileUtil.fullyDelete(dataDir);
+    dataDir.mkdirs();
+    StorageLocation location = StorageLocation.parse(dataDir.getPath());
+    locations.add(location);
+
+    final DatanodeProtocolClientSideTranslatorPB namenode =
+        mock(DatanodeProtocolClientSideTranslatorPB.class);
+
+    Mockito.doAnswer(new Answer<DatanodeRegistration>() {
+      @Override
+      public DatanodeRegistration answer(InvocationOnMock invocation)
+          throws Throwable {
+        return (DatanodeRegistration) invocation.getArguments()[0];
+      }
+    }).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class));
+
+    when(namenode.versionRequest()).thenReturn(
+        new NamespaceInfo(1, TEST_CLUSTER_ID, TEST_POOL_ID, 1L));
+
+    when(
+        namenode.sendHeartbeat(Mockito.any(DatanodeRegistration.class),
+            Mockito.any(StorageReport[].class), Mockito.anyLong(),
+            Mockito.anyLong(), Mockito.anyInt(), Mockito.anyInt(),
+            Mockito.anyInt(), Mockito.any(VolumeFailureSummary.class),
+            Mockito.anyBoolean())).thenReturn(
+        new HeartbeatResponse(new DatanodeCommand[0], new NNHAStatusHeartbeat(
+            HAServiceState.ACTIVE, 1), null, ThreadLocalRandom.current()
+            .nextLong() | 1L));
+
+    DataNode dn = new DataNode(conf, locations, null) {
+      @Override
+      DatanodeProtocolClientSideTranslatorPB connectToNN(
+          InetSocketAddress nnAddr) throws IOException {
+        Assert.assertEquals(nnSocketAddr, nnAddr);
+        return namenode;
+      }
+    };
+    // Trigger a heartbeat so that it acknowledges the NN as active.
+    dn.getAllBpOs().get(0).triggerHeartbeatForTests();
+
+    return dn;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce69c9b5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
new file mode 100644
index 0000000..1177a45
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java
@@ -0,0 +1,224 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.TimeoutException;
+import java.util.regex.Pattern;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.AsyncAppender;
+import org.apache.log4j.spi.LoggingEvent;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Test periodic logging of DataNode metrics.
+ */
+public class TestDataNodeMetricsLogger {
+  static final Log LOG = LogFactory.getLog(TestDataNodeMetricsLogger.class);
+
+  private static final String DATA_DIR = MiniDFSCluster.getBaseDirectory()
+      + "data";
+
+  private final static InetSocketAddress NN_ADDR = new InetSocketAddress(
+      "localhost", 5020);
+
+  private DataNode dn;
+
+  static final Random random = new Random(System.currentTimeMillis());
+
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  /**
+   * Starts an instance of DataNode
+   *
+   * @throws IOException
+   */
+  public void startDNForTest(boolean enableMetricsLogging) throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, DATA_DIR);
+    conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
+    conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
+    conf.setInt(DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY,
+        enableMetricsLogging ? 1 : 0); // If enabled, log early and log often
+
+    dn = DataNodeTestUtils.startDNWithMockNN(conf, NN_ADDR, DATA_DIR);
+  }
+
+  /**
+   * Cleans up resources and shuts down the DataNode instance
+   *
+   * @throws IOException
+   *           if an error occurred
+   */
+  @After
+  public void tearDown() throws IOException {
+    if (dn != null) {
+      try {
+        dn.shutdown();
+      } catch (Exception e) {
+        LOG.error("Cannot close: ", e);
+      } finally {
+        File dir = new File(DATA_DIR);
+        if (dir.exists())
+          Assert.assertTrue("Cannot delete data-node dirs",
+              FileUtil.fullyDelete(dir));
+      }
+    }
+    dn = null;
+  }
+
+  @Test
+  public void testMetricsLoggerOnByDefault() throws IOException {
+    startDNForTest(true);
+    assertNotNull(dn);
+    assertNotNull(dn.getMetricsLoggerTimer());
+  }
+
+  @Test
+  public void testDisableMetricsLogger() throws IOException {
+    startDNForTest(false);
+    assertNotNull(dn);
+    assertNull(dn.getMetricsLoggerTimer());
+  }
+
+  @Test
+  public void testMetricsLoggerIsAsync() throws IOException {
+    startDNForTest(true);
+    assertNotNull(dn);
+    org.apache.log4j.Logger logger = ((Log4JLogger) DataNode.METRICS_LOG)
+        .getLogger();
+    @SuppressWarnings("unchecked")
+    List<Appender> appenders = Collections.list(logger.getAllAppenders());
+    assertTrue(appenders.get(0) instanceof AsyncAppender);
+  }
+
+  /**
+   * Publish a fake metric under the "Hadoop:" domain and ensure it is logged by
+   * the metrics logger.
+   */
+  @Test
+  public void testMetricsLogOutput() throws IOException, InterruptedException,
+      TimeoutException {
+    TestFakeMetric metricsProvider = new TestFakeMetric();
+    MBeans.register(this.getClass().getSimpleName(), "DummyMetrics",
+        metricsProvider);
+    startDNForTest(true);
+    assertNotNull(dn);
+    final PatternMatchingAppender appender = new PatternMatchingAppender(
+        "^.*FakeMetric.*$");
+    addAppender(DataNode.METRICS_LOG, appender);
+
+    // Ensure that the supplied pattern was matched.
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return appender.isMatched();
+      }
+    }, 1000, 60000);
+
+    dn.shutdown();
+  }
+
+  private void addAppender(Log log, Appender appender) {
+    org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger();
+    @SuppressWarnings("unchecked")
+    List<Appender> appenders = Collections.list(logger.getAllAppenders());
+    ((AsyncAppender) appenders.get(0)).addAppender(appender);
+  }
+
+  public interface TestFakeMetricMXBean {
+    int getFakeMetric();
+  }
+
+  /**
+   * MBean for testing
+   */
+  public static class TestFakeMetric implements TestFakeMetricMXBean {
+    @Override
+    public int getFakeMetric() {
+      return 0;
+    }
+  }
+
+  /**
+   * An appender that matches logged messages against the given regular
+   * expression.
+   */
+  public static class PatternMatchingAppender extends AppenderSkeleton {
+    private final Pattern pattern;
+    private volatile boolean matched;
+
+    public PatternMatchingAppender(String pattern) {
+      this.pattern = Pattern.compile(pattern);
+      this.matched = false;
+    }
+
+    public boolean isMatched() {
+      return matched;
+    }
+
+    @Override
+    protected void append(LoggingEvent event) {
+      if (pattern.matcher(event.getMessage().toString()).matches()) {
+        matched = true;
+      }
+    }
+
+    @Override
+    public void close() {
+    }
+
+    @Override
+    public boolean requiresLayout() {
+      return false;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce69c9b5/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
index 1dd459f..7378846 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/log4j.properties
@@ -34,3 +34,16 @@ log4j.appender.NNMETRICSRFA.layout=org.apache.log4j.PatternLayout
 log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
 log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
 log4j.appender.NNMETRICSRFA.MaxFileSize=64MB
+
+#
+# DataNode metrics logging.
+# The default is to retain two datanode-metrics.log files up to 64MB each.
+#
+log4j.logger.DataNodeMetricsLog=INFO,DNMETRICSRFA
+log4j.additivity.DataNodeMetricsLog=false
+log4j.appender.DNMETRICSRFA=org.apache.log4j.RollingFileAppender
+log4j.appender.DNMETRICSRFA.File=${hadoop.log.dir}/datanode-metrics.log
+log4j.appender.DNMETRICSRFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.DNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+log4j.appender.DNMETRICSRFA.MaxBackupIndex=1
+log4j.appender.DNMETRICSRFA.MaxFileSize=64MB
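
A minimal sketch of the wiring implied by the configuration above and by the
testMetricsLoggerIsAsync assertion: the daemon looks up the named
"DataNodeMetricsLog" logger and re-homes its appenders behind an AsyncAppender
so periodic metrics logging never blocks on disk I/O. The class and method
names here are illustrative assumptions, not the actual DataNode code.

import java.util.Collections;
import java.util.List;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.Logger;

public class MetricsLogAsyncSetup {
  @SuppressWarnings("unchecked")
  public static void makeMetricsLoggerAsync(String loggerName) {
    Logger logger = Logger.getLogger(loggerName); // e.g. "DataNodeMetricsLog"
    logger.setAdditivity(false); // keep metrics out of the parent log
    AsyncAppender asyncAppender = new AsyncAppender();
    // Move every synchronous appender behind the async one.
    List<Appender> appenders = Collections.list(logger.getAllAppenders());
    for (Appender a : appenders) {
      logger.removeAppender(a);
      asyncAppender.addAppender(a);
    }
    logger.addAppender(asyncAppender);
  }
}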


[35/50] [abbrv] hadoop git commit: YARN-3697. FairScheduler: ContinuousSchedulingThread can fail to shutdown. (Zhihai Xu via kasha)

Posted by ec...@apache.org.
YARN-3697. FairScheduler: ContinuousSchedulingThread can fail to shutdown. (Zhihai Xu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/332b520a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/332b520a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/332b520a

Branch: refs/heads/HADOOP-11890
Commit: 332b520a480994b7bd56c135f7941aad30b05e9c
Parents: 81df7b5
Author: Karthik Kambatla <ka...@apache.org>
Authored: Sun Sep 13 18:07:43 2015 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Sun Sep 13 18:07:43 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../hadoop/yarn/event/TestAsyncDispatcher.java  |  2 ++
 .../scheduler/fair/FairScheduler.java           |  7 +++++
 .../scheduler/fair/TestFairScheduler.java       | 31 ++++++++++++++++++++
 4 files changed, 43 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/332b520a/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4a3a666..e4255c0 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -900,6 +900,9 @@ Release 2.7.2 - UNRELEASED
     YARN-4136. LinuxContainerExecutor loses info when forwarding
     ResourceHandlerException. (Bibin A Chundatt via vvasudev)
 
+    YARN-3697. FairScheduler: ContinuousSchedulingThread can fail to shutdown.
+    (Zhihai Xu via kasha)
+
 
 Release 2.7.1 - 2015-07-06
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/332b520a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
index ba0deff..018096b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/event/TestAsyncDispatcher.java
@@ -50,7 +50,9 @@ public class TestAsyncDispatcher {
     disp.waitForEventThreadToWait();
     try {
       disp.getEventHandler().handle(event);
+      Assert.fail("Expected YarnRuntimeException");
     } catch (YarnRuntimeException e) {
+      Assert.assertTrue(e.getCause() instanceof InterruptedException);
     }
     // Queue should be empty and dispatcher should not hang on close
     Assert.assertTrue("Event Queue should have been empty",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/332b520a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 5243fb3..3a39799 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1043,6 +1043,13 @@ public class FairScheduler extends
       } catch (Throwable ex) {
         LOG.error("Error while attempting scheduling for node " + node +
             ": " + ex.toString(), ex);
+        if ((ex instanceof YarnRuntimeException) &&
+            (ex.getCause() instanceof InterruptedException)) {
+          // AsyncDispatcher translates InterruptedException to
+          // YarnRuntimeException with cause InterruptedException.
+          // Rethrow the InterruptedException so the schedulingThread stops.
+          throw (InterruptedException)ex.getCause();
+        }
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/332b520a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index c352cc9..a02cf18 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -27,7 +27,10 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Matchers.isA;
+import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
 import java.io.File;
@@ -4320,6 +4323,34 @@ public class TestFairScheduler extends FairSchedulerTestBase {
   }
 
   @Test
+  public void testContinuousSchedulingInterruptedException()
+      throws Exception {
+    scheduler.init(conf);
+    scheduler.start();
+    FairScheduler spyScheduler = spy(scheduler);
+    Assert.assertTrue("Continuous scheduling should be disabled.",
+        !spyScheduler.isContinuousSchedulingEnabled());
+    // Add one nodes
+    RMNode node1 =
+        MockNodes.newNodeInfo(1, Resources.createResource(8 * 1024, 8), 1,
+            "127.0.0.1");
+    NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
+    spyScheduler.handle(nodeEvent1);
+    Assert.assertEquals("We should have one alive node.",
+        1, spyScheduler.getNumClusterNodes());
+    InterruptedException ie = new InterruptedException();
+    doThrow(new YarnRuntimeException(ie)).when(spyScheduler).
+        attemptScheduling(isA(FSSchedulerNode.class));
+    // Invoke the continuous scheduling once
+    try {
+      spyScheduler.continuousSchedulingAttempt();
+      fail("Expected InterruptedException to stop schedulingThread");
+    } catch (InterruptedException e) {
+      Assert.assertEquals(ie, e);
+    }
+  }
+
+  @Test
   public void testSchedulingOnRemovedNode() throws Exception {
     // Disable continuous scheduling, will invoke continuous scheduling manually
     scheduler.init(conf);
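
The FairScheduler hunk above applies a general pattern worth spelling out: a
long-running worker that swallows errors must still let a wrapped
InterruptedException escape, otherwise Thread.interrupt() during shutdown is
silently eaten and the thread never stops. A self-contained sketch with
assumed names (this is not YARN code):

public class InterruptAwareWorker implements Runnable {
  @Override
  public void run() {
    try {
      while (!Thread.currentThread().isInterrupted()) {
        try {
          doSchedulingPass(); // hypothetical unit of work
        } catch (Throwable t) {
          if (t instanceof RuntimeException
              && t.getCause() instanceof InterruptedException) {
            // Propagate so the loop (and thread) terminates.
            throw (InterruptedException) t.getCause();
          }
          // Any other failure is logged and the loop continues.
          System.err.println("Scheduling pass failed: " + t);
        }
        Thread.sleep(100); // hypothetical pacing between passes
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore the flag and exit
    }
  }

  private void doSchedulingPass() {
    // placeholder for real work that may throw a wrapped InterruptedException
  }
}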


[42/50] [abbrv] hadoop git commit: HDFS-9065. Include commas on # of files, blocks, total filesystem objects in NN Web UI. Contributed by Daniel Templeton.

Posted by ec...@apache.org.
HDFS-9065. Include commas on # of files, blocks, total filesystem objects in NN Web UI. Contributed by Daniel Templeton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d777757d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d777757d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d777757d

Branch: refs/heads/HADOOP-11890
Commit: d777757d21c15942275bff6bb98876637950d73f
Parents: 76957a4
Author: Haohui Mai <wh...@apache.org>
Authored: Mon Sep 14 20:26:56 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Mon Sep 14 20:26:56 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                      | 3 +++
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html             | 2 +-
 .../hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js               | 3 +++
 .../hadoop-hdfs/src/main/webapps/static/dfs-dust.js              | 4 ++++
 4 files changed, 11 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d777757d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 35dcd80..f0bc026 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -912,6 +912,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9010. Replace NameNode.DEFAULT_PORT with HdfsClientConfigKeys.
     DFS_NAMENODE_RPC_PORT_DEFAULT config key. (Mingliang Liu via wheat9)
 
+    HDFS-9065. Include commas on # of files, blocks, total filesystem objects
+    in NN Web UI. (Daniel Templeton via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d777757d/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
index 36f8bfe..ad3ac0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
@@ -145,7 +145,7 @@
 
 <p>
   {#fs}
-  {FilesTotal} files and directories, {BlocksTotal} blocks = {@math key="{FilesTotal}" method="add" operand="{BlocksTotal}"/} total filesystem object(s).
+  {FilesTotal|fmt_number} files and directories, {BlocksTotal|fmt_number} blocks = {ObjectsTotal|fmt_number} total filesystem object(s).
   {#helper_fs_max_objects/}
   {/fs}
 </p>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d777757d/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
index 9bc1b5d..cc3afcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
@@ -103,6 +103,9 @@
           b.capacityUsedPercentage = b.capacityUsed * 100.0 / b.capacityTotal;
           b.capacityRemainingPercentage = b.capacityRemaining * 100.0 / b.capacityTotal;
         }
+
+        data.fs.ObjectsTotal = data.fs.FilesTotal + data.fs.BlocksTotal;
+
         render();
       }),
       function (url, jqxhr, text, err) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d777757d/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js
index 466e058..1f37d21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dfs-dust.js
@@ -92,6 +92,10 @@
 
     'helper_to_acl_bit': function (v) {
       return v ? '+' : "";
+    },
+
+    'fmt_number': function (v) {
+      return v.toLocaleString();
     }
   };
   $.extend(dust.filters, filters);
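
For a concrete picture of what the new fmt_number filter produces via the
browser's Number.prototype.toLocaleString(), here is an equivalent JDK
illustration; the counts are made up.

import java.text.NumberFormat;
import java.util.Locale;

public class FmtNumberExample {
  public static void main(String[] args) {
    long filesTotal = 1234567;   // hypothetical FilesTotal
    long blocksTotal = 890123;   // hypothetical BlocksTotal
    NumberFormat fmt = NumberFormat.getNumberInstance(Locale.US);
    // Prints: 1,234,567 files and directories, 890,123 blocks = 2,124,690
    // total filesystem object(s).
    System.out.println(fmt.format(filesTotal) + " files and directories, "
        + fmt.format(blocksTotal) + " blocks = "
        + fmt.format(filesTotal + blocksTotal)
        + " total filesystem object(s).");
  }
}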


[40/50] [abbrv] hadoop git commit: HDFS-8829. Make SO_RCVBUF and SO_SNDBUF size configurable for DataTransferProtocol sockets and allow configuring auto-tuning (He Tianyi via Colin P. McCabe)

Posted by ec...@apache.org.
HDFS-8829. Make SO_RCVBUF and SO_SNDBUF size configurable for DataTransferProtocol sockets and allow configuring auto-tuning (He Tianyi via Colin P. McCabe)

Change-Id: I77dc71aaf9e14ef743f2a2cbebeec04a4f628c78


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b5cf535
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b5cf535
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b5cf535

Branch: refs/heads/HADOOP-11890
Commit: 7b5cf5352efedc7d7ebdbb6b58f1b9a688812e75
Parents: e2a0270
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Mon Sep 14 15:56:04 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Mon Sep 14 16:02:10 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  4 ++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 16 ++++-
 .../hadoop/hdfs/net/DomainPeerServer.java       |  5 ++
 .../org/apache/hadoop/hdfs/net/PeerServer.java  |  9 ++-
 .../apache/hadoop/hdfs/net/TcpPeerServer.java   |  5 ++
 .../hadoop/hdfs/server/datanode/DNConf.java     | 22 +++++-
 .../hadoop/hdfs/server/datanode/DataNode.java   | 13 +++-
 .../hdfs/server/datanode/DataXceiver.java       |  7 +-
 .../hdfs/server/datanode/DataXceiverServer.java |  7 +-
 .../src/main/resources/hdfs-default.xml         | 22 ++++++
 .../TestDataNodeTransferSocketSize.java         | 71 ++++++++++++++++++++
 11 files changed, 169 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5cf535/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1b21c4d..270f30b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -928,6 +928,10 @@ Release 2.8.0 - UNRELEASED
     HDFS-8929. Add a metric to expose the timestamp of the last journal
     (surendra singh lilhore via vinayakumarb)
 
+    HDFS-8829. Make SO_RCVBUF and SO_SNDBUF size configurable for
+    DataTransferProtocol sockets and allow configuring auto-tuning (He Tianyi
+    via Colin P. McCabe)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5cf535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 62abc35..0498450 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -23,6 +23,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
 import org.apache.hadoop.http.HttpConfig;
@@ -769,9 +770,20 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT =
     false;
 
+  public static final String
+      DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_KEY =
+      "dfs.datanode.transfer.socket.send.buffer.size";
+  public static final int
+      DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_DEFAULT =
+      HdfsConstants.DEFAULT_DATA_SOCKET_SIZE;
+
+  public static final String
+      DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_KEY =
+      "dfs.datanode.transfer.socket.recv.buffer.size";
+  public static final int
+      DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_DEFAULT =
+      HdfsConstants.DEFAULT_DATA_SOCKET_SIZE;
 
-  
-  
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5cf535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java
index 95a1388..5425bd5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java
@@ -50,6 +50,11 @@ public class DomainPeerServer implements PeerServer {
   }
 
   @Override
+  public int getReceiveBufferSize() throws IOException {
+    return sock.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
+  }
+
+  @Override
   public Peer accept() throws IOException, SocketTimeoutException {
     DomainSocket connSock = sock.accept();
     Peer peer = null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5cf535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/PeerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/PeerServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/PeerServer.java
index c7b6b14..72974e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/PeerServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/PeerServer.java
@@ -32,7 +32,14 @@ public interface PeerServer extends Closeable {
   public void setReceiveBufferSize(int size) throws IOException;
 
   /**
-   * Listens for a connection to be made to this server and accepts 
+   * Get the receive buffer size of the PeerServer.
+   *
+   * @return     The receive buffer size.
+   */
+  int getReceiveBufferSize() throws IOException;
+
+  /**
+   * Listens for a connection to be made to this server and accepts
    * it. The method blocks until a connection is made.
    *
    * @exception IOException  if an I/O error occurs when waiting for a

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5cf535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
index e31e46a..8858de8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
@@ -74,6 +74,11 @@ public class TcpPeerServer implements PeerServer {
   }
 
   @Override
+  public int getReceiveBufferSize() throws IOException {
+    return this.serverSocket.getReceiveBufferSize();
+  }
+
+  @Override
   public Peer accept() throws IOException, SocketTimeoutException {
     Peer peer = DFSUtilClient.peerFromSocket(serverSocket.accept());
     return peer;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5cf535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
index 9c25f5e..bd4943d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
@@ -71,7 +71,9 @@ public class DNConf {
   final int socketTimeout;
   final int socketWriteTimeout;
   final int socketKeepaliveTimeout;
-  
+  private final int transferSocketSendBufferSize;
+  private final int transferSocketRecvBufferSize;
+
   final boolean transferToAllowed;
   final boolean dropCacheBehindWrites;
   final boolean syncBehindWrites;
@@ -114,8 +116,14 @@ public class DNConf {
     socketKeepaliveTimeout = conf.getInt(
         DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
         DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
-    
-    /* Based on results on different platforms, we might need set the default 
+    this.transferSocketSendBufferSize = conf.getInt(
+        DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_KEY,
+        DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_DEFAULT);
+    this.transferSocketRecvBufferSize = conf.getInt(
+        DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_KEY,
+        DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_DEFAULT);
+
+    /* Based on results on different platforms, we might need to set the default
      * to false on some of them. */
     transferToAllowed = conf.getBoolean(
         DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
@@ -279,4 +287,12 @@ public class DNConf {
   public boolean getAllowNonLocalLazyPersist() {
     return allowNonLocalLazyPersist;
   }
+
+  public int getTransferSocketRecvBufferSize() {
+    return transferSocketRecvBufferSize;
+  }
+
+  public int getTransferSocketSendBufferSize() {
+    return transferSocketSendBufferSize;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5cf535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 0b0a0e8..d51d0a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -910,7 +910,10 @@ public class DataNode extends ReconfigurableBase
       tcpPeerServer = new TcpPeerServer(dnConf.socketWriteTimeout,
           DataNode.getStreamingAddr(conf));
     }
-    tcpPeerServer.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
+    if (dnConf.getTransferSocketRecvBufferSize() > 0) {
+      tcpPeerServer.setReceiveBufferSize(
+          dnConf.getTransferSocketRecvBufferSize());
+    }
     streamingAddr = tcpPeerServer.getStreamingAddr();
     LOG.info("Opened streaming server at " + streamingAddr);
     this.threadGroup = new ThreadGroup("dataXceiverServer");
@@ -958,8 +961,12 @@ public class DataNode extends ReconfigurableBase
     }
     DomainPeerServer domainPeerServer =
       new DomainPeerServer(domainSocketPath, port);
-    domainPeerServer.setReceiveBufferSize(
-        HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
+    int recvBufferSize = conf.getInt(
+        DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_KEY,
+        DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_DEFAULT);
+    if (recvBufferSize > 0) {
+      domainPeerServer.setReceiveBufferSize(recvBufferSize);
+    }
     return domainPeerServer;
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5cf535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index efd2217..4f6dc96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -709,8 +709,11 @@ class DataXceiver extends Receiver implements Runnable {
               (HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
           NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
           mirrorSock.setSoTimeout(timeoutValue);
-          mirrorSock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
-          
+          if (dnConf.getTransferSocketSendBufferSize() > 0) {
+            mirrorSock.setSendBufferSize(
+                dnConf.getTransferSocketSendBufferSize());
+          }
+
           OutputStream unbufMirrorOut = NetUtils.getOutputStream(mirrorSock,
               writeTimeout);
           InputStream unbufMirrorIn = NetUtils.getInputStream(mirrorSock);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5cf535/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
index caf6eaa..8d312a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
@@ -278,7 +278,12 @@ class DataXceiverServer implements Runnable {
   synchronized int getNumPeersXceiver() {
     return peersXceiver.size();
   }
-  
+
+  @VisibleForTesting
+  PeerServer getPeerServer() {
+    return peerServer;
+  }
+
   synchronized void releasePeer(Peer peer) {
     peers.remove(peer);
     peersXceiver.remove(peer);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5cf535/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 62665fc..e9b62c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2424,4 +2424,26 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.datanode.transfer.socket.send.buffer.size</name>
+  <value>131072</value>
+  <description>
+    Socket send buffer size for DataXceiver (mirroring packets to downstream
+    datanodes in the pipeline). This may affect TCP connection throughput.
+    If it is set to zero or a negative value, no buffer size will be set
+    explicitly, thus enabling TCP auto-tuning on systems that support it.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.transfer.socket.recv.buffer.size</name>
+  <value>131072</value>
+  <description>
+    Socket receive buffer size for DataXceiver (receiving packets from the
+    client during block writing). This may affect TCP connection throughput.
+    If it is set to zero or a negative value, no buffer size will be set
+    explicitly, thus enabling TCP auto-tuning on systems that support it.
+  </description>
+</property>
+
 </configuration>
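
A minimal usage sketch, assuming only the two configuration keys added above:
a positive value pins the socket buffer size, while zero (or a negative value)
skips the explicit setSendBufferSize/setReceiveBufferSize call and defers to
the kernel's TCP auto-tuning.

import org.apache.hadoop.conf.Configuration;

public class TransferSocketBufferConfig {
  public static Configuration pinnedBuffers() {
    Configuration conf = new Configuration();
    conf.setInt("dfs.datanode.transfer.socket.send.buffer.size", 128 * 1024);
    conf.setInt("dfs.datanode.transfer.socket.recv.buffer.size", 128 * 1024);
    return conf;
  }

  public static Configuration autoTunedBuffers() {
    Configuration conf = new Configuration();
    // 0 means: do not set a buffer size at all; let the kernel auto-tune.
    conf.setInt("dfs.datanode.transfer.socket.send.buffer.size", 0);
    conf.setInt("dfs.datanode.transfer.socket.recv.buffer.size", 0);
    return conf;
  }
}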

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5cf535/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTransferSocketSize.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTransferSocketSize.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTransferSocketSize.java
new file mode 100644
index 0000000..0e98b86
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeTransferSocketSize.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.Test;
+
+public class TestDataNodeTransferSocketSize {
+
+  @Test
+  public void testSpecifiedDataSocketSize() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(
+      DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_KEY, 4 * 1024);
+    SimulatedFSDataset.setFactory(conf);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      List<DataNode> datanodes = cluster.getDataNodes();
+      DataNode datanode = datanodes.get(0);
+      assertEquals("Receive buffer size should be 4K",
+        4 * 1024, datanode.getXferServer().getPeerServer().getReceiveBufferSize());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  @Test
+  public void testAutoTuningDataSocketSize() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(
+      DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_KEY, 0);
+    SimulatedFSDataset.setFactory(conf);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      List<DataNode> datanodes = cluster.getDataNodes();
+      DataNode datanode = datanodes.get(0);
+      assertTrue(
+        "Receive buffer size should be a default value (determined by kernel)",
+        datanode.getXferServer().getPeerServer().getReceiveBufferSize() > 0);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}


[08/50] [abbrv] hadoop git commit: HDFS-7116. Add a command to get the balancer bandwidth (Contributed by Rakesh R)

Posted by ec...@apache.org.
HDFS-7116. Add a command to get the balancer bandwidth (Contributed by Rakesh R)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f0e897b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f0e897b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f0e897b

Branch: refs/heads/HADOOP-11890
Commit: 0f0e897bf1ff8383f3f524ba83161969531b0772
Parents: 4d13335
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Sep 9 22:58:50 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Wed Sep 9 22:58:50 2015 +0530

----------------------------------------------------------------------
 .../hdfs/protocol/ClientDatanodeProtocol.java   |  7 +++
 .../ClientDatanodeProtocolTranslatorPB.java     | 17 ++++++
 .../src/main/proto/ClientDatanodeProtocol.proto | 16 ++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 ...tDatanodeProtocolServerSideTranslatorPB.java | 16 ++++++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  8 +--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 42 ++++++++++++++-
 .../src/site/markdown/HDFSCommands.md           |  4 +-
 .../hadoop/hdfs/TestBalancerBandwidth.java      | 55 +++++++++++++++++++-
 9 files changed, 159 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f0e897b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 3374868..c7e3f19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -149,4 +149,11 @@ public interface ClientDatanodeProtocol {
    */
   void triggerBlockReport(BlockReportOptions options)
     throws IOException;
+
+  /**
+   * Get current value of the balancer bandwidth in bytes per second.
+   *
+   * @return balancer bandwidth
+   */
+  long getBalancerBandwidth() throws IOException;
 }
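
A hedged client-side sketch of the new RPC: querying one datanode's current
balancer bandwidth. The proxy factory call mirrors the one used in
TestBalancerBandwidth later in this patch; the DatanodeID is assumed to come
from some cluster listing.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;

public class BalancerBandwidthQuery {
  public static long query(DatanodeID dn, Configuration conf)
      throws IOException {
    // 60000 ms socket timeout; false = do not connect via hostname.
    ClientDatanodeProtocol proxy = DFSUtilClient
        .createClientDatanodeProtocolProxy(dn, conf, 60000, false);
    long bandwidth = proxy.getBalancerBandwidth();
    System.out.println("Balancer bandwidth is " + bandwidth
        + " bytes per second.");
    return bandwidth;
  }
}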

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f0e897b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 311fcea..f764275 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto;
@@ -98,6 +100,9 @@ public class ClientDatanodeProtocolTranslatorPB implements
   private static final ListReconfigurablePropertiesRequestProto
       VOID_LIST_RECONFIGURABLE_PROPERTIES =
       ListReconfigurablePropertiesRequestProto.newBuilder().build();
+  private static final GetBalancerBandwidthRequestProto
+      VOID_GET_BALANCER_BANDWIDTH =
+      GetBalancerBandwidthRequestProto.newBuilder().build();
 
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
       Configuration conf, int socketTimeout, boolean connectToDnViaHostname,
@@ -323,4 +328,16 @@ public class ClientDatanodeProtocolTranslatorPB implements
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public long getBalancerBandwidth() throws IOException {
+    GetBalancerBandwidthResponseProto response;
+    try {
+      response = rpcProxy.getBalancerBandwidth(NULL_CONTROLLER,
+          VOID_GET_BALANCER_BANDWIDTH);
+      return response.getBandwidth();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f0e897b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
index 6824b48..dd39546 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
@@ -162,6 +162,16 @@ message ListReconfigurablePropertiesResponseProto {
   repeated string name = 1;
 }
 
+message GetBalancerBandwidthRequestProto {
+}
+
+/**
+ * bandwidth - balancer bandwidth value of the datanode.
+ */
+message GetBalancerBandwidthResponseProto {
+  required uint64 bandwidth = 1;
+}
+
 /**
  * Protocol used from client to the Datanode.
  * See the request and response for details of rpc call.
@@ -211,4 +221,10 @@ service ClientDatanodeProtocolService {
 
   rpc triggerBlockReport(TriggerBlockReportRequestProto)
       returns(TriggerBlockReportResponseProto);
+
+  /**
+   * Returns the balancer bandwidth value of the datanode.
+   */
+  rpc getBalancerBandwidth(GetBalancerBandwidthRequestProto)
+      returns(GetBalancerBandwidthResponseProto);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f0e897b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 77f3b3e..8edc389 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -908,6 +908,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8716. Introduce a new config specifically for safe mode block count
     (Chang Li via kihwal)
 
+    HDFS-7116. Add a command to get the balancer bandwidth
+    (Rakesh R via vinayakumarb)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f0e897b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
index 5efcf67..3adb4a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto;
@@ -231,4 +233,18 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
     }
     return TRIGGER_BLOCK_REPORT_RESP;
   }
+
+  @Override
+  public GetBalancerBandwidthResponseProto getBalancerBandwidth(
+      RpcController controller, GetBalancerBandwidthRequestProto request)
+      throws ServiceException {
+    long bandwidth;
+    try {
+      bandwidth = impl.getBalancerBandwidth();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return GetBalancerBandwidthResponseProto.newBuilder()
+        .setBandwidth(bandwidth).build();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f0e897b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index f72455d..0b0a0e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3128,12 +3128,8 @@ public class DataNode extends ReconfigurableBase
     blockPoolTokenSecretManager.clearAllKeysForTesting();
   }
 
-  /**
-   * Get current value of the max balancer bandwidth in bytes per second.
-   *
-   * @return Balancer bandwidth in bytes per second for this datanode.
-   */
-  public Long getBalancerBandwidth() {
+  @Override // ClientDatanodeProtocol
+  public long getBalancerBandwidth() {
     DataXceiverServer dxcs =
                        (DataXceiverServer) this.dataXceiverServer.getRunnable();
     return dxcs.balanceThrottler.getBandwidth();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f0e897b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 2e64c44..6ccd604 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -419,6 +419,7 @@ public class DFSAdmin extends FsShell {
     "\t[-refreshNamenodes datanode_host:ipc_port]\n"+
     "\t[-deleteBlockPool datanode_host:ipc_port blockpoolId [force]]\n"+
     "\t[-setBalancerBandwidth <bandwidth in bytes per second>]\n" +
+    "\t[-getBalancerBandwidth <datanode_host:ipc_port>]\n" +
     "\t[-fetchImage <local directory>]\n" +
     "\t[-allowSnapshot <snapshotDir>]\n" +
     "\t[-disallowSnapshot <snapshotDir>]\n" +
@@ -888,6 +889,26 @@ public class DFSAdmin extends FsShell {
   }
 
   /**
+   * Command to get balancer bandwidth for the given datanode. Usage: hdfs
+   * dfsadmin -getBalancerBandwidth {@literal <datanode_host:ipc_port>}
+   * @param argv List of command line parameters.
+   * @param idx The index of the command that is being processed.
+   * @exception IOException
+   */
+  public int getBalancerBandwidth(String[] argv, int idx) throws IOException {
+    ClientDatanodeProtocol dnProxy = getDataNodeProxy(argv[idx]);
+    try {
+      long bandwidth = dnProxy.getBalancerBandwidth();
+      System.out.println("Balancer bandwidth is " + bandwidth
+          + " bytes per second.");
+    } catch (IOException ioe) {
+      System.err.println("Datanode unreachable.");
+      return -1;
+    }
+    return 0;
+  }
+
+  /**
    * Download the most recent fsimage from the name node, and save it to a local
    * file in the given directory.
    * 
@@ -1025,7 +1046,13 @@ public class DFSAdmin extends FsShell {
       "\t\tthat will be used by each datanode. This value overrides\n" +
       "\t\tthe dfs.balance.bandwidthPerSec parameter.\n\n" +
       "\t\t--- NOTE: The new value is not persistent on the DataNode.---\n";
-    
+
+    String getBalancerBandwidth = "-getBalancerBandwidth <datanode_host:ipc_port>:\n" +
+        "\tGet the network bandwidth for the given datanode.\n" +
+        "\tThis is the maximum network bandwidth used by the datanode\n" +
+        "\tduring HDFS block balancing.\n\n" +
+        "\t--- NOTE: This value is not persistent on the DataNode.---\n";
+
     String fetchImage = "-fetchImage <local directory>:\n" +
       "\tDownloads the most recent fsimage from the Name Node and saves it in" +
       "\tthe specified local directory.\n";
@@ -1103,6 +1130,8 @@ public class DFSAdmin extends FsShell {
       System.out.println(deleteBlockPool);
     } else if ("setBalancerBandwidth".equals(cmd)) {
       System.out.println(setBalancerBandwidth);
+    } else if ("getBalancerBandwidth".equals(cmd)) {
+      System.out.println(getBalancerBandwidth);
     } else if ("fetchImage".equals(cmd)) {
       System.out.println(fetchImage);
     } else if ("allowSnapshot".equalsIgnoreCase(cmd)) {
@@ -1140,6 +1169,7 @@ public class DFSAdmin extends FsShell {
       System.out.println(refreshNamenodes);
       System.out.println(deleteBlockPool);
       System.out.println(setBalancerBandwidth);
+      System.out.println(getBalancerBandwidth);
       System.out.println(fetchImage);
       System.out.println(allowSnapshot);
       System.out.println(disallowSnapshot);
@@ -1682,6 +1712,9 @@ public class DFSAdmin extends FsShell {
     } else if ("-setBalancerBandwidth".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
                   + " [-setBalancerBandwidth <bandwidth in bytes per second>]");
+    } else if ("-getBalancerBandwidth".equalsIgnoreCase(cmd)) {
+      System.err.println("Usage: hdfs dfsadmin"
+          + " [-getBalancerBandwidth <datanode_host:ipc_port>]");
     } else if ("-fetchImage".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
           + " [-fetchImage <local directory>]");
@@ -1817,6 +1850,11 @@ public class DFSAdmin extends FsShell {
         printUsage(cmd);
         return exitCode;
       }
+    } else if ("-getBalancerBandwidth".equalsIgnoreCase(cmd)) {
+      if (argv.length != 2) {
+        printUsage(cmd);
+        return exitCode;
+      }
     } else if ("-fetchImage".equals(cmd)) {
       if (argv.length != 2) {
         printUsage(cmd);
@@ -1902,6 +1940,8 @@ public class DFSAdmin extends FsShell {
         exitCode = deleteBlockPool(argv, i);
       } else if ("-setBalancerBandwidth".equals(cmd)) {
         exitCode = setBalancerBandwidth(argv, i);
+      } else if ("-getBalancerBandwidth".equals(cmd)) {
+        exitCode = getBalancerBandwidth(argv, i);
       } else if ("-fetchImage".equals(cmd)) {
         exitCode = fetchImage(argv, i);
       } else if ("-shutdownDatanode".equals(cmd)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f0e897b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 8bbcbb8..718463a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -338,6 +338,7 @@ Usage:
               [-refreshNamenodes datanodehost:port]
               [-deleteBlockPool datanode-host:port blockpoolId [force]]
               [-setBalancerBandwidth <bandwidth in bytes per second>]
+              [-getBalancerBandwidth <datanode_host:ipc_port>]
               [-allowSnapshot <snapshotDir>]
               [-disallowSnapshot <snapshotDir>]
               [-fetchImage <local directory>]
@@ -370,7 +371,8 @@ Usage:
 | `-printTopology` | Print a tree of the racks and their nodes as reported by the Namenode |
 | `-refreshNamenodes` datanodehost:port | For the given datanode, reloads the configuration files, stops serving the removed block-pools and starts serving new block-pools. |
 | `-deleteBlockPool` datanode-host:port blockpoolId [force] | If force is passed, block pool directory for the given blockpool id on the given datanode is deleted along with its contents, otherwise the directory is deleted only if it is empty. The command will fail if datanode is still serving the block pool. Refer to refreshNamenodes to shutdown a block pool service on a datanode. |
-| `-setBalancerBandwidth` \<bandwidth in bytes per second\> | Changes the network bandwidth used by each datanode during HDFS block balancing. \<bandwidth\> is the maximum number of bytes per second that will be used by each datanode. This value overrides the dfs.balance.bandwidthPerSec parameter. NOTE: The new value is not persistent on the DataNode. |
+| `-setBalancerBandwidth` \<bandwidth in bytes per second\> | Changes the network bandwidth used by each datanode during HDFS block balancing. \<bandwidth\> is the maximum number of bytes per second that will be used by each datanode. This value overrides the dfs.balance.bandwidthPerSec parameter. NOTE: The new value is not persistent on the DataNode. |
+| `-getBalancerBandwidth` \<datanode\_host:ipc\_port\> | Get the network bandwidth (in bytes per second) for the given datanode. This is the maximum network bandwidth used by the datanode during HDFS block balancing. |
 | `-allowSnapshot` \<snapshotDir\> | Allowing snapshots of a directory to be created. If the operation completes successfully, the directory becomes snapshottable. See the [HDFS Snapshot Documentation](./HdfsSnapshots.html) for more information. |
 | `-disallowSnapshot` \<snapshotDir\> | Disallowing snapshots of a directory to be created. All snapshots of the directory must be deleted before disallowing snapshots. See the [HDFS Snapshot Documentation](./HdfsSnapshots.html) for more information. |
 | `-fetchImage` \<local directory\> | Downloads the most recent fsimage from the NameNode and saves it in the specified local directory. |
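
As a usage sketch (the hostname here is hypothetical; 50020 is the default
datanode IPC port), `hdfs dfsadmin -getBalancerBandwidth dn1.example.com:50020`
prints a line of the form `Balancer bandwidth is 1048576 bytes per second.`,
matching the output format produced by DFSAdmin#getBalancerBandwidth above.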

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f0e897b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
index 29869b1..6e6bbee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
@@ -18,13 +18,19 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.nio.charset.Charset;
 import java.util.ArrayList;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.junit.Test;
 
 /**
@@ -36,6 +42,9 @@ public class TestBalancerBandwidth {
   final static private int NUM_OF_DATANODES = 2;
   final static private int DEFAULT_BANDWIDTH = 1024*1024;
   public static final Log LOG = LogFactory.getLog(TestBalancerBandwidth.class);
+  private static final Charset UTF8 = Charset.forName("UTF-8");
+  private final ByteArrayOutputStream outContent = new ByteArrayOutputStream();
+  private final PrintStream outStream = new PrintStream(outContent);
 
   @Test
   public void testBalancerBandwidth() throws Exception {
@@ -56,6 +65,23 @@ public class TestBalancerBandwidth {
       // Ensure value from the configuration is reflected in the datanodes.
       assertEquals(DEFAULT_BANDWIDTH, (long) datanodes.get(0).getBalancerBandwidth());
       assertEquals(DEFAULT_BANDWIDTH, (long) datanodes.get(1).getBalancerBandwidth());
+      ClientDatanodeProtocol dn1Proxy = DFSUtilClient
+          .createClientDatanodeProtocolProxy(datanodes.get(0).getDatanodeId(),
+              conf, 60000, false);
+      ClientDatanodeProtocol dn2Proxy = DFSUtilClient
+          .createClientDatanodeProtocolProxy(datanodes.get(1).getDatanodeId(),
+              conf, 60000, false);
+      DFSAdmin admin = new DFSAdmin(conf);
+      String dn1Address = datanodes.get(0).ipcServer.getListenerAddress()
+          .getHostName() + ":" + datanodes.get(0).getIpcPort();
+      String dn2Address = datanodes.get(1).ipcServer.getListenerAddress()
+          .getHostName() + ":" + datanodes.get(1).getIpcPort();
+
+      // verifies the dfsadmin command execution
+      String[] args = new String[] { "-getBalancerBandwidth", dn1Address };
+      runGetBalancerBandwidthCmd(admin, args, dn1Proxy, DEFAULT_BANDWIDTH);
+      args = new String[] { "-getBalancerBandwidth", dn2Address };
+      runGetBalancerBandwidthCmd(admin, args, dn2Proxy, DEFAULT_BANDWIDTH);
 
       // Dynamically change balancer bandwidth and ensure the updated value
       // is reflected on the datanodes.
@@ -69,6 +95,11 @@ public class TestBalancerBandwidth {
 
       assertEquals(newBandwidth, (long) datanodes.get(0).getBalancerBandwidth());
       assertEquals(newBandwidth, (long) datanodes.get(1).getBalancerBandwidth());
+      // verifies the dfsadmin command execution
+      args = new String[] { "-getBalancerBandwidth", dn1Address };
+      runGetBalancerBandwidthCmd(admin, args, dn1Proxy, newBandwidth);
+      args = new String[] { "-getBalancerBandwidth", dn2Address };
+      runGetBalancerBandwidthCmd(admin, args, dn2Proxy, newBandwidth);
 
       // Dynamically change balancer bandwidth to 0. Balancer bandwidth on the
       // datanodes should remain as it was.
@@ -81,11 +112,33 @@ public class TestBalancerBandwidth {
 
       assertEquals(newBandwidth, (long) datanodes.get(0).getBalancerBandwidth());
       assertEquals(newBandwidth, (long) datanodes.get(1).getBalancerBandwidth());
-    }finally {
+      // verifies the dfsadmin command execution
+      args = new String[] { "-getBalancerBandwidth", dn1Address };
+      runGetBalancerBandwidthCmd(admin, args, dn1Proxy, newBandwidth);
+      args = new String[] { "-getBalancerBandwidth", dn2Address };
+      runGetBalancerBandwidthCmd(admin, args, dn2Proxy, newBandwidth);
+    } finally {
       cluster.shutdown();
     }
   }
 
+  private void runGetBalancerBandwidthCmd(DFSAdmin admin, String[] args,
+      ClientDatanodeProtocol proxy, long expectedBandwidth) throws Exception {
+    PrintStream initialStdOut = System.out;
+    outContent.reset();
+    try {
+      System.setOut(outStream);
+      int exitCode = admin.run(args);
+      assertEquals("DFSAdmin should return 0", 0, exitCode);
+      String bandwidthOutMsg = "Balancer bandwidth is " + expectedBandwidth
+          + " bytes per second.";
+      String strOut = new String(outContent.toByteArray(), UTF8);
+      assertTrue("Wrong balancer bandwidth!", strOut.contains(bandwidthOutMsg));
+    } finally {
+      System.setOut(initialStdOut);
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     new TestBalancerBandwidth().testBalancerBandwidth();
   }


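The runGetBalancerBandwidthCmd helper above relies on a standard trick for asserting on CLI output: temporarily swap System.out for an in-memory stream, run the command, and restore the real stream in a finally block. A self-contained sketch of just that capture pattern (all names here are illustrative, not from the patch):

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;
    import java.nio.charset.StandardCharsets;

    public class StdoutCapture {
      public static String capture(Runnable command) {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        PrintStream original = System.out;
        try {
          System.setOut(new PrintStream(buf, true));
          command.run();               // anything printed now lands in buf
        } finally {
          System.setOut(original);     // always restore, even if the command throws
        }
        return new String(buf.toByteArray(), StandardCharsets.UTF_8);
      }

      public static void main(String[] args) {
        String out = capture(
            () -> System.out.println("Balancer bandwidth is 1048576 bytes per second."));
        System.out.println(out.contains("1048576"));   // true
      }
    }

Restoring System.out in finally matters because a failed assertion would otherwise leave all later test output swallowed by the buffer.
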
[20/50] [abbrv] hadoop git commit: YARN-4136. LinuxContainerExecutor loses info when forwarding ResourceHandlerException. Contributed by Bibin A Chundatt.

Posted by ec...@apache.org.
YARN-4136. LinuxContainerExecutor loses info when forwarding ResourceHandlerException. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/486d5cb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/486d5cb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/486d5cb8

Branch: refs/heads/HADOOP-11890
Commit: 486d5cb803efec7b4db445ee65a3df83392940a3
Parents: f103a70
Author: Varun Vasudev <vv...@apache.org>
Authored: Fri Sep 11 14:37:10 2015 +0530
Committer: Varun Vasudev <vv...@apache.org>
Committed: Fri Sep 11 14:37:48 2015 +0530

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                  | 4 ++++
 .../hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java   | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/486d5cb8/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5a706a3..cc833e2 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -889,6 +889,10 @@ Release 2.7.2 - UNRELEASED
     YARN-4096. App local logs are leaked if log aggregation fails to initialize
     for the app. (Jason Lowe via zxu)
 
+    YARN-4136. LinuxContainerExecutor loses info when forwarding
+    ResourceHandlerException. (Bibin A Chundatt via vvasudev)
+
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/486d5cb8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 0670d95..0a51301 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -340,7 +340,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
       }
     } catch (ResourceHandlerException e) {
       LOG.error("ResourceHandlerChain.preStart() failed!", e);
-      throw new IOException("ResourceHandlerChain.preStart() failed!");
+      throw new IOException("ResourceHandlerChain.preStart() failed!", e);
     }
 
     try {

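The one-line fix is the standard exception-chaining idiom: passing the caught exception as the cause keeps its message and stack trace attached to the wrapper instead of discarding them. A minimal illustration of the difference (hypothetical names and messages):

    import java.io.IOException;

    public class ChainingDemo {
      static void failingStep() {
        throw new IllegalStateException("cgroups controller not mounted");
      }

      public static void main(String[] args) {
        try {
          failingStep();
        } catch (RuntimeException e) {
          // With the cause attached, the printed trace ends in
          // "Caused by: java.lang.IllegalStateException: cgroups controller not mounted".
          // Without it, that root failure would be invisible to the caller.
          new IOException("ResourceHandlerChain.preStart() failed!", e).printStackTrace();
        }
      }
    }
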

[36/50] [abbrv] hadoop git commit: YARN-4126. RM should not issue delegation tokens in unsecure mode. Contributed by Bibin A Chundatt

Posted by ec...@apache.org.
YARN-4126. RM should not issue delegation tokens in unsecure mode. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1b1d7e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1b1d7e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1b1d7e4

Branch: refs/heads/HADOOP-11890
Commit: e1b1d7e4aebfed0dec4d7df21561ab37f73ef1d7
Parents: 332b520
Author: Jian He <ji...@apache.org>
Authored: Mon Sep 14 14:09:19 2015 +0800
Committer: Jian He <ji...@apache.org>
Committed: Mon Sep 14 14:09:19 2015 +0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   2 +
 .../server/resourcemanager/ClientRMService.java |   2 +-
 .../resourcemanager/TestClientRMService.java    | 255 ----------------
 .../TestTokenClientRMService.java               | 300 +++++++++++++++++++
 .../security/TestRMDelegationTokens.java        |   3 +
 .../TestRMWebServicesDelegationTokens.java      |   6 +-
 6 files changed, 311 insertions(+), 257 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1b1d7e4/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e4255c0..7a9d156 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -903,6 +903,8 @@ Release 2.7.2 - UNRELEASED
     YARN-3697. FairScheduler: ContinuousSchedulingThread can fail to shutdown.
     (Zhihai Xu via kasha)
 
+    YARN-4126. RM should not issue delegation tokens in unsecure mode.
+    (Bibin A Chundatt via jianhe)
 
 Release 2.7.1 - 2015-07-06
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1b1d7e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index cce0fe5..02c6a5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -1100,7 +1100,7 @@ public class ClientRMService extends AbstractService implements
           .contains(UserGroupInformation.getCurrentUser()
                   .getRealAuthenticationMethod());
     } else {
-      return true;
+      return false;
     }
   }
 

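The changed line is the fallback branch taken when security is disabled, so after this patch the RM refuses to issue, renew, or cancel delegation tokens under simple auth. The enclosing guard is not fully shown in the hunk; a sketch of its likely shape, with the EnumSet contents reconstructed from the common Hadoop idiom rather than confirmed by this diff:

    import java.io.IOException;
    import java.util.EnumSet;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;

    class DelegationTokenGuard {
      // Only callers with a real Kerberos-backed authentication method
      // may perform delegation token operations.
      static boolean isAllowedDelegationTokenOp() throws IOException {
        if (UserGroupInformation.isSecurityEnabled()) {
          return EnumSet.of(AuthenticationMethod.KERBEROS,
                  AuthenticationMethod.KERBEROS_SSL,
                  AuthenticationMethod.CERTIFICATE)
              .contains(UserGroupInformation.getCurrentUser()
                  .getRealAuthenticationMethod());
        } else {
          return false;   // the fix: no delegation tokens in unsecure mode
        }
      }
    }
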
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1b1d7e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 6a0b99c..0be8bc2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -30,7 +30,6 @@ import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -47,14 +46,10 @@ import java.util.concurrent.CyclicBarrier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.util.KerberosName;
-import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
-import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptReportResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationAttemptsRequest;
@@ -79,7 +74,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.MoveApplicationAcrossQueuesRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
@@ -123,11 +117,9 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystemTestUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -144,17 +136,13 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.security.QueueACLsManager;
-import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.Clock;
-import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.UTCClock;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.common.collect.ImmutableSet;
@@ -168,31 +156,10 @@ public class TestClientRMService {
       .getRecordFactory(null);
 
   private String appType = "MockApp";
-
-  private static RMDelegationTokenSecretManager dtsm;
   
   private final static String QUEUE_1 = "Q-1";
   private final static String QUEUE_2 = "Q-2";
-  private final static String kerberosRule = "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT";
-  static {
-    KerberosName.setRules(kerberosRule);
-  }
-  
-  @BeforeClass
-  public static void setupSecretManager() throws IOException {
-    RMContext rmContext = mock(RMContext.class);
-    when(rmContext.getStateStore()).thenReturn(new NullRMStateStore());
-    dtsm = new RMDelegationTokenSecretManager(60000, 60000, 60000, 60000, rmContext);
-    dtsm.startThreads();  
-  }
 
-  @AfterClass
-  public static void teardownSecretManager() {
-    if (dtsm != null) {
-      dtsm.stopThreads();
-    }
-  }
-  
   @Test
   public void testGetClusterNodes() throws Exception {
     MockRM rm = new MockRM() {
@@ -617,229 +584,7 @@ public class TestClientRMService {
     Assert.assertEquals(0, applications1.size());
   }
 
-  private static final UserGroupInformation owner =
-      UserGroupInformation.createRemoteUser("owner");
-  private static final UserGroupInformation other =
-      UserGroupInformation.createRemoteUser("other");
-  private static final UserGroupInformation tester =
-      UserGroupInformation.createRemoteUser("tester");
-  private static final String testerPrincipal = "tester@EXAMPLE.COM";
-  private static final String ownerPrincipal = "owner@EXAMPLE.COM";
-  private static final String otherPrincipal = "other@EXAMPLE.COM";
-  private static final UserGroupInformation testerKerb =
-      UserGroupInformation.createRemoteUser(testerPrincipal);
-  private static final UserGroupInformation ownerKerb =
-      UserGroupInformation.createRemoteUser(ownerPrincipal);
-  private static final UserGroupInformation otherKerb =
-      UserGroupInformation.createRemoteUser(otherPrincipal);
-  
-  @Test
-  public void testTokenRenewalByOwner() throws Exception {
-    owner.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        checkTokenRenewal(owner, owner);
-        return null;
-      }
-    });
-  }
   
-  @Test
-  public void testTokenRenewalWrongUser() throws Exception {
-    try {
-      owner.doAs(new PrivilegedExceptionAction<Void>() {
-        @Override
-        public Void run() throws Exception {
-          try {
-            checkTokenRenewal(owner, other);
-            return null;
-          } catch (YarnException ex) {
-            Assert.assertTrue(ex.getMessage().contains(owner.getUserName() +
-                " tries to renew a token with renewer " +
-                other.getUserName()));
-            throw ex;
-          }
-        }
-      });
-    } catch (Exception e) {
-      return;
-    }
-    Assert.fail("renew should have failed");
-  }
-
-  @Test
-  public void testTokenRenewalByLoginUser() throws Exception {
-    UserGroupInformation.getLoginUser().doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        checkTokenRenewal(owner, owner);
-        checkTokenRenewal(owner, other);
-        return null;
-      }
-    });
-  }
-
-  private void checkTokenRenewal(UserGroupInformation owner,
-      UserGroupInformation renewer) throws IOException, YarnException {
-    RMDelegationTokenIdentifier tokenIdentifier =
-        new RMDelegationTokenIdentifier(
-            new Text(owner.getUserName()), new Text(renewer.getUserName()), null);
-    Token<?> token =
-        new Token<RMDelegationTokenIdentifier>(tokenIdentifier, dtsm);
-    org.apache.hadoop.yarn.api.records.Token dToken = BuilderUtils.newDelegationToken(
-        token.getIdentifier(), token.getKind().toString(),
-        token.getPassword(), token.getService().toString());
-    RenewDelegationTokenRequest request =
-        Records.newRecord(RenewDelegationTokenRequest.class);
-    request.setDelegationToken(dToken);
-
-    RMContext rmContext = mock(RMContext.class);
-    ClientRMService rmService = new ClientRMService(
-        rmContext, null, null, null, null, dtsm);
-    rmService.renewDelegationToken(request);
-  }
-
-  @Test
-  public void testTokenCancellationByOwner() throws Exception {
-    // two tests required - one with a kerberos name
-    // and with a short name
-    RMContext rmContext = mock(RMContext.class);
-    final ClientRMService rmService =
-        new ClientRMService(rmContext, null, null, null, null, dtsm);
-    testerKerb.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        checkTokenCancellation(rmService, testerKerb, other);
-        return null;
-      }
-    });
-    owner.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        checkTokenCancellation(owner, other);
-        return null;
-      }
-    });
-  }
-
-  @Test
-  public void testTokenCancellationByRenewer() throws Exception {
-    // two tests required - one with a kerberos name
-    // and with a short name
-    RMContext rmContext = mock(RMContext.class);
-    final ClientRMService rmService =
-        new ClientRMService(rmContext, null, null, null, null, dtsm);
-    testerKerb.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        checkTokenCancellation(rmService, owner, testerKerb);
-        return null;
-      }
-    });
-    other.doAs(new PrivilegedExceptionAction<Void>() {
-      @Override
-      public Void run() throws Exception {
-        checkTokenCancellation(owner, other);
-        return null;
-      }
-    });
-  }
-
-  @Test
-  public void testTokenCancellationByWrongUser() {
-    // two sets to test -
-    // 1. try to cancel tokens of short and kerberos users as a kerberos UGI
-    // 2. try to cancel tokens of short and kerberos users as a simple auth UGI
-
-    RMContext rmContext = mock(RMContext.class);
-    final ClientRMService rmService =
-        new ClientRMService(rmContext, null, null, null, null, dtsm);
-    UserGroupInformation[] kerbTestOwners =
-        { owner, other, tester, ownerKerb, otherKerb };
-    UserGroupInformation[] kerbTestRenewers =
-        { owner, other, ownerKerb, otherKerb };
-    for (final UserGroupInformation tokOwner : kerbTestOwners) {
-      for (final UserGroupInformation tokRenewer : kerbTestRenewers) {
-        try {
-          testerKerb.doAs(new PrivilegedExceptionAction<Void>() {
-            @Override
-            public Void run() throws Exception {
-              try {
-                checkTokenCancellation(rmService, tokOwner, tokRenewer);
-                Assert.fail("We should not reach here; token owner = "
-                    + tokOwner.getUserName() + ", renewer = "
-                    + tokRenewer.getUserName());
-                return null;
-              } catch (YarnException e) {
-                Assert.assertTrue(e.getMessage().contains(
-                  testerKerb.getUserName()
-                      + " is not authorized to cancel the token"));
-                return null;
-              }
-            }
-          });
-        } catch (Exception e) {
-          Assert.fail("Unexpected exception; " + e.getMessage());
-        }
-      }
-    }
-
-    UserGroupInformation[] simpleTestOwners =
-        { owner, other, ownerKerb, otherKerb, testerKerb };
-    UserGroupInformation[] simpleTestRenewers =
-        { owner, other, ownerKerb, otherKerb };
-    for (final UserGroupInformation tokOwner : simpleTestOwners) {
-      for (final UserGroupInformation tokRenewer : simpleTestRenewers) {
-        try {
-          tester.doAs(new PrivilegedExceptionAction<Void>() {
-            @Override
-            public Void run() throws Exception {
-              try {
-                checkTokenCancellation(tokOwner, tokRenewer);
-                Assert.fail("We should not reach here; token owner = "
-                    + tokOwner.getUserName() + ", renewer = "
-                    + tokRenewer.getUserName());
-                return null;
-              } catch (YarnException ex) {
-                Assert.assertTrue(ex.getMessage().contains(
-                  tester.getUserName()
-                      + " is not authorized to cancel the token"));
-                return null;
-              }
-            }
-          });
-        } catch (Exception e) {
-          Assert.fail("Unexpected exception; " + e.getMessage());
-        }
-      }
-    }
-  }
-
-  private void checkTokenCancellation(UserGroupInformation owner,
-      UserGroupInformation renewer) throws IOException, YarnException {
-    RMContext rmContext = mock(RMContext.class);
-    final ClientRMService rmService =
-        new ClientRMService(rmContext, null, null, null, null, dtsm);
-    checkTokenCancellation(rmService, owner, renewer);
-  }
-
-  private void checkTokenCancellation(ClientRMService rmService,
-      UserGroupInformation owner, UserGroupInformation renewer)
-      throws IOException, YarnException {
-    RMDelegationTokenIdentifier tokenIdentifier =
-        new RMDelegationTokenIdentifier(new Text(owner.getUserName()),
-          new Text(renewer.getUserName()), null);
-    Token<?> token =
-        new Token<RMDelegationTokenIdentifier>(tokenIdentifier, dtsm);
-    org.apache.hadoop.yarn.api.records.Token dToken =
-        BuilderUtils.newDelegationToken(token.getIdentifier(), token.getKind()
-          .toString(), token.getPassword(), token.getService().toString());
-    CancelDelegationTokenRequest request =
-        Records.newRecord(CancelDelegationTokenRequest.class);
-    request.setDelegationToken(dToken);
-    rmService.cancelDelegationToken(request);
-  }
-
   @Test (timeout = 30000)
   @SuppressWarnings ("rawtypes")
   public void testAppSubmit() throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1b1d7e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java
new file mode 100644
index 0000000..351f068
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestTokenClientRMService.java
@@ -0,0 +1,300 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
+import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestTokenClientRMService {
+
+  private final static String kerberosRule =
+      "RULE:[1:$1@$0](.*@EXAMPLE.COM)s/@.*//\nDEFAULT";
+  private static RMDelegationTokenSecretManager dtsm;
+  static {
+    KerberosName.setRules(kerberosRule);
+  }
+
+  private static final UserGroupInformation owner = UserGroupInformation
+      .createRemoteUser("owner", AuthMethod.KERBEROS);
+  private static final UserGroupInformation other = UserGroupInformation
+      .createRemoteUser("other", AuthMethod.KERBEROS);
+  private static final UserGroupInformation tester = UserGroupInformation
+      .createRemoteUser("tester", AuthMethod.KERBEROS);
+  private static final String testerPrincipal = "tester@EXAMPLE.COM";
+  private static final String ownerPrincipal = "owner@EXAMPLE.COM";
+  private static final String otherPrincipal = "other@EXAMPLE.COM";
+  private static final UserGroupInformation testerKerb = UserGroupInformation
+      .createRemoteUser(testerPrincipal, AuthMethod.KERBEROS);
+  private static final UserGroupInformation ownerKerb = UserGroupInformation
+      .createRemoteUser(ownerPrincipal, AuthMethod.KERBEROS);
+  private static final UserGroupInformation otherKerb = UserGroupInformation
+      .createRemoteUser(otherPrincipal, AuthMethod.KERBEROS);
+
+  @BeforeClass
+  public static void setupSecretManager() throws IOException {
+    RMContext rmContext = mock(RMContext.class);
+    when(rmContext.getStateStore()).thenReturn(new NullRMStateStore());
+    dtsm =
+        new RMDelegationTokenSecretManager(60000, 60000, 60000, 60000,
+            rmContext);
+    dtsm.startThreads();
+    Configuration conf = new Configuration();
+    conf.set("hadoop.security.authentication", "kerberos");
+    conf.set("hadoop.security.auth_to_local", kerberosRule);
+    UserGroupInformation.setConfiguration(conf);
+  }
+
+  @AfterClass
+  public static void teardownSecretManager() {
+    if (dtsm != null) {
+      dtsm.stopThreads();
+    }
+  }
+
+  @Test
+  public void testTokenCancellationByOwner() throws Exception {
+    // two tests required - one with a kerberos name
+    // and with a short name
+    RMContext rmContext = mock(RMContext.class);
+    final ClientRMService rmService =
+        new ClientRMService(rmContext, null, null, null, null, dtsm);
+    testerKerb.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        checkTokenCancellation(rmService, testerKerb, other);
+        return null;
+      }
+    });
+    owner.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        checkTokenCancellation(owner, other);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void testTokenRenewalWrongUser() throws Exception {
+    try {
+      owner.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws Exception {
+          try {
+            checkTokenRenewal(owner, other);
+            return null;
+          } catch (YarnException ex) {
+            Assert.assertTrue(ex.getMessage().contains(
+                owner.getUserName() + " tries to renew a token with renewer "
+                    + other.getUserName()));
+            throw ex;
+          }
+        }
+      });
+    } catch (Exception e) {
+      return;
+    }
+    Assert.fail("renew should have failed");
+  }
+
+  @Test
+  public void testTokenRenewalByLoginUser() throws Exception {
+    UserGroupInformation.getLoginUser().doAs(
+        new PrivilegedExceptionAction<Void>() {
+          @Override
+          public Void run() throws Exception {
+            checkTokenRenewal(owner, owner);
+            checkTokenRenewal(owner, other);
+            return null;
+          }
+        });
+  }
+
+  private void checkTokenRenewal(UserGroupInformation owner,
+      UserGroupInformation renewer) throws IOException, YarnException {
+    RMDelegationTokenIdentifier tokenIdentifier =
+        new RMDelegationTokenIdentifier(new Text(owner.getUserName()),
+            new Text(renewer.getUserName()), null);
+    Token<?> token =
+        new Token<RMDelegationTokenIdentifier>(tokenIdentifier, dtsm);
+    org.apache.hadoop.yarn.api.records.Token dToken =
+        BuilderUtils.newDelegationToken(token.getIdentifier(), token.getKind()
+            .toString(), token.getPassword(), token.getService().toString());
+    RenewDelegationTokenRequest request =
+        Records.newRecord(RenewDelegationTokenRequest.class);
+    request.setDelegationToken(dToken);
+
+    RMContext rmContext = mock(RMContext.class);
+    ClientRMService rmService =
+        new ClientRMService(rmContext, null, null, null, null, dtsm);
+    rmService.renewDelegationToken(request);
+  }
+
+  @Test
+  public void testTokenCancellationByRenewer() throws Exception {
+    // two tests required - one with a kerberos name
+    // and with a short name
+    RMContext rmContext = mock(RMContext.class);
+    final ClientRMService rmService =
+        new ClientRMService(rmContext, null, null, null, null, dtsm);
+    testerKerb.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        checkTokenCancellation(rmService, owner, testerKerb);
+        return null;
+      }
+    });
+    other.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        checkTokenCancellation(owner, other);
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void testTokenCancellationByWrongUser() {
+    // two sets to test -
+    // 1. try to cancel tokens of short and kerberos users as a kerberos UGI
+    // 2. try to cancel tokens of short and kerberos users as a simple auth UGI
+
+    RMContext rmContext = mock(RMContext.class);
+    final ClientRMService rmService =
+        new ClientRMService(rmContext, null, null, null, null, dtsm);
+    UserGroupInformation[] kerbTestOwners =
+        { owner, other, tester, ownerKerb, otherKerb };
+    UserGroupInformation[] kerbTestRenewers =
+        { owner, other, ownerKerb, otherKerb };
+    for (final UserGroupInformation tokOwner : kerbTestOwners) {
+      for (final UserGroupInformation tokRenewer : kerbTestRenewers) {
+        try {
+          testerKerb.doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+              try {
+                checkTokenCancellation(rmService, tokOwner, tokRenewer);
+                Assert.fail("We should not reach here; token owner = "
+                    + tokOwner.getUserName() + ", renewer = "
+                    + tokRenewer.getUserName());
+                return null;
+              } catch (YarnException e) {
+                Assert.assertTrue(e.getMessage().contains(
+                    testerKerb.getUserName()
+                        + " is not authorized to cancel the token"));
+                return null;
+              }
+            }
+          });
+        } catch (Exception e) {
+          Assert.fail("Unexpected exception; " + e.getMessage());
+        }
+      }
+    }
+
+    UserGroupInformation[] simpleTestOwners =
+        { owner, other, ownerKerb, otherKerb, testerKerb };
+    UserGroupInformation[] simpleTestRenewers =
+        { owner, other, ownerKerb, otherKerb };
+    for (final UserGroupInformation tokOwner : simpleTestOwners) {
+      for (final UserGroupInformation tokRenewer : simpleTestRenewers) {
+        try {
+          tester.doAs(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+              try {
+                checkTokenCancellation(tokOwner, tokRenewer);
+                Assert.fail("We should not reach here; token owner = "
+                    + tokOwner.getUserName() + ", renewer = "
+                    + tokRenewer.getUserName());
+                return null;
+              } catch (YarnException ex) {
+                Assert.assertTrue(ex.getMessage().contains(
+                    tester.getUserName()
+                        + " is not authorized to cancel the token"));
+                return null;
+              }
+            }
+          });
+        } catch (Exception e) {
+          Assert.fail("Unexpected exception; " + e.getMessage());
+        }
+      }
+    }
+  }
+
+  private void checkTokenCancellation(UserGroupInformation owner,
+      UserGroupInformation renewer) throws IOException, YarnException {
+    RMContext rmContext = mock(RMContext.class);
+    final ClientRMService rmService =
+        new ClientRMService(rmContext, null, null, null, null, dtsm);
+    checkTokenCancellation(rmService, owner, renewer);
+  }
+
+  private void checkTokenCancellation(ClientRMService rmService,
+      UserGroupInformation owner, UserGroupInformation renewer)
+      throws IOException, YarnException {
+    RMDelegationTokenIdentifier tokenIdentifier =
+        new RMDelegationTokenIdentifier(new Text(owner.getUserName()),
+            new Text(renewer.getUserName()), null);
+    Token<?> token =
+        new Token<RMDelegationTokenIdentifier>(tokenIdentifier, dtsm);
+    org.apache.hadoop.yarn.api.records.Token dToken =
+        BuilderUtils.newDelegationToken(token.getIdentifier(), token.getKind()
+            .toString(), token.getPassword(), token.getService().toString());
+    CancelDelegationTokenRequest request =
+        Records.newRecord(CancelDelegationTokenRequest.class);
+    request.setDelegationToken(dToken);
+    rmService.cancelDelegationToken(request);
+  }
+
+  @Test
+  public void testTokenRenewalByOwner() throws Exception {
+    owner.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        checkTokenRenewal(owner, owner);
+        return null;
+      }
+    });
+  }
+
+}

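The new test drives every check through UserGroupInformation.doAs so that each token operation runs under a specific caller identity. A stripped-down sketch of that pattern, independent of the RM specifics (the user name is illustrative):

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsExample {
      public static void main(String[] args) throws Exception {
        final UserGroupInformation alice =
            UserGroupInformation.createRemoteUser("alice", AuthMethod.KERBEROS);
        String name = alice.doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws Exception {
            // Everything in here observes "alice" as the current user.
            return UserGroupInformation.getCurrentUser().getShortUserName();
          }
        });
        System.out.println("ran as: " + name);   // ran as: alice
      }
    }
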
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1b1d7e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
index 2847a89..068d008 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestRMDelegationTokens.java
@@ -70,6 +70,9 @@ public class TestRMDelegationTokens {
   // Test the DT master key in the state-store when the master key is being rolled.
   @Test(timeout = 15000)
   public void testRMDTMasterKeyStateOnRollingMasterKey() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set("hadoop.security.authentication", "kerberos");
+    UserGroupInformation.setConfiguration(conf);
     MemoryRMStateStore memStore = new MemoryRMStateStore();
     memStore.init(conf);
     RMState rmState = memStore.getState();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1b1d7e4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
index dab8343..a6d7744 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesDelegationTokens.java
@@ -38,6 +38,7 @@ import javax.xml.parsers.ParserConfigurationException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.minikdc.MiniKdc;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.KerberosTestUtils;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
@@ -84,7 +85,6 @@ import com.sun.jersey.api.client.ClientResponse;
 import com.sun.jersey.api.client.ClientResponse.Status;
 import com.sun.jersey.api.client.filter.LoggingFilter;
 import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
-import com.sun.jersey.test.framework.JerseyTest;
 import com.sun.jersey.test.framework.WebAppDescriptor;
 
 @RunWith(Parameterized.class)
@@ -246,6 +246,9 @@ public class TestRMWebServicesDelegationTokens extends JerseyTestBase {
     super.setUp();
     httpSpnegoKeytabFile.deleteOnExit();
     testRootDir.deleteOnExit();
+    Configuration conf = new Configuration();
+    conf.set("hadoop.security.authentication", "kerberos");
+    UserGroupInformation.setConfiguration(conf);
   }
 
   @AfterClass
@@ -260,6 +263,7 @@ public class TestRMWebServicesDelegationTokens extends JerseyTestBase {
   public void tearDown() throws Exception {
     rm.stop();
     super.tearDown();
+    UserGroupInformation.setConfiguration(new Configuration());
   }
 
   // Simple test - try to create a delegation token via web services and check


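Both test fixups follow the same recipe: point the static UGI state at kerberos authentication before the test and reset it afterwards, since UserGroupInformation.setConfiguration mutates process-wide state that would otherwise leak into later tests. A condensed sketch of that setup/teardown pair (JUnit 4, class name illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.junit.After;
    import org.junit.Before;

    public class SecureModeTestBase {
      @Before
      public void enableKerberosAuth() {
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);   // static, process-wide
      }

      @After
      public void restoreSimpleAuth() {
        // Reset to defaults so tests that expect simple auth still pass.
        UserGroupInformation.setConfiguration(new Configuration());
      }
    }
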
[38/50] [abbrv] hadoop git commit: HDFS-8996. Consolidate validateLog and scanLog in FJM#EditLogFile (Zhe Zhang via Colin P. McCabe)

Posted by ec...@apache.org.
HDFS-8996. Consolidate validateLog and scanLog in FJM#EditLogFile (Zhe Zhang via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53bad4eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53bad4eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53bad4eb

Branch: refs/heads/HADOOP-11890
Commit: 53bad4eb008ec553dcdbe01e7ae975dcecde6590
Parents: 6955771
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Mon Sep 14 15:20:51 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Mon Sep 14 15:22:12 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hdfs/server/namenode/FSEditLogTestUtil.java |  2 +-
 .../hadoop/hdfs/qjournal/server/Journal.java    |  8 +--
 .../server/namenode/EditLogFileInputStream.java | 61 +++----------------
 .../hdfs/server/namenode/FSEditLogLoader.java   | 63 ++++++--------------
 .../server/namenode/FileJournalManager.java     | 32 +++++-----
 .../TestCheckPointForSecurityTokens.java        |  4 +-
 .../hdfs/server/namenode/TestEditLog.java       |  2 +-
 .../server/namenode/TestFSEditLogLoader.java    | 14 ++---
 9 files changed, 58 insertions(+), 131 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bad4eb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cb0fae9..1b21c4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -906,6 +906,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9027. Refactor o.a.h.hdfs.DataStreamer#isLazyPersist() method.
     (Mingliang Liu via Arpit Agarwal)
 
+    HDFS-8996. Consolidate validateLog and scanLog in FJM#EditLogFile (Zhe
+    Zhang via Colin P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bad4eb/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
index e5b9d01..7a7af06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
@@ -34,7 +34,7 @@ public class FSEditLogTestUtil {
   public static long countTransactionsInStream(EditLogInputStream in) 
       throws IOException {
     FSEditLogLoader.EditLogValidation validation =
-        FSEditLogLoader.validateEditLog(in, Long.MAX_VALUE);
+        FSEditLogLoader.scanEditLog(in, Long.MAX_VALUE);
     return (validation.getEndTxId() - in.getFirstTxId()) + 1;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bad4eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index b94cd8c..de052c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -192,7 +192,7 @@ public class Journal implements Closeable {
     
     while (!files.isEmpty()) {
       EditLogFile latestLog = files.remove(files.size() - 1);
-      latestLog.scanLog();
+      latestLog.scanLog(Long.MAX_VALUE, false);
       LOG.info("Latest log is " + latestLog);
       if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
         // the log contains no transactions
@@ -542,7 +542,7 @@ public class Journal implements Closeable {
       // If it's in-progress, it should only contain one transaction,
       // because the "startLogSegment" transaction is written alone at the
       // start of each segment. 
-      existing.scanLog();
+      existing.scanLog(Long.MAX_VALUE, false);
       if (existing.getLastTxId() != existing.getFirstTxId()) {
         throw new IllegalStateException("The log file " +
             existing + " seems to contain valid transactions");
@@ -605,7 +605,7 @@ public class Journal implements Closeable {
       if (needsValidation) {
         LOG.info("Validating log segment " + elf.getFile() + " about to be " +
             "finalized");
-        elf.scanLog();
+        elf.scanLog(Long.MAX_VALUE, false);
   
         checkSync(elf.getLastTxId() == endTxId,
             "Trying to finalize in-progress log segment %s to end at " +
@@ -693,7 +693,7 @@ public class Journal implements Closeable {
       return null;
     }
     if (elf.isInProgress()) {
-      elf.scanLog();
+      elf.scanLog(Long.MAX_VALUE, false);
     }
     if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
       LOG.info("Edit log file " + elf + " appears to be empty. " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bad4eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
index 3bf0ab4..48df8d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
@@ -301,74 +301,31 @@ public class EditLogFileInputStream extends EditLogInputStream {
   }
 
   /**
-   * @param file File being validated.
-   * @param maxTxIdToValidate Maximum Tx ID to try to validate. Validation
-   *                          returns after reading this or a higher ID.
-   *                          The file portion beyond this ID is potentially
-   *                          being updated.
+   * @param file          File being scanned and validated.
+   * @param maxTxIdToScan Maximum Tx ID to try to scan.
+   *                      The scan returns after reading this or a higher
+   *                      ID. The file portion beyond this ID is
+   *                      potentially being updated.
    * @return Result of the validation
    * @throws IOException
    */
-  static FSEditLogLoader.EditLogValidation validateEditLog(File file,
-      long maxTxIdToValidate) throws IOException {
-    EditLogFileInputStream in;
-    try {
-      in = new EditLogFileInputStream(file);
-      in.getVersion(true); // causes us to read the header
-    } catch (LogHeaderCorruptException e) {
-      // If the header is malformed or the wrong value, this indicates a corruption
-      LOG.warn("Log file " + file + " has no valid header", e);
-      return new FSEditLogLoader.EditLogValidation(0,
-          HdfsServerConstants.INVALID_TXID, true);
-    }
-    
-    try {
-      return FSEditLogLoader.validateEditLog(in, maxTxIdToValidate);
-    } finally {
-      IOUtils.closeStream(in);
-    }
-  }
-
-  static FSEditLogLoader.EditLogValidation scanEditLog(File file)
+  static FSEditLogLoader.EditLogValidation scanEditLog(File file,
+      long maxTxIdToScan, boolean verifyVersion)
       throws IOException {
     EditLogFileInputStream in;
     try {
       in = new EditLogFileInputStream(file);
       // read the header, initialize the inputstream, but do not check the
       // layoutversion
-      in.getVersion(false);
+      in.getVersion(verifyVersion);
     } catch (LogHeaderCorruptException e) {
       LOG.warn("Log file " + file + " has no valid header", e);
       return new FSEditLogLoader.EditLogValidation(0,
           HdfsServerConstants.INVALID_TXID, true);
     }
 
-    long lastPos = 0;
-    long lastTxId = HdfsServerConstants.INVALID_TXID;
-    long numValid = 0;
     try {
-      while (true) {
-        long txid = HdfsServerConstants.INVALID_TXID;
-        lastPos = in.getPosition();
-        try {
-          if ((txid = in.scanNextOp()) == HdfsServerConstants.INVALID_TXID) {
-            break;
-          }
-        } catch (Throwable t) {
-          FSImage.LOG.warn("Caught exception after scanning through "
-              + numValid + " ops from " + in
-              + " while determining its valid length. Position was "
-              + lastPos, t);
-          in.resync();
-          FSImage.LOG.warn("After resync, position is " + in.getPosition());
-          continue;
-        }
-        if (lastTxId == HdfsServerConstants.INVALID_TXID || txid > lastTxId) {
-          lastTxId = txid;
-        }
-        numValid++;
-      }
-      return new EditLogValidation(lastPos, lastTxId, false);
+      return FSEditLogLoader.scanEditLog(in, maxTxIdToScan);
     } finally {
       IOUtils.closeStream(in);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bad4eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index bb36ca2..c2cccb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -1110,70 +1110,41 @@ public class FSEditLogLoader {
   /**
    * Find the last valid transaction ID in the stream.
    * If there are invalid or corrupt transactions in the middle of the stream,
-   * validateEditLog will skip over them.
+   * scanEditLog will skip over them.
    * This reads through the stream but does not close it.
    *
-   * @param maxTxIdToValidate Maximum Tx ID to try to validate. Validation
-   *                          returns after reading this or a higher ID.
-   *                          The file portion beyond this ID is potentially
-   *                          being updated.
+   * @param maxTxIdToScan Maximum Tx ID to try to scan.
+   *                      The scan returns after reading this or a higher ID.
+   *                      The file portion beyond this ID is potentially being
+   *                      updated.
    */
-  static EditLogValidation validateEditLog(EditLogInputStream in,
-      long maxTxIdToValidate) {
-    long lastPos = 0;
+  static EditLogValidation scanEditLog(EditLogInputStream in,
+      long maxTxIdToScan) {
+    long lastPos;
     long lastTxId = HdfsServerConstants.INVALID_TXID;
     long numValid = 0;
-    FSEditLogOp op = null;
     while (true) {
+      long txid;
       lastPos = in.getPosition();
       try {
-        if ((op = in.readOp()) == null) {
+        if ((txid = in.scanNextOp()) == HdfsServerConstants.INVALID_TXID) {
           break;
         }
       } catch (Throwable t) {
-        FSImage.LOG.warn("Caught exception after reading " + numValid +
-            " ops from " + in + " while determining its valid length." +
-            "Position was " + lastPos, t);
+        FSImage.LOG.warn("Caught exception after scanning through "
+            + numValid + " ops from " + in
+            + " while determining its valid length. Position was "
+            + lastPos, t);
         in.resync();
         FSImage.LOG.warn("After resync, position is " + in.getPosition());
         continue;
       }
-      if (lastTxId == HdfsServerConstants.INVALID_TXID
-          || op.getTransactionId() > lastTxId) {
-        lastTxId = op.getTransactionId();
+      if (lastTxId == HdfsServerConstants.INVALID_TXID || txid > lastTxId) {
+        lastTxId = txid;
       }
-      if (lastTxId >= maxTxIdToValidate) {
+      if (lastTxId >= maxTxIdToScan) {
         break;
       }
-
-      numValid++;
-    }
-    return new EditLogValidation(lastPos, lastTxId, false);
-  }
-
-  static EditLogValidation scanEditLog(EditLogInputStream in) {
-    long lastPos = 0;
-    long lastTxId = HdfsServerConstants.INVALID_TXID;
-    long numValid = 0;
-    FSEditLogOp op = null;
-    while (true) {
-      lastPos = in.getPosition();
-      try {
-        if ((op = in.readOp()) == null) { // TODO
-          break;
-        }
-      } catch (Throwable t) {
-        FSImage.LOG.warn("Caught exception after reading " + numValid +
-            " ops from " + in + " while determining its valid length." +
-            "Position was " + lastPos, t);
-        in.resync();
-        FSImage.LOG.warn("After resync, position is " + in.getPosition());
-        continue;
-      }
-      if (lastTxId == HdfsServerConstants.INVALID_TXID
-          || op.getTransactionId() > lastTxId) {
-        lastTxId = op.getTransactionId();
-      }
       numValid++;
     }
     return new EditLogValidation(lastPos, lastTxId, false);

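The consolidated loop has a simple shape: remember the stream position before each op, scan the next transaction id, resync and continue on any decode error, and stop once the requested maximum id has been reached. A self-contained skeleton of that control flow, with stand-in types for the HDFS classes (OpStream and the INVALID_TXID value are placeholders, not the real API):

    interface OpStream {
      long getPosition();
      long scanNextOp() throws Exception;  // next txid, or INVALID_TXID at end of stream
      void resync();                       // skip forward past a corrupt region
    }

    final class ScanResult {
      final long endPos;
      final long lastTxId;
      ScanResult(long endPos, long lastTxId) {
        this.endPos = endPos;
        this.lastTxId = lastTxId;
      }
    }

    class EditLogScan {
      static final long INVALID_TXID = -1;   // stand-in for HdfsServerConstants.INVALID_TXID

      static ScanResult scan(OpStream in, long maxTxIdToScan) {
        long lastPos;
        long lastTxId = INVALID_TXID;
        while (true) {
          lastPos = in.getPosition();        // where the current op starts
          long txid;
          try {
            if ((txid = in.scanNextOp()) == INVALID_TXID) {
              break;                         // clean end of stream
            }
          } catch (Exception t) {
            in.resync();                     // skip the corrupt region and keep scanning
            continue;
          }
          if (lastTxId == INVALID_TXID || txid > lastTxId) {
            lastTxId = txid;
          }
          if (lastTxId >= maxTxIdToScan) {
            break;                           // bytes beyond this may still be in flight
          }
        }
        return new ScanResult(lastPos, lastTxId);
      }
    }
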
http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bad4eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index a1488eb..ff6376e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -211,7 +211,7 @@ public class FileJournalManager implements JournalManager {
       }
       if (elf.isInProgress()) {
         try {
-          elf.validateLog(getLastReadableTxId());
+          elf.scanLog(getLastReadableTxId(), true);
         } catch (IOException e) {
           LOG.error("got IOException while trying to validate header of " +
               elf + ".  Skipping.", e);
@@ -348,8 +348,8 @@ public class FileJournalManager implements JournalManager {
   }
   
   static void addStreamsToCollectionFromFiles(Collection<EditLogFile> elfs,
-      Collection<EditLogInputStream> streams, long fromTxId, long maxTxIdToValidate,
-      boolean inProgressOk) {
+      Collection<EditLogInputStream> streams, long fromTxId,
+      long maxTxIdToScan, boolean inProgressOk) {
     for (EditLogFile elf : elfs) {
       if (elf.isInProgress()) {
         if (!inProgressOk) {
@@ -360,7 +360,7 @@ public class FileJournalManager implements JournalManager {
           continue;
         }
         try {
-          elf.validateLog(maxTxIdToValidate);
+          elf.scanLog(maxTxIdToScan, true);
         } catch (IOException e) {
           LOG.error("got IOException while trying to validate header of " +
               elf + ".  Skipping.", e);
@@ -404,7 +404,7 @@ public class FileJournalManager implements JournalManager {
           continue;
         }
 
-        elf.validateLog(getLastReadableTxId());
+        elf.scanLog(getLastReadableTxId(), true);
 
         if (elf.hasCorruptHeader()) {
           elf.moveAsideCorruptFile();
@@ -536,20 +536,16 @@ public class FileJournalManager implements JournalManager {
      * Find out where the edit log ends.
      * This will update the lastTxId of the EditLogFile or
      * mark it as corrupt if it is.
-     * @param maxTxIdToValidate Maximum Tx ID to try to validate. Validation
-     *                          returns after reading this or a higher ID.
-     *                          The file portion beyond this ID is potentially
-     *                          being updated.
+     * @param maxTxIdToScan Maximum Tx ID to try to scan.
+     *                      The scan returns after reading this or a higher ID.
+     *                      The file portion beyond this ID is potentially being
+     *                      updated.
+     * @param verifyVersion Whether the scan should verify the layout version
      */
-    public void validateLog(long maxTxIdToValidate) throws IOException {
-      EditLogValidation val = EditLogFileInputStream.validateEditLog(file,
-          maxTxIdToValidate);
-      this.lastTxId = val.getEndTxId();
-      this.hasCorruptHeader = val.hasCorruptHeader();
-    }
-
-    public void scanLog() throws IOException {
-      EditLogValidation val = EditLogFileInputStream.scanEditLog(file);
+    public void scanLog(long maxTxIdToScan, boolean verifyVersion)
+        throws IOException {
+      EditLogValidation val = EditLogFileInputStream.scanEditLog(file,
+          maxTxIdToScan, verifyVersion);
       this.lastTxId = val.getEndTxId();
       this.hasCorruptHeader = val.hasCorruptHeader();
     }
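
The merged method keeps validateLog's bounded-scan behavior and adds the
verifyVersion flag; every call site in this patch passes true. A small
usage sketch against an EditLogFile (setup elided; the method names are
the ones shown above):

    // scan a log to its end, verifying the layout version in the header
    elf.scanLog(Long.MAX_VALUE, true);
    long lastTxId = elf.getLastTxId();         // set from the scan result
    boolean corrupt = elf.hasCorruptHeader();  // true if the header was unreadable

    // recovery of an in-progress segment bounds the scan instead:
    elf.scanLog(getLastReadableTxId(), true);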

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bad4eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
index d5e64ae..cff4e1f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
@@ -88,7 +88,7 @@ public class TestCheckPointForSecurityTokens {
       for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
         EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
         assertTrue(log.isInProgress());
-        log.validateLog(Long.MAX_VALUE);
+        log.scanLog(Long.MAX_VALUE, true);
         long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
         assertEquals("In-progress log " + log + " should have 5 transactions",
                     5, numTransactions);
@@ -105,7 +105,7 @@ public class TestCheckPointForSecurityTokens {
       for (StorageDirectory sd : nn.getFSImage().getStorage().dirIterable(null)) {
         EditLogFile log = FSImageTestUtil.findLatestEditsLog(sd);
         assertTrue(log.isInProgress());
-        log.validateLog(Long.MAX_VALUE);
+        log.scanLog(Long.MAX_VALUE, true);
         long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
         assertEquals("In-progress log " + log + " should only have START txn",
             1, numTransactions);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bad4eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 0495860..7bb39a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -1229,7 +1229,7 @@ public class TestEditLog {
 
     for (EditLogInputStream edits : editStreams) {
       FSEditLogLoader.EditLogValidation val =
-          FSEditLogLoader.validateEditLog(edits, Long.MAX_VALUE);
+          FSEditLogLoader.scanEditLog(edits, Long.MAX_VALUE);
       long read = (val.getEndTxId() - edits.getFirstTxId()) + 1;
       LOG.info("Loading edits " + edits + " read " + read);
       assertEquals(startTxId, edits.getFirstTxId());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53bad4eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 3c3423a..47a60b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -319,7 +319,7 @@ public class TestFSEditLogLoader {
       rwf.close();
     }
     EditLogValidation validation =
-        EditLogFileInputStream.validateEditLog(logFile, Long.MAX_VALUE);
+        EditLogFileInputStream.scanEditLog(logFile, Long.MAX_VALUE, true);
     assertTrue(validation.hasCorruptHeader());
   }
 
@@ -334,7 +334,7 @@ public class TestFSEditLogLoader {
     File logFileBak = new File(testDir, logFile.getName() + ".bak");
     Files.copy(logFile, logFileBak);
     EditLogValidation validation =
-        EditLogFileInputStream.validateEditLog(logFile, Long.MAX_VALUE);
+        EditLogFileInputStream.scanEditLog(logFile, Long.MAX_VALUE, true);
     assertTrue(!validation.hasCorruptHeader());
     // We expect that there will be an OP_START_LOG_SEGMENT, followed by
     // NUM_TXNS opcodes, followed by an OP_END_LOG_SEGMENT.
@@ -347,8 +347,8 @@ public class TestFSEditLogLoader {
       // Restore backup, corrupt the txn opcode
       Files.copy(logFileBak, logFile);
       corruptByteInFile(logFile, txOffset);
-      validation = EditLogFileInputStream.validateEditLog(logFile,
-          Long.MAX_VALUE);
+      validation = EditLogFileInputStream.scanEditLog(logFile,
+          Long.MAX_VALUE, true);
       long expectedEndTxId = (txId == (NUM_TXNS + 1)) ?
           NUM_TXNS : (NUM_TXNS + 1);
       assertEquals("Failed when corrupting txn opcode at " + txOffset,
@@ -365,8 +365,8 @@ public class TestFSEditLogLoader {
       // Restore backup, corrupt the txn opcode
       Files.copy(logFileBak, logFile);
       truncateFile(logFile, txOffset);
-      validation = EditLogFileInputStream.validateEditLog(logFile,
-          Long.MAX_VALUE);
+      validation = EditLogFileInputStream.scanEditLog(logFile,
+          Long.MAX_VALUE, true);
       long expectedEndTxId = (txId == 0) ?
           HdfsServerConstants.INVALID_TXID : (txId - 1);
       assertEquals("Failed when corrupting txid " + txId + " txn opcode " +
@@ -384,7 +384,7 @@ public class TestFSEditLogLoader {
     // layout flags section.
     truncateFile(logFile, 8);
     EditLogValidation validation =
-        EditLogFileInputStream.validateEditLog(logFile, Long.MAX_VALUE);
+        EditLogFileInputStream.scanEditLog(logFile, Long.MAX_VALUE, true);
     assertTrue(!validation.hasCorruptHeader());
     assertEquals(HdfsServerConstants.INVALID_TXID, validation.getEndTxId());
   }
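
A note on the expectedEndTxId arithmetic in the two corruption loops
above: the log holds an OP_START_LOG_SEGMENT, NUM_TXNS ordinary
transactions, and an OP_END_LOG_SEGMENT, so a clean scan ends at txid
NUM_TXNS + 1. Corrupting a single opcode only makes the scanner resync
past that one transaction, so the end txid stays NUM_TXNS + 1 unless the
corrupted transaction was itself the last one, in which case it drops to
NUM_TXNS. Truncation, by contrast, discards everything from the damaged
offset onward, so the end txid falls back to txId - 1, or INVALID_TXID
when not even the first op survives.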


[15/50] [abbrv] hadoop git commit: HDFS-6763. Initialize file system-wide quota once on transitioning to active. Contributed by Kihwal Lee

Posted by ec...@apache.org.
HDFS-6763. Initialize file system-wide quota once on transitioning to active. Contributed by Kihwal Lee


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a40342b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a40342b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a40342b0

Branch: refs/heads/HADOOP-11890
Commit: a40342b0dab1f9137ae4b3679a5aca7f2a57d23d
Parents: 7b5b2c5
Author: Kihwal Lee <ki...@apache.org>
Authored: Thu Sep 10 09:16:29 2015 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Thu Sep 10 09:16:29 2015 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hdfs/server/namenode/BackupImage.java       |   8 +-
 .../hdfs/server/namenode/FSDirectory.java       | 128 +++++++++++++++++++
 .../hadoop/hdfs/server/namenode/FSImage.java    | 126 ------------------
 .../hdfs/server/namenode/FSNamesystem.java      |   2 +
 .../namenode/TestDiskspaceQuotaUpdate.java      |   9 +-
 .../namenode/TestFSImageWithSnapshot.java       |   3 +-
 7 files changed, 138 insertions(+), 141 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a40342b0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 445c50f..e241460 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -914,6 +914,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8974. Convert docs in xdoc format to markdown.
     (Masatake Iwasaki via aajisaka)
 
+    HDFS-6763. Initialize file system-wide quota once on transitioning to active
+    (kihwal)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a40342b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
index 8aee0bb..c6ae0d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
@@ -24,7 +24,6 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -94,9 +93,6 @@ public class BackupImage extends FSImage {
     super(conf);
     storage.setDisablePreUpgradableLayoutCheck(true);
     bnState = BNState.DROP_UNTIL_NEXT_ROLL;
-    quotaInitThreads = conf.getInt(
-        DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT);
   }
 
   synchronized FSNamesystem getNamesystem() {
@@ -222,9 +218,7 @@ public class BackupImage extends FSImage {
       }
       lastAppliedTxId = logLoader.getLastAppliedTxId();
 
-      FSImage.updateCountForQuota(
-          getNamesystem().dir.getBlockStoragePolicySuite(),
-          getNamesystem().dir.rootDir, quotaInitThreads);
+      getNamesystem().dir.updateCountForQuota();
     } finally {
       backupInputStream.clear();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a40342b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 8c74e48..e25e0e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -57,8 +57,10 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo.UpdatedReplicationInfo;
 import org.apache.hadoop.hdfs.util.ByteArray;
 import org.apache.hadoop.hdfs.util.EnumCounters;
+import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -68,6 +70,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.RecursiveAction;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
@@ -138,6 +142,7 @@ public class FSDirectory implements Closeable {
   private final long contentSleepMicroSec;
   private final INodeMap inodeMap; // Synchronized by dirLock
   private long yieldCount = 0; // keep track of lock yield count.
+  private int quotaInitThreads;
 
   private final int inodeXAttrsLimit; //inode xattrs max limit
 
@@ -312,6 +317,10 @@ public class FSDirectory implements Closeable {
     namesystem = ns;
     this.editLog = ns.getEditLog();
     ezManager = new EncryptionZoneManager(this, conf);
+
+    this.quotaInitThreads = conf.getInt(
+        DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_KEY,
+        DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT);
   }
     
   FSNamesystem getFSNamesystem() {
@@ -503,6 +512,125 @@ public class FSDirectory implements Closeable {
     }
   }
 
+  /**
+   * Update the count of each directory with quota in the namespace.
+   * A directory's count is defined as the total number of inodes in the tree
+   * rooted at the directory.
+   *
+   * This is an update of existing state of the filesystem and does not
+   * throw QuotaExceededException.
+   */
+  void updateCountForQuota(int initThreads) {
+    writeLock();
+    try {
+      int threads = (initThreads < 1) ? 1 : initThreads;
+      LOG.info("Initializing quota with " + threads + " thread(s)");
+      long start = Time.now();
+      QuotaCounts counts = new QuotaCounts.Builder().build();
+      ForkJoinPool p = new ForkJoinPool(threads);
+      RecursiveAction task = new InitQuotaTask(getBlockStoragePolicySuite(),
+          rootDir.getStoragePolicyID(), rootDir, counts);
+      p.execute(task);
+      task.join();
+      p.shutdown();
+      LOG.info("Quota initialization completed in " + (Time.now() - start) +
+          " milliseconds\n" + counts);
+    } finally {
+      writeUnlock();
+    }
+  }
+
+  void updateCountForQuota() {
+    updateCountForQuota(quotaInitThreads);
+  }
+
+  /**
+   * parallel initialization using fork-join.
+   */
+  private static class InitQuotaTask extends RecursiveAction {
+    private final INodeDirectory dir;
+    private final QuotaCounts counts;
+    private final BlockStoragePolicySuite bsps;
+    private final byte blockStoragePolicyId;
+
+    public InitQuotaTask(BlockStoragePolicySuite bsps,
+        byte blockStoragePolicyId, INodeDirectory dir, QuotaCounts counts) {
+      this.dir = dir;
+      this.counts = counts;
+      this.bsps = bsps;
+      this.blockStoragePolicyId = blockStoragePolicyId;
+    }
+
+    public void compute() {
+      QuotaCounts myCounts =  new QuotaCounts.Builder().build();
+      dir.computeQuotaUsage4CurrentDirectory(bsps, blockStoragePolicyId,
+          myCounts);
+
+      ReadOnlyList<INode> children =
+          dir.getChildrenList(CURRENT_STATE_ID);
+
+      if (children.size() > 0) {
+        List<InitQuotaTask> subtasks = new ArrayList<InitQuotaTask>();
+        for (INode child : children) {
+          final byte childPolicyId =
+              child.getStoragePolicyIDForQuota(blockStoragePolicyId);
+          if (child.isDirectory()) {
+            subtasks.add(new InitQuotaTask(bsps, childPolicyId,
+                child.asDirectory(), myCounts));
+          } else {
+            // file or symlink. count using the local counts variable
+            myCounts.add(child.computeQuotaUsage(bsps, childPolicyId, false,
+                CURRENT_STATE_ID));
+          }
+        }
+        // invoke and wait for completion
+        invokeAll(subtasks);
+      }
+
+      if (dir.isQuotaSet()) {
+        // check if quota is violated. It indicates a software bug.
+        final QuotaCounts q = dir.getQuotaCounts();
+
+        final long nsConsumed = myCounts.getNameSpace();
+        final long nsQuota = q.getNameSpace();
+        if (Quota.isViolated(nsQuota, nsConsumed)) {
+          LOG.warn("Namespace quota violation in image for "
+              + dir.getFullPathName()
+              + " quota = " + nsQuota + " < consumed = " + nsConsumed);
+        }
+
+        final long ssConsumed = myCounts.getStorageSpace();
+        final long ssQuota = q.getStorageSpace();
+        if (Quota.isViolated(ssQuota, ssConsumed)) {
+          LOG.warn("Storagespace quota violation in image for "
+              + dir.getFullPathName()
+              + " quota = " + ssQuota + " < consumed = " + ssConsumed);
+        }
+
+        final EnumCounters<StorageType> tsConsumed = myCounts.getTypeSpaces();
+        for (StorageType t : StorageType.getTypesSupportingQuota()) {
+          final long typeSpace = tsConsumed.get(t);
+          final long typeQuota = q.getTypeSpaces().get(t);
+          if (Quota.isViolated(typeQuota, typeSpace)) {
+            LOG.warn("Storage type quota violation in image for "
+                + dir.getFullPathName()
+                + " type = " + t.toString() + " quota = "
+                + typeQuota + " < consumed " + typeSpace);
+          }
+        }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Setting quota for " + dir + "\n" + myCounts);
+        }
+        dir.getDirectoryWithQuotaFeature().setSpaceConsumed(nsConsumed,
+            ssConsumed, tsConsumed);
+      }
+
+      synchronized(counts) {
+        counts.add(myCounts);
+      }
+    }
+  }
+
   /** Updates namespace, storagespace and typespaces consumed for all
    * directories until the parent directory of file represented by path.
    *
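
InitQuotaTask above is a textbook ForkJoinPool RecursiveAction: each
directory counts its own usage, files and symlinks are counted locally,
child directories become subtasks run via invokeAll(), and every task
folds its subtotal into a shared accumulator under synchronization. A
self-contained toy version of the same pattern (all names here are
illustrative, not HDFS classes):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveAction;

    class Node {
      final List<Node> children = new ArrayList<>();
    }

    class CountTask extends RecursiveAction {
      private final Node node;
      private final long[] total;  // shared accumulator

      CountTask(Node node, long[] total) {
        this.node = node;
        this.total = total;
      }

      @Override
      protected void compute() {
        long mine = 1;  // count this node itself
        List<CountTask> subtasks = new ArrayList<CountTask>();
        for (Node child : node.children) {
          if (child.children.isEmpty()) {
            mine++;  // leaves counted locally, like files/symlinks above
          } else {
            subtasks.add(new CountTask(child, total));  // subtrees forked
          }
        }
        invokeAll(subtasks);  // fork and wait, as InitQuotaTask does
        synchronized (total) {
          total[0] += mine;   // merge, mirroring synchronized(counts)
        }
      }
    }

Driven the same way updateCountForQuota() drives the real task:

    long[] total = new long[1];
    ForkJoinPool pool = new ForkJoinPool(4);
    CountTask task = new CountTask(root, total);  // root built elsewhere
    pool.execute(task);
    task.join();
    pool.shutdown();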

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a40342b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 0dd007d..93dc097 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -27,8 +27,6 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
-import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.RecursiveAction;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -42,12 +40,10 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
@@ -61,7 +57,6 @@ import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
@@ -70,9 +65,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.util.Canceler;
-import org.apache.hadoop.hdfs.util.EnumCounters;
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
-import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.util.Time;
 
@@ -147,12 +140,7 @@ public class FSImage implements Closeable {
       storage.setRestoreFailedStorage(true);
     }
 
-    this.quotaInitThreads = conf.getInt(
-        DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_KEY,
-        DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT);
-
     this.editLog = new FSEditLog(conf, storage, editsDirs);
-    
     archivalManager = new NNStorageRetentionManager(conf, storage, editLog);
   }
  
@@ -853,126 +841,12 @@ public class FSImage implements Closeable {
       }
     } finally {
       FSEditLog.closeAllStreams(editStreams);
-      // update the counts
-      updateCountForQuota(target.getBlockManager().getStoragePolicySuite(),
-          target.dir.rootDir, quotaInitThreads);
     }
     prog.endPhase(Phase.LOADING_EDITS);
     return lastAppliedTxId - prevLastAppliedTxId;
   }
 
   /**
-   * Update the count of each directory with quota in the namespace.
-   * A directory's count is defined as the total number inodes in the tree
-   * rooted at the directory.
-   * 
-   * This is an update of existing state of the filesystem and does not
-   * throw QuotaExceededException.
-   */
-  static void updateCountForQuota(BlockStoragePolicySuite bsps,
-      INodeDirectory root, int threads) {
-    threads = (threads < 1) ? 1 : threads;
-    LOG.info("Initializing quota with " + threads + " thread(s)");
-    long start = Time.now();
-    QuotaCounts counts = new QuotaCounts.Builder().build();
-    ForkJoinPool p = new ForkJoinPool(threads);
-    RecursiveAction task = new InitQuotaTask(bsps, root.getStoragePolicyID(),
-        root, counts);
-    p.execute(task);
-    task.join();
-    p.shutdown();
-    LOG.info("Quota initialization completed in " + (Time.now() - start) +
-        " milliseconds\n" + counts);
-  }
-
-  /**
-   * parallel initialization using fork-join.
-   */
-  private static class InitQuotaTask extends RecursiveAction {
-    private final INodeDirectory dir;
-    private final QuotaCounts counts;
-    private final BlockStoragePolicySuite bsps;
-    private final byte blockStoragePolicyId;
-
-    public InitQuotaTask(BlockStoragePolicySuite bsps,
-        byte blockStoragePolicyId, INodeDirectory dir, QuotaCounts counts) {
-      this.dir = dir;
-      this.counts = counts;
-      this.bsps = bsps;
-      this.blockStoragePolicyId = blockStoragePolicyId;
-    }
-
-    public void compute() {
-      QuotaCounts myCounts =  new QuotaCounts.Builder().build();
-      dir.computeQuotaUsage4CurrentDirectory(bsps, blockStoragePolicyId,
-          myCounts);
-
-      ReadOnlyList<INode> children =
-          dir.getChildrenList(Snapshot.CURRENT_STATE_ID);
-
-      if (children.size() > 0) {
-        List<InitQuotaTask> subtasks = new ArrayList<InitQuotaTask>();
-        for (INode child : children) {
-          final byte childPolicyId =
-              child.getStoragePolicyIDForQuota(blockStoragePolicyId);
-          if (child.isDirectory()) {
-            subtasks.add(new InitQuotaTask(bsps, childPolicyId,
-                child.asDirectory(), myCounts));
-          } else {
-            // file or symlink. count using the local counts variable
-            myCounts.add(child.computeQuotaUsage(bsps, childPolicyId, false,
-                Snapshot.CURRENT_STATE_ID));
-          }
-        }
-        // invoke and wait for completion
-        invokeAll(subtasks);
-      }
-
-      if (dir.isQuotaSet()) {
-        // check if quota is violated. It indicates a software bug.
-        final QuotaCounts q = dir.getQuotaCounts();
-
-        final long nsConsumed = myCounts.getNameSpace();
-        final long nsQuota = q.getNameSpace();
-        if (Quota.isViolated(nsQuota, nsConsumed)) {
-          LOG.warn("Namespace quota violation in image for "
-              + dir.getFullPathName()
-              + " quota = " + nsQuota + " < consumed = " + nsConsumed);
-        }
-
-        final long ssConsumed = myCounts.getStorageSpace();
-        final long ssQuota = q.getStorageSpace();
-        if (Quota.isViolated(ssQuota, ssConsumed)) {
-          LOG.warn("Storagespace quota violation in image for "
-              + dir.getFullPathName()
-              + " quota = " + ssQuota + " < consumed = " + ssConsumed);
-        }
-
-        final EnumCounters<StorageType> tsConsumed = myCounts.getTypeSpaces();
-        for (StorageType t : StorageType.getTypesSupportingQuota()) {
-          final long typeSpace = tsConsumed.get(t);
-          final long typeQuota = q.getTypeSpaces().get(t);
-          if (Quota.isViolated(typeQuota, typeSpace)) {
-            LOG.warn("Storage type quota violation in image for "
-                + dir.getFullPathName()
-                + " type = " + t.toString() + " quota = "
-                + typeQuota + " < consumed " + typeSpace);
-          }
-        }
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Setting quota for " + dir + "\n" + myCounts);
-        }
-        dir.getDirectoryWithQuotaFeature().setSpaceConsumed(nsConsumed,
-            ssConsumed, tsConsumed);
-      }
-
-      synchronized(counts) {
-        counts.add(myCounts);
-      }
-    }
-  }
-
-  /**
    * Load the image namespace from the given image file, verifying
    * it against the MD5 sum stored in its associated .md5 file.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a40342b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7d766b9..328c29d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1117,6 +1117,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         getFSImage().editLog.openForWrite(getEffectiveLayoutVersion());
       }
 
+      // Initialize the quota.
+      dir.updateCountForQuota();
       // Enable quota checks.
       dir.enableQuotaChecks();
       if (haEnabled) {
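
Together with the FSImage change above, which dropped the
updateCountForQuota() call from the finally block of loadEdits(), quota
usage is now recomputed exactly once, on the transition to active,
instead of after every batch of edits. The ordering matters: counts are
refreshed before enforcement is re-enabled. A simplified outline of the
sequence (surrounding conditionals omitted):

    // on becoming active (simplified):
    getFSImage().editLog.openForWrite(getEffectiveLayoutVersion());
    dir.updateCountForQuota();  // one full recount, under the FSDirectory write lock
    dir.enableQuotaChecks();    // enforcement resumes against fresh counts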

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a40342b0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
index 0765a22..cf64638 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
@@ -339,16 +339,13 @@ public class TestDiskspaceQuotaUpdate {
     HashMap<String, Long> dsMap = new HashMap<String, Long>();
     scanDirsWithQuota(root, nsMap, dsMap, false);
 
-    FSImage.updateCountForQuota(
-        fsdir.getBlockManager().getStoragePolicySuite(), root, 1);
+    fsdir.updateCountForQuota(1);
     scanDirsWithQuota(root, nsMap, dsMap, true);
 
-    FSImage.updateCountForQuota(
-        fsdir.getBlockManager().getStoragePolicySuite(), root, 2);
+    fsdir.updateCountForQuota(2);
     scanDirsWithQuota(root, nsMap, dsMap, true);
 
-    FSImage.updateCountForQuota(
-        fsdir.getBlockManager().getStoragePolicySuite(), root, 4);
+    fsdir.updateCountForQuota(4);
     scanDirsWithQuota(root, nsMap, dsMap, true);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a40342b0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
index 61b7f7c..1ff18a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
@@ -158,8 +158,7 @@ public class TestFSImageWithSnapshot {
     fsn.getFSDirectory().writeLock();
     try {
       loader.load(imageFile, false);
-      FSImage.updateCountForQuota(fsn.getBlockManager().getStoragePolicySuite(),
-          INodeDirectory.valueOf(fsn.getFSDirectory().getINode("/"), "/"), 4);
+      fsn.getFSDirectory().updateCountForQuota();
     } finally {
       fsn.getFSDirectory().writeUnlock();
       fsn.writeUnlock();


[10/50] [abbrv] hadoop git commit: MAPREDUCE-6415. Create a tool to combine aggregated logs into HAR files. (Robert Kanter via kasha)

Posted by ec...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/119cc75e/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
new file mode 100644
index 0000000..c8ff201
--- /dev/null
+++ b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
@@ -0,0 +1,293 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.MiniYARNCluster;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Random;
+
+public class TestHadoopArchiveLogs {
+
+  private static final long CLUSTER_TIMESTAMP = System.currentTimeMillis();
+  private static final int FILE_SIZE_INCREMENT = 4096;
+  private static final byte[] DUMMY_DATA = new byte[FILE_SIZE_INCREMENT];
+  static {
+    new Random().nextBytes(DUMMY_DATA);
+  }
+
+  @Test(timeout = 10000)
+  public void testCheckFiles() throws Exception {
+    Configuration conf = new Configuration();
+    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
+    FileSystem fs = FileSystem.getLocal(conf);
+    Path rootLogDir = new Path("target", "logs");
+    String suffix = "logs";
+    Path logDir = new Path(rootLogDir,
+        new Path(System.getProperty("user.name"), suffix));
+    fs.mkdirs(logDir);
+
+    Assert.assertEquals(0, hal.eligibleApplications.size());
+    ApplicationReport app1 = createAppReport(1);  // no files found
+    ApplicationReport app2 = createAppReport(2);  // too few files
+    Path app2Path = new Path(logDir, app2.getApplicationId().toString());
+    fs.mkdirs(app2Path);
+    createFile(fs, new Path(app2Path, "file1"), 1);
+    hal.minNumLogFiles = 2;
+    ApplicationReport app3 = createAppReport(3);  // too large
+    Path app3Path = new Path(logDir, app3.getApplicationId().toString());
+    fs.mkdirs(app3Path);
+    createFile(fs, new Path(app3Path, "file1"), 2);
+    createFile(fs, new Path(app3Path, "file2"), 5);
+    hal.maxTotalLogsSize = FILE_SIZE_INCREMENT * 6;
+    ApplicationReport app4 = createAppReport(4);  // has har already
+    Path app4Path = new Path(logDir, app4.getApplicationId().toString());
+    fs.mkdirs(app4Path);
+    createFile(fs, new Path(app4Path, app4.getApplicationId() + ".har"), 1);
+    ApplicationReport app5 = createAppReport(5);  // just right
+    Path app5Path = new Path(logDir, app5.getApplicationId().toString());
+    fs.mkdirs(app5Path);
+    createFile(fs, new Path(app5Path, "file1"), 2);
+    createFile(fs, new Path(app5Path, "file2"), 3);
+    hal.eligibleApplications.add(app1);
+    hal.eligibleApplications.add(app2);
+    hal.eligibleApplications.add(app3);
+    hal.eligibleApplications.add(app4);
+    hal.eligibleApplications.add(app5);
+
+    hal.checkFiles(fs, rootLogDir, suffix);
+    Assert.assertEquals(1, hal.eligibleApplications.size());
+    Assert.assertEquals(app5, hal.eligibleApplications.iterator().next());
+  }
+
+  @Test(timeout = 10000)
+  public void testCheckMaxEligible() throws Exception {
+    Configuration conf = new Configuration();
+    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
+    ApplicationReport app1 = createAppReport(1);
+    app1.setFinishTime(CLUSTER_TIMESTAMP - 5);
+    ApplicationReport app2 = createAppReport(2);
+    app2.setFinishTime(CLUSTER_TIMESTAMP - 10);
+    ApplicationReport app3 = createAppReport(3);
+    app3.setFinishTime(CLUSTER_TIMESTAMP + 5);
+    ApplicationReport app4 = createAppReport(4);
+    app4.setFinishTime(CLUSTER_TIMESTAMP + 10);
+    ApplicationReport app5 = createAppReport(5);
+    app5.setFinishTime(CLUSTER_TIMESTAMP);
+    Assert.assertEquals(0, hal.eligibleApplications.size());
+    hal.eligibleApplications.add(app1);
+    hal.eligibleApplications.add(app2);
+    hal.eligibleApplications.add(app3);
+    hal.eligibleApplications.add(app4);
+    hal.eligibleApplications.add(app5);
+    hal.maxEligible = -1;
+    hal.checkMaxEligible();
+    Assert.assertEquals(5, hal.eligibleApplications.size());
+
+    hal.maxEligible = 4;
+    hal.checkMaxEligible();
+    Assert.assertEquals(4, hal.eligibleApplications.size());
+    Assert.assertFalse(hal.eligibleApplications.contains(app4));
+
+    hal.maxEligible = 3;
+    hal.checkMaxEligible();
+    Assert.assertEquals(3, hal.eligibleApplications.size());
+    Assert.assertFalse(hal.eligibleApplications.contains(app3));
+
+    hal.maxEligible = 2;
+    hal.checkMaxEligible();
+    Assert.assertEquals(2, hal.eligibleApplications.size());
+    Assert.assertFalse(hal.eligibleApplications.contains(app5));
+
+    hal.maxEligible = 1;
+    hal.checkMaxEligible();
+    Assert.assertEquals(1, hal.eligibleApplications.size());
+    Assert.assertFalse(hal.eligibleApplications.contains(app1));
+  }
+
+  @Test(timeout = 10000)
+  public void testFindAggregatedApps() throws Exception {
+    MiniYARNCluster yarnCluster = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
+      yarnCluster =
+          new MiniYARNCluster(TestHadoopArchiveLogs.class.getSimpleName(), 1,
+              1, 1, 1);
+      yarnCluster.init(conf);
+      yarnCluster.start();
+      conf = yarnCluster.getConfig();
+
+      RMContext rmContext = yarnCluster.getResourceManager().getRMContext();
+      RMAppImpl app1 = (RMAppImpl)createRMApp(1, conf, rmContext,
+          LogAggregationStatus.DISABLED);
+      RMAppImpl app2 = (RMAppImpl)createRMApp(2, conf, rmContext,
+          LogAggregationStatus.FAILED);
+      RMAppImpl app3 = (RMAppImpl)createRMApp(3, conf, rmContext,
+          LogAggregationStatus.NOT_START);
+      RMAppImpl app4 = (RMAppImpl)createRMApp(4, conf, rmContext,
+          LogAggregationStatus.SUCCEEDED);
+      RMAppImpl app5 = (RMAppImpl)createRMApp(5, conf, rmContext,
+          LogAggregationStatus.RUNNING);
+      RMAppImpl app6 = (RMAppImpl)createRMApp(6, conf, rmContext,
+          LogAggregationStatus.RUNNING_WITH_FAILURE);
+      RMAppImpl app7 = (RMAppImpl)createRMApp(7, conf, rmContext,
+          LogAggregationStatus.TIME_OUT);
+      rmContext.getRMApps().put(app1.getApplicationId(), app1);
+      rmContext.getRMApps().put(app2.getApplicationId(), app2);
+      rmContext.getRMApps().put(app3.getApplicationId(), app3);
+      rmContext.getRMApps().put(app4.getApplicationId(), app4);
+      rmContext.getRMApps().put(app5.getApplicationId(), app5);
+      rmContext.getRMApps().put(app6.getApplicationId(), app6);
+      rmContext.getRMApps().put(app7.getApplicationId(), app7);
+
+      HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
+      Assert.assertEquals(0, hal.eligibleApplications.size());
+      hal.findAggregatedApps();
+      Assert.assertEquals(2, hal.eligibleApplications.size());
+    } finally {
+      if (yarnCluster != null) {
+        yarnCluster.stop();
+      }
+    }
+  }
+
+  @Test(timeout = 10000)
+  public void testGenerateScript() throws Exception {
+    Configuration conf = new Configuration();
+    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
+    ApplicationReport app1 = createAppReport(1);
+    ApplicationReport app2 = createAppReport(2);
+    hal.eligibleApplications.add(app1);
+    hal.eligibleApplications.add(app2);
+
+    File localScript = new File("target", "script.sh");
+    Path workingDir = new Path("/tmp", "working");
+    Path remoteRootLogDir = new Path("/tmp", "logs");
+    String suffix = "logs";
+    localScript.delete();
+    Assert.assertFalse(localScript.exists());
+    hal.generateScript(localScript, workingDir, remoteRootLogDir, suffix);
+    Assert.assertTrue(localScript.exists());
+    String script = IOUtils.toString(localScript.toURI());
+    String[] lines = script.split(System.lineSeparator());
+    Assert.assertEquals(16, lines.length);
+    Assert.assertEquals("#!/bin/bash", lines[0]);
+    Assert.assertEquals("set -e", lines[1]);
+    Assert.assertEquals("set -x", lines[2]);
+    Assert.assertEquals("if [ \"$YARN_SHELL_ID\" == \"1\" ]; then", lines[3]);
+    if (lines[4].contains(app1.getApplicationId().toString())) {
+      Assert.assertEquals("\tappId=\"" + app1.getApplicationId().toString()
+          + "\"", lines[4]);
+      Assert.assertEquals("\tappId=\"" + app2.getApplicationId().toString()
+          + "\"", lines[7]);
+    } else {
+      Assert.assertEquals("\tappId=\"" + app2.getApplicationId().toString()
+          + "\"", lines[4]);
+      Assert.assertEquals("\tappId=\"" + app1.getApplicationId().toString()
+          + "\"", lines[7]);
+    }
+    Assert.assertEquals("\tuser=\"" + System.getProperty("user.name") + "\"",
+        lines[5]);
+    Assert.assertEquals("elif [ \"$YARN_SHELL_ID\" == \"2\" ]; then", lines[6]);
+    Assert.assertEquals("\tuser=\"" + System.getProperty("user.name") + "\"",
+        lines[8]);
+    Assert.assertEquals("else", lines[9]);
+    Assert.assertEquals("\techo \"Unknown Mapping!\"", lines[10]);
+    Assert.assertEquals("\texit 1", lines[11]);
+    Assert.assertEquals("fi", lines[12]);
+    Assert.assertEquals("export HADOOP_CLIENT_OPTS=\"-Xmx1024m\"", lines[13]);
+    Assert.assertTrue(lines[14].startsWith("export HADOOP_CLASSPATH="));
+    Assert.assertEquals("\"$HADOOP_HOME\"/bin/hadoop org.apache.hadoop.tools." +
+        "HadoopArchiveLogsRunner -appId \"$appId\" -user \"$user\" -workingDir "
+        + workingDir.toString() + " -remoteRootLogDir " +
+        remoteRootLogDir.toString() + " -suffix " + suffix, lines[15]);
+  }
+
+  private static ApplicationReport createAppReport(int id) {
+    ApplicationId appId = ApplicationId.newInstance(CLUSTER_TIMESTAMP, id);
+    return ApplicationReport.newInstance(
+        appId,
+        ApplicationAttemptId.newInstance(appId, 1),
+        System.getProperty("user.name"),
+        null, null, null, 0, null, YarnApplicationState.FINISHED, null,
+        null, 0L, 0L, FinalApplicationStatus.SUCCEEDED, null, null, 100f,
+        null, null);
+  }
+
+  private static void createFile(FileSystem fs, Path p, long sizeMultiple)
+      throws IOException {
+    FSDataOutputStream out = null;
+    try {
+      out = fs.create(p);
+      for (int i = 0 ; i < sizeMultiple; i++) {
+        out.write(DUMMY_DATA);
+      }
+    } finally {
+      if (out != null) {
+        out.close();
+      }
+    }
+  }
+
+  private static RMApp createRMApp(int id, Configuration conf, RMContext rmContext,
+       final LogAggregationStatus aggStatus) {
+    ApplicationId appId = ApplicationId.newInstance(CLUSTER_TIMESTAMP, id);
+    ApplicationSubmissionContext submissionContext =
+        ApplicationSubmissionContext.newInstance(appId, "test", "default",
+            Priority.newInstance(0), null, false, true,
+            2, Resource.newInstance(10, 2), "test");
+    return new RMAppImpl(appId, rmContext, conf, "test",
+        System.getProperty("user.name"), "default", submissionContext,
+        rmContext.getScheduler(),
+        rmContext.getApplicationMasterService(),
+        System.currentTimeMillis(), "test",
+        null, null) {
+      @Override
+      public ApplicationReport createAndGetApplicationReport(
+          String clientUserName, boolean allowAccess) {
+        ApplicationReport report =
+            super.createAndGetApplicationReport(clientUserName, allowAccess);
+        report.setLogAggregationStatus(aggStatus);
+        return report;
+      }
+    };
+  }
+}
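
For reference, the assertions in testGenerateScript() pin down the
generated script almost line by line. Reconstructed from them (the
application ids, user, and classpath below are placeholders, not literal
output), it looks roughly like:

    #!/bin/bash
    set -e
    set -x
    if [ "$YARN_SHELL_ID" == "1" ]; then
        appId="application_<timestamp>_0001"
        user="<user.name>"
    elif [ "$YARN_SHELL_ID" == "2" ]; then
        appId="application_<timestamp>_0002"
        user="<user.name>"
    else
        echo "Unknown Mapping!"
        exit 1
    fi
    export HADOOP_CLIENT_OPTS="-Xmx1024m"
    export HADOOP_CLASSPATH=<classpath>
    "$HADOOP_HOME"/bin/hadoop org.apache.hadoop.tools.HadoopArchiveLogsRunner -appId "$appId" -user "$user" -workingDir /tmp/working -remoteRootLogDir /tmp/logs -suffix logs

One shell per eligible application is selected by $YARN_SHELL_ID, which
is why the test accepts the two appId assignments in either order.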

http://git-wip-us.apache.org/repos/asf/hadoop/blob/119cc75e/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java
new file mode 100644
index 0000000..af66f14
--- /dev/null
+++ b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java
@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.tools;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.HarFs;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.MiniYARNCluster;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.Random;
+
+import static org.junit.Assert.assertEquals;
+
+public class TestHadoopArchiveLogsRunner {
+
+  private static final int FILE_SIZE_INCREMENT = 4096;
+  private static final byte[] DUMMY_DATA = new byte[FILE_SIZE_INCREMENT];
+  static {
+    new Random().nextBytes(DUMMY_DATA);
+  }
+
+  @Test(timeout = 30000)
+  public void testHadoopArchiveLogs() throws Exception {
+    MiniYARNCluster yarnCluster = null;
+    MiniDFSCluster dfsCluster = null;
+    FileSystem fs = null;
+    try {
+      Configuration conf = new YarnConfiguration();
+      conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
+      conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
+      yarnCluster =
+          new MiniYARNCluster(TestHadoopArchiveLogsRunner.class.getSimpleName(),
+              1, 2, 1, 1);
+      yarnCluster.init(conf);
+      yarnCluster.start();
+      conf = yarnCluster.getConfig();
+      dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+
+      ApplicationId app1 =
+          ApplicationId.newInstance(System.currentTimeMillis(), 1);
+      fs = FileSystem.get(conf);
+      Path remoteRootLogDir = new Path(conf.get(
+          YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
+          YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
+      Path workingDir = new Path(remoteRootLogDir, "archive-logs-work");
+      String suffix = "logs";
+      Path logDir = new Path(remoteRootLogDir,
+          new Path(System.getProperty("user.name"), suffix));
+      fs.mkdirs(logDir);
+      Path app1Path = new Path(logDir, app1.toString());
+      fs.mkdirs(app1Path);
+      createFile(fs, new Path(app1Path, "log1"), 3);
+      createFile(fs, new Path(app1Path, "log2"), 4);
+      createFile(fs, new Path(app1Path, "log3"), 2);
+      FileStatus[] app1Files = fs.listStatus(app1Path);
+      Assert.assertEquals(3, app1Files.length);
+
+      String[] args = new String[]{
+          "-appId", app1.toString(),
+          "-user", System.getProperty("user.name"),
+          "-workingDir", workingDir.toString(),
+          "-remoteRootLogDir", remoteRootLogDir.toString(),
+          "-suffix", suffix};
+      final HadoopArchiveLogsRunner halr = new HadoopArchiveLogsRunner(conf);
+      assertEquals(0, ToolRunner.run(halr, args));
+
+      fs = FileSystem.get(conf);
+      app1Files = fs.listStatus(app1Path);
+      Assert.assertEquals(1, app1Files.length);
+      FileStatus harFile = app1Files[0];
+      Assert.assertEquals(app1.toString() + ".har", harFile.getPath().getName());
+      Path harPath = new Path("har:///" + harFile.getPath().toUri().getRawPath());
+      FileStatus[] harLogs = HarFs.get(harPath.toUri(), conf).listStatus(harPath);
+      Assert.assertEquals(3, harLogs.length);
+      Arrays.sort(harLogs, new Comparator<FileStatus>() {
+        @Override
+        public int compare(FileStatus o1, FileStatus o2) {
+          return o1.getPath().getName().compareTo(o2.getPath().getName());
+        }
+      });
+      Assert.assertEquals("log1", harLogs[0].getPath().getName());
+      Assert.assertEquals(3 * FILE_SIZE_INCREMENT, harLogs[0].getLen());
+      Assert.assertEquals("log2", harLogs[1].getPath().getName());
+      Assert.assertEquals(4 * FILE_SIZE_INCREMENT, harLogs[1].getLen());
+      Assert.assertEquals("log3", harLogs[2].getPath().getName());
+      Assert.assertEquals(2 * FILE_SIZE_INCREMENT, harLogs[2].getLen());
+      Assert.assertEquals(0, fs.listStatus(workingDir).length);
+    } finally {
+      if (yarnCluster != null) {
+        yarnCluster.stop();
+      }
+      if (fs != null) {
+        fs.close();
+      }
+      if (dfsCluster != null) {
+        dfsCluster.shutdown();
+      }
+    }
+  }
+
+  private static void createFile(FileSystem fs, Path p, long sizeMultiple)
+      throws IOException {
+    FSDataOutputStream out = null;
+    try {
+      out = fs.create(p);
+      for (int i = 0 ; i < sizeMultiple; i++) {
+        out.write(DUMMY_DATA);
+      }
+    } finally {
+      if (out != null) {
+        out.close();
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/119cc75e/hadoop-tools/hadoop-tools-dist/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml
index 540401d..e6c458f 100644
--- a/hadoop-tools/hadoop-tools-dist/pom.xml
+++ b/hadoop-tools/hadoop-tools-dist/pom.xml
@@ -52,6 +52,11 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-archive-logs</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-rumen</artifactId>
       <scope>compile</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/119cc75e/hadoop-tools/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml
index 5b35f46..0061bf0 100644
--- a/hadoop-tools/pom.xml
+++ b/hadoop-tools/pom.xml
@@ -34,6 +34,7 @@
     <module>hadoop-streaming</module>
     <module>hadoop-distcp</module>
     <module>hadoop-archives</module>
+    <module>hadoop-archive-logs</module>
     <module>hadoop-rumen</module>
     <module>hadoop-gridmix</module>
     <module>hadoop-datajoin</module>


[09/50] [abbrv] hadoop git commit: HDFS-8581. ContentSummary on / skips further counts on yielding lock (contributed by J.Andreina)

Posted by ec...@apache.org.
HDFS-8581. ContentSummary on / skips further counts on yielding lock (contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4014ce59
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4014ce59
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4014ce59

Branch: refs/heads/HADOOP-11890
Commit: 4014ce5990bff9b0ecb3d38a633d40eaf6cf07a7
Parents: 0f0e897
Author: Vinayakumar B <vi...@apache.org>
Authored: Thu Sep 10 00:08:19 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Thu Sep 10 00:08:19 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/namenode/INodeDirectory.java    |  2 +-
 .../java/org/apache/hadoop/hdfs/TestQuota.java  | 32 ++++++++++++++++++++
 3 files changed, 36 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4014ce59/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8edc389..bbb6066 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1326,6 +1326,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8939. Test(S)WebHdfsFileContextMainOperations failing on branch-2.
     (Chris Nauroth via jghoman)
 
+    HDFS-8581. ContentSummary on / skips further counts on yielding lock
+    (J.Andreina via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4014ce59/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 5c33c02..21fe313 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -660,7 +660,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
         continue;
       }
       // The locks were released and reacquired. Check parent first.
-      if (getParent() == null) {
+      if (!isRoot() && getParent() == null) {
         // Stop further counting and return whatever we have so far.
         break;
       }
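
The one-line fix above handles a special case of the lock-yielding
check: the root inode legitimately has no parent, so a null parent
cannot by itself mean "this directory was deleted while the lock was
released". Annotated, the new guard reads:

    // After the locks are reacquired, a null parent normally means this inode
    // was unlinked while we yielded, so counting stops with a partial summary.
    // The root has a null parent by construction, hence the isRoot() check:
    if (!isRoot() && getParent() == null) {
      break;  // stop further counting; return whatever we have so far
    }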

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4014ce59/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index e339049..00ff07f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -24,12 +24,14 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.ByteArrayOutputStream;
+import java.io.IOException;
 import java.io.OutputStream;
 import java.io.PrintStream;
 import java.security.PrivilegedExceptionAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
@@ -1007,4 +1009,34 @@ public class TestQuota {
     assertTrue(errOutput.contains(StorageType.getTypesSupportingQuota()
         .toString()));
   }
+
+  /**
+   * A file count on the root path should return the total number of files,
+   * even when one folder contains more files than "dfs.content-summary.limit".
+   */
+  @Test
+  public void testHugeFileCount() throws IOException {
+    MiniDFSCluster cluster = null;
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.content-summary.limit", 4);
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      for (int i = 1; i <= 5; i++) {
+        FSDataOutputStream out =
+            dfs.create(new Path("/Folder1/file" + i), (short) 1);
+        out.close();
+      }
+      FSDataOutputStream out = dfs.create(new Path("/Folder2/file6"), (short) 1);
+      out.close();
+      ContentSummary contentSummary = dfs.getContentSummary(new Path("/"));
+      assertEquals(6, contentSummary.getFileCount());
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+        cluster = null;
+      }
+    }
+  }
+
 }


[44/50] [abbrv] hadoop git commit: HDFS-9067. o.a.h.hdfs.server.datanode.fsdataset.impl.TestLazyWriter is failing in trunk (Contributed by Surendra Singh Lilhore)

Posted by ec...@apache.org.
HDFS-9067. o.a.h.hdfs.server.datanode.fsdataset.impl.TestLazyWriter is failing in trunk (Contributed by Surendra Singh Lilhore)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4405674
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4405674
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4405674

Branch: refs/heads/HADOOP-11890
Commit: a4405674919d14be89bc4da22db2f417b5ae6ac3
Parents: 5468baa
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue Sep 15 17:19:59 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Tue Sep 15 17:30:46 2015 +0530

----------------------------------------------------------------------
 .../hadoop/metrics2/impl/MetricsConfig.java     |  3 +
 .../hadoop/metrics2/impl/MetricsSystemImpl.java | 24 +++---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     | 19 +++++
 .../fsdataset/impl/LazyPersistTestCase.java     | 15 +---
 .../datanode/fsdataset/impl/TestLazyWriter.java |  1 +
 .../test/resources/hadoop-metrics2.properties   | 85 ++++++++++++++++++++
 7 files changed, 126 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4405674/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
index cbe60b5..58d2aa3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
@@ -58,6 +58,9 @@ class MetricsConfig extends SubsetConfiguration {
   static final String PERIOD_KEY = "period";
   static final int PERIOD_DEFAULT = 10; // seconds
 
+  // For testing: if set, this takes priority over the "period" setting.
+  static final String PERIOD_MILLIS_KEY = "periodMillis";
+
   static final String QUEUE_CAPACITY_KEY = "queue.capacity";
   static final int QUEUE_CAPACITY_DEFAULT = 1;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4405674/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
index 15914d6..513d6d7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
@@ -105,7 +105,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
   private Map<String, MetricsConfig> sourceConfigs, sinkConfigs;
   private boolean monitoring = false;
   private Timer timer;
-  private int period; // seconds
+  private long period; // milliseconds
   private long logicalTime; // number of timer invocations * period
   private ObjectName mbeanName;
   private boolean publishSelfMetrics = true;
@@ -262,7 +262,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
     checkNotNull(config, "config");
     MetricsConfig conf = sourceConfigs.get(name);
     MetricsSourceAdapter sa = new MetricsSourceAdapter(prefix, name, desc,
-        source, injectedTags, period * 1000L, conf != null ? conf
+        source, injectedTags, period, conf != null ? conf
             : config.subset(SOURCE_KEY));
     sources.put(name, sa);
     sa.start();
@@ -359,7 +359,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
       return;
     }
     logicalTime = 0;
-    long millis = period * 1000L;
+    long millis = period;
     timer = new Timer("Timer for '"+ prefix +"' metrics system", true);
     timer.scheduleAtFixedRate(new TimerTask() {
           @Override
@@ -371,7 +371,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
             }
           }
         }, millis, millis);
-    LOG.info("Scheduled snapshot period at "+ period +" second(s).");
+    LOG.info("Scheduled snapshot period at "+ (period/1000) +" second(s).");
   }
 
   synchronized void onTimerEvent() {
@@ -485,12 +485,15 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
 
   private synchronized void configureSinks() {
     sinkConfigs = config.getInstanceConfigs(SINK_KEY);
-    int confPeriod = 0;
+    long confPeriodMillis = 0;
     for (Entry<String, MetricsConfig> entry : sinkConfigs.entrySet()) {
       MetricsConfig conf = entry.getValue();
       int sinkPeriod = conf.getInt(PERIOD_KEY, PERIOD_DEFAULT);
-      confPeriod = confPeriod == 0 ? sinkPeriod
-                                   : ArithmeticUtils.gcd(confPeriod, sinkPeriod);
+      // Support configuring periodMillis for testing.
+      long sinkPeriodMillis =
+          conf.getLong(PERIOD_MILLIS_KEY, sinkPeriod * 1000L);
+      confPeriodMillis = confPeriodMillis == 0 ? sinkPeriodMillis
+          : ArithmeticUtils.gcd(confPeriodMillis, sinkPeriodMillis);
       String clsName = conf.getClassName("");
       if (clsName == null) continue;  // sink can be registered later on
       String sinkName = entry.getKey();
@@ -503,8 +506,9 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
         LOG.warn("Error creating sink '"+ sinkName +"'", e);
       }
     }
-    period = confPeriod > 0 ? confPeriod
-                            : config.getInt(PERIOD_KEY, PERIOD_DEFAULT);
+    long periodSec = config.getInt(PERIOD_KEY, PERIOD_DEFAULT);
+    period = confPeriodMillis > 0 ? confPeriodMillis
+        : config.getLong(PERIOD_MILLIS_KEY, periodSec * 1000);
   }
 
   static MetricsSinkAdapter newSink(String name, String desc, MetricsSink sink,
@@ -550,7 +554,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
   private void registerSystemSource() {
     MetricsConfig sysConf = sourceConfigs.get(MS_NAME);
     sysSource = new MetricsSourceAdapter(prefix, MS_STATS_NAME, MS_STATS_DESC,
-        MetricsAnnotations.makeSource(this), injectedTags, period * 1000L,
+        MetricsAnnotations.makeSource(this), injectedTags, period,
         sysConf == null ? config.subset(SOURCE_KEY) : sysConf);
     sysSource.start();
   }

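To see why configureSinks folds sink periods with a gcd, here is a hedged,
standalone sketch (hypothetical class name; the patch itself uses
ArithmeticUtils.gcd): the gcd of all sink periods is the largest timer period
for which every sink still fires on an integer multiple of the timer tick.

  import java.math.BigInteger;

  // Hypothetical stand-in for the gcd folding in configureSinks above.
  class PeriodFolding {
    static long effectivePeriodMillis(long[] sinkPeriodsMillis,
                                      long defaultMillis) {
      long folded = 0;
      for (long p : sinkPeriodsMillis) {
        folded = (folded == 0) ? p
            : BigInteger.valueOf(folded).gcd(BigInteger.valueOf(p))
                .longValue();
      }
      return folded > 0 ? folded : defaultMillis;
    }
  }

  // e.g. sinks at 10s and 4s fold to a 2s timer:
  // effectivePeriodMillis(new long[] {10_000L, 4_000L}, 10_000L) == 2_000L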
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4405674/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f0bc026..c49432d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1338,6 +1338,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9069. TestNameNodeMetricsLogger failing -port in use.
     (stevel)
 
+    HDFS-9067. o.a.h.hdfs.server.datanode.fsdataset.impl.TestLazyWriter
+    is failing in trunk. (Surendra Singh Lilhore via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4405674/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index a12db13..3082544 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -68,6 +68,7 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang.UnhandledException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -133,6 +134,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.hdfs.tools.JMXGet;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.NetUtils;
@@ -1856,4 +1858,21 @@ public class DFSTestUtil {
     }
   }
 
+  public static void waitForMetric(final JMXGet jmx, final String metricName, final int expectedValue)
+      throws TimeoutException, InterruptedException {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        try {
+          final int currentValue = Integer.parseInt(jmx.getValue(metricName));
+          LOG.info("Waiting for " + metricName +
+                       " to reach value " + expectedValue +
+                       ", current value = " + currentValue);
+          return currentValue == expectedValue;
+        } catch (Exception e) {
+          throw new UnhandledException("Test failed due to unexpected exception", e);
+        }
+      }
+    }, 1000, Integer.MAX_VALUE);
+  }
 }

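A hedged usage sketch for the new DFSTestUtil#waitForMetric helper, mirroring
how LazyPersistTestCase sets up its JMXGet (the "DataNode" service name and
the metric name below are illustrative assumptions):

  // Assumes a MiniDFSCluster is running and publishing JMX metrics.
  JMXGet jmx = new JMXGet();
  jmx.setService("DataNode");  // service name assumed for illustration
  jmx.init();                  // connect and cache the MBean names
  // Polls every second, indefinitely, until the metric hits the target.
  DFSTestUtil.waitForMetric(jmx, "RamDiskBlocksEvicted", 1);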
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4405674/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index 273babb..6c49de5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -510,20 +510,7 @@ public abstract class LazyPersistTestCase {
 
   protected void waitForMetric(final String metricName, final int expectedValue)
       throws TimeoutException, InterruptedException {
-    GenericTestUtils.waitFor(new Supplier<Boolean>() {
-      @Override
-      public Boolean get() {
-        try {
-          final int currentValue = Integer.parseInt(jmx.getValue(metricName));
-          LOG.info("Waiting for " + metricName +
-                       " to reach value " + expectedValue +
-                       ", current value = " + currentValue);
-          return currentValue == expectedValue;
-        } catch (Exception e) {
-          throw new UnhandledException("Test failed due to unexpected exception", e);
-        }
-      }
-    }, 1000, Integer.MAX_VALUE);
+    DFSTestUtil.waitForMetric(jmx, metricName, expectedValue);
   }
 
   protected void triggerEviction(DataNode dn) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4405674/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
index 6b16066..1680764 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
@@ -72,6 +72,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
     // for the previous one.
     Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
     makeTestFile(path2, BLOCK_SIZE, true);
+    waitForMetric("RamDiskBlocksEvicted", 1);
     verifyRamDiskJMXMetric("RamDiskBlocksEvicted", 1);
     verifyRamDiskJMXMetric("RamDiskBlocksEvictedWithoutRead", 1);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4405674/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-metrics2.properties
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-metrics2.properties b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-metrics2.properties
new file mode 100644
index 0000000..abe0468
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-metrics2.properties
@@ -0,0 +1,85 @@
+#
+#   Licensed to the Apache Software Foundation (ASF) under one or more
+#   contributor license agreements.  See the NOTICE file distributed with
+#   this work for additional information regarding copyright ownership.
+#   The ASF licenses this file to You under the Apache License, Version 2.0
+#   (the "License"); you may not use this file except in compliance with
+#   the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# syntax: [prefix].[source|sink].[instance].[options]
+# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
+# default sampling period, in seconds
+*.period=10
+*.periodMillis=100
+
+# The namenode-metrics.out will contain metrics from all contexts
+#namenode.sink.file.filename=namenode-metrics.out
+# Specifying a special sampling period for namenode:
+#namenode.sink.*.period=8
+
+#datanode.sink.file.filename=datanode-metrics.out
+
+#resourcemanager.sink.file.filename=resourcemanager-metrics.out
+
+#nodemanager.sink.file.filename=nodemanager-metrics.out
+
+#mrappmaster.sink.file.filename=mrappmaster-metrics.out
+
+#jobhistoryserver.sink.file.filename=jobhistoryserver-metrics.out
+
+# the following example split metrics of different
+# context to different sinks (in this case files)
+#nodemanager.sink.file_jvm.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_jvm.context=jvm
+#nodemanager.sink.file_jvm.filename=nodemanager-jvm-metrics.out
+#nodemanager.sink.file_mapred.class=org.apache.hadoop.metrics2.sink.FileSink
+#nodemanager.sink.file_mapred.context=mapred
+#nodemanager.sink.file_mapred.filename=nodemanager-mapred-metrics.out
+
+#
+# Below are for sending metrics to Ganglia
+#
+# for Ganglia 3.0 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink30
+#
+# for Ganglia 3.1 support
+# *.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+
+# *.sink.ganglia.period=10
+
+# default for supportsparse is false
+# *.sink.ganglia.supportsparse=true
+
+#*.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+#*.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Tag values to use for the ganglia prefix. If not defined no tags are used.
+# If '*' all tags are used. When specifying multiple tags, separate them with
+# commas. Note that the last segment of the property name is the context name.
+#
+#*.sink.ganglia.tagsForPrefix.jvm=ProcessName
+#*.sink.ganglia.tagsForPrefix.dfs=
+#*.sink.ganglia.tagsForPrefix.rpc=
+#*.sink.ganglia.tagsForPrefix.mapred=
+
+#namenode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#datanode.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#resourcemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#nodemanager.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#mrappmaster.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649
+
+#jobhistoryserver.sink.ganglia.servers=yourgangliahost_1:8649,yourgangliahost_2:8649


[26/50] [abbrv] hadoop git commit: HADOOP-12348. MetricsSystemImpl creates MetricsSourceAdapter with wrong time unit parameter. (zxu via rkanter)

Posted by ec...@apache.org.
HADOOP-12348. MetricsSystemImpl creates MetricsSourceAdapter with wrong time unit parameter. (zxu via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9538af0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9538af0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9538af0e

Branch: refs/heads/HADOOP-11890
Commit: 9538af0e1a7428b8787afa8d5e0b82c1e04adca7
Parents: fba06a7
Author: Robert Kanter <rk...@apache.org>
Authored: Fri Sep 11 15:20:17 2015 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Fri Sep 11 15:20:17 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt        |  3 +++
 .../hadoop/metrics2/impl/MetricsSourceAdapter.java     | 12 ++++++++----
 .../apache/hadoop/metrics2/impl/MetricsSystemImpl.java |  6 +++---
 .../hadoop/metrics2/impl/TestMetricsSystemImpl.java    | 13 +++++++++++++
 4 files changed, 27 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9538af0e/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 6ea2484..37c1cc8 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1091,6 +1091,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12388. Fix components' version information in the web page
     'About the Cluster'. (Jun Gong via zxu)
 
+    HADOOP-12348. MetricsSystemImpl creates MetricsSourceAdapter with
+    wrong time unit parameter. (zxu via rkanter)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9538af0e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
index f3ddc91..ace874e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
@@ -61,7 +61,7 @@ class MetricsSourceAdapter implements DynamicMBean {
 
   private Iterable<MetricsRecordImpl> lastRecs;
   private long jmxCacheTS = 0;
-  private int jmxCacheTTL;
+  private long jmxCacheTTL;
   private MBeanInfo infoCache;
   private ObjectName mbeanName;
   private final boolean startMBeans;
@@ -69,7 +69,7 @@ class MetricsSourceAdapter implements DynamicMBean {
   MetricsSourceAdapter(String prefix, String name, String description,
                        MetricsSource source, Iterable<MetricsTag> injectedTags,
                        MetricsFilter recordFilter, MetricsFilter metricFilter,
-                       int jmxCacheTTL, boolean startMBeans) {
+                       long jmxCacheTTL, boolean startMBeans) {
     this.prefix = checkNotNull(prefix, "prefix");
     this.name = checkNotNull(name, "name");
     this.source = checkNotNull(source, "source");
@@ -84,7 +84,7 @@ class MetricsSourceAdapter implements DynamicMBean {
 
   MetricsSourceAdapter(String prefix, String name, String description,
                        MetricsSource source, Iterable<MetricsTag> injectedTags,
-                       int period, MetricsConfig conf) {
+                       long period, MetricsConfig conf) {
     this(prefix, name, description, source, injectedTags,
          conf.getFilter(RECORD_FILTER_KEY),
          conf.getFilter(METRIC_FILTER_KEY),
@@ -229,7 +229,11 @@ class MetricsSourceAdapter implements DynamicMBean {
     return mbeanName;
   }
 
-  
+  @VisibleForTesting
+  long getJmxCacheTTL() {
+    return jmxCacheTTL;
+  }
+
   private void updateInfoCache() {
     LOG.debug("Updating info cache...");
     infoCache = infoBuilder.reset(lastRecs).get();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9538af0e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
index a1d258d..15914d6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
@@ -262,7 +262,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
     checkNotNull(config, "config");
     MetricsConfig conf = sourceConfigs.get(name);
     MetricsSourceAdapter sa = new MetricsSourceAdapter(prefix, name, desc,
-        source, injectedTags, period, conf != null ? conf
+        source, injectedTags, period * 1000L, conf != null ? conf
             : config.subset(SOURCE_KEY));
     sources.put(name, sa);
     sa.start();
@@ -359,7 +359,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
       return;
     }
     logicalTime = 0;
-    long millis = period * 1000;
+    long millis = period * 1000L;
     timer = new Timer("Timer for '"+ prefix +"' metrics system", true);
     timer.scheduleAtFixedRate(new TimerTask() {
           @Override
@@ -550,7 +550,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
   private void registerSystemSource() {
     MetricsConfig sysConf = sourceConfigs.get(MS_NAME);
     sysSource = new MetricsSourceAdapter(prefix, MS_STATS_NAME, MS_STATS_DESC,
-        MetricsAnnotations.makeSource(this), injectedTags, period,
+        MetricsAnnotations.makeSource(this), injectedTags, period * 1000L,
         sysConf == null ? config.subset(SOURCE_KEY) : sysConf);
     sysSource.start();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9538af0e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
index 6238d79..ecf2cc8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
@@ -544,6 +544,19 @@ public class TestMetricsSystemImpl {
     }
   }
 
+  @Test
+  public void testRegisterSourceJmxCacheTTL() {
+    MetricsSystem ms = new MetricsSystemImpl();
+    ms.init("TestMetricsSystem");
+    TestSource ts = new TestSource("ts");
+    ms.register(ts);
+    MetricsSourceAdapter sa = ((MetricsSystemImpl) ms)
+        .getSourceAdapter("TestSource");
+    assertEquals(MetricsConfig.PERIOD_DEFAULT * 1000 + 1,
+        sa.getJmxCacheTTL());
+    ms.shutdown();
+  }
+
   @Metrics(context="test")
   private static class TestSource {
     @Metric("C1 desc") MutableCounterLong c1;

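In sketch form, the unit mismatch this patch corrects (illustrative values
only): MetricsSourceAdapter interprets its TTL argument as milliseconds, so
passing the raw period in seconds made the JMX cache expire roughly a
thousand times too often.

  long periodSeconds = 10;                    // PERIOD_DEFAULT
  long ttlBeforeFix = periodSeconds;          // read as 10 ms: cache thrash
  long ttlAfterFix  = periodSeconds * 1000L;  // 10,000 ms, as intended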

[13/50] [abbrv] hadoop git commit: removing accidental file in MAPREDUCE-6415

Posted by ec...@apache.org.
removing accidental file in MAPREDUCE-6415


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f1537106
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f1537106
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f1537106

Branch: refs/heads/HADOOP-11890
Commit: f15371062f1bbcbb79bf44fd67ec647020d56c69
Parents: 6dd6ca4
Author: Robert Kanter <rk...@cloudera.com>
Authored: Wed Sep 9 18:16:07 2015 -0700
Committer: Robert Kanter <rk...@cloudera.com>
Committed: Wed Sep 9 18:16:07 2015 -0700

----------------------------------------------------------------------
 MAPREDUCE-6415.003.patch | 1308 -----------------------------------------
 1 file changed, 1308 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f1537106/MAPREDUCE-6415.003.patch
----------------------------------------------------------------------
diff --git a/MAPREDUCE-6415.003.patch b/MAPREDUCE-6415.003.patch
deleted file mode 100644
index 7c14341..0000000
--- a/MAPREDUCE-6415.003.patch
+++ /dev/null
@@ -1,1308 +0,0 @@
-diff --git hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
-index fa55703..3f646e6 100644
---- hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
-+++ hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
-@@ -52,6 +52,13 @@
-       </includes>
-     </fileSet>
-     <fileSet>
-+      <directory>../hadoop-archive-logs/target</directory>
-+      <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
-+      <includes>
-+        <include>*-sources.jar</include>
-+      </includes>
-+    </fileSet>
-+    <fileSet>
-       <directory>../hadoop-datajoin/target</directory>
-       <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
-       <includes>
-diff --git hadoop-mapreduce-project/bin/mapred hadoop-mapreduce-project/bin/mapred
-index 426af80..2d56a8d 100755
---- hadoop-mapreduce-project/bin/mapred
-+++ hadoop-mapreduce-project/bin/mapred
-@@ -20,6 +20,7 @@ MYNAME="${BASH_SOURCE-$0}"
- function hadoop_usage
- {
-   hadoop_add_subcommand "archive" "create a hadoop archive"
-+  hadoop_add_subcommand "archive-logs" "combine aggregated logs into hadoop archives"
-   hadoop_add_subcommand "classpath" "prints the class path needed for running mapreduce subcommands"
-   hadoop_add_subcommand "distcp" "copy file or directories recursively"
-   hadoop_add_subcommand "historyserver" "run job history servers as a standalone daemon"
-@@ -72,6 +73,13 @@ case ${COMMAND} in
-     hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-     HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
-   ;;
-+  archive-logs)
-+    CLASS=org.apache.hadoop.tools.HadoopArchiveLogs
-+    hadoop_debug "Injecting TOOL_PATH into CLASSPATH"
-+    hadoop_add_classpath "${TOOL_PATH}"
-+    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
-+    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
-+  ;;
-   classpath)
-     hadoop_do_classpath_subcommand CLASS "$@"
-   ;;
-diff --git hadoop-project/pom.xml hadoop-project/pom.xml
-index 9863475..636e063 100644
---- hadoop-project/pom.xml
-+++ hadoop-project/pom.xml
-@@ -324,6 +324,11 @@
-       </dependency>
-       <dependency>
-         <groupId>org.apache.hadoop</groupId>
-+        <artifactId>hadoop-archive-logs</artifactId>
-+        <version>${project.version}</version>
-+      </dependency>
-+      <dependency>
-+        <groupId>org.apache.hadoop</groupId>
-         <artifactId>hadoop-distcp</artifactId>
-         <version>${project.version}</version>
-       </dependency>
-diff --git hadoop-tools/hadoop-archive-logs/pom.xml hadoop-tools/hadoop-archive-logs/pom.xml
-new file mode 100644
-index 0000000..2a480a8
---- /dev/null
-+++ hadoop-tools/hadoop-archive-logs/pom.xml
-@@ -0,0 +1,171 @@
-+<?xml version="1.0" encoding="UTF-8"?>
-+<!--
-+  Licensed under the Apache License, Version 2.0 (the "License");
-+  you may not use this file except in compliance with the License.
-+  You may obtain a copy of the License at
-+
-+    http://www.apache.org/licenses/LICENSE-2.0
-+
-+  Unless required by applicable law or agreed to in writing, software
-+  distributed under the License is distributed on an "AS IS" BASIS,
-+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+  See the License for the specific language governing permissions and
-+  limitations under the License. See accompanying LICENSE file.
-+-->
-+<project xmlns="http://maven.apache.org/POM/4.0.0"
-+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
-+                      http://maven.apache.org/xsd/maven-4.0.0.xsd">
-+  <modelVersion>4.0.0</modelVersion>
-+  <parent>
-+    <groupId>org.apache.hadoop</groupId>
-+    <artifactId>hadoop-project</artifactId>
-+    <version>3.0.0-SNAPSHOT</version>
-+    <relativePath>../../hadoop-project</relativePath>
-+  </parent>
-+  <groupId>org.apache.hadoop</groupId>
-+  <artifactId>hadoop-archive-logs</artifactId>
-+  <version>3.0.0-SNAPSHOT</version>
-+  <description>Apache Hadoop Archive Logs</description>
-+  <name>Apache Hadoop Archive Logs</name>
-+  <packaging>jar</packaging>
-+
-+  <properties>
-+    <hadoop.log.dir>${project.build.directory}/log</hadoop.log.dir>
-+  </properties>
-+
-+  <dependencies>
-+    <dependency>
-+      <groupId>junit</groupId>
-+      <artifactId>junit</artifactId>
-+      <scope>test</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-mapreduce-client-core</artifactId>
-+      <scope>provided</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-yarn-applications-distributedshell</artifactId>
-+      <scope>provided</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-common</artifactId>
-+      <scope>provided</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-hdfs</artifactId>
-+      <scope>test</scope>
-+      <type>test-jar</type>
-+    </dependency>
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-yarn-server-tests</artifactId>
-+      <type>test-jar</type>
-+      <scope>test</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-archives</artifactId>
-+    </dependency>
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-yarn-common</artifactId>
-+      <scope>provided</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-yarn-api</artifactId>
-+      <scope>provided</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>com.google.guava</groupId>
-+      <artifactId>guava</artifactId>
-+      <scope>provided</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>commons-io</groupId>
-+      <artifactId>commons-io</artifactId>
-+      <scope>provided</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>commons-logging</groupId>
-+      <artifactId>commons-logging</artifactId>
-+      <scope>provided</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>commons-cli</groupId>
-+      <artifactId>commons-cli</artifactId>
-+      <scope>provided</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-yarn-client</artifactId>
-+      <scope>provided</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-yarn-server-resourcemanager</artifactId>
-+      <scope>provided</scope>
-+    </dependency>
-+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-hdfs</artifactId>
-+      <scope>test</scope>
-+    </dependency>
-+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-common</artifactId>
-+      <scope>test</scope>
-+      <type>test-jar</type>
-+    </dependency>
-+    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-+      <scope>test</scope>
-+      <type>test-jar</type>
-+    </dependency>
-+  </dependencies>
-+
-+  <build>
-+    <plugins>
-+      <plugin>
-+        <groupId>org.apache.maven.plugins</groupId>
-+        <artifactId>maven-antrun-plugin</artifactId>
-+        <executions>
-+          <execution>
-+            <id>create-log-dir</id>
-+            <phase>process-test-resources</phase>
-+            <goals>
-+              <goal>run</goal>
-+            </goals>
-+            <configuration>
-+              <target>
-+                <delete dir="${test.build.data}"/>
-+                <mkdir dir="${test.build.data}"/>
-+                <mkdir dir="${hadoop.log.dir}"/>
-+              </target>
-+            </configuration>
-+          </execution>
-+        </executions>
-+      </plugin>
-+      <plugin>
-+        <groupId>org.apache.maven.plugins</groupId>
-+        <artifactId>maven-jar-plugin</artifactId>
-+         <configuration>
-+          <archive>
-+           <manifest>
-+            <mainClass>org.apache.hadoop.tools.HadoopArchiveLogs</mainClass>
-+           </manifest>
-+         </archive>
-+        </configuration>
-+       </plugin>
-+    </plugins>
-+  </build>
-+</project>
-diff --git hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
-new file mode 100644
-index 0000000..4778dcb
---- /dev/null
-+++ hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogs.java
-@@ -0,0 +1,403 @@
-+/**
-+ * Licensed to the Apache Software Foundation (ASF) under one
-+ * or more contributor license agreements.  See the NOTICE file
-+ * distributed with this work for additional information
-+ * regarding copyright ownership.  The ASF licenses this file
-+ * to you under the Apache License, Version 2.0 (the
-+ * "License"); you may not use this file except in compliance
-+ * with the License.  You may obtain a copy of the License at
-+ *
-+ *     http://www.apache.org/licenses/LICENSE-2.0
-+ *
-+ * Unless required by applicable law or agreed to in writing, software
-+ * distributed under the License is distributed on an "AS IS" BASIS,
-+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+ * See the License for the specific language governing permissions and
-+ * limitations under the License.
-+ */
-+
-+package org.apache.hadoop.tools;
-+
-+import com.google.common.annotations.VisibleForTesting;
-+import org.apache.commons.cli.CommandLine;
-+import org.apache.commons.cli.CommandLineParser;
-+import org.apache.commons.cli.GnuParser;
-+import org.apache.commons.cli.HelpFormatter;
-+import org.apache.commons.cli.Option;
-+import org.apache.commons.cli.Options;
-+import org.apache.commons.cli.ParseException;
-+import org.apache.commons.logging.Log;
-+import org.apache.commons.logging.LogFactory;
-+import org.apache.hadoop.conf.Configuration;
-+import org.apache.hadoop.fs.FileStatus;
-+import org.apache.hadoop.fs.FileSystem;
-+import org.apache.hadoop.fs.Path;
-+import org.apache.hadoop.fs.permission.FsAction;
-+import org.apache.hadoop.fs.permission.FsPermission;
-+import org.apache.hadoop.mapred.JobConf;
-+import org.apache.hadoop.util.Tool;
-+import org.apache.hadoop.util.ToolRunner;
-+import org.apache.hadoop.yarn.api.records.ApplicationReport;
-+import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
-+import org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster;
-+import org.apache.hadoop.yarn.applications.distributedshell.Client;
-+import org.apache.hadoop.yarn.client.api.YarnClient;
-+import org.apache.hadoop.yarn.conf.YarnConfiguration;
-+import org.apache.hadoop.yarn.exceptions.YarnException;
-+import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils;
-+
-+import java.io.File;
-+import java.io.FileWriter;
-+import java.io.IOException;
-+import java.util.ArrayList;
-+import java.util.Collections;
-+import java.util.Comparator;
-+import java.util.HashSet;
-+import java.util.Iterator;
-+import java.util.List;
-+import java.util.Set;
-+
-+
-+/**
-+ * This tool moves Aggregated Log files into HAR archives using the
-+ * {@link HadoopArchives} tool and the Distributed Shell via the
-+ * {@link HadoopArchiveLogsRunner}.
-+ */
-+public class HadoopArchiveLogs implements Tool {
-+  private static final Log LOG = LogFactory.getLog(HadoopArchiveLogs.class);
-+
-+  private static final String HELP_OPTION = "help";
-+  private static final String MAX_ELIGIBLE_APPS_OPTION = "maxEligibleApps";
-+  private static final String MIN_NUM_LOG_FILES_OPTION = "minNumberLogFiles";
-+  private static final String MAX_TOTAL_LOGS_SIZE_OPTION = "maxTotalLogsSize";
-+  private static final String MEMORY_OPTION = "memory";
-+
-+  private static final int DEFAULT_MAX_ELIGIBLE = -1;
-+  private static final int DEFAULT_MIN_NUM_LOG_FILES = 20;
-+  private static final long DEFAULT_MAX_TOTAL_LOGS_SIZE = 1024L;
-+  private static final long DEFAULT_MEMORY = 1024L;
-+
-+  @VisibleForTesting
-+  int maxEligible = DEFAULT_MAX_ELIGIBLE;
-+  @VisibleForTesting
-+  int minNumLogFiles = DEFAULT_MIN_NUM_LOG_FILES;
-+  @VisibleForTesting
-+  long maxTotalLogsSize = DEFAULT_MAX_TOTAL_LOGS_SIZE * 1024L * 1024L;
-+  @VisibleForTesting
-+  long memory = DEFAULT_MEMORY;
-+
-+  @VisibleForTesting
-+  Set<ApplicationReport> eligibleApplications;
-+
-+  private JobConf conf;
-+
-+  public HadoopArchiveLogs(Configuration conf) {
-+    setConf(conf);
-+    eligibleApplications = new HashSet<>();
-+  }
-+
-+  public static void main(String[] args) {
-+    JobConf job = new JobConf(HadoopArchiveLogs.class);
-+
-+    HadoopArchiveLogs hal = new HadoopArchiveLogs(job);
-+    int ret = 0;
-+
-+    try{
-+      ret = ToolRunner.run(hal, args);
-+    } catch(Exception e) {
-+      LOG.debug("Exception", e);
-+      System.err.println(e.getClass().getSimpleName());
-+      final String s = e.getLocalizedMessage();
-+      if (s != null) {
-+        System.err.println(s);
-+      } else {
-+        e.printStackTrace(System.err);
-+      }
-+      System.exit(1);
-+    }
-+    System.exit(ret);
-+  }
-+
-+  @Override
-+  public int run(String[] args) throws Exception {
-+    handleOpts(args);
-+
-+    findAggregatedApps();
-+
-+    FileSystem fs = null;
-+    Path remoteRootLogDir = new Path(conf.get(
-+        YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
-+        YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
-+    String suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(conf);
-+    Path workingDir = new Path(remoteRootLogDir, "archive-logs-work");
-+    try {
-+      fs = FileSystem.get(conf);
-+      checkFiles(fs, remoteRootLogDir, suffix);
-+
-+      // Prepare working directory
-+      if (fs.exists(workingDir)) {
-+        fs.delete(workingDir, true);
-+      }
-+      fs.mkdirs(workingDir);
-+      fs.setPermission(workingDir,
-+          new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));
-+    } finally {
-+      if (fs != null) {
-+        fs.close();
-+      }
-+    }
-+
-+    checkMaxEligible();
-+
-+    if (eligibleApplications.isEmpty()) {
-+      LOG.info("No eligible applications to process");
-+      System.exit(0);
-+    }
-+
-+    StringBuilder sb =
-+        new StringBuilder("Will process the following applications:");
-+    for (ApplicationReport report : eligibleApplications) {
-+      sb.append("\n\t").append(report.getApplicationId());
-+    }
-+    LOG.info(sb.toString());
-+
-+    File localScript = File.createTempFile("hadoop-archive-logs-", ".sh");
-+    generateScript(localScript, workingDir, remoteRootLogDir, suffix);
-+
-+    if (runDistributedShell(localScript)) {
-+      return 0;
-+    }
-+    return -1;
-+  }
-+
-+  private void handleOpts(String[] args) throws ParseException {
-+    Options opts = new Options();
-+    Option helpOpt = new Option(HELP_OPTION, false, "Prints this message");
-+    Option maxEligibleOpt = new Option(MAX_ELIGIBLE_APPS_OPTION, true,
-+        "The maximum number of eligible apps to process (default: "
-+            + DEFAULT_MAX_ELIGIBLE + " (all))");
-+    maxEligibleOpt.setArgName("n");
-+    Option minNumLogFilesOpt = new Option(MIN_NUM_LOG_FILES_OPTION, true,
-+        "The minimum number of log files required to be eligible (default: "
-+            + DEFAULT_MIN_NUM_LOG_FILES + ")");
-+    minNumLogFilesOpt.setArgName("n");
-+    Option maxTotalLogsSizeOpt = new Option(MAX_TOTAL_LOGS_SIZE_OPTION, true,
-+        "The maximum total logs size (in megabytes) required to be eligible" +
-+            " (default: " + DEFAULT_MAX_TOTAL_LOGS_SIZE + ")");
-+    maxTotalLogsSizeOpt.setArgName("megabytes");
-+    Option memoryOpt = new Option(MEMORY_OPTION, true,
-+        "The amount of memory (in megabytes) for each container (default: "
-+            + DEFAULT_MEMORY + ")");
-+    memoryOpt.setArgName("megabytes");
-+    opts.addOption(helpOpt);
-+    opts.addOption(maxEligibleOpt);
-+    opts.addOption(minNumLogFilesOpt);
-+    opts.addOption(maxTotalLogsSizeOpt);
-+    opts.addOption(memoryOpt);
-+
-+    try {
-+      CommandLineParser parser = new GnuParser();
-+      CommandLine commandLine = parser.parse(opts, args);
-+      if (commandLine.hasOption(HELP_OPTION)) {
-+        HelpFormatter formatter = new HelpFormatter();
-+        formatter.printHelp("yarn archive-logs", opts);
-+        System.exit(0);
-+      }
-+      if (commandLine.hasOption(MAX_ELIGIBLE_APPS_OPTION)) {
-+        maxEligible = Integer.parseInt(
-+            commandLine.getOptionValue(MAX_ELIGIBLE_APPS_OPTION));
-+        if (maxEligible == 0) {
-+          LOG.info("Setting " + MAX_ELIGIBLE_APPS_OPTION + " to 0 accomplishes "
-+              + "nothing. Please either set it to a negative value "
-+              + "(default, all) or a more reasonable value.");
-+          System.exit(0);
-+        }
-+      }
-+      if (commandLine.hasOption(MIN_NUM_LOG_FILES_OPTION)) {
-+        minNumLogFiles = Integer.parseInt(
-+            commandLine.getOptionValue(MIN_NUM_LOG_FILES_OPTION));
-+      }
-+      if (commandLine.hasOption(MAX_TOTAL_LOGS_SIZE_OPTION)) {
-+        maxTotalLogsSize = Long.parseLong(
-+            commandLine.getOptionValue(MAX_TOTAL_LOGS_SIZE_OPTION));
-+        maxTotalLogsSize *= 1024L * 1024L;
-+      }
-+      if (commandLine.hasOption(MEMORY_OPTION)) {
-+        memory = Long.parseLong(commandLine.getOptionValue(MEMORY_OPTION));
-+      }
-+    } catch (ParseException pe) {
-+      HelpFormatter formatter = new HelpFormatter();
-+      formatter.printHelp("yarn archive-logs", opts);
-+      throw pe;
-+    }
-+  }
-+
-+  @VisibleForTesting
-+  void findAggregatedApps() throws IOException, YarnException {
-+    YarnClient client = YarnClient.createYarnClient();
-+    try {
-+      client.init(getConf());
-+      client.start();
-+      List<ApplicationReport> reports = client.getApplications();
-+      for (ApplicationReport report : reports) {
-+        LogAggregationStatus aggStatus = report.getLogAggregationStatus();
-+        if (aggStatus.equals(LogAggregationStatus.SUCCEEDED) ||
-+            aggStatus.equals(LogAggregationStatus.FAILED)) {
-+          eligibleApplications.add(report);
-+        }
-+      }
-+    } finally {
-+      if (client != null) {
-+        client.stop();
-+      }
-+    }
-+  }
-+
-+  @VisibleForTesting
-+  void checkFiles(FileSystem fs, Path remoteRootLogDir, String suffix) {
-+    for (Iterator<ApplicationReport> reportIt = eligibleApplications.iterator();
-+         reportIt.hasNext(); ) {
-+      ApplicationReport report = reportIt.next();
-+      long totalFileSize = 0L;
-+      try {
-+        FileStatus[] files = fs.listStatus(
-+            LogAggregationUtils.getRemoteAppLogDir(remoteRootLogDir,
-+                report.getApplicationId(), report.getUser(), suffix));
-+        if (files.length < minNumLogFiles) {
-+          reportIt.remove();
-+        } else {
-+          for (FileStatus file : files) {
-+            if (file.getPath().getName().equals(report.getApplicationId()
-+                + ".har")) {
-+              reportIt.remove();
-+              break;
-+            }
-+            totalFileSize += file.getLen();
-+          }
-+          if (totalFileSize > maxTotalLogsSize) {
-+            reportIt.remove();
-+          }
-+        }
-+      } catch (IOException ioe) {
-+        // If the user doesn't have permission or it doesn't exist, then skip it
-+        reportIt.remove();
-+      }
-+    }
-+  }
-+
-+  @VisibleForTesting
-+  void checkMaxEligible() {
-+    // If we have too many eligible apps, remove the newest ones first
-+    if (maxEligible > 0 && eligibleApplications.size() > maxEligible) {
-+      List<ApplicationReport> sortedApplications =
-+          new ArrayList<ApplicationReport>(eligibleApplications);
-+      Collections.sort(sortedApplications, new Comparator<ApplicationReport>() {
-+        @Override
-+        public int compare(ApplicationReport o1, ApplicationReport o2) {
-+          return Long.compare(o1.getFinishTime(), o2.getFinishTime());
-+        }
-+      });
-+      for (int i = maxEligible; i < sortedApplications.size(); i++) {
-+        eligibleApplications.remove(sortedApplications.get(i));
-+      }
-+    }
-+  }
-+
-+  /*
-+  The generated script looks like this:
-+  #!/bin/bash
-+  set -e
-+  set -x
-+  if [ "$YARN_SHELL_ID" == "1" ]; then
-+        appId="application_1440448768987_0001"
-+        user="rkanter"
-+  elif [ "$YARN_SHELL_ID" == "2" ]; then
-+        appId="application_1440448768987_0002"
-+        user="rkanter"
-+  else
-+        echo "Unknown Mapping!"
-+        exit 1
-+  fi
-+  export HADOOP_CLIENT_OPTS="-Xmx1024m"
-+  export HADOOP_CLASSPATH=/dist/share/hadoop/tools/lib/hadoop-archive-logs-2.8.0-SNAPSHOT.jar:/dist/share/hadoop/tools/lib/hadoop-archives-2.8.0-SNAPSHOT.jar
-+  "$HADOOP_HOME"/bin/hadoop org.apache.hadoop.tools.HadoopArchiveLogsRunner -appId "$appId" -user "$user" -workingDir /tmp/logs/archive-logs-work -remoteRootLogDir /tmp/logs -suffix logs
-+   */
-+  @VisibleForTesting
-+  void generateScript(File localScript, Path workingDir,
-+        Path remoteRootLogDir, String suffix) throws IOException {
-+    LOG.info("Generating script at: " + localScript.getAbsolutePath());
-+    String halrJarPath = HadoopArchiveLogsRunner.class.getProtectionDomain()
-+        .getCodeSource().getLocation().getPath();
-+    String harJarPath = HadoopArchives.class.getProtectionDomain()
-+        .getCodeSource().getLocation().getPath();
-+    String classpath = halrJarPath + File.pathSeparator + harJarPath;
-+    FileWriter fw = null;
-+    try {
-+      fw = new FileWriter(localScript);
-+      fw.write("#!/bin/bash\nset -e\nset -x\n");
-+      int containerCount = 1;
-+      for (ApplicationReport report : eligibleApplications) {
-+        fw.write("if [ \"$YARN_SHELL_ID\" == \"");
-+        fw.write(Integer.toString(containerCount));
-+        fw.write("\" ]; then\n\tappId=\"");
-+        fw.write(report.getApplicationId().toString());
-+        fw.write("\"\n\tuser=\"");
-+        fw.write(report.getUser());
-+        fw.write("\"\nel");
-+        containerCount++;
-+      }
-+      fw.write("se\n\techo \"Unknown Mapping!\"\n\texit 1\nfi\n");
-+      fw.write("export HADOOP_CLIENT_OPTS=\"-Xmx");
-+      fw.write(Long.toString(memory));
-+      fw.write("m\"\n");
-+      fw.write("export HADOOP_CLASSPATH=");
-+      fw.write(classpath);
-+      fw.write("\n\"$HADOOP_HOME\"/bin/hadoop ");
-+      fw.write(HadoopArchiveLogsRunner.class.getName());
-+      fw.write(" -appId \"$appId\" -user \"$user\" -workingDir ");
-+      fw.write(workingDir.toString());
-+      fw.write(" -remoteRootLogDir ");
-+      fw.write(remoteRootLogDir.toString());
-+      fw.write(" -suffix ");
-+      fw.write(suffix);
-+      fw.write("\n");
-+    } finally {
-+      if (fw != null) {
-+        fw.close();
-+      }
-+    }
-+  }
-+
-+  private boolean runDistributedShell(File localScript) throws Exception {
-+    String[] dsArgs = {
-+        "--appname",
-+        "ArchiveLogs",
-+        "--jar",
-+        ApplicationMaster.class.getProtectionDomain().getCodeSource()
-+            .getLocation().getPath(),
-+        "--num_containers",
-+        Integer.toString(eligibleApplications.size()),
-+        "--container_memory",
-+        Long.toString(memory),
-+        "--shell_script",
-+        localScript.getAbsolutePath()
-+    };
-+    final Client dsClient = new Client(new Configuration(conf));
-+    dsClient.init(dsArgs);
-+    return dsClient.run();
-+  }
-+
-+  @Override
-+  public void setConf(Configuration conf) {
-+    if (conf instanceof JobConf) {
-+      this.conf = (JobConf) conf;
-+    } else {
-+      this.conf = new JobConf(conf, HadoopArchiveLogs.class);
-+    }
-+  }
-+
-+  @Override
-+  public Configuration getConf() {
-+    return this.conf;
-+  }
-+}
-diff --git hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
-new file mode 100644
-index 0000000..347e5fb
---- /dev/null
-+++ hadoop-tools/hadoop-archive-logs/src/main/java/org/apache/hadoop/tools/HadoopArchiveLogsRunner.java
-@@ -0,0 +1,180 @@
-+/**
-+ * Licensed to the Apache Software Foundation (ASF) under one
-+ * or more contributor license agreements.  See the NOTICE file
-+ * distributed with this work for additional information
-+ * regarding copyright ownership.  The ASF licenses this file
-+ * to you under the Apache License, Version 2.0 (the
-+ * "License"); you may not use this file except in compliance
-+ * with the License.  You may obtain a copy of the License at
-+ *
-+ *     http://www.apache.org/licenses/LICENSE-2.0
-+ *
-+ * Unless required by applicable law or agreed to in writing, software
-+ * distributed under the License is distributed on an "AS IS" BASIS,
-+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+ * See the License for the specific language governing permissions and
-+ * limitations under the License.
-+ */
-+
-+package org.apache.hadoop.tools;
-+
-+import org.apache.commons.cli.CommandLine;
-+import org.apache.commons.cli.CommandLineParser;
-+import org.apache.commons.cli.GnuParser;
-+import org.apache.commons.cli.Option;
-+import org.apache.commons.cli.Options;
-+import org.apache.commons.cli.ParseException;
-+import org.apache.commons.logging.Log;
-+import org.apache.commons.logging.LogFactory;
-+import org.apache.hadoop.conf.Configuration;
-+import org.apache.hadoop.fs.FileStatus;
-+import org.apache.hadoop.fs.FileSystem;
-+import org.apache.hadoop.fs.Path;
-+import org.apache.hadoop.fs.PathFilter;
-+import org.apache.hadoop.mapred.JobConf;
-+import org.apache.hadoop.util.Tool;
-+import org.apache.hadoop.util.ToolRunner;
-+
-+import java.io.File;
-+
-+/**
-+ * This is a child program designed to be used by the {@link HadoopArchiveLogs}
-+ * tool via the Distributed Shell.  It's not meant to be run directly.
-+ */
-+public class HadoopArchiveLogsRunner implements Tool {
-+  private static final Log LOG = LogFactory.getLog(HadoopArchiveLogsRunner.class);
-+
-+  private static final String APP_ID_OPTION = "appId";
-+  private static final String USER_OPTION = "user";
-+  private static final String WORKING_DIR_OPTION = "workingDir";
-+  private static final String REMOTE_ROOT_LOG_DIR = "remoteRootLogDir";
-+  private static final String SUFFIX_OPTION = "suffix";
-+
-+  private String appId;
-+  private String user;
-+  private String workingDir;
-+  private String remoteLogDir;
-+  private String suffix;
-+
-+  private JobConf conf;
-+
-+  public HadoopArchiveLogsRunner(Configuration conf) {
-+    setConf(conf);
-+  }
-+
-+  public static void main(String[] args) {
-+    JobConf job = new JobConf(HadoopArchiveLogsRunner.class);
-+
-+    HadoopArchiveLogsRunner halr = new HadoopArchiveLogsRunner(job);
-+    int ret = 0;
-+
-+    try{
-+      ret = ToolRunner.run(halr, args);
-+    } catch(Exception e) {
-+      LOG.debug("Exception", e);
-+      System.err.println(e.getClass().getSimpleName());
-+      final String s = e.getLocalizedMessage();
-+      if (s != null) {
-+        System.err.println(s);
-+      } else {
-+        e.printStackTrace(System.err);
-+      }
-+      System.exit(1);
-+    }
-+    System.exit(ret);
-+  }
-+
-+  @Override
-+  public int run(String[] args) throws Exception {
-+    handleOpts(args);
-+    String remoteAppLogDir = remoteLogDir + File.separator + user
-+        + File.separator + suffix + File.separator + appId;
-+
-+    // Run 'hadoop archives' command in local mode
-+    Configuration haConf = new Configuration(getConf());
-+    haConf.set("mapreduce.framework.name", "local");
-+    HadoopArchives ha = new HadoopArchives(haConf);
-+    String[] haArgs = {
-+        "-archiveName",
-+        appId + ".har",
-+        "-p",
-+        remoteAppLogDir,
-+        "*",
-+        workingDir
-+    };
-+    StringBuilder sb = new StringBuilder("Executing 'hadoop archives'");
-+    for (String haArg : haArgs) {
-+      sb.append("\n\t").append(haArg);
-+    }
-+    LOG.info(sb.toString());
-+    ha.run(haArgs);
-+
-+    FileSystem fs = null;
-+    // Move har file to correct location and delete original logs
-+    try {
-+      fs = FileSystem.get(conf);
-+      LOG.info("Moving har to original location");
-+      fs.rename(new Path(workingDir, appId + ".har"),
-+          new Path(remoteAppLogDir, appId + ".har"));
-+      LOG.info("Deleting original logs");
-+      for (FileStatus original : fs.listStatus(new Path(remoteAppLogDir),
-+          new PathFilter() {
-+            @Override
-+            public boolean accept(Path path) {
-+              return !path.getName().endsWith(".har");
-+            }
-+          })) {
-+        fs.delete(original.getPath(), false);
-+      }
-+    } finally {
-+      if (fs != null) {
-+        fs.close();
-+      }
-+    }
-+
-+    return 0;
-+  }
-+
-+  private void handleOpts(String[] args) throws ParseException {
-+    Options opts = new Options();
-+    Option appIdOpt = new Option(APP_ID_OPTION, true, "Application ID");
-+    appIdOpt.setRequired(true);
-+    Option userOpt = new Option(USER_OPTION, true, "User");
-+    userOpt.setRequired(true);
-+    Option workingDirOpt = new Option(WORKING_DIR_OPTION, true,
-+        "Working Directory");
-+    workingDirOpt.setRequired(true);
-+    Option remoteLogDirOpt = new Option(REMOTE_ROOT_LOG_DIR, true,
-+        "Remote Root Log Directory");
-+    remoteLogDirOpt.setRequired(true);
-+    Option suffixOpt = new Option(SUFFIX_OPTION, true, "Suffix");
-+    suffixOpt.setRequired(true);
-+    opts.addOption(appIdOpt);
-+    opts.addOption(userOpt);
-+    opts.addOption(workingDirOpt);
-+    opts.addOption(remoteLogDirOpt);
-+    opts.addOption(suffixOpt);
-+
-+    CommandLineParser parser = new GnuParser();
-+    CommandLine commandLine = parser.parse(opts, args);
-+    appId = commandLine.getOptionValue(APP_ID_OPTION);
-+    user = commandLine.getOptionValue(USER_OPTION);
-+    workingDir = commandLine.getOptionValue(WORKING_DIR_OPTION);
-+    remoteLogDir = commandLine.getOptionValue(REMOTE_ROOT_LOG_DIR);
-+    suffix = commandLine.getOptionValue(SUFFIX_OPTION);
-+  }
-+
-+  @Override
-+  public void setConf(Configuration conf) {
-+    if (conf instanceof JobConf) {
-+      this.conf = (JobConf) conf;
-+    } else {
-+      this.conf = new JobConf(conf, HadoopArchiveLogsRunner.class);
-+    }
-+  }
-+
-+  @Override
-+  public Configuration getConf() {
-+    return this.conf;
-+  }
-+}
-diff --git hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
-new file mode 100644
-index 0000000..c8ff201
---- /dev/null
-+++ hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java
-@@ -0,0 +1,293 @@
-+/**
-+ * Licensed to the Apache Software Foundation (ASF) under one
-+ * or more contributor license agreements.  See the NOTICE file
-+ * distributed with this work for additional information
-+ * regarding copyright ownership.  The ASF licenses this file
-+ * to you under the Apache License, Version 2.0 (the
-+ * "License"); you may not use this file except in compliance
-+ * with the License.  You may obtain a copy of the License at
-+ *
-+ *     http://www.apache.org/licenses/LICENSE-2.0
-+ *
-+ * Unless required by applicable law or agreed to in writing, software
-+ * distributed under the License is distributed on an "AS IS" BASIS,
-+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+ * See the License for the specific language governing permissions and
-+ * limitations under the License.
-+ */
-+
-+package org.apache.hadoop.tools;
-+
-+import org.apache.commons.io.IOUtils;
-+import org.apache.hadoop.conf.Configuration;
-+import org.apache.hadoop.fs.FSDataOutputStream;
-+import org.apache.hadoop.fs.FileSystem;
-+import org.apache.hadoop.fs.Path;
-+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-+import org.apache.hadoop.yarn.api.records.ApplicationId;
-+import org.apache.hadoop.yarn.api.records.ApplicationReport;
-+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
-+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
-+import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
-+import org.apache.hadoop.yarn.api.records.Priority;
-+import org.apache.hadoop.yarn.api.records.Resource;
-+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
-+import org.apache.hadoop.yarn.conf.YarnConfiguration;
-+import org.apache.hadoop.yarn.server.MiniYARNCluster;
-+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
-+import org.junit.Assert;
-+import org.junit.Test;
-+
-+import java.io.File;
-+import java.io.IOException;
-+import java.util.Random;
-+
-+public class TestHadoopArchiveLogs {
-+
-+  private static final long CLUSTER_TIMESTAMP = System.currentTimeMillis();
-+  private static final int FILE_SIZE_INCREMENT = 4096;
-+  private static final byte[] DUMMY_DATA = new byte[FILE_SIZE_INCREMENT];
-+  static {
-+    new Random().nextBytes(DUMMY_DATA);
-+  }
-+
-+  @Test(timeout = 10000)
-+  public void testCheckFiles() throws Exception {
-+    Configuration conf = new Configuration();
-+    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
-+    FileSystem fs = FileSystem.getLocal(conf);
-+    Path rootLogDir = new Path("target", "logs");
-+    String suffix = "logs";
-+    Path logDir = new Path(rootLogDir,
-+        new Path(System.getProperty("user.name"), suffix));
-+    fs.mkdirs(logDir);
-+
-+    Assert.assertEquals(0, hal.eligibleApplications.size());
-+    ApplicationReport app1 = createAppReport(1);  // no files found
-+    ApplicationReport app2 = createAppReport(2);  // too few files
-+    Path app2Path = new Path(logDir, app2.getApplicationId().toString());
-+    fs.mkdirs(app2Path);
-+    createFile(fs, new Path(app2Path, "file1"), 1);
-+    hal.minNumLogFiles = 2;
-+    ApplicationReport app3 = createAppReport(3);  // too large
-+    Path app3Path = new Path(logDir, app3.getApplicationId().toString());
-+    fs.mkdirs(app3Path);
-+    createFile(fs, new Path(app3Path, "file1"), 2);
-+    createFile(fs, new Path(app3Path, "file2"), 5);
-+    hal.maxTotalLogsSize = FILE_SIZE_INCREMENT * 6;
-+    ApplicationReport app4 = createAppReport(4);  // has har already
-+    Path app4Path = new Path(logDir, app4.getApplicationId().toString());
-+    fs.mkdirs(app4Path);
-+    createFile(fs, new Path(app4Path, app4.getApplicationId() + ".har"), 1);
-+    ApplicationReport app5 = createAppReport(5);  // just right
-+    Path app5Path = new Path(logDir, app5.getApplicationId().toString());
-+    fs.mkdirs(app5Path);
-+    createFile(fs, new Path(app5Path, "file1"), 2);
-+    createFile(fs, new Path(app5Path, "file2"), 3);
-+    hal.eligibleApplications.add(app1);
-+    hal.eligibleApplications.add(app2);
-+    hal.eligibleApplications.add(app3);
-+    hal.eligibleApplications.add(app4);
-+    hal.eligibleApplications.add(app5);
-+
-+    hal.checkFiles(fs, rootLogDir, suffix);
-+    Assert.assertEquals(1, hal.eligibleApplications.size());
-+    Assert.assertEquals(app5, hal.eligibleApplications.iterator().next());
-+  }
-+
-+  @Test(timeout = 10000)
-+  public void testCheckMaxEligible() throws Exception {
-+    Configuration conf = new Configuration();
-+    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
-+    ApplicationReport app1 = createAppReport(1);
-+    app1.setFinishTime(CLUSTER_TIMESTAMP - 5);
-+    ApplicationReport app2 = createAppReport(2);
-+    app2.setFinishTime(CLUSTER_TIMESTAMP - 10);
-+    ApplicationReport app3 = createAppReport(3);
-+    app3.setFinishTime(CLUSTER_TIMESTAMP + 5);
-+    ApplicationReport app4 = createAppReport(4);
-+    app4.setFinishTime(CLUSTER_TIMESTAMP + 10);
-+    ApplicationReport app5 = createAppReport(5);
-+    app5.setFinishTime(CLUSTER_TIMESTAMP);
-+    Assert.assertEquals(0, hal.eligibleApplications.size());
-+    hal.eligibleApplications.add(app1);
-+    hal.eligibleApplications.add(app2);
-+    hal.eligibleApplications.add(app3);
-+    hal.eligibleApplications.add(app4);
-+    hal.eligibleApplications.add(app5);
-+    hal.maxEligible = -1;
-+    hal.checkMaxEligible();
-+    Assert.assertEquals(5, hal.eligibleApplications.size());
-+
-+    hal.maxEligible = 4;
-+    hal.checkMaxEligible();
-+    Assert.assertEquals(4, hal.eligibleApplications.size());
-+    Assert.assertFalse(hal.eligibleApplications.contains(app4));
-+
-+    hal.maxEligible = 3;
-+    hal.checkMaxEligible();
-+    Assert.assertEquals(3, hal.eligibleApplications.size());
-+    Assert.assertFalse(hal.eligibleApplications.contains(app3));
-+
-+    hal.maxEligible = 2;
-+    hal.checkMaxEligible();
-+    Assert.assertEquals(2, hal.eligibleApplications.size());
-+    Assert.assertFalse(hal.eligibleApplications.contains(app5));
-+
-+    hal.maxEligible = 1;
-+    hal.checkMaxEligible();
-+    Assert.assertEquals(1, hal.eligibleApplications.size());
-+    Assert.assertFalse(hal.eligibleApplications.contains(app1));
-+  }
-+
-+  @Test(timeout = 10000)
-+  public void testFindAggregatedApps() throws Exception {
-+    MiniYARNCluster yarnCluster = null;
-+    try {
-+      Configuration conf = new Configuration();
-+      conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
-+      yarnCluster =
-+          new MiniYARNCluster(TestHadoopArchiveLogs.class.getSimpleName(), 1,
-+              1, 1, 1);
-+      yarnCluster.init(conf);
-+      yarnCluster.start();
-+      conf = yarnCluster.getConfig();
-+
-+      RMContext rmContext = yarnCluster.getResourceManager().getRMContext();
-+      RMAppImpl app1 = (RMAppImpl)createRMApp(1, conf, rmContext,
-+          LogAggregationStatus.DISABLED);
-+      RMAppImpl app2 = (RMAppImpl)createRMApp(2, conf, rmContext,
-+          LogAggregationStatus.FAILED);
-+      RMAppImpl app3 = (RMAppImpl)createRMApp(3, conf, rmContext,
-+          LogAggregationStatus.NOT_START);
-+      RMAppImpl app4 = (RMAppImpl)createRMApp(4, conf, rmContext,
-+          LogAggregationStatus.SUCCEEDED);
-+      RMAppImpl app5 = (RMAppImpl)createRMApp(5, conf, rmContext,
-+          LogAggregationStatus.RUNNING);
-+      RMAppImpl app6 = (RMAppImpl)createRMApp(6, conf, rmContext,
-+          LogAggregationStatus.RUNNING_WITH_FAILURE);
-+      RMAppImpl app7 = (RMAppImpl)createRMApp(7, conf, rmContext,
-+          LogAggregationStatus.TIME_OUT);
-+      rmContext.getRMApps().put(app1.getApplicationId(), app1);
-+      rmContext.getRMApps().put(app2.getApplicationId(), app2);
-+      rmContext.getRMApps().put(app3.getApplicationId(), app3);
-+      rmContext.getRMApps().put(app4.getApplicationId(), app4);
-+      rmContext.getRMApps().put(app5.getApplicationId(), app5);
-+      rmContext.getRMApps().put(app6.getApplicationId(), app6);
-+      rmContext.getRMApps().put(app7.getApplicationId(), app7);
-+
-+      HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
-+      Assert.assertEquals(0, hal.eligibleApplications.size());
-+      hal.findAggregatedApps();
-+      Assert.assertEquals(2, hal.eligibleApplications.size());
-+    } finally {
-+      if (yarnCluster != null) {
-+        yarnCluster.stop();
-+      }
-+    }
-+  }
-+
-+  @Test(timeout = 10000)
-+  public void testGenerateScript() throws Exception {
-+    Configuration conf = new Configuration();
-+    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
-+    ApplicationReport app1 = createAppReport(1);
-+    ApplicationReport app2 = createAppReport(2);
-+    hal.eligibleApplications.add(app1);
-+    hal.eligibleApplications.add(app2);
-+
-+    File localScript = new File("target", "script.sh");
-+    Path workingDir = new Path("/tmp", "working");
-+    Path remoteRootLogDir = new Path("/tmp", "logs");
-+    String suffix = "logs";
-+    localScript.delete();
-+    Assert.assertFalse(localScript.exists());
-+    hal.generateScript(localScript, workingDir, remoteRootLogDir, suffix);
-+    Assert.assertTrue(localScript.exists());
-+    String script = IOUtils.toString(localScript.toURI());
-+    String[] lines = script.split(System.lineSeparator());
-+    Assert.assertEquals(16, lines.length);
-+    Assert.assertEquals("#!/bin/bash", lines[0]);
-+    Assert.assertEquals("set -e", lines[1]);
-+    Assert.assertEquals("set -x", lines[2]);
-+    Assert.assertEquals("if [ \"$YARN_SHELL_ID\" == \"1\" ]; then", lines[3]);
-+    if (lines[4].contains(app1.getApplicationId().toString())) {
-+      Assert.assertEquals("\tappId=\"" + app1.getApplicationId().toString()
-+          + "\"", lines[4]);
-+      Assert.assertEquals("\tappId=\"" + app2.getApplicationId().toString()
-+          + "\"", lines[7]);
-+    } else {
-+      Assert.assertEquals("\tappId=\"" + app2.getApplicationId().toString()
-+          + "\"", lines[4]);
-+      Assert.assertEquals("\tappId=\"" + app1.getApplicationId().toString()
-+          + "\"", lines[7]);
-+    }
-+    Assert.assertEquals("\tuser=\"" + System.getProperty("user.name") + "\"",
-+        lines[5]);
-+    Assert.assertEquals("elif [ \"$YARN_SHELL_ID\" == \"2\" ]; then", lines[6]);
-+    Assert.assertEquals("\tuser=\"" + System.getProperty("user.name") + "\"",
-+        lines[8]);
-+    Assert.assertEquals("else", lines[9]);
-+    Assert.assertEquals("\techo \"Unknown Mapping!\"", lines[10]);
-+    Assert.assertEquals("\texit 1", lines[11]);
-+    Assert.assertEquals("fi", lines[12]);
-+    Assert.assertEquals("export HADOOP_CLIENT_OPTS=\"-Xmx1024m\"", lines[13]);
-+    Assert.assertTrue(lines[14].startsWith("export HADOOP_CLASSPATH="));
-+    Assert.assertEquals("\"$HADOOP_HOME\"/bin/hadoop org.apache.hadoop.tools." +
-+        "HadoopArchiveLogsRunner -appId \"$appId\" -user \"$user\" -workingDir "
-+        + workingDir.toString() + " -remoteRootLogDir " +
-+        remoteRootLogDir.toString() + " -suffix " + suffix, lines[15]);
-+  }
-+
-+  private static ApplicationReport createAppReport(int id) {
-+    ApplicationId appId = ApplicationId.newInstance(CLUSTER_TIMESTAMP, id);
-+    return ApplicationReport.newInstance(
-+        appId,
-+        ApplicationAttemptId.newInstance(appId, 1),
-+        System.getProperty("user.name"),
-+        null, null, null, 0, null, YarnApplicationState.FINISHED, null,
-+        null, 0L, 0L, FinalApplicationStatus.SUCCEEDED, null, null, 100f,
-+        null, null);
-+  }
-+
-+  private static void createFile(FileSystem fs, Path p, long sizeMultiple)
-+      throws IOException {
-+    FSDataOutputStream out = null;
-+    try {
-+      out = fs.create(p);
-+      for (int i = 0 ; i < sizeMultiple; i++) {
-+        out.write(DUMMY_DATA);
-+      }
-+    } finally {
-+      if (out != null) {
-+        out.close();
-+      }
-+    }
-+  }
-+
-+  private static RMApp createRMApp(int id, Configuration conf, RMContext rmContext,
-+       final LogAggregationStatus aggStatus) {
-+    ApplicationId appId = ApplicationId.newInstance(CLUSTER_TIMESTAMP, id);
-+    ApplicationSubmissionContext submissionContext =
-+        ApplicationSubmissionContext.newInstance(appId, "test", "default",
-+            Priority.newInstance(0), null, false, true,
-+            2, Resource.newInstance(10, 2), "test");
-+    return new RMAppImpl(appId, rmContext, conf, "test",
-+        System.getProperty("user.name"), "default", submissionContext,
-+        rmContext.getScheduler(),
-+        rmContext.getApplicationMasterService(),
-+        System.currentTimeMillis(), "test",
-+        null, null) {
-+      @Override
-+      public ApplicationReport createAndGetApplicationReport(
-+          String clientUserName, boolean allowAccess) {
-+        ApplicationReport report =
-+            super.createAndGetApplicationReport(clientUserName, allowAccess);
-+        report.setLogAggregationStatus(aggStatus);
-+        return report;
-+      }
-+    };
-+  }
-+}
-diff --git hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java
-new file mode 100644
-index 0000000..af66f14
---- /dev/null
-+++ hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogsRunner.java
-@@ -0,0 +1,143 @@
-+/**
-+ * Licensed to the Apache Software Foundation (ASF) under one
-+ * or more contributor license agreements.  See the NOTICE file
-+ * distributed with this work for additional information
-+ * regarding copyright ownership.  The ASF licenses this file
-+ * to you under the Apache License, Version 2.0 (the
-+ * "License"); you may not use this file except in compliance
-+ * with the License.  You may obtain a copy of the License at
-+ *
-+ *     http://www.apache.org/licenses/LICENSE-2.0
-+ *
-+ * Unless required by applicable law or agreed to in writing, software
-+ * distributed under the License is distributed on an "AS IS" BASIS,
-+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-+ * See the License for the specific language governing permissions and
-+ * limitations under the License.
-+ */
-+
-+package org.apache.hadoop.tools;
-+
-+import org.apache.hadoop.conf.Configuration;
-+import org.apache.hadoop.fs.FSDataOutputStream;
-+import org.apache.hadoop.fs.FileStatus;
-+import org.apache.hadoop.fs.FileSystem;
-+import org.apache.hadoop.fs.HarFs;
-+import org.apache.hadoop.fs.Path;
-+import org.apache.hadoop.hdfs.MiniDFSCluster;
-+import org.apache.hadoop.util.ToolRunner;
-+import org.apache.hadoop.yarn.api.records.ApplicationId;
-+import org.apache.hadoop.yarn.conf.YarnConfiguration;
-+import org.apache.hadoop.yarn.server.MiniYARNCluster;
-+import org.junit.Assert;
-+import org.junit.Test;
-+
-+import java.io.IOException;
-+import java.util.Arrays;
-+import java.util.Comparator;
-+import java.util.Random;
-+
-+import static org.junit.Assert.assertEquals;
-+
-+public class TestHadoopArchiveLogsRunner {
-+
-+  private static final int FILE_SIZE_INCREMENT = 4096;
-+  private static final byte[] DUMMY_DATA = new byte[FILE_SIZE_INCREMENT];
-+  static {
-+    new Random().nextBytes(DUMMY_DATA);
-+  }
-+
-+  @Test(timeout = 30000)
-+  public void testHadoopArchiveLogs() throws Exception {
-+    MiniYARNCluster yarnCluster = null;
-+    MiniDFSCluster dfsCluster = null;
-+    FileSystem fs = null;
-+    try {
-+      Configuration conf = new YarnConfiguration();
-+      conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
-+      conf.setBoolean(YarnConfiguration.YARN_MINICLUSTER_FIXED_PORTS, true);
-+      yarnCluster =
-+          new MiniYARNCluster(TestHadoopArchiveLogsRunner.class.getSimpleName(),
-+              1, 2, 1, 1);
-+      yarnCluster.init(conf);
-+      yarnCluster.start();
-+      conf = yarnCluster.getConfig();
-+      dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
-+
-+      ApplicationId app1 =
-+          ApplicationId.newInstance(System.currentTimeMillis(), 1);
-+      fs = FileSystem.get(conf);
-+      Path remoteRootLogDir = new Path(conf.get(
-+          YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
-+          YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
-+      Path workingDir = new Path(remoteRootLogDir, "archive-logs-work");
-+      String suffix = "logs";
-+      Path logDir = new Path(remoteRootLogDir,
-+          new Path(System.getProperty("user.name"), suffix));
-+      fs.mkdirs(logDir);
-+      Path app1Path = new Path(logDir, app1.toString());
-+      fs.mkdirs(app1Path);
-+      createFile(fs, new Path(app1Path, "log1"), 3);
-+      createFile(fs, new Path(app1Path, "log2"), 4);
-+      createFile(fs, new Path(app1Path, "log3"), 2);
-+      FileStatus[] app1Files = fs.listStatus(app1Path);
-+      Assert.assertEquals(3, app1Files.length);
-+
-+      String[] args = new String[]{
-+          "-appId", app1.toString(),
-+          "-user", System.getProperty("user.name"),
-+          "-workingDir", workingDir.toString(),
-+          "-remoteRootLogDir", remoteRootLogDir.toString(),
-+          "-suffix", suffix};
-+      final HadoopArchiveLogsRunner halr = new HadoopArchiveLogsRunner(conf);
-+      assertEquals(0, ToolRunner.run(halr, args));
-+
-+      fs = FileSystem.get(conf);
-+      app1Files = fs.listStatus(app1Path);
-+      Assert.assertEquals(1, app1Files.length);
-+      FileStatus harFile = app1Files[0];
-+      Assert.assertEquals(app1.toString() + ".har", harFile.getPath().getName());
-+      Path harPath = new Path("har:///" + harFile.getPath().toUri().getRawPath());
-+      FileStatus[] harLogs = HarFs.get(harPath.toUri(), conf).listStatus(harPath);
-+      Assert.assertEquals(3, harLogs.length);
-+      Arrays.sort(harLogs, new Comparator<FileStatus>() {
-+        @Override
-+        public int compare(FileStatus o1, FileStatus o2) {
-+          return o1.getPath().getName().compareTo(o2.getPath().getName());
-+        }
-+      });
-+      Assert.assertEquals("log1", harLogs[0].getPath().getName());
-+      Assert.assertEquals(3 * FILE_SIZE_INCREMENT, harLogs[0].getLen());
-+      Assert.assertEquals("log2", harLogs[1].getPath().getName());
-+      Assert.assertEquals(4 * FILE_SIZE_INCREMENT, harLogs[1].getLen());
-+      Assert.assertEquals("log3", harLogs[2].getPath().getName());
-+      Assert.assertEquals(2 * FILE_SIZE_INCREMENT, harLogs[2].getLen());
-+      Assert.assertEquals(0, fs.listStatus(workingDir).length);
-+    } finally {
-+      if (yarnCluster != null) {
-+        yarnCluster.stop();
-+      }
-+      if (fs != null) {
-+        fs.close();
-+      }
-+      if (dfsCluster != null) {
-+        dfsCluster.shutdown();
-+      }
-+    }
-+  }
-+
-+  private static void createFile(FileSystem fs, Path p, long sizeMultiple)
-+      throws IOException {
-+    FSDataOutputStream out = null;
-+    try {
-+      out = fs.create(p);
-+      for (int i = 0 ; i < sizeMultiple; i++) {
-+        out.write(DUMMY_DATA);
-+      }
-+    } finally {
-+      if (out != null) {
-+        out.close();
-+      }
-+    }
-+  }
-+}
-diff --git hadoop-tools/hadoop-tools-dist/pom.xml hadoop-tools/hadoop-tools-dist/pom.xml
-index 540401d..e6c458f 100644
---- hadoop-tools/hadoop-tools-dist/pom.xml
-+++ hadoop-tools/hadoop-tools-dist/pom.xml
-@@ -52,6 +52,11 @@
-     </dependency>
-     <dependency>
-       <groupId>org.apache.hadoop</groupId>
-+      <artifactId>hadoop-archive-logs</artifactId>
-+      <scope>compile</scope>
-+    </dependency>
-+    <dependency>
-+      <groupId>org.apache.hadoop</groupId>
-       <artifactId>hadoop-rumen</artifactId>
-       <scope>compile</scope>
-     </dependency>
-diff --git hadoop-tools/pom.xml hadoop-tools/pom.xml
-index 5b35f46..0061bf0 100644
---- hadoop-tools/pom.xml
-+++ hadoop-tools/pom.xml
-@@ -34,6 +34,7 @@
-     <module>hadoop-streaming</module>
-     <module>hadoop-distcp</module>
-     <module>hadoop-archives</module>
-+    <module>hadoop-archive-logs</module>
-     <module>hadoop-rumen</module>
-     <module>hadoop-gridmix</module>
-     <module>hadoop-datajoin</module>
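
Note: the HadoopArchiveLogsRunner shown above is driven entirely by its five
required command-line options (-appId, -user, -workingDir, -remoteRootLogDir,
-suffix), as exercised in TestHadoopArchiveLogsRunner. A minimal sketch of
invoking it programmatically; the option values below are illustrative
placeholders, not taken from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.HadoopArchiveLogsRunner;
    import org.apache.hadoop.util.ToolRunner;

    public class ArchiveLogsRunnerExample {
      public static void main(String[] ignored) throws Exception {
        Configuration conf = new Configuration();
        // handleOpts() treats all five options as required.
        String[] args = {
            "-appId", "application_1442351555510_0001",  // hypothetical app id
            "-user", System.getProperty("user.name"),
            "-workingDir", "/tmp/logs/archive-logs-work",
            "-remoteRootLogDir", "/tmp/logs",
            "-suffix", "logs"};
        // ToolRunner parses generic Hadoop options, then calls run(args).
        System.exit(ToolRunner.run(new HadoopArchiveLogsRunner(conf), args));
      }
    }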


[46/50] [abbrv] hadoop git commit: HDFS-9008. Balancer#Parameters class could use a builder pattern. (Chris Trezzo via mingma)

Posted by ec...@apache.org.
HDFS-9008. Balancer#Parameters class could use a builder pattern. (Chris Trezzo via mingma)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/083b44c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/083b44c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/083b44c1

Branch: refs/heads/HADOOP-11890
Commit: 083b44c136ea5aba660fcd1dddbb2d21513b4456
Parents: 73e3a49
Author: Ming Ma <mi...@apache.org>
Authored: Tue Sep 15 10:16:02 2015 -0700
Committer: Ming Ma <mi...@apache.org>
Committed: Tue Sep 15 10:16:02 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hadoop/hdfs/server/balancer/Balancer.java   | 134 ++++----------
 .../server/balancer/BalancerParameters.java     | 168 +++++++++++++++++
 .../hdfs/server/balancer/TestBalancer.java      | 180 ++++++++++---------
 .../balancer/TestBalancerWithHANameNodes.java   |   4 +-
 .../TestBalancerWithMultipleNameNodes.java      |  26 ++-
 .../balancer/TestBalancerWithNodeGroup.java     |   4 +-
 7 files changed, 317 insertions(+), 202 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/083b44c1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c49432d..fef8ee5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -915,6 +915,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9065. Include commas on # of files, blocks, total filesystem objects
     in NN Web UI. (Daniel Templeton via wheat9)
 
+    HDFS-9008. Balancer#Parameters class could use a builder pattern.
+    (Chris Trezzo via mingma)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/083b44c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
index 259b280..f3f3d6f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
@@ -243,7 +243,8 @@ public class Balancer {
    * namenode as a client and a secondary namenode and retry proxies
    * when connection fails.
    */
-  Balancer(NameNodeConnector theblockpool, Parameters p, Configuration conf) {
+  Balancer(NameNodeConnector theblockpool, BalancerParameters p,
+      Configuration conf) {
     final long movedWinWidth = getLong(conf,
         DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
         DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
@@ -265,13 +266,15 @@ public class Balancer {
         DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_DEFAULT);
 
     this.nnc = theblockpool;
-    this.dispatcher = new Dispatcher(theblockpool, p.includedNodes,
-        p.excludedNodes, movedWinWidth, moverThreads, dispatcherThreads,
-        maxConcurrentMovesPerNode, getBlocksSize, getBlocksMinBlockSize, conf);
-    this.threshold = p.threshold;
-    this.policy = p.policy;
-    this.sourceNodes = p.sourceNodes;
-    this.runDuringUpgrade = p.runDuringUpgrade;
+    this.dispatcher =
+        new Dispatcher(theblockpool, p.getIncludedNodes(),
+            p.getExcludedNodes(), movedWinWidth, moverThreads,
+            dispatcherThreads, maxConcurrentMovesPerNode, getBlocksSize,
+            getBlocksMinBlockSize, conf);
+    this.threshold = p.getThreshold();
+    this.policy = p.getBalancingPolicy();
+    this.sourceNodes = p.getSourceNodes();
+    this.runDuringUpgrade = p.getRunDuringUpgrade();
 
     this.maxSizeToMove = getLong(conf,
         DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY,
@@ -629,7 +632,7 @@ public class Balancer {
    * for each namenode,
    * execute a {@link Balancer} to work through all datanodes once.  
    */
-  static int run(Collection<URI> namenodes, final Parameters p,
+  static int run(Collection<URI> namenodes, final BalancerParameters p,
       Configuration conf) throws IOException, InterruptedException {
     final long sleeptime =
         conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
@@ -638,24 +641,25 @@ public class Balancer {
             DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
     LOG.info("namenodes  = " + namenodes);
     LOG.info("parameters = " + p);
-    LOG.info("included nodes = " + p.includedNodes);
-    LOG.info("excluded nodes = " + p.excludedNodes);
-    LOG.info("source nodes = " + p.sourceNodes);
-    
+    LOG.info("included nodes = " + p.getIncludedNodes());
+    LOG.info("excluded nodes = " + p.getExcludedNodes());
+    LOG.info("source nodes = " + p.getSourceNodes());
+
     System.out.println("Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved");
     
     List<NameNodeConnector> connectors = Collections.emptyList();
     try {
       connectors = NameNodeConnector.newNameNodeConnectors(namenodes, 
-            Balancer.class.getSimpleName(), BALANCER_ID_PATH, conf, p.maxIdleIteration);
-    
+              Balancer.class.getSimpleName(), BALANCER_ID_PATH, conf,
+              p.getMaxIdleIteration());
+
       boolean done = false;
       for(int iteration = 0; !done; iteration++) {
         done = true;
         Collections.shuffle(connectors);
         for(NameNodeConnector nnc : connectors) {
-          if (p.blockpools.size() == 0
-              || p.blockpools.contains(nnc.getBlockpoolID())) {
+          if (p.getBlockPools().size() == 0
+              || p.getBlockPools().contains(nnc.getBlockpoolID())) {
             final Balancer b = new Balancer(nnc, p, conf);
             final Result r = b.runOneIteration();
             r.print(iteration, System.out);
@@ -705,65 +709,6 @@ public class Balancer {
     return time+" "+unit;
   }
 
-  static class Parameters {
-    static final Parameters DEFAULT =
-        new Parameters(BalancingPolicy.Node.INSTANCE, 10.0,
-            NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
-            Collections.<String> emptySet(), Collections.<String> emptySet(),
-            Collections.<String> emptySet(), Collections.<String> emptySet(),
-            false);
-
-    final BalancingPolicy policy;
-    final double threshold;
-    final int maxIdleIteration;
-    /** Exclude the nodes in this set. */
-    final Set<String> excludedNodes;
-    /** If empty, include any node; otherwise, include only these nodes. */
-    final Set<String> includedNodes;
-    /** If empty, any node can be a source;
-     *  otherwise, use only these nodes as source nodes.
-     */
-    final Set<String> sourceNodes;
-    /**
-     * A set of block pools to run the balancer on.
-     */
-    final Set<String> blockpools;
-    /**
-     * Whether to run the balancer during upgrade.
-     */
-    final boolean runDuringUpgrade;
-
-    Parameters(BalancingPolicy policy, double threshold, int maxIdleIteration,
-        Set<String> excludedNodes, Set<String> includedNodes,
-        Set<String> sourceNodes, Set<String> blockpools,
-        boolean runDuringUpgrade) {
-      this.policy = policy;
-      this.threshold = threshold;
-      this.maxIdleIteration = maxIdleIteration;
-      this.excludedNodes = excludedNodes;
-      this.includedNodes = includedNodes;
-      this.sourceNodes = sourceNodes;
-      this.blockpools = blockpools;
-      this.runDuringUpgrade = runDuringUpgrade;
-    }
-
-    @Override
-    public String toString() {
-      return String.format("%s.%s [%s,"
-              + " threshold = %s,"
-              + " max idle iteration = %s,"
-              + " #excluded nodes = %s,"
-              + " #included nodes = %s,"
-              + " #source nodes = %s,"
-              + " #blockpools = %s,"
-              + " run during upgrade = %s]",
-          Balancer.class.getSimpleName(), getClass().getSimpleName(), policy,
-          threshold, maxIdleIteration, excludedNodes.size(),
-          includedNodes.size(), sourceNodes.size(), blockpools.size(),
-          runDuringUpgrade);
-    }
-  }
-
   static class Cli extends Configured implements Tool {
     /**
      * Parse arguments and then run Balancer.
@@ -796,15 +741,10 @@ public class Balancer {
     }
 
     /** parse command line arguments */
-    static Parameters parse(String[] args) {
-      BalancingPolicy policy = Parameters.DEFAULT.policy;
-      double threshold = Parameters.DEFAULT.threshold;
-      int maxIdleIteration = Parameters.DEFAULT.maxIdleIteration;
-      Set<String> excludedNodes = Parameters.DEFAULT.excludedNodes;
-      Set<String> includedNodes = Parameters.DEFAULT.includedNodes;
-      Set<String> sourceNodes = Parameters.DEFAULT.sourceNodes;
-      Set<String> blockpools = Parameters.DEFAULT.blockpools;
-      boolean runDuringUpgrade = Parameters.DEFAULT.runDuringUpgrade;
+    static BalancerParameters parse(String[] args) {
+      Set<String> excludedNodes = null;
+      Set<String> includedNodes = null;
+      BalancerParameters.Builder b = new BalancerParameters.Builder();
 
       if (args != null) {
         try {
@@ -813,12 +753,13 @@ public class Balancer {
               checkArgument(++i < args.length,
                 "Threshold value is missing: args = " + Arrays.toString(args));
               try {
-                threshold = Double.parseDouble(args[i]);
+                double threshold = Double.parseDouble(args[i]);
                 if (threshold < 1 || threshold > 100) {
                   throw new IllegalArgumentException(
                       "Number out of range: threshold = " + threshold);
                 }
                 LOG.info( "Using a threshold of " + threshold );
+                b.setThreshold(threshold);
               } catch(IllegalArgumentException e) {
                 System.err.println(
                     "Expecting a number in the range of [1.0, 100.0]: "
@@ -829,7 +770,7 @@ public class Balancer {
               checkArgument(++i < args.length,
                 "Policy value is missing: args = " + Arrays.toString(args));
               try {
-                policy = BalancingPolicy.parse(args[i]);
+                b.setBalancingPolicy(BalancingPolicy.parse(args[i]));
               } catch(IllegalArgumentException e) {
                 System.err.println("Illegal policy name: " + args[i]);
                 throw e;
@@ -837,28 +778,33 @@ public class Balancer {
             } else if ("-exclude".equalsIgnoreCase(args[i])) {
               excludedNodes = new HashSet<>();
               i = processHostList(args, i, "exclude", excludedNodes);
+              b.setExcludedNodes(excludedNodes);
             } else if ("-include".equalsIgnoreCase(args[i])) {
               includedNodes = new HashSet<>();
               i = processHostList(args, i, "include", includedNodes);
+              b.setIncludedNodes(includedNodes);
             } else if ("-source".equalsIgnoreCase(args[i])) {
-              sourceNodes = new HashSet<>();
+              Set<String> sourceNodes = new HashSet<>();
               i = processHostList(args, i, "source", sourceNodes);
+              b.setSourceNodes(sourceNodes);
             } else if ("-blockpools".equalsIgnoreCase(args[i])) {
               checkArgument(
                   ++i < args.length,
                   "blockpools value is missing: args = "
                       + Arrays.toString(args));
-              blockpools = parseBlockPoolList(args[i]);
+              Set<String> blockpools = parseBlockPoolList(args[i]);
               LOG.info("Balancer will run on the following blockpools: "
                   + blockpools.toString());
+              b.setBlockpools(blockpools);
             } else if ("-idleiterations".equalsIgnoreCase(args[i])) {
               checkArgument(++i < args.length,
                   "idleiterations value is missing: args = " + Arrays
                       .toString(args));
-              maxIdleIteration = Integer.parseInt(args[i]);
+              int maxIdleIteration = Integer.parseInt(args[i]);
               LOG.info("Using a idleiterations of " + maxIdleIteration);
+              b.setMaxIdleIteration(maxIdleIteration);
             } else if ("-runDuringUpgrade".equalsIgnoreCase(args[i])) {
-              runDuringUpgrade = true;
+              b.setRunDuringUpgrade(true);
               LOG.info("Will run the balancer even during an ongoing HDFS "
                   + "upgrade. Most users will not want to run the balancer "
                   + "during an upgrade since it will not affect used space "
@@ -868,16 +814,14 @@ public class Balancer {
                   + Arrays.toString(args));
             }
           }
-          checkArgument(excludedNodes.isEmpty() || includedNodes.isEmpty(),
+          checkArgument(excludedNodes == null || includedNodes == null,
               "-exclude and -include options cannot be specified together.");
         } catch(RuntimeException e) {
           printUsage(System.err);
           throw e;
         }
       }
-      
-      return new Parameters(policy, threshold, maxIdleIteration, excludedNodes,
-          includedNodes, sourceNodes, blockpools, runDuringUpgrade);
+      return b.build();
     }
 
     private static int processHostList(String[] args, int i, String type,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/083b44c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java
new file mode 100644
index 0000000..5d5e9b1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java
@@ -0,0 +1,168 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.balancer;
+
+import java.util.Collections;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+final class BalancerParameters {
+  private final BalancingPolicy policy;
+  private final double threshold;
+  private final int maxIdleIteration;
+  /** Exclude the nodes in this set. */
+  private final Set<String> excludedNodes;
+  /** If empty, include any node; otherwise, include only these nodes. */
+  private final Set<String> includedNodes;
+  /**
+   * If empty, any node can be a source; otherwise, use only these nodes as
+   * source nodes.
+   */
+  private final Set<String> sourceNodes;
+  /**
+   * A set of block pools to run the balancer on.
+   */
+  private final Set<String> blockpools;
+  /**
+   * Whether to run the balancer during upgrade.
+   */
+  private final boolean runDuringUpgrade;
+
+  static final BalancerParameters DEFAULT = new BalancerParameters();
+
+  private BalancerParameters() {
+    this(new Builder());
+  }
+
+  private BalancerParameters(Builder builder) {
+    this.policy = builder.policy;
+    this.threshold = builder.threshold;
+    this.maxIdleIteration = builder.maxIdleIteration;
+    this.excludedNodes = builder.excludedNodes;
+    this.includedNodes = builder.includedNodes;
+    this.sourceNodes = builder.sourceNodes;
+    this.blockpools = builder.blockpools;
+    this.runDuringUpgrade = builder.runDuringUpgrade;
+  }
+
+  BalancingPolicy getBalancingPolicy() {
+    return this.policy;
+  }
+
+  double getThreshold() {
+    return this.threshold;
+  }
+
+  int getMaxIdleIteration() {
+    return this.maxIdleIteration;
+  }
+
+  Set<String> getExcludedNodes() {
+    return this.excludedNodes;
+  }
+
+  Set<String> getIncludedNodes() {
+    return this.includedNodes;
+  }
+
+  Set<String> getSourceNodes() {
+    return this.sourceNodes;
+  }
+
+  Set<String> getBlockPools() {
+    return this.blockpools;
+  }
+
+  boolean getRunDuringUpgrade() {
+    return this.runDuringUpgrade;
+  }
+
+  @Override
+  public String toString() {
+    return String.format("%s.%s [%s," + " threshold = %s,"
+        + " max idle iteration = %s," + " #excluded nodes = %s,"
+        + " #included nodes = %s," + " #source nodes = %s,"
+        + " #blockpools = %s," + " run during upgrade = %s]",
+        Balancer.class.getSimpleName(), getClass().getSimpleName(), policy,
+        threshold, maxIdleIteration, excludedNodes.size(),
+        includedNodes.size(), sourceNodes.size(), blockpools.size(),
+        runDuringUpgrade);
+  }
+
+  static class Builder {
+    // Defaults
+    private BalancingPolicy policy = BalancingPolicy.Node.INSTANCE;
+    private double threshold = 10.0;
+    private int maxIdleIteration =
+        NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS;
+    private Set<String> excludedNodes = Collections.<String> emptySet();
+    private Set<String> includedNodes = Collections.<String> emptySet();
+    private Set<String> sourceNodes = Collections.<String> emptySet();
+    private Set<String> blockpools = Collections.<String> emptySet();
+    private boolean runDuringUpgrade = false;
+
+    Builder() {
+    }
+
+    Builder setBalancingPolicy(BalancingPolicy p) {
+      this.policy = p;
+      return this;
+    }
+
+    Builder setThreshold(double t) {
+      this.threshold = t;
+      return this;
+    }
+
+    Builder setMaxIdleIteration(int m) {
+      this.maxIdleIteration = m;
+      return this;
+    }
+
+    Builder setExcludedNodes(Set<String> nodes) {
+      this.excludedNodes = nodes;
+      return this;
+    }
+
+    Builder setIncludedNodes(Set<String> nodes) {
+      this.includedNodes = nodes;
+      return this;
+    }
+
+    Builder setSourceNodes(Set<String> nodes) {
+      this.sourceNodes = nodes;
+      return this;
+    }
+
+    Builder setBlockpools(Set<String> pools) {
+      this.blockpools = pools;
+      return this;
+    }
+
+    Builder setRunDuringUpgrade(boolean run) {
+      this.runDuringUpgrade = run;
+      return this;
+    }
+
+    BalancerParameters build() {
+      return new BalancerParameters(this);
+    }
+  }
+}
\ No newline at end of file
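
Note: the builder above replaces the old eight-argument Parameters
constructor, so call sites only set the values that differ from the defaults.
A minimal sketch of the intended call shape, assuming it lives in the same
org.apache.hadoop.hdfs.server.balancer package (the class and its Builder are
package-private); the helper class itself is hypothetical:

    package org.apache.hadoop.hdfs.server.balancer;

    import java.net.URI;
    import java.util.Collection;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;

    final class BalancerParametersExample {
      static int runWithThreshold(Configuration conf) throws Exception {
        // Every setter returns the Builder, so calls chain; unset fields
        // keep the defaults declared at the top of Builder.
        BalancerParameters p = new BalancerParameters.Builder()
            .setThreshold(5.0)
            .setRunDuringUpgrade(false)
            .build();
        Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
        // Runs one Balancer iteration per namenode until no pass moves data.
        return Balancer.run(namenodes, p, conf);
      }
    }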

http://git-wip-us.apache.org/repos/asf/hadoop/blob/083b44c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index b0223d2..a655d66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -75,8 +75,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.balancer.Balancer.Cli;
-import org.apache.hadoop.hdfs.server.balancer.Balancer.Parameters;
 import org.apache.hadoop.hdfs.server.balancer.Balancer.Result;
+import org.apache.hadoop.hdfs.server.balancer.BalancerParameters;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
@@ -319,7 +319,7 @@ public class TestBalancer {
    * @throws TimeoutException
    */
   static void waitForBalancer(long totalUsedSpace, long totalCapacity,
-      ClientProtocol client, MiniDFSCluster cluster, Balancer.Parameters p)
+      ClientProtocol client, MiniDFSCluster cluster, BalancerParameters p)
   throws IOException, TimeoutException {
     waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, 0);
   }
@@ -377,7 +377,7 @@ public class TestBalancer {
 
       // start rebalancing
       Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
-      int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
+      int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf);
       assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
       
     } finally {
@@ -393,16 +393,16 @@ public class TestBalancer {
    * @throws TimeoutException
    */
   static void waitForBalancer(long totalUsedSpace, long totalCapacity,
-      ClientProtocol client, MiniDFSCluster cluster, Balancer.Parameters p,
+      ClientProtocol client, MiniDFSCluster cluster, BalancerParameters p,
       int expectedExcludedNodes) throws IOException, TimeoutException {
     long timeout = TIMEOUT;
     long failtime = (timeout <= 0L) ? Long.MAX_VALUE
         : Time.monotonicNow() + timeout;
-    if (!p.includedNodes.isEmpty()) {
-      totalCapacity = p.includedNodes.size() * CAPACITY;
+    if (!p.getIncludedNodes().isEmpty()) {
+      totalCapacity = p.getIncludedNodes().size() * CAPACITY;
     }
-    if (!p.excludedNodes.isEmpty()) {
-        totalCapacity -= p.excludedNodes.size() * CAPACITY;
+    if (!p.getExcludedNodes().isEmpty()) {
+      totalCapacity -= p.getExcludedNodes().size() * CAPACITY;
     }
     final double avgUtilization = ((double)totalUsedSpace) / totalCapacity;
     boolean balanced;
@@ -415,12 +415,12 @@ public class TestBalancer {
       for (DatanodeInfo datanode : datanodeReport) {
         double nodeUtilization = ((double)datanode.getDfsUsed())
             / datanode.getCapacity();
-        if (Dispatcher.Util.isExcluded(p.excludedNodes, datanode)) {
+        if (Dispatcher.Util.isExcluded(p.getExcludedNodes(), datanode)) {
           assertTrue(nodeUtilization == 0);
           actualExcludedNodeCount++;
           continue;
         }
-        if (!Dispatcher.Util.isIncluded(p.includedNodes, datanode)) {
+        if (!Dispatcher.Util.isIncluded(p.getIncludedNodes(), datanode)) {
           assertTrue(nodeUtilization == 0);
           actualExcludedNodeCount++;
           continue;
@@ -636,16 +636,14 @@ public class TestBalancer {
         }
       }
       // run balancer and validate results
-      Balancer.Parameters p = Balancer.Parameters.DEFAULT;
+      BalancerParameters.Builder pBuilder =
+          new BalancerParameters.Builder();
       if (nodes != null) {
-        p = new Balancer.Parameters(
-            Balancer.Parameters.DEFAULT.policy,
-            Balancer.Parameters.DEFAULT.threshold,
-            Balancer.Parameters.DEFAULT.maxIdleIteration,
-            nodes.getNodesToBeExcluded(), nodes.getNodesToBeIncluded(),
-            Balancer.Parameters.DEFAULT.sourceNodes,
-            Balancer.Parameters.DEFAULT.blockpools, false);
+        pBuilder.setExcludedNodes(nodes.getNodesToBeExcluded());
+        pBuilder.setIncludedNodes(nodes.getNodesToBeIncluded());
+        pBuilder.setRunDuringUpgrade(false);
       }
+      BalancerParameters p = pBuilder.build();
 
       int expectedExcludedNodes = 0;
       if (nodes != null) {
@@ -668,14 +666,15 @@ public class TestBalancer {
     }
   }
 
-  private void runBalancer(Configuration conf,
-      long totalUsedSpace, long totalCapacity) throws Exception {
-    runBalancer(conf, totalUsedSpace, totalCapacity, Balancer.Parameters.DEFAULT, 0);
+  private void runBalancer(Configuration conf, long totalUsedSpace,
+      long totalCapacity) throws Exception {
+    runBalancer(conf, totalUsedSpace, totalCapacity,
+        BalancerParameters.DEFAULT, 0);
   }
 
-  private void runBalancer(Configuration conf,
-     long totalUsedSpace, long totalCapacity, Balancer.Parameters p,
-     int excludedNodes) throws Exception {
+  private void runBalancer(Configuration conf, long totalUsedSpace,
+      long totalCapacity, BalancerParameters p, int excludedNodes)
+      throws Exception {
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
 
     // start rebalancing
@@ -693,7 +692,8 @@ public class TestBalancer {
     waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, excludedNodes);
   }
 
-  private static int runBalancer(Collection<URI> namenodes, final Parameters p,
+  private static int runBalancer(Collection<URI> namenodes,
+      final BalancerParameters p,
       Configuration conf) throws IOException, InterruptedException {
     final long sleeptime =
         conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
@@ -710,8 +710,8 @@ public class TestBalancer {
     try {
       connectors = NameNodeConnector.newNameNodeConnectors(namenodes, 
           Balancer.class.getSimpleName(), Balancer.BALANCER_ID_PATH, conf,
-          Balancer.Parameters.DEFAULT.maxIdleIteration);
-    
+              BalancerParameters.DEFAULT.getMaxIdleIteration());
+
       boolean done = false;
       for(int iteration = 0; !done; iteration++) {
         done = true;
@@ -747,45 +747,45 @@ public class TestBalancer {
     return ExitStatus.SUCCESS.getExitCode();
   }
 
-  private void runBalancerCli(Configuration conf,
-      long totalUsedSpace, long totalCapacity,
-      Balancer.Parameters p, boolean useFile, int expectedExcludedNodes) throws Exception {
+  private void runBalancerCli(Configuration conf, long totalUsedSpace,
+      long totalCapacity, BalancerParameters p, boolean useFile,
+      int expectedExcludedNodes) throws Exception {
     waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
     List <String> args = new ArrayList<String>();
     args.add("-policy");
     args.add("datanode");
 
     File excludeHostsFile = null;
-    if (!p.excludedNodes.isEmpty()) {
+    if (!p.getExcludedNodes().isEmpty()) {
       args.add("-exclude");
       if (useFile) {
         excludeHostsFile = new File ("exclude-hosts-file");
         PrintWriter pw = new PrintWriter(excludeHostsFile);
-        for (String host: p.excludedNodes) {
+        for (String host : p.getExcludedNodes()) {
           pw.write( host + "\n");
         }
         pw.close();
         args.add("-f");
         args.add("exclude-hosts-file");
       } else {
-        args.add(StringUtils.join(p.excludedNodes, ','));
+        args.add(StringUtils.join(p.getExcludedNodes(), ','));
       }
     }
 
     File includeHostsFile = null;
-    if (!p.includedNodes.isEmpty()) {
+    if (!p.getIncludedNodes().isEmpty()) {
       args.add("-include");
       if (useFile) {
         includeHostsFile = new File ("include-hosts-file");
         PrintWriter pw = new PrintWriter(includeHostsFile);
-        for (String host: p.includedNodes){
+        for (String host : p.getIncludedNodes()) {
           pw.write( host + "\n");
         }
         pw.close();
         args.add("-f");
         args.add("include-hosts-file");
       } else {
-        args.add(StringUtils.join(p.includedNodes, ','));
+        args.add(StringUtils.join(p.getIncludedNodes(), ','));
       }
     }
 
@@ -879,14 +879,11 @@ public class TestBalancer {
       Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
       Set<String>  datanodes = new HashSet<String>();
       datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName());
-      Balancer.Parameters p = new Balancer.Parameters(
-          Balancer.Parameters.DEFAULT.policy,
-          Balancer.Parameters.DEFAULT.threshold,
-          Balancer.Parameters.DEFAULT.maxIdleIteration,
-          datanodes, Balancer.Parameters.DEFAULT.includedNodes,
-          Balancer.Parameters.DEFAULT.sourceNodes,
-          Balancer.Parameters.DEFAULT.blockpools, false);
-      final int r = Balancer.run(namenodes, p, conf);
+      BalancerParameters.Builder pBuilder =
+          new BalancerParameters.Builder();
+      pBuilder.setExcludedNodes(datanodes);
+      pBuilder.setRunDuringUpgrade(false);
+      final int r = Balancer.run(namenodes, pBuilder.build(), conf);
       assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
     } finally {
       cluster.shutdown();
@@ -1081,20 +1078,20 @@ public class TestBalancer {
   @Test
   public void testBalancerCliParseBlockpools() {
     String[] parameters = new String[] { "-blockpools", "bp-1,bp-2,bp-3" };
-    Balancer.Parameters p = Balancer.Cli.parse(parameters);
-    assertEquals(3, p.blockpools.size());
+    BalancerParameters p = Balancer.Cli.parse(parameters);
+    assertEquals(3, p.getBlockPools().size());
 
     parameters = new String[] { "-blockpools", "bp-1" };
     p = Balancer.Cli.parse(parameters);
-    assertEquals(1, p.blockpools.size());
+    assertEquals(1, p.getBlockPools().size());
 
     parameters = new String[] { "-blockpools", "bp-1,,bp-2" };
     p = Balancer.Cli.parse(parameters);
-    assertEquals(3, p.blockpools.size());
+    assertEquals(3, p.getBlockPools().size());
 
     parameters = new String[] { "-blockpools", "bp-1," };
     p = Balancer.Cli.parse(parameters);
-    assertEquals(1, p.blockpools.size());
+    assertEquals(1, p.getBlockPools().size());
   }
 
   /**
@@ -1123,7 +1120,8 @@ public class TestBalancer {
     excludeHosts.add( "datanodeZ");
     doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
         new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
-        excludeHosts, Parameters.DEFAULT.includedNodes), false, false);
+            excludeHosts, BalancerParameters.DEFAULT.getIncludedNodes()),
+        false, false);
   }
 
   /**
@@ -1151,9 +1149,11 @@ public class TestBalancer {
     Set<String> excludeHosts = new HashSet<String>();
     excludeHosts.add( "datanodeY");
     excludeHosts.add( "datanodeZ");
-    doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
-      new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"}, excludeHosts,
-      Parameters.DEFAULT.includedNodes), true, false);
+    doTest(conf, new long[] { CAPACITY, CAPACITY },
+        new String[] { RACK0, RACK1 }, CAPACITY, RACK2, new HostNameBasedNodes(
+            new String[] { "datanodeX", "datanodeY", "datanodeZ" },
+            excludeHosts, BalancerParameters.DEFAULT.getIncludedNodes()), true,
+        false);
   }
 
   /**
@@ -1183,7 +1183,8 @@ public class TestBalancer {
     excludeHosts.add( "datanodeZ");
     doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
         new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
-        excludeHosts, Parameters.DEFAULT.includedNodes), true, true);
+            excludeHosts, BalancerParameters.DEFAULT.getIncludedNodes()), true,
+        true);
   }
 
   /**
@@ -1212,7 +1213,8 @@ public class TestBalancer {
     includeHosts.add( "datanodeY");
     doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
         new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
-        Parameters.DEFAULT.excludedNodes, includeHosts), false, false);
+            BalancerParameters.DEFAULT.getExcludedNodes(), includeHosts),
+        false, false);
   }
 
   /**
@@ -1241,7 +1243,8 @@ public class TestBalancer {
     includeHosts.add( "datanodeY");
     doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
         new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
-        Parameters.DEFAULT.excludedNodes, includeHosts), true, false);
+            BalancerParameters.DEFAULT.getExcludedNodes(), includeHosts), true,
+        false);
   }
 
   /**
@@ -1270,7 +1273,8 @@ public class TestBalancer {
     includeHosts.add( "datanodeY");
     doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
         new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
-        Parameters.DEFAULT.excludedNodes, includeHosts), true, true);
+            BalancerParameters.DEFAULT.getExcludedNodes(), includeHosts), true,
+        true);
   }
 
   /**
@@ -1343,7 +1347,7 @@ public class TestBalancer {
       Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
 
       // Run Balancer
-      final Balancer.Parameters p = Parameters.DEFAULT;
+      final BalancerParameters p = BalancerParameters.DEFAULT;
       final int r = Balancer.run(namenodes, p, conf);
 
       // Validate no RAM_DISK block should be moved
@@ -1395,7 +1399,7 @@ public class TestBalancer {
       Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
 
       // Run balancer
-      final Balancer.Parameters p = Parameters.DEFAULT;
+      final BalancerParameters p = BalancerParameters.DEFAULT;
 
       fs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
       fs.rollingUpgrade(HdfsConstants.RollingUpgradeAction.PREPARE);
@@ -1406,14 +1410,10 @@ public class TestBalancer {
           Balancer.run(namenodes, p, conf));
 
       // Should work with the -runDuringUpgrade flag.
-      final Balancer.Parameters runDuringUpgrade =
-          new Balancer.Parameters(Parameters.DEFAULT.policy,
-              Parameters.DEFAULT.threshold,
-              Parameters.DEFAULT.maxIdleIteration,
-              Parameters.DEFAULT.excludedNodes,
-              Parameters.DEFAULT.includedNodes,
-              Parameters.DEFAULT.sourceNodes,
-              Balancer.Parameters.DEFAULT.blockpools, true);
+      BalancerParameters.Builder b =
+          new BalancerParameters.Builder();
+      b.setRunDuringUpgrade(true);
+      final BalancerParameters runDuringUpgrade = b.build();
       assertEquals(ExitStatus.SUCCESS.getExitCode(),
           Balancer.run(namenodes, runDuringUpgrade, conf));
 
@@ -1480,7 +1480,7 @@ public class TestBalancer {
       // update space info
       cluster.triggerHeartbeats();
 
-      Balancer.Parameters p = Balancer.Parameters.DEFAULT;
+      BalancerParameters p = BalancerParameters.DEFAULT;
       Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
       final int r = Balancer.run(namenodes, p, conf);
 
@@ -1612,12 +1612,11 @@ public class TestBalancer {
       final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
 
       { // run Balancer with min-block-size=50
-        final Parameters p = new Parameters(
-            BalancingPolicy.Node.INSTANCE, 1,
-            NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
-            Collections.<String> emptySet(), Collections.<String> emptySet(),
-            Collections.<String> emptySet(),
-            Balancer.Parameters.DEFAULT.blockpools, false);
+        BalancerParameters.Builder b =
+            new BalancerParameters.Builder();
+        b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
+        b.setThreshold(1);
+        final BalancerParameters p = b.build();
 
         conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
         final int r = Balancer.run(namenodes, p, conf);
@@ -1632,11 +1631,12 @@ public class TestBalancer {
         for(int i = capacities.length; i < datanodes.size(); i++) {
           sourceNodes.add(datanodes.get(i).getDisplayName());
         }
-        final Parameters p = new Parameters(
-          BalancingPolicy.Node.INSTANCE, 1,
-          NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
-          Collections.<String> emptySet(), Collections.<String> emptySet(),
-          sourceNodes, Balancer.Parameters.DEFAULT.blockpools, false);
+        BalancerParameters.Builder b =
+            new BalancerParameters.Builder();
+        b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
+        b.setThreshold(1);
+        b.setSourceNodes(sourceNodes);
+        final BalancerParameters p = b.build();
 
         conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 50);
         final int r = Balancer.run(namenodes, p, conf);
@@ -1647,11 +1647,12 @@ public class TestBalancer {
         final Set<String> sourceNodes = new HashSet<>();
         final List<DataNode> datanodes = cluster.getDataNodes();
         sourceNodes.add(datanodes.get(0).getDisplayName());
-        final Parameters p = new Parameters(
-          BalancingPolicy.Node.INSTANCE, 1,
-          NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
-          Collections.<String> emptySet(), Collections.<String> emptySet(),
-          sourceNodes, Balancer.Parameters.DEFAULT.blockpools, false);
+        BalancerParameters.Builder b =
+            new BalancerParameters.Builder();
+        b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
+        b.setThreshold(1);
+        b.setSourceNodes(sourceNodes);
+        final BalancerParameters p = b.build();
 
         conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
         final int r = Balancer.run(namenodes, p, conf);
@@ -1664,11 +1665,12 @@ public class TestBalancer {
         for(int i = 0; i < capacities.length; i++) {
           sourceNodes.add(datanodes.get(i).getDisplayName());
         }
-        final Parameters p = new Parameters(
-          BalancingPolicy.Node.INSTANCE, 1,
-          NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
-          Collections.<String> emptySet(), Collections.<String> emptySet(),
-          sourceNodes, Balancer.Parameters.DEFAULT.blockpools, false);
+        BalancerParameters.Builder b =
+            new BalancerParameters.Builder();
+        b.setBalancingPolicy(BalancingPolicy.Node.INSTANCE);
+        b.setThreshold(1);
+        b.setSourceNodes(sourceNodes);
+        final BalancerParameters p = b.build();
 
         conf.setLong(DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_KEY, 1);
         final int r = Balancer.run(namenodes, p, conf);
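
All of the TestBalancer hunks above make the same mechanical change: the old positional Balancer.Parameters constructor, which forced every caller to spell out all eight arguments, gives way to a BalancerParameters.Builder that starts from the defaults and overrides only the fields a test cares about. The class itself is not part of this diff, so the sketch below is a minimal reconstruction inferred from the call sites; the field names, default values, and the fields omitted for brevity (policy, max idle iterations, excluded/included nodes, blockpools) are assumptions.

import java.util.Collections;
import java.util.Set;

// Illustrative reconstruction only -- not the actual
// org.apache.hadoop.hdfs.server.balancer.BalancerParameters source.
public final class BalancerParameters {
  static final BalancerParameters DEFAULT = new Builder().build();

  private final double threshold;
  private final Set<String> sourceNodes;
  private final boolean runDuringUpgrade;

  private BalancerParameters(Builder b) {
    this.threshold = b.threshold;
    this.sourceNodes = b.sourceNodes;
    this.runDuringUpgrade = b.runDuringUpgrade;
  }

  double getThreshold() { return threshold; }
  Set<String> getSourceNodes() { return sourceNodes; }
  boolean getRunDuringUpgrade() { return runDuringUpgrade; }

  static class Builder {
    // Assumed defaults; the real values mirror the old Parameters.DEFAULT.
    private double threshold = 10.0;
    private Set<String> sourceNodes = Collections.emptySet();
    private boolean runDuringUpgrade = false;

    void setThreshold(double t) { this.threshold = t; }
    void setSourceNodes(Set<String> nodes) { this.sourceNodes = nodes; }
    void setRunDuringUpgrade(boolean flag) { this.runDuringUpgrade = flag; }

    BalancerParameters build() { return new BalancerParameters(this); }
  }
}

A call site then shrinks to the shape seen in the hunks: create a Builder, set the one or two fields under test, and build().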

http://git-wip-us.apache.org/repos/asf/hadoop/blob/083b44c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index 7559de4..1693cf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -97,10 +97,10 @@ public class TestBalancerWithHANameNodes {
       Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
       assertEquals(1, namenodes.size());
       assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster)));
-      final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
+      final int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf);
       assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
       TestBalancer.waitForBalancer(totalUsedSpace, totalCapacity, client,
-          cluster, Balancer.Parameters.DEFAULT);
+          cluster, BalancerParameters.DEFAULT);
     } finally {
       cluster.shutdown();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/083b44c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
index b07ad89..c5d16ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.balancer.BalancerParameters;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.log4j.Level;
@@ -84,10 +85,10 @@ public class TestBalancerWithMultipleNameNodes {
     final MiniDFSCluster cluster;
     final ClientProtocol[] clients;
     final short replication;
-    final Balancer.Parameters parameters;
+    final BalancerParameters parameters;
 
     Suite(MiniDFSCluster cluster, final int nNameNodes, final int nDataNodes,
-        Balancer.Parameters parameters, Configuration conf) throws IOException {
+        BalancerParameters parameters, Configuration conf) throws IOException {
       this.conf = conf;
       this.cluster = cluster;
       clients = new ClientProtocol[nNameNodes];
@@ -204,7 +205,7 @@ public class TestBalancerWithMultipleNameNodes {
       balanced = true;
       for(int d = 0; d < used.length; d++) {
         final double p = used[d]*100.0/cap[d];
-        balanced = p <= avg + s.parameters.threshold;
+        balanced = p <= avg + s.parameters.getThreshold();
         if (!balanced) {
           if (i % 100 == 0) {
             LOG.warn("datanodes " + d + " is not yet balanced: "
@@ -278,13 +279,14 @@ public class TestBalancerWithMultipleNameNodes {
     DatanodeStorageReport[]> getStorageReports(Suite s) throws IOException {
     Map<Integer, DatanodeStorageReport[]> reports =
         new HashMap<Integer, DatanodeStorageReport[]>();
-    if (s.parameters.blockpools.size() == 0) {
+    if (s.parameters.getBlockPools().size() == 0) {
       // the blockpools parameter was not set, so we don't need to track any
       // blockpools.
       return Collections.emptyMap();
     }
     for (int i = 0; i < s.clients.length; i++) {
-      if (s.parameters.blockpools.contains(s.cluster.getNamesystem(i)
+      if (s.parameters.getBlockPools().contains(
+          s.cluster.getNamesystem(i)
           .getBlockPoolId())) {
         // we want to ensure that blockpools not specified by the balancer
         // parameters were left alone. Therefore, if the pool was specified,
@@ -388,14 +390,10 @@ public class TestBalancerWithMultipleNameNodes {
         for (int i = 0; i < nNameNodesToBalance; i++) {
           blockpools.add(cluster.getNamesystem(i).getBlockPoolId());
         }
-        Balancer.Parameters params =
-            new Balancer.Parameters(Balancer.Parameters.DEFAULT.policy,
-                Balancer.Parameters.DEFAULT.threshold,
-                Balancer.Parameters.DEFAULT.maxIdleIteration,
-                Balancer.Parameters.DEFAULT.excludedNodes,
-                Balancer.Parameters.DEFAULT.includedNodes,
-                Balancer.Parameters.DEFAULT.sourceNodes, blockpools,
-                Balancer.Parameters.DEFAULT.runDuringUpgrade);
+        BalancerParameters.Builder b =
+            new BalancerParameters.Builder();
+        b.setBlockpools(blockpools);
+        BalancerParameters params = b.build();
         final Suite s =
             new Suite(cluster, nNameNodes, nDataNodes, params, conf);
         for(int n = 0; n < nNameNodes; n++) {
@@ -455,7 +453,7 @@ public class TestBalancerWithMultipleNameNodes {
       LOG.info("RUN_TEST 1");
       final Suite s =
           new Suite(cluster, nNameNodes, nDataNodes,
-              Balancer.Parameters.DEFAULT, conf);
+              BalancerParameters.DEFAULT, conf);
       long totalCapacity = TestBalancer.sum(capacities);
 
       LOG.info("RUN_TEST 2");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/083b44c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
index 7af3a0e..bfa2835 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
@@ -175,7 +175,7 @@ public class TestBalancerWithNodeGroup {
 
     // start rebalancing
     Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
-    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
+    final int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf);
     assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
 
     waitForHeartBeat(totalUsedSpace, totalCapacity);
@@ -189,7 +189,7 @@ public class TestBalancerWithNodeGroup {
 
     // start rebalancing
     Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
-    final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf);
+    final int r = Balancer.run(namenodes, BalancerParameters.DEFAULT, conf);
     Assert.assertTrue(r == ExitStatus.SUCCESS.getExitCode() ||
         (r == ExitStatus.NO_MOVE_PROGRESS.getExitCode()));
     waitForHeartBeat(totalUsedSpace, totalCapacity);


[30/50] [abbrv] hadoop git commit: syncing branch-2 and trunk CHANGES.TXT to be closer together

Posted by ec...@apache.org.
syncing branch-2 and trunk CHANGES.TXT to be closer together


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4992f075
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4992f075
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4992f075

Branch: refs/heads/HADOOP-11890
Commit: 4992f075c80eff3c93a895a4a2b772adf9a53542
Parents: 8c05441
Author: Steve Loughran <st...@apache.org>
Authored: Sat Sep 12 18:51:37 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Sat Sep 12 18:51:37 2015 +0100

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 21 ++++++++++++++++----
 1 file changed, 17 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4992f075/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index d695c53..fffd561 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1028,6 +1028,12 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12164. Fix TestMove and TestFsShellReturnCode failed to get command
     name using reflection. (Lei (Eddy) Xu)
 
+    HADOOP-12173. NetworkTopology::add calls toString always.
+    (Inigo Goiri via cdouglas)
+
+    HADOOP-12185. NetworkTopology is not efficient adding/getting/removing
+    nodes. (Inigo Goiri via cdouglas)
+
     HADOOP-12117. Potential NPE from Configuration#loadProperty with
     allowNullValueProperties set. (zhihai xu via vinayakumarb)
 
@@ -1049,6 +1055,13 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12088. KMSClientProvider uses equalsIgnoreCase("application/json").
     (Brahma Reddy Battula via stevel)
 
+    HADOOP-11797. releasedocmaker.py needs to put ASF headers on output (aw)
+
+    HADOOP-12348. MetricsSystemImpl creates MetricsSourceAdapter with wrong
+    time unit parameter. (zxu via rkanter)
+
+  OPTIMIZATIONS
+
     HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
     over getMessage() in logging/span events. (Varun Saxena via stevel)
 
@@ -1102,9 +1115,6 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12388. Fix components' version information in the web page
     'About the Cluster'. (Jun Gong via zxu)
 
-    HADOOP-12348. MetricsSystemImpl creates MetricsSourceAdapter with
-    wrong time unit parameter. (zxu via rkanter)
-
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -1161,6 +1171,9 @@ Release 2.7.1 - 2015-07-06
     HADOOP-11868. Invalid user logins trigger large backtraces in server log
     (Chang Li via jlowe)
 
+    HADOOP-11872. "hadoop dfs" command prints message about using "yarn jar" on
+    Windows(branch-2 only) (Varun Vasudev via cnauroth)
+
     HADOOP-11891. OsSecureRandom should lazily fill its reservoir (asuresh)
 
     HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when
@@ -2043,7 +2056,7 @@ Release 2.6.0 - 2014-11-18
     HADOOP-10281. Create a scheduler, which assigns schedulables a priority
     level. (Chris Li via Arpit Agarwal)
 
-    HADOOP-8944. Shell command fs -count should include human readable option 
+    HADOOP-8944. Shell command fs -count should include human readable option
     (Jonathan Allen via aw)
 
     HADOOP-10231. Add some components in Native Libraries document (Akira 


[22/50] [abbrv] hadoop git commit: HDFS-9027. Refactor o.a.h.hdfs.DataStreamer#isLazyPersist() method. (Contributed by Mingliang Liu)

Posted by ec...@apache.org.
HDFS-9027. Refactor o.a.h.hdfs.DataStreamer#isLazyPersist() method. (Contributed by Mingliang Liu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15a557fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15a557fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15a557fc

Branch: refs/heads/HADOOP-11890
Commit: 15a557fcfec5eceedde9f1597385d5d3b01b2cd7
Parents: ca0827a
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Sep 11 10:11:52 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Sep 11 10:11:52 2015 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/protocol/HdfsConstants.java     |  8 +++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java    |  8 +------
 .../BlockStoragePolicySuite.java                | 13 +++++------
 .../hdfs/server/common/HdfsServerConstants.java |  6 -----
 .../hadoop/hdfs/TestBlockStoragePolicy.java     | 24 ++++++++------------
 6 files changed, 28 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/15a557fc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index d5f4d53..0453d1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -34,12 +34,20 @@ public final class HdfsConstants {
    * URI Scheme for hdfs://namenode/ URIs.
    */
   public static final String HDFS_URI_SCHEME = "hdfs";
+
+  public static final byte MEMORY_STORAGE_POLICY_ID = 15;
   public static final String MEMORY_STORAGE_POLICY_NAME = "LAZY_PERSIST";
+  public static final byte ALLSSD_STORAGE_POLICY_ID = 12;
   public static final String ALLSSD_STORAGE_POLICY_NAME = "ALL_SSD";
+  public static final byte ONESSD_STORAGE_POLICY_ID = 10;
   public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
+  public static final byte HOT_STORAGE_POLICY_ID = 7;
   public static final String HOT_STORAGE_POLICY_NAME = "HOT";
+  public static final byte WARM_STORAGE_POLICY_ID = 5;
   public static final String WARM_STORAGE_POLICY_NAME = "WARM";
+  public static final byte COLD_STORAGE_POLICY_ID = 2;
   public static final String COLD_STORAGE_POLICY_NAME = "COLD";
+
   // TODO should be conf injected?
   public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15a557fc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b5be944..842627f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -906,6 +906,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-6763. Initialize file system-wide quota once on transitioning to active
     (kihwal)
 
+    HDFS-9027. Refactor o.a.h.hdfs.DataStreamer#isLazyPersist() method.
+    (Mingliang Liu via Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15a557fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index a975312..4a016bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -46,7 +46,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -69,7 +68,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.util.ByteArrayManager;
@@ -155,9 +153,7 @@ class DataStreamer extends Daemon {
    * @return if this file is lazy persist
    */
   static boolean isLazyPersist(HdfsFileStatus stat) {
-    final BlockStoragePolicy p = blockStoragePolicySuite.getPolicy(
-        HdfsConstants.MEMORY_STORAGE_POLICY_NAME);
-    return p != null && stat.getStoragePolicy() == p.getId();
+    return stat.getStoragePolicy() == HdfsConstants.MEMORY_STORAGE_POLICY_ID;
   }
 
   /**
@@ -379,8 +375,6 @@ class DataStreamer extends Daemon {
   private final LinkedList<DFSPacket> ackQueue = new LinkedList<>();
   private final AtomicReference<CachingStrategy> cachingStrategy;
   private final ByteArrayManager byteArrayManager;
-  private static final BlockStoragePolicySuite blockStoragePolicySuite =
-      BlockStoragePolicySuite.createDefaultSuite();
   //persist blocks on namenode
   private final AtomicBoolean persistBlocks = new AtomicBoolean(false);
   private boolean failPacket = false;
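
Restated outside the diff, the DataStreamer change reads side by side like this (both versions are copied from the hunks above). The old code built an entire server-side BlockStoragePolicySuite in a static field just to resolve the LAZY_PERSIST policy's ID by name; with the IDs promoted to byte constants in HdfsConstants, the client compares directly and DataStreamer drops its imports of the server-side policy classes.

// Before: resolve the policy by name through a suite built once per class load.
private static final BlockStoragePolicySuite blockStoragePolicySuite =
    BlockStoragePolicySuite.createDefaultSuite();

static boolean isLazyPersist(HdfsFileStatus stat) {
  final BlockStoragePolicy p = blockStoragePolicySuite.getPolicy(
      HdfsConstants.MEMORY_STORAGE_POLICY_NAME);
  return p != null && stat.getStoragePolicy() == p.getId();
}

// After: the policy IDs are fixed byte constants, so compare directly.
static boolean isLazyPersist(HdfsFileStatus stat) {
  return stat.getStoragePolicy() == HdfsConstants.MEMORY_STORAGE_POLICY_ID;
}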

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15a557fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index fa9d22c..c8923da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -48,37 +47,37 @@ public class BlockStoragePolicySuite {
   public static BlockStoragePolicySuite createDefaultSuite() {
     final BlockStoragePolicy[] policies =
         new BlockStoragePolicy[1 << ID_BIT_LENGTH];
-    final byte lazyPersistId = HdfsServerConstants.MEMORY_STORAGE_POLICY_ID;
+    final byte lazyPersistId = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
     policies[lazyPersistId] = new BlockStoragePolicy(lazyPersistId,
         HdfsConstants.MEMORY_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.RAM_DISK, StorageType.DISK},
         new StorageType[]{StorageType.DISK},
         new StorageType[]{StorageType.DISK},
         true);    // Cannot be changed on regular files, but inherited.
-    final byte allssdId = HdfsServerConstants.ALLSSD_STORAGE_POLICY_ID;
+    final byte allssdId = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
     policies[allssdId] = new BlockStoragePolicy(allssdId,
         HdfsConstants.ALLSSD_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.SSD},
         new StorageType[]{StorageType.DISK},
         new StorageType[]{StorageType.DISK});
-    final byte onessdId = HdfsServerConstants.ONESSD_STORAGE_POLICY_ID;
+    final byte onessdId = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
     policies[onessdId] = new BlockStoragePolicy(onessdId,
         HdfsConstants.ONESSD_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.SSD, StorageType.DISK},
         new StorageType[]{StorageType.SSD, StorageType.DISK},
         new StorageType[]{StorageType.SSD, StorageType.DISK});
-    final byte hotId = HdfsServerConstants.HOT_STORAGE_POLICY_ID;
+    final byte hotId = HdfsConstants.HOT_STORAGE_POLICY_ID;
     policies[hotId] = new BlockStoragePolicy(hotId,
         HdfsConstants.HOT_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
         new StorageType[]{StorageType.ARCHIVE});
-    final byte warmId = HdfsServerConstants.WARM_STORAGE_POLICY_ID;
+    final byte warmId = HdfsConstants.WARM_STORAGE_POLICY_ID;
     policies[warmId] = new BlockStoragePolicy(warmId,
         HdfsConstants.WARM_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
         new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
-    final byte coldId = HdfsServerConstants.COLD_STORAGE_POLICY_ID;
+    final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
     policies[coldId] = new BlockStoragePolicy(coldId,
         HdfsConstants.COLD_STORAGE_POLICY_NAME,
         new StorageType[]{StorageType.ARCHIVE}, StorageType.EMPTY_ARRAY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15a557fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index 0667bdb..bd9afbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -99,12 +99,6 @@ public interface HdfsServerConstants {
   };
   byte[] DOT_SNAPSHOT_DIR_BYTES
               = DFSUtil.string2Bytes(HdfsConstants.DOT_SNAPSHOT_DIR);
-  byte MEMORY_STORAGE_POLICY_ID = 15;
-  byte ALLSSD_STORAGE_POLICY_ID = 12;
-  byte ONESSD_STORAGE_POLICY_ID = 10;
-  byte HOT_STORAGE_POLICY_ID = 7;
-  byte WARM_STORAGE_POLICY_ID = 5;
-  byte COLD_STORAGE_POLICY_ID = 2;
 
   /**
    * Type of the node

http://git-wip-us.apache.org/repos/asf/hadoop/blob/15a557fc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
index 689a1d1..cfc317f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
@@ -76,12 +76,12 @@ public class TestBlockStoragePolicy {
   static final long FILE_LEN = 1024;
   static final short REPLICATION = 3;
 
-  static final byte COLD = HdfsServerConstants.COLD_STORAGE_POLICY_ID;
-  static final byte WARM = HdfsServerConstants.WARM_STORAGE_POLICY_ID;
-  static final byte HOT  = HdfsServerConstants.HOT_STORAGE_POLICY_ID;
-  static final byte ONESSD  = HdfsServerConstants.ONESSD_STORAGE_POLICY_ID;
-  static final byte ALLSSD  = HdfsServerConstants.ALLSSD_STORAGE_POLICY_ID;
-  static final byte LAZY_PERSIST  = HdfsServerConstants.MEMORY_STORAGE_POLICY_ID;
+  static final byte COLD = HdfsConstants.COLD_STORAGE_POLICY_ID;
+  static final byte WARM = HdfsConstants.WARM_STORAGE_POLICY_ID;
+  static final byte HOT  = HdfsConstants.HOT_STORAGE_POLICY_ID;
+  static final byte ONESSD  = HdfsConstants.ONESSD_STORAGE_POLICY_ID;
+  static final byte ALLSSD  = HdfsConstants.ALLSSD_STORAGE_POLICY_ID;
+  static final byte LAZY_PERSIST  = HdfsConstants.MEMORY_STORAGE_POLICY_ID;
 
   @Test (timeout=300000)
   public void testConfigKeyEnabled() throws IOException {
@@ -1317,19 +1317,15 @@ public class TestBlockStoragePolicy {
       HdfsFileStatus status = fs.getClient().getFileInfo(file);
       // 5. get file policy, it should be parent policy.
       Assert
-          .assertTrue(
-              "File storage policy should be HOT",
-              status.getStoragePolicy()
-              == HdfsServerConstants.HOT_STORAGE_POLICY_ID);
+          .assertTrue("File storage policy should be HOT",
+              status.getStoragePolicy() == HOT);
       // 6. restart NameNode for reloading edits logs.
       cluster.restartNameNode(true);
       // 7. get file policy, it should be parent policy.
       status = fs.getClient().getFileInfo(file);
       Assert
-          .assertTrue(
-              "File storage policy should be HOT",
-              status.getStoragePolicy()
-              == HdfsServerConstants.HOT_STORAGE_POLICY_ID);
+          .assertTrue("File storage policy should be HOT",
+              status.getStoragePolicy() == HOT);
 
     } finally {
       cluster.shutdown();


[32/50] [abbrv] hadoop git commit: HDFS-9041. Move entries in META-INF/services/o.a.h.fs.FileSystem to hdfs-client. Contributed by Mingliang Liu.

Posted by ec...@apache.org.
HDFS-9041. Move entries in META-INF/services/o.a.h.fs.FileSystem to hdfs-client. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0c7d3f48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0c7d3f48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0c7d3f48

Branch: refs/heads/HADOOP-11890
Commit: 0c7d3f480548745e9e9ccad1d318371c020c3003
Parents: 3f685cd
Author: Haohui Mai <wh...@apache.org>
Authored: Sat Sep 12 10:58:58 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Sat Sep 12 11:02:51 2015 -0700

----------------------------------------------------------------------
 .../services/org.apache.hadoop.fs.FileSystem       | 17 +++++++++++++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        |  3 +++
 .../services/org.apache.hadoop.fs.FileSystem       |  2 --
 3 files changed, 20 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c7d3f48/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
new file mode 100644
index 0000000..3c832de
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.hdfs.web.WebHdfsFileSystem
+org.apache.hadoop.hdfs.web.SWebHdfsFileSystem

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c7d3f48/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0c891ab..b1ba39b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1319,6 +1319,9 @@ Release 2.8.0 - UNRELEASED
     node is selected eventhough fallbackToLocalRack is true.
     (J.Andreina via vinayakumarb)
 
+    HDFS-9041. Move entries in META-INF/services/o.a.h.fs.FileSystem to
+    hdfs-client. (Mingliang Liu via wheat9)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0c7d3f48/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
index abe2bfc..120ff94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
@@ -14,5 +14,3 @@
 # limitations under the License.
 
 org.apache.hadoop.hdfs.DistributedFileSystem
-org.apache.hadoop.hdfs.web.WebHdfsFileSystem
-org.apache.hadoop.hdfs.web.SWebHdfsFileSystem
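
The META-INF/services file added above is a standard java.util.ServiceLoader descriptor: each non-comment line names a concrete FileSystem subclass, and Hadoop's FileSystem registry loads whatever the classpath provides. Moving the webhdfs entries into hadoop-hdfs-client therefore changes which jar must be on the classpath, not any API. A hedged sketch of the discovery mechanism (the schemes and class names printed depend entirely on the jars present):

import java.util.ServiceLoader;
import org.apache.hadoop.fs.FileSystem;

public class ListFileSystems {
  public static void main(String[] args) {
    // ServiceLoader reads every META-INF/services/org.apache.hadoop.fs.FileSystem
    // resource on the classpath and instantiates the classes listed there.
    for (FileSystem fs : ServiceLoader.load(FileSystem.class)) {
      System.out.println(fs.getScheme() + " -> " + fs.getClass().getName());
    }
  }
}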


[21/50] [abbrv] hadoop git commit: HADOOP-12324. Better exception reporting in SaslPlainServer. (Mike Yoder via stevel)

Posted by ec...@apache.org.
HADOOP-12324. Better exception reporting in SaslPlainServer. (Mike Yoder via stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca0827a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca0827a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca0827a8

Branch: refs/heads/HADOOP-11890
Commit: ca0827a86235dbc4d7e00cc8426ebff9fcc2d421
Parents: 486d5cb
Author: Steve Loughran <st...@apache.org>
Authored: Fri Sep 11 15:55:14 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Fri Sep 11 15:58:53 2015 +0100

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../src/main/java/org/apache/hadoop/security/SaslPlainServer.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca0827a8/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index c04bfd0..6ea2484 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -765,6 +765,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12384. Add "-direct" flag option for fs copy so that user can choose
     not to create "._COPYING_" file (J.Andreina via vinayakumarb)
 
+    HADOOP-12324. Better exception reporting in SaslPlainServer.
+    (Mike Yoder via stevel)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca0827a8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java
index 7c74f4a..270b579 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java
@@ -105,7 +105,7 @@ public class SaslPlainServer implements SaslServer {
         authz = ac.getAuthorizedID();
       }
     } catch (Exception e) {
-      throw new SaslException("PLAIN auth failed: " + e.getMessage(), e);
+      throw new SaslException("PLAIN auth failed: " + e.toString(), e);
     } finally {
       completed = true;
     }
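
The one-line change above matters because Throwable#getMessage() may be null (any exception thrown without a detail message, a bare NullPointerException being the classic case), whereas toString() always includes the exception class name. A minimal demonstration:

public class MessageVsToString {
  public static void main(String[] args) {
    Exception cause = new NullPointerException();  // no detail message
    System.out.println("getMessage(): " + cause.getMessage());  // prints: null
    System.out.println("toString():   " + cause);  // java.lang.NullPointerException

    // The patched wrapping keeps the underlying class visible to operators:
    Exception wrapped = new javax.security.sasl.SaslException(
        "PLAIN auth failed: " + cause.toString(), cause);
    System.out.println(wrapped.getMessage());
  }
}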


[43/50] [abbrv] hadoop git commit: YARN-3635. Refactored current queue mapping implementation in CapacityScheduler to use a generic PlacementManager framework. Contributed by Wangda Tan

Posted by ec...@apache.org.
YARN-3635. Refactored current queue mapping implementation in CapacityScheduler to use a generic PlacementManager framework. Contributed by Wangda Tan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5468baa8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5468baa8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5468baa8

Branch: refs/heads/HADOOP-11890
Commit: 5468baa80aa2a3e2a02e9a902deebafd734daf23
Parents: d777757
Author: Jian He <ji...@apache.org>
Authored: Tue Sep 15 15:39:20 2015 +0800
Committer: Jian He <ji...@apache.org>
Committed: Tue Sep 15 15:39:20 2015 +0800

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../resourcemanager/RMActiveServiceContext.java |  16 +-
 .../server/resourcemanager/RMAppManager.java    |   9 +
 .../yarn/server/resourcemanager/RMContext.java  |   5 +
 .../server/resourcemanager/RMContextImpl.java   |  12 +-
 .../placement/PlacementManager.java             |  95 +++++++++
 .../placement/PlacementRule.java                |  55 +++++
 .../UserGroupMappingPlacementRule.java          | 164 +++++++++++++++
 .../scheduler/capacity/CapacityScheduler.java   | 126 ++++--------
 .../CapacitySchedulerConfiguration.java         |  32 +--
 .../server/resourcemanager/TestAppManager.java  |  54 ++++-
 .../TestUserGroupMappingPlacementRule.java      |  89 ++++++++
 .../scheduler/capacity/TestQueueMappings.java   | 203 +++++--------------
 13 files changed, 584 insertions(+), 279 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 766d4ef..cff5205 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -455,6 +455,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3983. Refactored CapacityScheduleri#FiCaSchedulerApp to easier extend
     container allocation logic. (Wangda Tan via jianhe)
 
+    YARN-3635. Refactored current queue mapping implementation in CapacityScheduler
+    to use a generic PlacementManager framework. (Wangda Tan via jianhe)
+
   BUG FIXES
 
     YARN-3197. Confusing log generated by CapacityScheduler. (Varun Saxena 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index 1abb14e..c71323f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
@@ -99,9 +100,10 @@ public class RMActiveServiceContext {
   private long schedulerRecoveryWaitTime = 0;
   private boolean printLog = true;
   private boolean isSchedulerReady = false;
+  private PlacementManager queuePlacementManager = null;
 
   public RMActiveServiceContext() {
-
+    queuePlacementManager = new PlacementManager();
   }
 
   @Private
@@ -424,4 +426,16 @@ public class RMActiveServiceContext {
   public ConcurrentMap<ApplicationId, ByteBuffer> getSystemCredentialsForApps() {
     return systemCredentials;
   }
+  
+  @Private
+  @Unstable
+  public PlacementManager getQueuePlacementManager() {
+    return queuePlacementManager;
+  }
+  
+  @Private
+  @Unstable
+  public void setQueuePlacementManager(PlacementManager placementMgr) {
+    this.queuePlacementManager = placementMgr;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index 6fd1838..703ec1e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -326,6 +326,15 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
   private RMAppImpl createAndPopulateNewRMApp(
       ApplicationSubmissionContext submissionContext, long submitTime,
       String user, boolean isRecovery) throws YarnException {
+    // Do queue mapping
+    if (!isRecovery) {
+      if (rmContext.getQueuePlacementManager() != null) {
+        // We only do queue mapping when it's a new application
+        rmContext.getQueuePlacementManager().placeApplication(
+            submissionContext, user);
+      }
+    }
+    
     ApplicationId applicationId = submissionContext.getApplicationId();
     ResourceRequest amReq =
         validateAndCreateResourceRequest(submissionContext, isRecovery);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
index bc50268..b64c834 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -124,4 +125,8 @@ public interface RMContext {
   boolean isSchedulerReadyForAllocatingContainers();
   
   Configuration getYarnConfiguration();
+  
+  PlacementManager getQueuePlacementManager();
+  
+  void setQueuePlacementManager(PlacementManager placementMgr);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
index d6d573d..840cea7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
 import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
@@ -76,7 +77,6 @@ public class RMContextImpl implements RMContext {
    * individual fields.
    */
   public RMContextImpl() {
-
   }
 
   @VisibleForTesting
@@ -438,4 +438,14 @@ public class RMContextImpl implements RMContext {
   public void setYarnConfiguration(Configuration yarnConfiguration) {
     this.yarnConfiguration=yarnConfiguration;
   }
+
+  @Override
+  public PlacementManager getQueuePlacementManager() {
+    return this.activeServiceContext.getQueuePlacementManager();
+  }
+  
+  @Override
+  public void setQueuePlacementManager(PlacementManager placementMgr) {
+    this.activeServiceContext.setQueuePlacementManager(placementMgr);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java
new file mode 100644
index 0000000..43a4deb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementManager.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.placement;
+
+import java.util.List;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+import com.google.common.annotations.VisibleForTesting;
+
+public class PlacementManager {  
+  private static final Log LOG = LogFactory.getLog(PlacementManager.class);
+
+  List<PlacementRule> rules;
+  ReadLock readLock;
+  WriteLock writeLock;
+
+  public PlacementManager() {
+    ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
+    readLock = lock.readLock();
+    writeLock = lock.writeLock();
+  }
+
+  public void updateRules(List<PlacementRule> rules) {
+    try {
+      writeLock.lock();
+      this.rules = rules;
+    } finally {
+      writeLock.unlock();
+    }
+  }
+
+  public void placeApplication(ApplicationSubmissionContext asc, String user)
+      throws YarnException {
+    try {
+      readLock.lock();
+      if (null == rules || rules.isEmpty()) {
+        return;
+      }
+      
+      String newQueueName = null;
+      for (PlacementRule rule : rules) {
+        newQueueName = rule.getQueueForApp(asc, user);
+        if (newQueueName != null) {
+          break;
+        }
+      }
+      
+      // Failed to get where to place application
+      if (null == newQueueName && null == asc.getQueue()) {
+        String msg = "Failed to get where to place application="
+            + asc.getApplicationId();
+        LOG.error(msg);
+        throw new YarnException(msg);
+      }
+      
+      // Set it to ApplicationSubmissionContext
+      if (!StringUtils.equals(asc.getQueue(), newQueueName)) {
+        LOG.info("Placed application=" + asc.getApplicationId() + " to queue="
+            + newQueueName + ", original queue=" + asc.getQueue());
+        asc.setQueue(newQueueName);
+      }
+    } finally {
+      readLock.unlock();
+    }
+  }
+  
+  @VisibleForTesting
+  public List<PlacementRule> getPlacementRules() {
+    return rules;
+  }
+}
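
PlacementManager's contract, as the code above shows, is small: updateRules swaps the rule list under the write lock, and placeApplication walks the rules under the read lock, taking the first non-null queue and rewriting the ApplicationSubmissionContext. A hedged wiring sketch follows; the rule, context, and user are stand-ins supplied by the caller, and in the patch it is the CapacityScheduler that builds the real rule list from its queue-mapping configuration (its diff is not shown here). One illustrative custom rule is sketched after the PlacementRule diff below.

import java.util.Arrays;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementRule;

class PlacementWiringSketch {
  static void place(PlacementRule rule, ApplicationSubmissionContext asc,
      String user) throws YarnException {
    PlacementManager pm = new PlacementManager();
    pm.updateRules(Arrays.asList(rule));  // installed under the write lock
    // RMAppManager#createAndPopulateNewRMApp invokes this for each new,
    // non-recovered submission; a matching rule may call asc.setQueue(...).
    pm.placeApplication(asc, user);
  }
}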

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementRule.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementRule.java
new file mode 100644
index 0000000..47dc48a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementRule.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.placement;
+
+import java.util.Map;
+
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+
+public abstract class PlacementRule {
+  public String getName() {
+    return this.getClass().getName();
+  }
+
+  public void initialize(Map<String, String> parameters, RMContext rmContext)
+      throws YarnException {
+  }
+
+  /**
+   * Get queue for a given application
+   * 
+   * @param asc application submission context
+   * @param user userName
+   * 
+   * @throws YarnException
+   *           if any error happens
+   * 
+   * @return <p>
+   *         non-null value means it is determined
+   *         </p>
+   *         <p>
+   *         null value means it is undetermined, so next {@link PlacementRule}
+   *         in the {@link PlacementManager} will take care
+   *         </p>
+   */
+  public abstract String getQueueForApp(ApplicationSubmissionContext asc,
+      String user) throws YarnException;
+}
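
The abstract getQueueForApp contract above (a non-null return means the queue is decided, null means defer to the next rule in the chain) keeps custom rules short. A hypothetical example, not part of this patch, that routes applications by name prefix:

import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementRule;

// Hypothetical rule: applications named "etl-*" go to a fixed queue;
// anything else is left for the next PlacementRule in the chain.
public class EtlQueuePlacementRule extends PlacementRule {
  @Override
  public String getQueueForApp(ApplicationSubmissionContext asc, String user)
      throws YarnException {
    String name = asc.getApplicationName();
    return (name != null && name.startsWith("etl-")) ? "root.etl" : null;
  }
}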

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
new file mode 100644
index 0000000..d617d16
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/UserGroupMappingPlacementRule.java
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.placement;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.security.Groups;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule.QueueMapping.MappingType;
+
+import com.google.common.annotations.VisibleForTesting;
+
+public class UserGroupMappingPlacementRule extends PlacementRule {
+  private static final Log LOG = LogFactory
+      .getLog(UserGroupMappingPlacementRule.class);
+
+  public static final String CURRENT_USER_MAPPING = "%user";
+
+  public static final String PRIMARY_GROUP_MAPPING = "%primary_group";
+
+  private boolean overrideWithQueueMappings = false;
+  private List<QueueMapping> mappings = null;
+  private Groups groups;
+
+  @Private
+  public static class QueueMapping {
+
+    public enum MappingType {
+
+      USER("u"), GROUP("g");
+      private final String type;
+
+      private MappingType(String type) {
+        this.type = type;
+      }
+
+      public String toString() {
+        return type;
+      }
+
+    };
+
+    MappingType type;
+    String source;
+    String queue;
+
+    public QueueMapping(MappingType type, String source, String queue) {
+      this.type = type;
+      this.source = source;
+      this.queue = queue;
+    }
+    
+    public String getQueue() {
+      return queue;
+    }
+    
+    @Override
+    public int hashCode() {
+      return super.hashCode();
+    }
+    
+    @Override
+    public boolean equals(Object obj) {
+      if (obj instanceof QueueMapping) {
+        QueueMapping other = (QueueMapping) obj;
+        return (other.type.equals(type) && 
+            other.source.equals(source) && 
+            other.queue.equals(queue));
+      } else {
+        return false;
+      }
+    }
+  }
+
+  public UserGroupMappingPlacementRule(boolean overrideWithQueueMappings,
+      List<QueueMapping> newMappings, Groups groups) {
+    this.mappings = newMappings;
+    this.overrideWithQueueMappings = overrideWithQueueMappings;
+    this.groups = groups;
+  }
+
+  private String getMappedQueue(String user) throws IOException {
+    for (QueueMapping mapping : mappings) {
+      if (mapping.type == MappingType.USER) {
+        if (mapping.source.equals(CURRENT_USER_MAPPING)) {
+          if (mapping.queue.equals(CURRENT_USER_MAPPING)) {
+            return user;
+          } else if (mapping.queue.equals(PRIMARY_GROUP_MAPPING)) {
+            return groups.getGroups(user).get(0);
+          } else {
+            return mapping.queue;
+          }
+        }
+        if (user.equals(mapping.source)) {
+          return mapping.queue;
+        }
+      }
+      if (mapping.type == MappingType.GROUP) {
+        for (String userGroups : groups.getGroups(user)) {
+          if (userGroups.equals(mapping.source)) {
+            return mapping.queue;
+          }
+        }
+      }
+    }
+    return null;
+  }
+
+  @Override
+  public String getQueueForApp(ApplicationSubmissionContext asc, String user)
+      throws YarnException {
+    String queueName = asc.getQueue();
+    ApplicationId applicationId = asc.getApplicationId();
+    if (mappings != null && mappings.size() > 0) {
+      try {
+        String mappedQueue = getMappedQueue(user);
+        if (mappedQueue != null) {
+          // We have a mapping, should we use it?
+          if (queueName.equals(YarnConfiguration.DEFAULT_QUEUE_NAME)
+              || overrideWithQueueMappings) {
+            LOG.info("Application " + applicationId + " user " + user
+                + " mapping [" + queueName + "] to [" + mappedQueue
+                + "] override " + overrideWithQueueMappings);
+            return mappedQueue;
+          }
+        }
+      } catch (IOException ioex) {
+        String message = "Failed to submit application " + applicationId +
+            " submitted by user " + user + " reason: " + ioex.getMessage();
+        throw new YarnException(message);
+      }
+    }
+    
+    return queueName;
+  }
+  
+  @VisibleForTesting
+  public List<QueueMapping> getQueueMappings() {
+    return mappings;
+  }
+}

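As a usage sketch (the demo class, user name, and queue names are invented; everything else is the API added by this commit): mappings are evaluated in order, the first match wins, and %user and %primary_group are expanded at resolution time.

import java.util.Arrays;

import org.apache.hadoop.security.Groups;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule;
import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule.QueueMapping;
import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule.QueueMapping.MappingType;
import org.apache.hadoop.yarn.util.Records;

public class UserGroupMappingDemo {
  public static void main(String[] args) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    // Mappings are checked in order; the first match wins.
    UserGroupMappingPlacementRule rule = new UserGroupMappingPlacementRule(
        true /* override user-specified queues */,
        Arrays.asList(
            new QueueMapping(MappingType.USER, "alice", "engineering"),
            new QueueMapping(MappingType.USER, "%user", "%primary_group")),
        new Groups(conf));

    ApplicationSubmissionContext asc =
        Records.newRecord(ApplicationSubmissionContext.class);
    asc.setQueue(YarnConfiguration.DEFAULT_QUEUE_NAME);

    // "alice" matches the first mapping and is placed in "engineering".
    // Any other user falls through to %user -> %primary_group and lands in
    // the queue named after their primary group, as resolved by the JVM's
    // configured group mapping service.
    System.out.println(rule.getQueueForApp(asc, "alice"));
  }
}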
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index dbaccaf..ad5c76c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -69,6 +69,9 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementRule;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule.QueueMapping;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationConstants;
@@ -98,8 +101,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicat
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerDynamicEditException;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerHealth;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.QueueMapping;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.QueueMapping.MappingType;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.AssignmentInformation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.QueueEntitlement;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
@@ -228,16 +229,6 @@ public class CapacityScheduler extends
       CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_PREFIX
           + ".scheduling-interval-ms";
   private static final long DEFAULT_ASYNC_SCHEDULER_INTERVAL = 5;
-  
-  private boolean overrideWithQueueMappings = false;
-  private List<QueueMapping> mappings = null;
-  private Groups groups;
-
-  @VisibleForTesting
-  public synchronized String getMappedQueueForTest(String user)
-      throws IOException {
-    return getMappedQueue(user);
-  }
 
   public CapacityScheduler() {
     super(CapacityScheduler.class.getName());
@@ -447,29 +438,52 @@ public class CapacityScheduler extends
   }
   private static final QueueHook noop = new QueueHook();
 
-  private void initializeQueueMappings() throws IOException {
-    overrideWithQueueMappings = conf.getOverrideWithQueueMappings();
+  @VisibleForTesting
+  public synchronized UserGroupMappingPlacementRule
+      getUserGroupMappingPlacementRule() throws IOException {
+    boolean overrideWithQueueMappings = conf.getOverrideWithQueueMappings();
     LOG.info("Initialized queue mappings, override: "
         + overrideWithQueueMappings);
+
     // Get new user/group mappings
-    List<QueueMapping> newMappings = conf.getQueueMappings();
-    //check if mappings refer to valid queues
+    List<UserGroupMappingPlacementRule.QueueMapping> newMappings =
+        conf.getQueueMappings();
+    // check if mappings refer to valid queues
     for (QueueMapping mapping : newMappings) {
-      if (!mapping.queue.equals(CURRENT_USER_MAPPING) &&
-          !mapping.queue.equals(PRIMARY_GROUP_MAPPING)) {
-        CSQueue queue = queues.get(mapping.queue);
+      String mappingQueue = mapping.getQueue();
+      if (!mappingQueue
+          .equals(UserGroupMappingPlacementRule.CURRENT_USER_MAPPING)
+          && !mappingQueue
+              .equals(UserGroupMappingPlacementRule.PRIMARY_GROUP_MAPPING)) {
+        CSQueue queue = queues.get(mappingQueue);
         if (queue == null || !(queue instanceof LeafQueue)) {
-          throw new IOException(
-              "mapping contains invalid or non-leaf queue " + mapping.queue);
+          throw new IOException("mapping contains invalid or non-leaf queue "
+              + mappingQueue);
         }
       }
     }
-    //apply the new mappings since they are valid
-    mappings = newMappings;
+
     // initialize groups if mappings are present
-    if (mappings.size() > 0) {
-      groups = new Groups(conf);
+    if (newMappings.size() > 0) {
+      Groups groups = new Groups(conf);
+      return new UserGroupMappingPlacementRule(overrideWithQueueMappings,
+          newMappings, groups);
     }
+
+    return null;
+  }
+
+  private void updatePlacementRules() throws IOException {
+    List<PlacementRule> placementRules = new ArrayList<>();
+    
+    // Initialize UserGroupMappingPlacementRule
+    // TODO: make this definable via configuration.
+    UserGroupMappingPlacementRule ugRule = getUserGroupMappingPlacementRule();
+    if (null != ugRule) {
+      placementRules.add(ugRule);
+    }
+    
+    rmContext.getQueuePlacementManager().updateRules(placementRules);
   }
 
   @Lock(CapacityScheduler.class)
@@ -481,7 +495,7 @@ public class CapacityScheduler extends
             queues, queues, noop);
     labelManager.reinitializeQueueLabels(getQueueToLabels());
     LOG.info("Initialized root queue " + root);
-    initializeQueueMappings();
+    updatePlacementRules();
     setQueueAcls(authorizer, queues);
   }
 
@@ -502,7 +516,7 @@ public class CapacityScheduler extends
     
     // Re-configure queues
     root.reinitialize(newRoot, clusterResource);
-    initializeQueueMappings();
+    updatePlacementRules();
 
     // Re-calculate headroom for active applications
     root.updateClusterResource(clusterResource, new ResourceLimits(
@@ -647,66 +661,8 @@ public class CapacityScheduler extends
     return queues.get(queueName);
   }
 
-  private static final String CURRENT_USER_MAPPING = "%user";
-
-  private static final String PRIMARY_GROUP_MAPPING = "%primary_group";
-
-  private String getMappedQueue(String user) throws IOException {
-    for (QueueMapping mapping : mappings) {
-      if (mapping.type == MappingType.USER) {
-        if (mapping.source.equals(CURRENT_USER_MAPPING)) {
-          if (mapping.queue.equals(CURRENT_USER_MAPPING)) {
-            return user;
-          }
-          else if (mapping.queue.equals(PRIMARY_GROUP_MAPPING)) {
-            return groups.getGroups(user).get(0);
-          }
-          else {
-            return mapping.queue;
-          }
-        }
-        if (user.equals(mapping.source)) {
-          return mapping.queue;
-        }
-      }
-      if (mapping.type == MappingType.GROUP) {
-        for (String userGroups : groups.getGroups(user)) {
-          if (userGroups.equals(mapping.source)) {
-            return mapping.queue;
-          }
-        }
-      }
-    }
-    return null;
-  }
-
   private synchronized void addApplication(ApplicationId applicationId,
       String queueName, String user, boolean isAppRecovering, Priority priority) {
-
-    if (mappings != null && mappings.size() > 0) {
-      try {
-        String mappedQueue = getMappedQueue(user);
-        if (mappedQueue != null) {
-          // We have a mapping, should we use it?
-          if (queueName.equals(YarnConfiguration.DEFAULT_QUEUE_NAME)
-              || overrideWithQueueMappings) {
-            LOG.info("Application " + applicationId + " user " + user
-                + " mapping [" + queueName + "] to [" + mappedQueue
-                + "] override " + overrideWithQueueMappings);
-            queueName = mappedQueue;
-            RMApp rmApp = rmContext.getRMApps().get(applicationId);
-            rmApp.setQueue(queueName);
-          }
-        }
-      } catch (IOException ioex) {
-        String message = "Failed to submit application " + applicationId +
-            " submitted by user " + user + " reason: " + ioex.getMessage();
-        this.rmContext.getDispatcher().getEventHandler()
-            .handle(new RMAppRejectedEvent(applicationId, message));
-        return;
-      }
-    }
-
     // sanity checks.
     CSQueue queue = getQueue(queueName);
     if (queue == null) {

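For context, the mapping syntax consumed by getUserGroupMappingPlacementRule() is unchanged by this refactoring: a comma-separated list of u:source:queue or g:source:queue entries read from CapacitySchedulerConfiguration.QUEUE_MAPPING. A capacity-scheduler.xml sketch (user, group, and queue names invented; the property names correspond to the QUEUE_MAPPING and ENABLE_QUEUE_MAPPING_OVERRIDE constants in CapacitySchedulerConfiguration):

<property>
  <name>yarn.scheduler.capacity.queue-mappings</name>
  <value>u:alice:q1,g:analysts:q2,u:%user:%primary_group</value>
  <description>
    alice goes to q1; members of group analysts go to q2; everyone else
    goes to a queue named after their primary group.
  </description>
</property>
<property>
  <name>yarn.scheduler.capacity.queue-mappings-override.enable</name>
  <value>true</value>
  <description>Allow mappings to override a user-specified queue.</description>
</property>

Note that q1 and q2 must exist as leaf queues, or reinitialization fails, as the validation loop above enforces.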
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index be5e6dd..b1461c1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 import org.apache.hadoop.yarn.security.AccessType;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule.QueueMapping;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy;
@@ -211,35 +212,6 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
 
   @Private
   public static final Integer DEFAULT_CONFIGURATION_APPLICATION_PRIORITY = 0;
-
-  @Private
-  public static class QueueMapping {
-
-    public enum MappingType {
-
-      USER("u"),
-      GROUP("g");
-      private final String type;
-      private MappingType(String type) {
-        this.type = type;
-      }
-
-      public String toString() {
-        return type;
-      }
-
-    };
-
-    MappingType type;
-    String source;
-    String queue;
-
-    public QueueMapping(MappingType type, String source, String queue) {
-      this.type = type;
-      this.source = source;
-      this.queue = queue;
-    }
-  }
   
   @Private
   public static final String AVERAGE_CAPACITY = "average-capacity";
@@ -747,7 +719,7 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur
    */
   public List<QueueMapping> getQueueMappings() {
     List<QueueMapping> mappings =
-        new ArrayList<CapacitySchedulerConfiguration.QueueMapping>();
+        new ArrayList<QueueMapping>();
     Collection<String> mappingsString =
         getTrimmedStringCollection(QUEUE_MAPPING);
     for (String mappingValue : mappingsString) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
index cbeae5b..c435692 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
@@ -19,16 +19,10 @@
 package org.apache.hadoop.yarn.server.resourcemanager;
 
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
-
-import static org.mockito.Matchers.isA;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.isA;
+import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
@@ -40,8 +34,11 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.concurrent.ConcurrentMap;
 
-import org.junit.Assert;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.yarn.MockApps;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
@@ -57,11 +54,14 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter;
+import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
@@ -73,8 +73,11 @@ import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -658,6 +661,39 @@ public class TestAppManager{
     Assert.assertTrue(msg.contains("preemptedResources=<memory:1234\\, vCores:56>"));
     Assert.assertTrue(msg.contains("applicationType=MAPREDUCE"));
  }
+  
+  @Test
+  public void testRMAppSubmitWithQueueChanged() throws Exception {
+    // Set up a PlacementManager that returns a new queue
+    PlacementManager placementMgr = mock(PlacementManager.class);
+    doAnswer(new Answer<Void>() {
+
+      @Override
+      public Void answer(InvocationOnMock invocation) throws Throwable {
+        ApplicationSubmissionContext ctx =
+            (ApplicationSubmissionContext) invocation.getArguments()[0];
+        ctx.setQueue("newQueue");
+        return null;
+      }
+      
+    }).when(placementMgr).placeApplication(any(ApplicationSubmissionContext.class),
+            any(String.class));
+    rmContext.setQueuePlacementManager(placementMgr);
+    
+    asContext.setQueue("oldQueue");
+    appMonitor.submitApplication(asContext, "test");
+    RMApp app = rmContext.getRMApps().get(appId);
+    Assert.assertNotNull("app is null", app);
+    Assert.assertEquals("newQueue", asContext.getQueue());
+
+    // wait for event to be processed
+    int timeoutSecs = 0;
+    while ((getAppEventType() == RMAppEventType.KILL) && timeoutSecs++ < 20) {
+      Thread.sleep(1000);
+    }
+    Assert.assertEquals("app event type sent is wrong", RMAppEventType.START,
+        getAppEventType());
+  }
 
   private static ResourceScheduler mockResourceScheduler() {
     ResourceScheduler scheduler = mock(ResourceScheduler.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java
new file mode 100644
index 0000000..61bc8d9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/placement/TestUserGroupMappingPlacementRule.java
@@ -0,0 +1,89 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.placement;
+
+import java.util.Arrays;
+
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.security.GroupMappingServiceProvider;
+import org.apache.hadoop.security.Groups;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule.QueueMapping;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule.QueueMapping.MappingType;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SimpleGroupsMapping;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestUserGroupMappingPlacementRule {
+  YarnConfiguration conf = new YarnConfiguration();
+
+  @Before
+  public void setup() {
+    conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
+        SimpleGroupsMapping.class, GroupMappingServiceProvider.class);
+  }
+
+  private void verifyQueueMapping(QueueMapping queueMapping, String inputUser,
+      String expectedQueue) throws YarnException {
+    verifyQueueMapping(queueMapping, inputUser,
+        YarnConfiguration.DEFAULT_QUEUE_NAME, expectedQueue, false);
+  }
+
+  private void verifyQueueMapping(QueueMapping queueMapping, String inputUser,
+      String inputQueue, String expectedQueue, boolean overwrite) throws YarnException {
+    Groups groups = new Groups(conf);
+    UserGroupMappingPlacementRule rule =
+        new UserGroupMappingPlacementRule(overwrite, Arrays.asList(queueMapping),
+            groups);
+    ApplicationSubmissionContext asc =
+        Records.newRecord(ApplicationSubmissionContext.class);
+    asc.setQueue(inputQueue);
+    String queue = rule.getQueueForApp(asc, inputUser);
+    Assert.assertEquals(expectedQueue, queue);
+  }
+
+  @Test
+  public void testMapping() throws YarnException {
+    // simple base case for mapping user to queue
+    verifyQueueMapping(new QueueMapping(MappingType.USER, "a", "q1"), "a", "q1");
+    verifyQueueMapping(new QueueMapping(MappingType.GROUP, "agroup", "q1"),
+        "a", "q1");
+    verifyQueueMapping(new QueueMapping(MappingType.USER, "%user", "q2"), "a",
+        "q2");
+    verifyQueueMapping(new QueueMapping(MappingType.USER, "%user", "%user"),
+        "a", "a");
+    verifyQueueMapping(new QueueMapping(MappingType.USER, "%user",
+        "%primary_group"), "a", "agroup");
+    verifyQueueMapping(new QueueMapping(MappingType.GROUP, "asubgroup1", "q1"),
+        "a", "q1");
+    
+    // with override enabled, a queue specified by the user at submission
+    // time is overridden by the mapping
+    verifyQueueMapping(new QueueMapping(MappingType.USER, "user", "q1"),
+        "user", "q2", "q1", true);
+    
+    // with override disabled, the user-specified queue is kept
+    verifyQueueMapping(new QueueMapping(MappingType.USER, "user", "q1"),
+        "user", "q2", "q2", false);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5468baa8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java
index 005f40b..1df6b4c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueMappings.java
@@ -18,22 +18,16 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
 
 import java.io.IOException;
-import java.util.HashMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.security.GroupMappingServiceProvider;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SimpleGroupsMapping;
-import org.junit.After;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule.QueueMapping;
+import org.apache.hadoop.yarn.server.resourcemanager.placement.UserGroupMappingPlacementRule.QueueMapping.MappingType;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Test;
 
 public class TestQueueMappings {
@@ -47,15 +41,23 @@ public class TestQueueMappings {
       CapacitySchedulerConfiguration.ROOT + "." + Q1;
   private final static String Q2_PATH =
       CapacitySchedulerConfiguration.ROOT + "." + Q2;
+  
+  private CapacityScheduler cs;
+  private YarnConfiguration conf;
+  
+  @Before
+  public void setup() {
+    CapacitySchedulerConfiguration csConf =
+        new CapacitySchedulerConfiguration();
+    setupQueueConfiguration(csConf);
+    conf = new YarnConfiguration(csConf);
+    cs = new CapacityScheduler();
 
-  private MockRM resourceManager;
-
-  @After
-  public void tearDown() throws Exception {
-    if (resourceManager != null) {
-      LOG.info("Stopping the resource manager");
-      resourceManager.stop();
-    }
+    RMContext rmContext = TestUtils.getMockRMContext();
+    cs.setConf(conf);
+    cs.setRMContext(rmContext);
+    cs.init(conf);
+    cs.start();
   }
 
   private void setupQueueConfiguration(CapacitySchedulerConfiguration conf) {
@@ -67,26 +69,32 @@ public class TestQueueMappings {
 
     LOG.info("Setup top-level queues q1 and q2");
   }
+  
+  @Test
+  public void testQueueMappingSpecifyingNotExistedQueue() {
+    // if the mapping specifies a queue that does not exist, reinitialize
+    // should fail
+    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,
+        "u:user:non_existent_queue");
+    boolean fail = false;
+    try {
+      cs.reinitialize(conf, null);
+    } catch (IOException ioex) {
+      fail = true;
+    }
+    Assert.assertTrue("queue initialization failed for non-existent q", fail);
+  }
+  
+  @Test
+  public void testQueueMappingTrimSpaces() throws IOException {
+    // space trimming
+    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING, "    u : a : " + Q1);
+    cs.reinitialize(conf, null);
+    checkQMapping(new QueueMapping(MappingType.USER, "a", Q1));
+  }
 
   @Test (timeout = 60000)
-  public void testQueueMapping() throws Exception {
-    CapacitySchedulerConfiguration csConf =
-        new CapacitySchedulerConfiguration();
-    setupQueueConfiguration(csConf);
-    YarnConfiguration conf = new YarnConfiguration(csConf);
-    CapacityScheduler cs = new CapacityScheduler();
-
-    RMContext rmContext = TestUtils.getMockRMContext();
-    cs.setConf(conf);
-    cs.setRMContext(rmContext);
-    cs.init(conf);
-    cs.start();
-
-    conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
-        SimpleGroupsMapping.class, GroupMappingServiceProvider.class);
-    conf.set(CapacitySchedulerConfiguration.ENABLE_QUEUE_MAPPING_OVERRIDE,
-        "true");
-
+  public void testQueueMappingParsingInvalidCases() throws Exception {
     // configuration parsing tests - negative test cases
     checkInvalidQMapping(conf, cs, "x:a:b", "invalid specifier");
     checkInvalidQMapping(conf, cs, "u:a", "no queue specified");
@@ -97,119 +105,6 @@ public class TestQueueMappings {
     checkInvalidQMapping(conf, cs, "u::", "empty source and queue");
     checkInvalidQMapping(conf, cs, "u:", "missing source missing queue");
     checkInvalidQMapping(conf, cs, "u:a:", "empty source missing q");
-
-    // simple base case for mapping user to queue
-    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING, "u:a:" + Q1);
-    cs.reinitialize(conf, null);
-    checkQMapping("a", Q1, cs);
-
-    // group mapping test
-    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING, "g:agroup:" + Q1);
-    cs.reinitialize(conf, null);
-    checkQMapping("a", Q1, cs);
-
-    // %user tests
-    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING, "u:%user:" + Q2);
-    cs.reinitialize(conf, null);
-    checkQMapping("a", Q2, cs);
-
-    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING, "u:%user:%user");
-    cs.reinitialize(conf, null);
-    checkQMapping("a", "a", cs);
-
-    // %primary_group tests
-    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,
-        "u:%user:%primary_group");
-    cs.reinitialize(conf, null);
-    checkQMapping("a", "agroup", cs);
-
-    // non-primary group mapping
-    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,
-        "g:asubgroup1:" + Q1);
-    cs.reinitialize(conf, null);
-    checkQMapping("a", Q1, cs);
-
-    // space trimming
-    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING, "    u : a : " + Q1);
-    cs.reinitialize(conf, null);
-    checkQMapping("a", Q1, cs);
-
-    csConf = new CapacitySchedulerConfiguration();
-    csConf.set(YarnConfiguration.RM_SCHEDULER,
-        CapacityScheduler.class.getName());
-    setupQueueConfiguration(csConf);
-    conf = new YarnConfiguration(csConf);
-
-    resourceManager = new MockRM(csConf);
-    resourceManager.start();
-
-    conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
-        SimpleGroupsMapping.class, GroupMappingServiceProvider.class);
-    conf.set(CapacitySchedulerConfiguration.ENABLE_QUEUE_MAPPING_OVERRIDE,
-        "true");
-    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING, "u:user:" + Q1);
-    resourceManager.getResourceScheduler().reinitialize(conf, null);
-
-    // ensure that if the user specifies a Q that is still overriden
-    checkAppQueue(resourceManager, "user", Q2, Q1);
-
-    // toggle admin override and retry
-    conf.setBoolean(
-        CapacitySchedulerConfiguration.ENABLE_QUEUE_MAPPING_OVERRIDE,
-        false);
-    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING, "u:user:" + Q1);
-    setupQueueConfiguration(csConf);
-    resourceManager.getResourceScheduler().reinitialize(conf, null);
-
-    checkAppQueue(resourceManager, "user", Q2, Q2);
-
-    // ensure that if a user does not specify a Q, the user mapping is used
-    checkAppQueue(resourceManager, "user", null, Q1);
-
-    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING, "g:usergroup:" + Q2);
-    setupQueueConfiguration(csConf);
-    resourceManager.getResourceScheduler().reinitialize(conf, null);
-
-    // ensure that if a user does not specify a Q, the group mapping is used
-    checkAppQueue(resourceManager, "user", null, Q2);
-
-    // if the mapping specifies a queue that does not exist, the job is rejected
-    conf.set(CapacitySchedulerConfiguration.QUEUE_MAPPING,
-        "u:user:non_existent_queue");
-    setupQueueConfiguration(csConf);
-
-    boolean fail = false;
-    try {
-      resourceManager.getResourceScheduler().reinitialize(conf, null);
-    }
-    catch (IOException ioex) {
-      fail = true;
-    }
-    Assert.assertTrue("queue initialization failed for non-existent q", fail);
-    resourceManager.stop();
-  }
-
-  private void checkAppQueue(MockRM resourceManager, String user,
-      String submissionQueue, String expected)
-      throws Exception {
-    RMApp app = resourceManager.submitApp(200, "name", user,
-        new HashMap<ApplicationAccessType, String>(), false, submissionQueue, -1,
-        null, "MAPREDUCE", false);
-    RMAppState expectedState = expected.isEmpty() ? RMAppState.FAILED
-        : RMAppState.ACCEPTED;
-    resourceManager.waitForState(app.getApplicationId(), expectedState);
-    // get scheduler app
-    CapacityScheduler cs = (CapacityScheduler)
-        resourceManager.getResourceScheduler();
-    SchedulerApplication schedulerApp =
-        cs.getSchedulerApplications().get(app.getApplicationId());
-    String queue = "";
-    if (schedulerApp != null) {
-      queue = schedulerApp.getQueue().getQueueName();
-    }
-    Assert.assertTrue("expected " + expected + " actual " + queue,
-        expected.equals(queue));
-    Assert.assertEquals(expected, app.getQueue());
   }
 
   private void checkInvalidQMapping(YarnConfiguration conf,
@@ -227,10 +122,12 @@ public class TestQueueMappings {
         fail);
   }
 
-  private void checkQMapping(String user, String expected, CapacityScheduler cs)
+  private void checkQMapping(QueueMapping expected)
           throws IOException {
-    String actual = cs.getMappedQueueForTest(user);
-    Assert.assertTrue("expected " + expected + " actual " + actual,
-        expected.equals(actual));
+    UserGroupMappingPlacementRule rule =
+        (UserGroupMappingPlacementRule) cs.getRMContext()
+            .getQueuePlacementManager().getPlacementRules().get(0);
+    QueueMapping queueMapping = rule.getQueueMappings().get(0);
+    Assert.assertEquals(queueMapping, expected);
   }
 }


[34/50] [abbrv] hadoop git commit: YARN-2005. Blacklisting support for scheduling AMs. (Anubhav Dhoot via kasha)

Posted by ec...@apache.org.
YARN-2005. Blacklisting support for scheduling AMs. (Anubhav Dhoot via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81df7b58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81df7b58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81df7b58

Branch: refs/heads/HADOOP-11890
Commit: 81df7b586a16f8226c7b01c139c1c70c060399c3
Parents: 7269906
Author: Karthik Kambatla <ka...@apache.org>
Authored: Sun Sep 13 17:03:15 2015 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Sun Sep 13 17:03:15 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   2 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |   9 ++
 .../src/main/resources/yarn-default.xml         |  18 +++
 .../blacklist/BlacklistManager.java             |  47 ++++++
 .../blacklist/BlacklistUpdates.java             |  47 ++++++
 .../blacklist/DisabledBlacklistManager.java     |  45 ++++++
 .../blacklist/SimpleBlacklistManager.java       |  84 +++++++++++
 .../server/resourcemanager/rmapp/RMAppImpl.java |  32 +++-
 .../rmapp/attempt/RMAppAttempt.java             |   7 +
 .../rmapp/attempt/RMAppAttemptImpl.java         |  58 +++++++-
 .../scheduler/AppSchedulingInfo.java            |  78 +++++++---
 .../scheduler/SchedulerApplicationAttempt.java  |  37 +++--
 .../scheduler/capacity/CapacityScheduler.java   |   9 +-
 .../common/fica/FiCaSchedulerUtils.java         |  48 ------
 .../scheduler/fair/FairScheduler.java           |   9 +-
 .../scheduler/fifo/FifoScheduler.java           |  11 +-
 .../yarn/server/resourcemanager/MockRM.java     |  14 +-
 .../applicationsmanager/TestAMRestart.java      | 149 +++++++++++++++++--
 .../blacklist/TestBlacklistManager.java         | 118 +++++++++++++++
 .../TestRMAppLogAggregationStatus.java          |   2 +-
 .../rmapp/TestRMAppTransitions.java             |   2 +-
 .../capacity/TestCapacityScheduler.java         |  25 ++++
 .../scheduler/fair/FairSchedulerTestBase.java   |   2 +-
 23 files changed, 741 insertions(+), 112 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3246946..4a3a666 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -430,6 +430,8 @@ Release 2.8.0 - UNRELEASED
     YARN-4145. Make RMHATestBase abstract so its not run when running all
     tests under that namespace (adhoot via rkanter)
 
+    YARN-2005. Blacklisting support for scheduling AMs. (Anubhav Dhoot via kasha)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 9ec25ae..cc4f5de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -2025,6 +2025,15 @@ public class YarnConfiguration extends Configuration {
   public static final String NM_PROVIDER_CONFIGURED_NODE_LABELS =
       NM_NODE_LABELS_PROVIDER_PREFIX + "configured-node-labels";
 
+  public static final String AM_BLACKLISTING_ENABLED =
+      YARN_PREFIX + "am.blacklisting.enabled";
+  public static final boolean DEFAULT_AM_BLACKLISTING_ENABLED = true;
+
+  public static final String AM_BLACKLISTING_DISABLE_THRESHOLD =
+      YARN_PREFIX + "am.blacklisting.disable-failure-threshold";
+  public static final float DEFAULT_AM_BLACKLISTING_DISABLE_THRESHOLD = 0.8f;
+
+
   public YarnConfiguration() {
     super();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index b76defb..bcd64c3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2293,4 +2293,22 @@
     <value>org.apache.hadoop.yarn.server.nodemanager.amrmproxy.DefaultRequestInterceptor</value>
   </property>
 
+ <property>
+    <description>
+    Enable/disable blacklisting of hosts for AM scheduling, based on AM
+    failures on those hosts.
+    </description>
+    <name>yarn.am.blacklisting.enabled</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <description>
+    Maximum fraction of NodeManager hosts that may be blacklisted for AM
+    scheduling. Beyond this ratio, blacklisting is disabled to avoid the
+    danger of blacklisting the entire cluster.
+    </description>
+    <name>yarn.am.blacklisting.disable-failure-threshold</name>
+    <value>0.8f</value>
+  </property>
 </configuration>

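A worked example of how the threshold behaves (based on SimpleBlacklistManager below): with 10 usable NodeManager hosts and the default threshold of 0.8, blacklist additions are honored while fewer than 0.8 x 10 = 8 hosts are blacklisted; once 8 or more hosts have been added, getBlacklistUpdates() returns them all as removals instead, effectively disabling AM blacklisting rather than risking the whole cluster.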
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/BlacklistManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/BlacklistManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/BlacklistManager.java
new file mode 100644
index 0000000..f03b421
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/BlacklistManager.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.blacklist;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+
+/**
+ * Tracks blacklists based on failures reported on nodes.
+ */
+@Private
+public interface BlacklistManager {
+
+  /**
+   * Report the failure of a container on a node.
+   * @param node the node on which a container failed
+   */
+  void addNode(String node);
+
+  /**
+   * Get {@link BlacklistUpdates} that indicate which nodes should be
+   * added to or removed from the blacklist.
+   * @return {@link BlacklistUpdates}
+   */
+  BlacklistUpdates getBlacklistUpdates();
+
+  /**
+   * Refresh the number of NodeManager hosts available for scheduling.
+   * @param nodeHostCount the number of NodeManager hosts.
+   */
+  void refreshNodeHostCount(int nodeHostCount);
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/BlacklistUpdates.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/BlacklistUpdates.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/BlacklistUpdates.java
new file mode 100644
index 0000000..c76dfb4
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/BlacklistUpdates.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.blacklist;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+
+import java.util.List;
+
+/**
+ * Class to track blacklist additions and removals.
+ */
+@Private
+public class BlacklistUpdates {
+
+  private List<String> additions;
+  private List<String> removals;
+
+  public BlacklistUpdates(List<String> additions,
+      List<String> removals) {
+    this.additions = additions;
+    this.removals = removals;
+  }
+
+  public List<String> getAdditions() {
+    return additions;
+  }
+
+  public List<String> getRemovals() {
+    return removals;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/DisabledBlacklistManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/DisabledBlacklistManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/DisabledBlacklistManager.java
new file mode 100644
index 0000000..f155b45
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/DisabledBlacklistManager.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.blacklist;
+
+import java.util.ArrayList;
+
+/**
+ * A {@link BlacklistManager} that returns no blacklists.
+ */
+public class DisabledBlacklistManager implements BlacklistManager {
+
+  private static final ArrayList<String> EMPTY_LIST = new ArrayList<String>();
+  private BlacklistUpdates noBlacklist =
+      new BlacklistUpdates(EMPTY_LIST, EMPTY_LIST);
+
+  @Override
+  public void addNode(String node) {
+  }
+
+  @Override
+  public BlacklistUpdates getBlacklistUpdates() {
+    return noBlacklist;
+  }
+
+  @Override
+  public void refreshNodeHostCount(int nodeHostCount) {
+    // Do nothing
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/SimpleBlacklistManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/SimpleBlacklistManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/SimpleBlacklistManager.java
new file mode 100644
index 0000000..a544ab8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/SimpleBlacklistManager.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.blacklist;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Maintains a list of failed nodes and returns it as blacklist additions
+ * while the number of blacklisted nodes stays below a threshold percentage
+ * of the total nodes. Once the threshold is crossed, all tracked nodes are
+ * returned as removals, reversing the previous additions.
+ */
+public class SimpleBlacklistManager implements BlacklistManager {
+
+  private int numberOfNodeManagerHosts;
+  private final double blacklistDisableFailureThreshold;
+  private final Set<String> blacklistNodes = new HashSet<>();
+  private static final ArrayList<String> EMPTY_LIST = new ArrayList<>();
+
+  private static final Log LOG = LogFactory.getLog(SimpleBlacklistManager.class);
+
+  public SimpleBlacklistManager(int numberOfNodeManagerHosts,
+      double blacklistDisableFailureThreshold) {
+    this.numberOfNodeManagerHosts = numberOfNodeManagerHosts;
+    this.blacklistDisableFailureThreshold = blacklistDisableFailureThreshold;
+  }
+
+  @Override
+  public void addNode(String node) {
+    blacklistNodes.add(node);
+  }
+
+  @Override
+  public void refreshNodeHostCount(int nodeHostCount) {
+    this.numberOfNodeManagerHosts = nodeHostCount;
+  }
+
+  @Override
+  public BlacklistUpdates getBlacklistUpdates() {
+    BlacklistUpdates ret;
+    List<String> blacklist = new ArrayList<>(blacklistNodes);
+    final int currentBlacklistSize = blacklist.size();
+    final double failureThreshold = this.blacklistDisableFailureThreshold *
+        numberOfNodeManagerHosts;
+    if (currentBlacklistSize < failureThreshold) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("blacklist size " + currentBlacklistSize + " is less than " +
+            "failure threshold ratio " + blacklistDisableFailureThreshold +
+            " out of total usable nodes " + numberOfNodeManagerHosts);
+      }
+      ret = new BlacklistUpdates(blacklist, EMPTY_LIST);
+    } else {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("blacklist size " + currentBlacklistSize + " is more than " +
+            "failure threshold ratio " + blacklistDisableFailureThreshold +
+            " out of total usable nodes " + numberOfNodeManagerHosts);
+      }
+      ret = new BlacklistUpdates(EMPTY_LIST, blacklist);
+    }
+    return ret;
+  }
+}

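A minimal sketch of the threshold behavior (host names invented; note that getBlacklistUpdates() returns the full tracked set on each call, not a delta):

import org.apache.hadoop.yarn.server.resourcemanager.blacklist.BlacklistUpdates;
import org.apache.hadoop.yarn.server.resourcemanager.blacklist.SimpleBlacklistManager;

public class BlacklistThresholdDemo {
  public static void main(String[] args) {
    // 4 usable hosts with a 0.5 threshold: additions are honored while
    // fewer than 0.5 * 4 = 2 hosts are tracked.
    SimpleBlacklistManager mgr = new SimpleBlacklistManager(4, 0.5);

    mgr.addNode("host1");
    BlacklistUpdates first = mgr.getBlacklistUpdates();
    System.out.println(first.getAdditions());  // [host1]

    mgr.addNode("host2");
    BlacklistUpdates second = mgr.getBlacklistUpdates();
    // 2 is not below the threshold of 2, so both hosts come back as
    // removals (in unspecified order), reversing the earlier additions.
    System.out.println(second.getRemovals());
  }
}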
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 2eb74f7..7cf39b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -74,6 +74,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.blacklist.BlacklistManager;
+import org.apache.hadoop.yarn.server.resourcemanager.blacklist.DisabledBlacklistManager;
+import org.apache.hadoop.yarn.server.resourcemanager.blacklist.SimpleBlacklistManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
@@ -133,6 +136,8 @@ public class RMAppImpl implements RMApp, Recoverable {
   private final Set<String> applicationTags;
 
   private final long attemptFailuresValidityInterval;
+  private final boolean amBlacklistingEnabled;
+  private final float blacklistDisableThreshold;
 
   private Clock systemClock;
 
@@ -456,6 +461,18 @@ public class RMAppImpl implements RMApp, Recoverable {
     maxLogAggregationDiagnosticsInMemory = conf.getInt(
         YarnConfiguration.RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY,
         YarnConfiguration.DEFAULT_RM_MAX_LOG_AGGREGATION_DIAGNOSTICS_IN_MEMORY);
+
+    amBlacklistingEnabled = conf.getBoolean(
+        YarnConfiguration.AM_BLACKLISTING_ENABLED,
+        YarnConfiguration.DEFAULT_AM_BLACKLISTING_ENABLED);
+
+    if (amBlacklistingEnabled) {
+      blacklistDisableThreshold = conf.getFloat(
+          YarnConfiguration.AM_BLACKLISTING_DISABLE_THRESHOLD,
+          YarnConfiguration.DEFAULT_AM_BLACKLISTING_DISABLE_THRESHOLD);
+    } else {
+      blacklistDisableThreshold = 0.0f;
+    }
   }
 
   @Override
@@ -797,6 +814,18 @@ public class RMAppImpl implements RMApp, Recoverable {
   private void createNewAttempt() {
     ApplicationAttemptId appAttemptId =
         ApplicationAttemptId.newInstance(applicationId, attempts.size() + 1);
+
+    BlacklistManager currentAMBlacklist;
+    if (currentAttempt != null) {
+      currentAMBlacklist = currentAttempt.getAMBlacklist();
+    } else {
+      if (amBlacklistingEnabled) {
+        currentAMBlacklist = new SimpleBlacklistManager(
+            scheduler.getNumClusterNodes(), blacklistDisableThreshold);
+      } else {
+        currentAMBlacklist = new DisabledBlacklistManager();
+      }
+    }
     RMAppAttempt attempt =
         new RMAppAttemptImpl(appAttemptId, rmContext, scheduler, masterService,
           submissionContext, conf,
@@ -804,7 +833,8 @@ public class RMAppImpl implements RMApp, Recoverable {
           // previously failed attempts(which should not include Preempted,
           // hardware error and NM resync) + 1) equal to the max-attempt
           // limit.
-          maxAppAttempts == (getNumFailedAppAttempts() + 1), amReq);
+          maxAppAttempts == (getNumFailedAppAttempts() + 1), amReq,
+          currentAMBlacklist);
     attempts.put(appAttemptId, attempt);
     currentAttempt = attempt;
   }
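
The wiring above means a new attempt inherits its predecessor's
BlacklistManager, so AM failures accumulate across attempts. A minimal
sketch of turning the feature on, using the two YarnConfiguration constants
read in the constructor hunk above (shown programmatically; the same values
can go in yarn-site.xml under the keys those constants name):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class AmBlacklistConfig {
      public static Configuration amBlacklistingEnabled() {
        Configuration conf = new YarnConfiguration();
        // Enable AM blacklisting; disable it once half of the cluster's
        // nodes are blacklisted for this application's AM.
        conf.setBoolean(YarnConfiguration.AM_BLACKLISTING_ENABLED, true);
        conf.setFloat(
            YarnConfiguration.AM_BLACKLISTING_DISABLE_THRESHOLD, 0.5f);
        return conf;
      }
    }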

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
index b85174e..4dd8345 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier;
+import org.apache.hadoop.yarn.server.resourcemanager.blacklist.BlacklistManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 
 /**
@@ -185,6 +186,12 @@ public interface RMAppAttempt extends EventHandler<RMAppAttemptEvent> {
   ApplicationResourceUsageReport getApplicationResourceUsageReport();
 
   /**
+   * Get the {@link BlacklistManager} that manages blacklists for AM failures.
+   * @return the {@link BlacklistManager} that tracks AM failures.
+   */
+  BlacklistManager getAMBlacklist();
+
+  /**
    * the start time of the application.
    * @return the start time of the application.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index 74a4000..629b2a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -36,7 +36,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 
 import javax.crypto.SecretKey;
 
-import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -71,6 +70,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.blacklist.BlacklistManager;
+import org.apache.hadoop.yarn.server.resourcemanager.blacklist.BlacklistUpdates;
+import org.apache.hadoop.yarn.server.resourcemanager.blacklist.DisabledBlacklistManager;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationAttemptStateData;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
@@ -182,6 +184,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
   
   private RMAppAttemptMetrics attemptMetrics = null;
   private ResourceRequest amReq = null;
+  private BlacklistManager blacklistedNodesForAM = null;
 
   private static final StateMachineFactory<RMAppAttemptImpl,
                                            RMAppAttemptState,
@@ -434,6 +437,16 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
       ApplicationMasterService masterService,
       ApplicationSubmissionContext submissionContext,
       Configuration conf, boolean maybeLastAttempt, ResourceRequest amReq) {
+    this(appAttemptId, rmContext, scheduler, masterService, submissionContext,
+        conf, maybeLastAttempt, amReq, new DisabledBlacklistManager());
+  }
+
+  public RMAppAttemptImpl(ApplicationAttemptId appAttemptId,
+      RMContext rmContext, YarnScheduler scheduler,
+      ApplicationMasterService masterService,
+      ApplicationSubmissionContext submissionContext,
+      Configuration conf, boolean maybeLastAttempt, ResourceRequest amReq,
+      BlacklistManager amBlacklist) {
     this.conf = conf;
     this.applicationAttemptId = appAttemptId;
     this.rmContext = rmContext;
@@ -454,6 +467,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
         new RMAppAttemptMetrics(applicationAttemptId, rmContext);
     
     this.amReq = amReq;
+    this.blacklistedNodesForAM = amBlacklist;
   }
 
   @Override
@@ -939,12 +953,25 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
         appAttempt.amReq.setPriority(AM_CONTAINER_PRIORITY);
         appAttempt.amReq.setResourceName(ResourceRequest.ANY);
         appAttempt.amReq.setRelaxLocality(true);
-        
+
+        appAttempt.getAMBlacklist().refreshNodeHostCount(
+            appAttempt.scheduler.getNumClusterNodes());
+
+        BlacklistUpdates amBlacklist = appAttempt.getAMBlacklist()
+            .getBlacklistUpdates();
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Using blacklist for AM: additions(" +
+              amBlacklist.getAdditions() + ") and removals(" +
+              amBlacklist.getRemovals() + ")");
+        }
         // AM resource has been checked when submission
         Allocation amContainerAllocation =
-            appAttempt.scheduler.allocate(appAttempt.applicationAttemptId,
+            appAttempt.scheduler.allocate(
+                appAttempt.applicationAttemptId,
                 Collections.singletonList(appAttempt.amReq),
-                EMPTY_CONTAINER_RELEASE_LIST, null, null);
+                EMPTY_CONTAINER_RELEASE_LIST,
+                amBlacklist.getAdditions(),
+                amBlacklist.getRemovals());
         if (amContainerAllocation != null
             && amContainerAllocation.getContainers() != null) {
           assert (amContainerAllocation.getContainers().size() == 0);
@@ -1331,7 +1358,11 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
     }
   }
 
-  private static final class UnmanagedAMAttemptSavedTransition 
+  private boolean shouldCountTowardsNodeBlacklisting(int exitStatus) {
+    return exitStatus == ContainerExitStatus.DISKS_FAILED;
+  }
+
+  private static final class UnmanagedAMAttemptSavedTransition
                                                 extends AMLaunchedTransition {
     @Override
     public void transition(RMAppAttemptImpl appAttempt,
@@ -1694,6 +1725,14 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
   private void sendAMContainerToNM(RMAppAttemptImpl appAttempt,
       RMAppAttemptContainerFinishedEvent containerFinishedEvent) {
     NodeId nodeId = containerFinishedEvent.getNodeId();
+    if (containerFinishedEvent.getContainerStatus() != null) {
+      if (shouldCountTowardsNodeBlacklisting(containerFinishedEvent
+          .getContainerStatus().getExitStatus())) {
+        appAttempt.addAMNodeToBlackList(containerFinishedEvent.getNodeId());
+      }
+    } else {
+      LOG.warn("No ContainerStatus in containerFinishedEvent");
+    }
     finishedContainersSentToAM.putIfAbsent(nodeId,
       new ArrayList<ContainerStatus>());
     appAttempt.finishedContainersSentToAM.get(nodeId).add(
@@ -1708,6 +1747,15 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
     }
   }
 
+  private void addAMNodeToBlackList(NodeId nodeId) {
+    blacklistedNodesForAM.addNode(nodeId.getHost());
+  }
+
+  @Override
+  public BlacklistManager getAMBlacklist() {
+    return blacklistedNodesForAM;
+  }
+
   private static void addJustFinishedContainer(RMAppAttemptImpl appAttempt,
       RMAppAttemptContainerFinishedEvent containerFinishedEvent) {
     appAttempt.justFinishedContainers.putIfAbsent(containerFinishedEvent

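Taken together, the RMAppAttemptImpl changes above form a loop: an AM
container finishing with DISKS_FAILED puts its host into the attempt's
BlacklistManager, and the next attempt (which inherits that manager via
RMAppImpl.createNewAttempt) passes the resulting additions and removals to
scheduler.allocate(). A condensed sketch of that flow, with hypothetical
host and sizing values:

    import org.apache.hadoop.yarn.server.resourcemanager.blacklist.BlacklistManager;
    import org.apache.hadoop.yarn.server.resourcemanager.blacklist.BlacklistUpdates;
    import org.apache.hadoop.yarn.server.resourcemanager.blacklist.SimpleBlacklistManager;

    public class AmBlacklistFlow {
      public static void main(String[] args) {
        BlacklistManager amBlacklist =
            new SimpleBlacklistManager(10, 0.8); // built by RMAppImpl

        // sendAMContainerToNM: a DISKS_FAILED exit status on "bad-host"
        // triggers addAMNodeToBlackList, which records the host.
        amBlacklist.addNode("bad-host");

        // ScheduleTransition for the next attempt: refresh the node count,
        // then hand additions/removals to scheduler.allocate().
        amBlacklist.refreshNodeHostCount(10);
        BlacklistUpdates updates = amBlacklist.getBlacklistUpdates();
        System.out.println(updates.getAdditions()); // [bad-host]
        System.out.println(updates.getRemovals());  // []
      }
    }
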
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
index 77ac5b3..e318d47 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java
@@ -65,7 +65,8 @@ public class AppSchedulingInfo {
       new org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.Comparator());
   final Map<Priority, Map<String, ResourceRequest>> requests =
     new ConcurrentHashMap<Priority, Map<String, ResourceRequest>>();
-  private Set<String> blacklist = new HashSet<String>();
+  private Set<String> userBlacklist = new HashSet<>();
+  private Set<String> amBlacklist = new HashSet<>();
 
   //private final ApplicationStore store;
   private ActiveUsersManager activeUsersManager;
@@ -217,21 +218,39 @@ public class AppSchedulingInfo {
   }
 
   /**
-   * The ApplicationMaster is updating the blacklist
+   * The ApplicationMaster is updating the userBlacklist used for containers
+   * other than AMs.
    *
-   * @param blacklistAdditions resources to be added to the blacklist
-   * @param blacklistRemovals resources to be removed from the blacklist
+   * @param blacklistAdditions resources to be added to the userBlacklist
+   * @param blacklistRemovals resources to be removed from the userBlacklist
    */
-  synchronized public void updateBlacklist(
+  public void updateBlacklist(
       List<String> blacklistAdditions, List<String> blacklistRemovals) {
-    // Add to blacklist
-    if (blacklistAdditions != null) {
-      blacklist.addAll(blacklistAdditions);
-    }
+    updateUserOrAMBlacklist(userBlacklist, blacklistAdditions,
+        blacklistRemovals);
+  }
+
+  /**
+   * The RM is updating the blacklist for AM containers.
+   * @param blacklistAdditions resources to be added to the amBlacklist
+   * @param blacklistRemovals resources to be removed from the amBlacklist
+   */
+  public void updateAMBlacklist(
+      List<String> blacklistAdditions, List<String> blacklistRemovals) {
+    updateUserOrAMBlacklist(amBlacklist, blacklistAdditions,
+        blacklistRemovals);
+  }
+
+  void updateUserOrAMBlacklist(Set<String> blacklist,
+      List<String> blacklistAdditions, List<String> blacklistRemovals) {
+    synchronized (blacklist) {
+      if (blacklistAdditions != null) {
+        blacklist.addAll(blacklistAdditions);
+      }
 
-    // Remove from blacklist
-    if (blacklistRemovals != null) {
-      blacklist.removeAll(blacklistRemovals);
+      if (blacklistRemovals != null) {
+        blacklist.removeAll(blacklistRemovals);
+      }
     }
   }
 
@@ -263,8 +282,23 @@ public class AppSchedulingInfo {
     return (request == null) ? null : request.getCapability();
   }
 
-  public synchronized boolean isBlacklisted(String resourceName) {
-    return blacklist.contains(resourceName);
+  /**
+   * Returns whether the given resource is blacklisted by either the user
+   * or the system.
+   * @param resourceName the resource name
+   * @param useAMBlacklist true if the amBlacklist should be checked
+   * @return true if it is blacklisted
+   */
+  public boolean isBlacklisted(String resourceName,
+      boolean useAMBlacklist) {
+    if (useAMBlacklist) {
+      synchronized (amBlacklist) {
+        return amBlacklist.contains(resourceName);
+      }
+    } else {
+      synchronized (userBlacklist) {
+        return userBlacklist.contains(resourceName);
+      }
+    }
   }
   
   /**
@@ -473,19 +507,25 @@ public class AppSchedulingInfo {
     this.queue = queue;
   }
 
-  public synchronized Set<String> getBlackList() {
-    return this.blacklist;
+  public Set<String> getBlackList() {
+    return this.userBlacklist;
   }
 
-  public synchronized Set<String> getBlackListCopy() {
-    return new HashSet<>(this.blacklist);
+  public Set<String> getBlackListCopy() {
+    synchronized (userBlacklist) {
+      return new HashSet<>(this.userBlacklist);
+    }
   }
 
   public synchronized void transferStateFromPreviousAppSchedulingInfo(
       AppSchedulingInfo appInfo) {
     //    this.priorities = appInfo.getPriorities();
     //    this.requests = appInfo.getRequests();
-    this.blacklist = appInfo.getBlackList();
+    // This should not require locking the userBlacklist since it will not be
+    // used by this instance until after setCurrentAppAttempt.
+    // This should be cleaned up to avoid sharing the set between instances,
+    // which would also allow removing getBlackList.
+    this.userBlacklist = appInfo.getBlackList();
   }
 
   public synchronized void recoverContainer(RMContainer rmContainer) {

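The net effect in AppSchedulingInfo is two independently synchronized sets
with a single lookup switch. A stripped-down, self-contained model of that
behavior (not the RM class itself, which has a much larger constructor):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class DualBlacklistModel {
      private final Set<String> userBlacklist = new HashSet<>();
      private final Set<String> amBlacklist = new HashSet<>();

      void update(Set<String> blacklist, List<String> additions,
          List<String> removals) {
        // Lock only the set being updated, as in updateUserOrAMBlacklist.
        synchronized (blacklist) {
          if (additions != null) {
            blacklist.addAll(additions);
          }
          if (removals != null) {
            blacklist.removeAll(removals);
          }
        }
      }

      boolean isBlacklisted(String resourceName, boolean useAMBlacklist) {
        Set<String> blacklist = useAMBlacklist ? amBlacklist : userBlacklist;
        synchronized (blacklist) {
          return blacklist.contains(resourceName);
        }
      }

      public static void main(String[] args) {
        DualBlacklistModel m = new DualBlacklistModel();
        m.update(m.amBlacklist, Arrays.asList("host-1"), null);
        System.out.println(m.isBlacklisted("host-1", true));  // true
        System.out.println(m.isBlacklisted("host-1", false)); // false
      }
    }
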
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 4872543..b361d15 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -470,16 +470,9 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
       RMContainer rmContainer = i.next();
       Container container = rmContainer.getContainer();
       ContainerType containerType = ContainerType.TASK;
-      // The working knowledge is that masterContainer for AM is null as it
-      // itself is the master container.
-      RMAppAttempt appAttempt =
-          rmContext
-              .getRMApps()
-              .get(
-                  container.getId().getApplicationAttemptId()
-                      .getApplicationId()).getCurrentAppAttempt();
-      if (appAttempt.getMasterContainer() == null
-          && appAttempt.getSubmissionContext().getUnmanagedAM() == false) {
+      boolean isWaitingForAMContainer = isWaitingForAMContainer(
+          container.getId().getApplicationAttemptId().getApplicationId());
+      if (isWaitingForAMContainer) {
         containerType = ContainerType.APPLICATION_MASTER;
       }
       try {
@@ -509,6 +502,16 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
     return new ContainersAndNMTokensAllocation(returnContainerList, nmTokens);
   }
 
+  public boolean isWaitingForAMContainer(ApplicationId applicationId) {
+    // The working knowledge is that masterContainer for AM is null as it
+    // itself is the master container.
+    RMAppAttempt appAttempt =
+        rmContext.getRMApps().get(applicationId).getCurrentAppAttempt();
+    return (appAttempt != null && appAttempt.getMasterContainer() == null
+        && appAttempt.getSubmissionContext().getUnmanagedAM() == false);
+  }
+
+  // Blacklist used for user containers
   public synchronized void updateBlacklist(
       List<String> blacklistAdditions, List<String> blacklistRemovals) {
     if (!isStopped) {
@@ -516,9 +519,19 @@ public class SchedulerApplicationAttempt implements SchedulableEntity {
           blacklistAdditions, blacklistRemovals);
     }
   }
-  
+
+  // Blacklist used for AM containers
+  public synchronized void updateAMBlacklist(
+      List<String> blacklistAdditions, List<String> blacklistRemovals) {
+    if (!isStopped) {
+      this.appSchedulingInfo.updateAMBlacklist(
+          blacklistAdditions, blacklistRemovals);
+    }
+  }
+
   public boolean isBlacklisted(String resourceName) {
-    return this.appSchedulingInfo.isBlacklisted(resourceName);
+    boolean useAMBlacklist = isWaitingForAMContainer(getApplicationId());
+    return this.appSchedulingInfo.isBlacklisted(resourceName, useAMBlacklist);
   }
 
   public synchronized int addMissedNonPartitionedRequestSchedulingOpportunity(

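isWaitingForAMContainer() is the pivot for the rest of the patch: while the
attempt has no master container yet (and is not an unmanaged AM), blacklist
traffic is treated as AM blacklist traffic. The Capacity, Fair, and Fifo
scheduler hunks below all repeat the same dispatch, sketched here as a
hypothetical helper:

    // The dispatch repeated in CapacityScheduler, FairScheduler, and
    // FifoScheduler allocate(); application is a SchedulerApplicationAttempt.
    static void routeBlacklistUpdate(SchedulerApplicationAttempt application,
        List<String> blacklistAdditions, List<String> blacklistRemovals) {
      if (application.isWaitingForAMContainer(application.getApplicationId())) {
        // The allocate call is on behalf of the AM container itself.
        application.updateAMBlacklist(blacklistAdditions, blacklistRemovals);
      } else {
        application.updateBlacklist(blacklistAdditions, blacklistRemovals);
      }
    }
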
http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index a7e9d8c..dbaccaf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -52,7 +52,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -934,7 +933,13 @@ public class CapacityScheduler extends
         }
       }
 
-      application.updateBlacklist(blacklistAdditions, blacklistRemovals);
+      if (application.isWaitingForAMContainer(application.getApplicationId())) {
+        // The allocate request is on behalf of the AM; update the AM
+        // blacklist.
+        application.updateAMBlacklist(
+            blacklistAdditions, blacklistRemovals);
+      } else {
+        application.updateBlacklist(blacklistAdditions, blacklistRemovals);
+      }
 
       allocation = application.getAllocation(getResourceCalculator(),
                    clusterResource, getMinimumResourceCapability());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerUtils.java
deleted file mode 100644
index 9bece9b..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerUtils.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica;
-
-import org.apache.commons.logging.Log;
-
-public class FiCaSchedulerUtils {
-
-  public static  boolean isBlacklisted(FiCaSchedulerApp application,
-      FiCaSchedulerNode node, Log LOG) {
-    if (application.isBlacklisted(node.getNodeName())) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Skipping 'host' " + node.getNodeName() + 
-            " for " + application.getApplicationId() + 
-            " since it has been blacklisted");
-      }
-      return true;
-    }
-
-    if (application.isBlacklisted(node.getRackName())) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Skipping 'rack' " + node.getRackName() + 
-            " for " + application.getApplicationId() + 
-            " since it has been blacklisted");
-      }
-      return true;
-    }
-
-    return false;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 3eefb8f..5243fb3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -955,7 +955,14 @@ public class FairScheduler extends
         preemptionContainerIds.add(container.getContainerId());
       }
 
-      application.updateBlacklist(blacklistAdditions, blacklistRemovals);
+      if (application.isWaitingForAMContainer(application.getApplicationId())) {
+        // The allocate request is on behalf of the AM; update the AM
+        // blacklist.
+        application.updateAMBlacklist(
+            blacklistAdditions, blacklistRemovals);
+      } else {
+        application.updateBlacklist(blacklistAdditions, blacklistRemovals);
+      }
+
       ContainersAndNMTokensAllocation allocation =
           application.pullNewlyAllocatedContainersAndNMTokens();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
index 6b77ceb..99760df 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java
@@ -352,11 +352,18 @@ public class FifoScheduler extends
         application.showRequests();
 
         LOG.debug("allocate:" +
-            " applicationId=" + applicationAttemptId + 
+            " applicationId=" + applicationAttemptId +
             " #ask=" + ask.size());
       }
 
-      application.updateBlacklist(blacklistAdditions, blacklistRemovals);
+      if (application.isWaitingForAMContainer(application.getApplicationId())) {
+        // The allocate request is on behalf of the AM; update the AM
+        // blacklist.
+        application.updateAMBlacklist(
+            blacklistAdditions, blacklistRemovals);
+      } else {
+        application.updateBlacklist(blacklistAdditions, blacklistRemovals);
+      }
+
       ContainersAndNMTokensAllocation allocation =
           application.pullNewlyAllocatedContainersAndNMTokens();
       Resource headroom = application.getHeadroom();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
index 5080355..e464401 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
@@ -750,10 +750,7 @@ public class MockRM extends ResourceManager {
 
   public static MockAM launchAM(RMApp app, MockRM rm, MockNM nm)
       throws Exception {
-    rm.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
-    RMAppAttempt attempt = app.getCurrentAppAttempt();
-    waitForSchedulerAppAttemptAdded(attempt.getAppAttemptId(), rm);
-    rm.waitForState(attempt.getAppAttemptId(), RMAppAttemptState.SCHEDULED);
+    RMAppAttempt attempt = waitForAttemptScheduled(app, rm);
     System.out.println("Launch AM " + attempt.getAppAttemptId());
     nm.nodeHeartbeat(true);
     MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
@@ -761,6 +758,15 @@ public class MockRM extends ResourceManager {
     return am;
   }
 
+  public static RMAppAttempt waitForAttemptScheduled(RMApp app, MockRM rm)
+      throws Exception {
+    rm.waitForState(app.getApplicationId(), RMAppState.ACCEPTED);
+    RMAppAttempt attempt = app.getCurrentAppAttempt();
+    waitForSchedulerAppAttemptAdded(attempt.getAppAttemptId(), rm);
+    rm.waitForState(attempt.getAppAttemptId(), RMAppAttemptState.SCHEDULED);
+    return attempt;
+  }
+
   public static MockAM launchAndRegisterAM(RMApp app, MockRM rm, MockNM nm)
       throws Exception {
     MockAM am = launchAM(app, rm, nm);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
index d579595..dc843b9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/TestAMRestart.java
@@ -35,8 +35,12 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.NMToken;
+import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.event.DrainDispatcher;
+import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -49,11 +53,14 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
+import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.util.ControlledClock;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.SystemClock;
@@ -82,21 +89,7 @@ public class TestAMRestart {
 
     MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
     int NUM_CONTAINERS = 3;
-    // allocate NUM_CONTAINERS containers
-    am1.allocate("127.0.0.1", 1024, NUM_CONTAINERS,
-      new ArrayList<ContainerId>());
-    nm1.nodeHeartbeat(true);
-
-    // wait for containers to be allocated.
-    List<Container> containers =
-        am1.allocate(new ArrayList<ResourceRequest>(),
-          new ArrayList<ContainerId>()).getAllocatedContainers();
-    while (containers.size() != NUM_CONTAINERS) {
-      nm1.nodeHeartbeat(true);
-      containers.addAll(am1.allocate(new ArrayList<ResourceRequest>(),
-        new ArrayList<ContainerId>()).getAllocatedContainers());
-      Thread.sleep(200);
-    }
+    allocateContainers(nm1, am1, NUM_CONTAINERS);
 
     // launch the 2nd container, for testing running container transferred.
     nm1.nodeHeartbeat(am1.getApplicationAttemptId(), 2, ContainerState.RUNNING);
@@ -244,6 +237,29 @@ public class TestAMRestart {
     rm1.stop();
   }
 
+  private List<Container> allocateContainers(MockNM nm1, MockAM am1,
+      int NUM_CONTAINERS) throws Exception {
+    // allocate NUM_CONTAINERS containers
+    am1.allocate("127.0.0.1", 1024, NUM_CONTAINERS,
+      new ArrayList<ContainerId>());
+    nm1.nodeHeartbeat(true);
+
+    // wait for containers to be allocated.
+    List<Container> containers =
+        am1.allocate(new ArrayList<ResourceRequest>(),
+          new ArrayList<ContainerId>()).getAllocatedContainers();
+    while (containers.size() != NUM_CONTAINERS) {
+      nm1.nodeHeartbeat(true);
+      containers.addAll(am1.allocate(new ArrayList<ResourceRequest>(),
+        new ArrayList<ContainerId>()).getAllocatedContainers());
+      Thread.sleep(200);
+    }
+
+    Assert.assertEquals("Did not get all containers allocated",
+        NUM_CONTAINERS, containers.size());
+    return containers;
+  }
+
   private void waitForContainersToFinish(int expectedNum, RMAppAttempt attempt)
       throws InterruptedException {
     int count = 0;
@@ -258,6 +274,9 @@ public class TestAMRestart {
   public void testNMTokensRebindOnAMRestart() throws Exception {
     YarnConfiguration conf = new YarnConfiguration();
     conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 3);
+    // To prevent the test from blacklisting nm1 for the AM, set the
+    // threshold to half of the 2 nodes, which is 1.
+    conf.setFloat(YarnConfiguration.AM_BLACKLISTING_DISABLE_THRESHOLD, 0.5f);
 
     MockRM rm1 = new MockRM(conf);
     rm1.start();
@@ -355,6 +374,106 @@ public class TestAMRestart {
     rm1.stop();
   }
 
+  @Test(timeout = 100000)
+  public void testAMBlacklistPreventsRestartOnSameNode() throws Exception {
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.setBoolean(YarnConfiguration.AM_BLACKLISTING_ENABLED, true);
+    MemoryRMStateStore memStore = new MemoryRMStateStore();
+    memStore.init(conf);
+    final DrainDispatcher dispatcher = new DrainDispatcher();
+    MockRM rm1 = new MockRM(conf, memStore) {
+      @Override
+      protected EventHandler<SchedulerEvent> createSchedulerEventDispatcher() {
+        return new SchedulerEventDispatcher(this.scheduler) {
+          @Override
+          public void handle(SchedulerEvent event) {
+            scheduler.handle(event);
+          }
+        };
+      }
+
+      @Override
+      protected Dispatcher createDispatcher() {
+        return dispatcher;
+      }
+    };
+
+    rm1.start();
+
+    MockNM nm1 =
+        new MockNM("127.0.0.1:1234", 8000, rm1.getResourceTrackerService());
+    nm1.registerNode();
+
+    MockNM nm2 =
+        new MockNM("127.0.0.2:2345", 8000, rm1.getResourceTrackerService());
+    nm2.registerNode();
+
+    RMApp app1 = rm1.submitApp(200);
+
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
+    CapacityScheduler scheduler =
+        (CapacityScheduler) rm1.getResourceScheduler();
+    ContainerId amContainer =
+        ContainerId.newContainerId(am1.getApplicationAttemptId(), 1);
+    // Find the node where the first attempt's AM container ran.
+    RMContainer rmContainer = scheduler.getRMContainer(amContainer);
+    NodeId nodeWhereAMRan = rmContainer.getAllocatedNode();
+
+    MockNM currentNode, otherNode;
+    if (nodeWhereAMRan.equals(nm1.getNodeId())) {
+      currentNode = nm1;
+      otherNode = nm2;
+    } else {
+      currentNode = nm2;
+      otherNode = nm1;
+    }
+
+    ContainerStatus containerStatus =
+        BuilderUtils.newContainerStatus(amContainer, ContainerState.COMPLETE,
+            "", ContainerExitStatus.DISKS_FAILED);
+    currentNode.containerStatus(containerStatus);
+    am1.waitForState(RMAppAttemptState.FAILED);
+    rm1.waitForState(app1.getApplicationId(), RMAppState.ACCEPTED);
+
+    // restart the am
+    RMAppAttempt attempt = rm1.waitForAttemptScheduled(app1, rm1);
+    System.out.println("Launch AM " + attempt.getAppAttemptId());
+
+    currentNode.nodeHeartbeat(true);
+    dispatcher.await();
+    Assert.assertEquals(
+        "AppAttemptState should still be SCHEDULED if currentNode is " +
+            "blacklisted correctly",
+        RMAppAttemptState.SCHEDULED,
+        attempt.getAppAttemptState());
+
+    otherNode.nodeHeartbeat(true);
+    dispatcher.await();
+
+    MockAM am2 = rm1.sendAMLaunched(attempt.getAppAttemptId());
+    rm1.waitForState(attempt.getAppAttemptId(), RMAppAttemptState.LAUNCHED);
+
+    amContainer =
+        ContainerId.newContainerId(am2.getApplicationAttemptId(), 1);
+    rmContainer = scheduler.getRMContainer(amContainer);
+    nodeWhereAMRan = rmContainer.getAllocatedNode();
+    Assert.assertEquals(
+        "After blacklisting AM should have run on the other node",
+        otherNode.getNodeId(), nodeWhereAMRan);
+
+    am2.registerAppAttempt();
+    rm1.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
+
+    List<Container> allocatedContainers =
+        allocateContainers(currentNode, am2, 1);
+    Assert.assertEquals(
+        "Even though AM is blacklisted from the node, application can still " +
+        "allocate containers there",
+        currentNode.getNodeId(), allocatedContainers.get(0).getNodeId());
+
+    rm1.stop();
+  }
+
   // AM container preempted, nm disk failure
   // should not be counted towards AM max retry count.
   @Test(timeout = 100000)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/TestBlacklistManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/TestBlacklistManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/TestBlacklistManager.java
new file mode 100644
index 0000000..96b373f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/blacklist/TestBlacklistManager.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.blacklist;
+
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.List;
+
+public class TestBlacklistManager {
+
+  @Test
+  public void testSimpleBlacklistBelowFailureThreshold() {
+    final int numberOfNodeManagerHosts = 3;
+    final double blacklistDisableFailureThreshold = 0.8;
+    BlacklistManager manager = new SimpleBlacklistManager(
+        numberOfNodeManagerHosts, blacklistDisableFailureThreshold);
+    String anyNode = "foo";
+    String anyNode2 = "bar";
+    manager.addNode(anyNode);
+    manager.addNode(anyNode2);
+    BlacklistUpdates blacklist = manager
+        .getBlacklistUpdates();
+
+    List<String> blacklistAdditions = blacklist.getAdditions();
+    Collections.sort(blacklistAdditions);
+    List<String> blacklistRemovals = blacklist.getRemovals();
+    String[] expectedBlacklistAdditions = new String[]{anyNode2, anyNode};
+    Assert.assertArrayEquals(
+        "Blacklist additions was not as expected",
+        expectedBlacklistAdditions,
+        blacklistAdditions.toArray());
+    Assert.assertTrue(
+        "Blacklist removals should be empty but was " +
+            blacklistRemovals,
+        blacklistRemovals.isEmpty());
+  }
+
+  @Test
+  public void testSimpleBlacklistAboveFailureThreshold() {
+    // Create a threshold of 0.5 * 3, i.e. at 1.5 node failures.
+    BlacklistManager manager = new SimpleBlacklistManager(3, 0.5);
+    String anyNode = "foo";
+    String anyNode2 = "bar";
+    manager.addNode(anyNode);
+    BlacklistUpdates blacklist = manager
+        .getBlacklistUpdates();
+
+    List<String> blacklistAdditions = blacklist.getAdditions();
+    Collections.sort(blacklistAdditions);
+    List<String> blacklistRemovals = blacklist.getRemovals();
+    String[] expectedBlacklistAdditions = new String[]{anyNode};
+    Assert.assertArrayEquals(
+        "Blacklist additions was not as expected",
+        expectedBlacklistAdditions,
+        blacklistAdditions.toArray());
+    Assert.assertTrue(
+        "Blacklist removals should be empty but was " +
+            blacklistRemovals,
+        blacklistRemovals.isEmpty());
+
+    manager.addNode(anyNode2);
+
+    blacklist = manager
+        .getBlacklistUpdates();
+    blacklistAdditions = blacklist.getAdditions();
+    Collections.sort(blacklistAdditions);
+    blacklistRemovals = blacklist.getRemovals();
+    Collections.sort(blacklistRemovals);
+    String[] expectedBlacklistRemovals = new String[] {anyNode2, anyNode};
+    Assert.assertTrue(
+        "Blacklist additions should be empty but was " +
+            blacklistAdditions,
+        blacklistAdditions.isEmpty());
+    Assert.assertArrayEquals(
+        "Blacklist removals was not as expected",
+        expectedBlacklistRemovals,
+        blacklistRemovals.toArray());
+  }
+
+  @Test
+  public void testDisabledBlacklist() {
+    BlacklistManager disabled = new DisabledBlacklistManager();
+    String anyNode = "foo";
+    disabled.addNode(anyNode);
+    BlacklistUpdates blacklist = disabled
+        .getBlacklistUpdates();
+
+    List<String> blacklistAdditions = blacklist.getAdditions();
+    List<String> blacklistRemovals = blacklist.getRemovals();
+    Assert.assertTrue(
+        "Blacklist additions should be empty but was " +
+            blacklistAdditions,
+        blacklistAdditions.isEmpty());
+    Assert.assertTrue(
+        "Blacklist removals should be empty but was " +
+            blacklistRemovals,
+        blacklistRemovals.isEmpty());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java
index fccfa19..484a1b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java
@@ -489,7 +489,7 @@ public class TestRMAppLogAggregationStatus {
           2, Resource.newInstance(10, 2), "test");
     return new RMAppImpl(this.appId, this.rmContext,
       conf, "test", "test", "default", submissionContext,
-      this.rmContext.getScheduler(),
+      scheduler,
       this.rmContext.getApplicationMasterService(),
       System.currentTimeMillis(), "test",
       null, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
index 2e64d61..a5e3308 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
@@ -970,7 +970,7 @@ public class TestRMAppTransitions {
             appState.getApplicationSubmissionContext().getApplicationId(),
             rmContext, conf,
             submissionContext.getApplicationName(), null,
-            submissionContext.getQueue(), submissionContext, null, null,
+            submissionContext.getQueue(), submissionContext, scheduler, null,
             appState.getSubmitTime(), submissionContext.getApplicationType(),
             submissionContext.getApplicationTags(),
             BuilderUtils.newResourceRequest(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
index 44773be..76a1351 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
@@ -655,6 +656,11 @@ public class TestCapacityScheduler {
     RMAppImpl app = mock(RMAppImpl.class);
     when(app.getApplicationId()).thenReturn(appId);
     RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
+    Container container = mock(Container.class);
+    when(attempt.getMasterContainer()).thenReturn(container);
+    ApplicationSubmissionContext submissionContext = mock(
+        ApplicationSubmissionContext.class);
+    when(attempt.getSubmissionContext()).thenReturn(submissionContext);
     when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
     when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
     when(app.getCurrentAppAttempt()).thenReturn(attempt);
@@ -715,6 +721,11 @@ public class TestCapacityScheduler {
     RMAppImpl app1 = mock(RMAppImpl.class);
     when(app1.getApplicationId()).thenReturn(appId1);
     RMAppAttemptImpl attempt1 = mock(RMAppAttemptImpl.class);
+    Container container = mock(Container.class);
+    when(attempt1.getMasterContainer()).thenReturn(container);
+    ApplicationSubmissionContext submissionContext = mock(
+        ApplicationSubmissionContext.class);
+    when(attempt1.getSubmissionContext()).thenReturn(submissionContext);
     when(attempt1.getAppAttemptId()).thenReturn(appAttemptId1);
     when(attempt1.getRMAppAttemptMetrics()).thenReturn(attemptMetric1);
     when(app1.getCurrentAppAttempt()).thenReturn(attempt1);
@@ -739,6 +750,8 @@ public class TestCapacityScheduler {
     RMAppImpl app2 = mock(RMAppImpl.class);
     when(app2.getApplicationId()).thenReturn(appId2);
     RMAppAttemptImpl attempt2 = mock(RMAppAttemptImpl.class);
+    when(attempt2.getMasterContainer()).thenReturn(container);
+    when(attempt2.getSubmissionContext()).thenReturn(submissionContext);
     when(attempt2.getAppAttemptId()).thenReturn(appAttemptId2);
     when(attempt2.getRMAppAttemptMetrics()).thenReturn(attemptMetric2);
     when(app2.getCurrentAppAttempt()).thenReturn(attempt2);
@@ -2876,6 +2889,11 @@ public class TestCapacityScheduler {
     RMAppImpl app = mock(RMAppImpl.class);
     when(app.getApplicationId()).thenReturn(appId);
     RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
+    Container container = mock(Container.class);
+    when(attempt.getMasterContainer()).thenReturn(container);
+    ApplicationSubmissionContext submissionContext = mock(
+        ApplicationSubmissionContext.class);
+    when(attempt.getSubmissionContext()).thenReturn(submissionContext);
     when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
     when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
     when(app.getCurrentAppAttempt()).thenReturn(attempt);
@@ -2953,6 +2971,11 @@ public class TestCapacityScheduler {
     RMAppImpl app = mock(RMAppImpl.class);
     when(app.getApplicationId()).thenReturn(appId);
     RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
+    Container container = mock(Container.class);
+    when(attempt.getMasterContainer()).thenReturn(container);
+    ApplicationSubmissionContext submissionContext = mock(
+        ApplicationSubmissionContext.class);
+    when(attempt.getSubmissionContext()).thenReturn(submissionContext);
     when(attempt.getAppAttemptId()).thenReturn(appAttemptId);
     when(attempt.getRMAppAttemptMetrics()).thenReturn(attemptMetric);
     when(app.getCurrentAppAttempt()).thenReturn(attempt);
@@ -2976,6 +2999,8 @@ public class TestCapacityScheduler {
     RMAppImpl app2 = mock(RMAppImpl.class);
     when(app2.getApplicationId()).thenReturn(appId2);
     RMAppAttemptImpl attempt2 = mock(RMAppAttemptImpl.class);
+    when(attempt2.getMasterContainer()).thenReturn(container);
+    when(attempt2.getSubmissionContext()).thenReturn(submissionContext);
     when(attempt2.getAppAttemptId()).thenReturn(appAttemptId2);
     when(attempt2.getRMAppAttemptMetrics()).thenReturn(attemptMetric2);
     when(app2.getCurrentAppAttempt()).thenReturn(attempt2);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81df7b58/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
index 403c8ea..1c9801d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java
@@ -220,7 +220,7 @@ public class FairSchedulerTestBase {
     ApplicationId appId = attId.getApplicationId();
     RMApp rmApp = new RMAppImpl(appId, rmContext, conf,
         null, user, null, ApplicationSubmissionContext.newInstance(appId, null,
-        queue, null, null, false, false, 0, amResource, null), null, null,
+        queue, null, null, false, false, 0, amResource, null), scheduler, null,
         0, null, null, null);
     rmContext.getRMApps().put(appId, rmApp);
     RMAppEvent event = new RMAppEvent(appId, RMAppEventType.START);

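The TestCapacityScheduler hunks above all repeat one stubbing pattern: each mocked RMAppAttemptImpl now gets a master container and a submission context, because the scheduler code under test dereferences both. A minimal sketch of that pattern pulled into a helper (the helper name is illustrative, not part of the patch):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;

// Hypothetical helper condensing the stubbing added in each hunk above.
static RMAppAttemptImpl newStubbedAttempt(ApplicationAttemptId attemptId) {
  RMAppAttemptImpl attempt = mock(RMAppAttemptImpl.class);
  // Stub the AM container and submission context so code under test that
  // dereferences them does not hit a NullPointerException.
  Container container = mock(Container.class);
  when(attempt.getMasterContainer()).thenReturn(container);
  ApplicationSubmissionContext submissionContext =
      mock(ApplicationSubmissionContext.class);
  when(attempt.getSubmissionContext()).thenReturn(submissionContext);
  when(attempt.getAppAttemptId()).thenReturn(attemptId);
  return attempt;
}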

[25/50] [abbrv] hadoop git commit: Fix up CHANGES.txt

Posted by ec...@apache.org.
Fix up CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fba06a78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fba06a78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fba06a78

Branch: refs/heads/HADOOP-11890
Commit: fba06a789ce093f4f2a9b1459a630e5e91b6b456
Parents: ea4bb27
Author: Kihwal Lee <ki...@apache.org>
Authored: Fri Sep 11 16:02:05 2015 -0500
Committer: Kihwal Lee <ki...@apache.org>
Committed: Fri Sep 11 16:02:54 2015 -0500

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fba06a78/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 842627f..6051807 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -454,9 +454,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
     classes at runtime. (Sean Busbey via atm)
 
-    HDFS-8099. Change "DFSInputStream has been closed already" message to
-    debug log level (Charles Lamb via Colin P. McCabe)
-
     HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys.
     (wheat9)
 
@@ -1331,6 +1328,9 @@ Release 2.7.2 - UNRELEASED
     HADOOP-5323. Trash documentation should describe its directory structure and
     configurations. (Weiwei Yang via ozawa)
 
+    HDFS-8099. Change "DFSInputStream has been closed already" message to
+    debug log level (Charles Lamb via Colin P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)


[12/50] [abbrv] hadoop git commit: YARN-4086. Allow Aggregated Log readers to handle HAR files (rkanter)

Posted by ec...@apache.org.
YARN-4086. Allow Aggregated Log readers to handle HAR files (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6dd6ca44
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6dd6ca44
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6dd6ca44

Branch: refs/heads/HADOOP-11890
Commit: 6dd6ca442aba8612c3780399a42bb473e4483021
Parents: 119cc75
Author: Robert Kanter <rk...@apache.org>
Authored: Wed Sep 9 18:03:04 2015 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Wed Sep 9 18:03:04 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   2 +
 .../hadoop-yarn/hadoop-yarn-client/pom.xml      |  12 ++++
 .../hadoop/yarn/client/cli/TestLogsCLI.java     |  50 +++++++++++++++
 .../application_1440536969523_0001.har/_SUCCESS |   0
 .../application_1440536969523_0001.har/_index   |   3 +
 .../_masterindex                                |   2 +
 .../application_1440536969523_0001.har/part-0   | Bin 0 -> 795 bytes
 .../hadoop-yarn/hadoop-yarn-common/pom.xml      |   4 ++
 .../yarn/logaggregation/LogCLIHelpers.java      |  16 ++++-
 .../yarn/webapp/log/AggregatedLogsBlock.java    |   7 +++
 .../logaggregation/TestAggregatedLogsBlock.java |  63 ++++++++++++++++++-
 .../application_1440536969523_0001.har/_SUCCESS |   0
 .../application_1440536969523_0001.har/_index   |   3 +
 .../_masterindex                                |   2 +
 .../application_1440536969523_0001.har/part-0   | Bin 0 -> 795 bytes
 15 files changed, 160 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7308075..aef0d31 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -428,6 +428,8 @@ Release 2.8.0 - UNRELEASED
     YARN-4121. Fix typos in capacity scheduler documentation.
     (Kai Sasaki via vvasudev)
 
+    YARN-4086. Allow Aggregated Log readers to handle HAR files (rkanter)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
index 77a6583..90f2bc7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
@@ -135,6 +135,18 @@
   <build>
     <plugins>
       <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>src/test/resources/application_1440536969523_0001.har/_index</exclude>
+            <exclude>src/test/resources/application_1440536969523_0001.har/part-0</exclude>
+            <exclude>src/test/resources/application_1440536969523_0001.har/_masterindex</exclude>
+            <exclude>src/test/resources/application_1440536969523_0001.har/_SUCCESS</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+      <plugin>
         <groupId>org.apache.hadoop</groupId>
         <artifactId>hadoop-maven-plugins</artifactId>
         <executions>

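For context: the apache-rat-plugin enforces Apache license headers at build time, so the new HAR test fixtures (a binary part-0 plus index files with a fixed on-disk format) have to be excluded here, or the rat check would fail the build.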
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
index 7d20cf2..a353811 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestLogsCLI.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.client.cli;
 
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.doReturn;
@@ -32,6 +33,7 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.io.PrintWriter;
 import java.io.Writer;
+import java.net.URL;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -318,6 +320,54 @@ public class TestLogsCLI {
     fs.delete(new Path(rootLogDir), true);
   }
 
+  @Test (timeout = 15000)
+  public void testFetchApplicationLogsHar() throws Exception {
+  public void testFetchApplicationLogsHar() throws Exception {
+    String remoteLogRootDir = "target/logs/";
+    Configuration configuration = new Configuration();
+    configuration.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
+    configuration
+        .set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogRootDir);
+    configuration.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
+    configuration.set(YarnConfiguration.YARN_ADMIN_ACL, "admin");
+    FileSystem fs = FileSystem.get(configuration);
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    URL harUrl = ClassLoader.getSystemClassLoader()
+        .getResource("application_1440536969523_0001.har");
+    assertNotNull(harUrl);
+    Path path =
+        new Path(remoteLogRootDir + ugi.getShortUserName()
+            + "/logs/application_1440536969523_0001");
+    if (fs.exists(path)) {
+      fs.delete(path, true);
+    }
+    assertTrue(fs.mkdirs(path));
+    Path harPath = new Path(path, "application_1440536969523_0001.har");
+    fs.copyFromLocalFile(false, new Path(harUrl.toURI()), harPath);
+    assertTrue(fs.exists(harPath));
+
+    YarnClient mockYarnClient =
+        createMockYarnClient(YarnApplicationState.FINISHED);
+    LogsCLI cli = new LogsCLIForTest(mockYarnClient);
+    cli.setConf(configuration);
+    int exitCode = cli.run(new String[]{"-applicationId",
+        "application_1440536969523_0001"});
+    assertTrue(exitCode == 0);
+    String out = sysOutStream.toString();
+    assertTrue(
+        out.contains("container_1440536969523_0001_01_000001 on host1_1111"));
+    assertTrue(out.contains("Hello stderr"));
+    assertTrue(out.contains("Hello stdout"));
+    assertTrue(out.contains("Hello syslog"));
+    assertTrue(
+        out.contains("container_1440536969523_0001_01_000002 on host2_2222"));
+    assertTrue(out.contains("Goodbye stderr"));
+    assertTrue(out.contains("Goodbye stdout"));
+    assertTrue(out.contains("Goodbye syslog"));
+    sysOutStream.reset();
+
+    fs.delete(new Path(remoteLogRootDir), true);
+  }
+
   private static void createContainerLogInLocalDir(Path appLogsDir,
       ContainerId containerId, FileSystem fs, List<String> logTypes) throws Exception {
     Path containerLogsDir = new Path(appLogsDir, containerId.toString());

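The test above drives the same code path as the command-line invocation (shown for orientation; the expected output strings come from the logs packed into the fixture HAR):

  yarn logs -applicationId application_1440536969523_0001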
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/_SUCCESS
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/_SUCCESS b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/_SUCCESS
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/_index
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/_index b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/_index
new file mode 100644
index 0000000..92ee728
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/_index
@@ -0,0 +1,3 @@
+%2F dir 1440540845855+504+rkanter+supergroup 0 0 host1_1111 host2_2222 
+%2Fhost1_1111 file part-0 0 394 1440540845834+420+rkanter+supergroup 
+%2Fhost2_2222 file part-0 394 400 1440540845854+420+rkanter+supergroup 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/_masterindex
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/_masterindex b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/_masterindex
new file mode 100644
index 0000000..086d2b8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/_masterindex
@@ -0,0 +1,2 @@
+3 
+0 1520266628 0 214 

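For readers unfamiliar with the fixture layout: a HAR archive is a directory holding _index, _masterindex, and part-N data files. Judging from the _index above, each entry carries a URL-encoded path, an entry type (dir or file), and, for files, the part file name plus the byte offset and length of that file's data, followed by mtime+permissions+owner+group metadata. So host1_1111's aggregated log occupies bytes 0-393 of part-0 and host2_2222's occupies bytes 394-793.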
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/part-0
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/part-0 b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/part-0
new file mode 100644
index 0000000..fef262a
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/resources/application_1440536969523_0001.har/part-0 differ

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
index 3b47cdd..d4913b2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
@@ -215,6 +215,10 @@
             <exclude>src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css</exclude>
             <exclude>src/main/resources/webapps/static/dt-1.9.4/images/Sorting icons.psd</exclude>
             <exclude>src/main/resources/webapps/static/jquery/themes-1.9.1/base/jquery-ui.css</exclude>
+            <exclude>src/test/resources/application_1440536969523_0001.har/_index</exclude>
+            <exclude>src/test/resources/application_1440536969523_0001.har/part-0</exclude>
+            <exclude>src/test/resources/application_1440536969523_0001.har/_masterindex</exclude>
+            <exclude>src/test/resources/application_1440536969523_0001.har/_SUCCESS</exclude>
           </excludes>
         </configuration>
       </plugin>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
index 39fd95e..fb4d3cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogCLIHelpers.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.HarFs;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -61,8 +62,9 @@ public class LogCLIHelpers implements Configurable {
         YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
         YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
     String suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(getConf());
+    ApplicationId applicationId = ConverterUtils.toApplicationId(appId);
     Path remoteAppLogDir = LogAggregationUtils.getRemoteAppLogDir(
-        remoteRootLogDir, ConverterUtils.toApplicationId(appId), jobOwner,
+        remoteRootLogDir, applicationId, jobOwner,
         suffix);
     RemoteIterator<FileStatus> nodeFiles;
     try {
@@ -80,6 +82,12 @@ public class LogCLIHelpers implements Configurable {
     while (nodeFiles.hasNext()) {
       FileStatus thisNodeFile = nodeFiles.next();
       String fileName = thisNodeFile.getPath().getName();
+      if (fileName.equals(applicationId + ".har")) {
+        Path p = new Path("har:///"
+            + thisNodeFile.getPath().toUri().getRawPath());
+        nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p);
+        continue;
+      }
       if (fileName.contains(LogAggregationUtils.getNodeString(nodeId))
           && !fileName.endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
         AggregatedLogFormat.LogReader reader = null;
@@ -207,6 +215,12 @@ public class LogCLIHelpers implements Configurable {
     boolean foundAnyLogs = false;
     while (nodeFiles.hasNext()) {
       FileStatus thisNodeFile = nodeFiles.next();
+      if (thisNodeFile.getPath().getName().equals(appId + ".har")) {
+        Path p = new Path("har:///"
+            + thisNodeFile.getPath().toUri().getRawPath());
+        nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p);
+        continue;
+      }
       if (!thisNodeFile.getPath().getName()
         .endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
         AggregatedLogFormat.LogReader reader =

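Both hunks above apply the same redirection trick; condensed (with conf being the Configuration already in scope in LogCLIHelpers), the loop body amounts to:

FileStatus thisNodeFile = nodeFiles.next();
if (thisNodeFile.getPath().getName().equals(appId + ".har")) {
  // The aggregated logs were archived into <appId>.har. Re-point the
  // iterator at the archive's contents through the har:// filesystem;
  // subsequent iterations then see the per-node log files inside the HAR
  // and the existing reader logic works on them unchanged.
  Path p = new Path("har:///" + thisNodeFile.getPath().toUri().getRawPath());
  nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p);
  continue;
}
// ... fall through to the normal per-node log handling ...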
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
index 620d097..69fc347 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/log/AggregatedLogsBlock.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.HarFs;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -120,6 +121,12 @@ public class AggregatedLogsBlock extends HtmlBlock {
         AggregatedLogFormat.LogReader reader = null;
         try {
           FileStatus thisNodeFile = nodeFiles.next();
+          if (thisNodeFile.getPath().getName().equals(applicationId + ".har")) {
+            Path p = new Path("har:///"
+                + thisNodeFile.getPath().toUri().getRawPath());
+            nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p);
+            continue;
+          }
           if (!thisNodeFile.getPath().getName()
             .contains(LogAggregationUtils.getNodeString(nodeId))
               || thisNodeFile.getPath().getName()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
index 2a5762c..798406d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogsBlock.java
@@ -23,6 +23,7 @@ import java.io.File;
 import java.io.FileWriter;
 import java.io.PrintWriter;
 import java.io.Writer;
+import java.net.URL;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
@@ -30,6 +31,7 @@ import java.util.Map;
 
 import javax.servlet.http.HttpServletRequest;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -117,7 +119,8 @@ public class TestAggregatedLogsBlock {
   }
 
   /**
-   * All ok and the AggregatedLogsBlockFor should aggregate logs and show it.
+   * Reading from logs should succeed and they should be shown in the
+   * AggregatedLogsBlock html.
    * 
    * @throws Exception
    */
@@ -144,8 +147,56 @@ public class TestAggregatedLogsBlock {
     assertTrue(out.contains("test log1"));
     assertTrue(out.contains("test log2"));
     assertTrue(out.contains("test log3"));
+  }
+
+  /**
+   * Reading from logs should succeed (from a HAR archive) and they should be
+   * shown in the AggregatedLogsBlock html.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testAggregatedLogsBlockHar() throws Exception {
+    FileUtil.fullyDelete(new File("target/logs"));
+    Configuration configuration = getConfiguration();
+
+    URL harUrl = ClassLoader.getSystemClassLoader()
+        .getResource("application_1440536969523_0001.har");
+    assertNotNull(harUrl);
+    String path = "target/logs/admin/logs/application_1440536969523_0001" +
+        "/application_1440536969523_0001.har";
+    FileUtils.copyDirectory(new File(harUrl.getPath()), new File(path));
+
+    AggregatedLogsBlockForTest aggregatedBlock = getAggregatedLogsBlockForTest(
+        configuration, "admin",
+        "container_1440536969523_0001_01_000001", "host1:1111");
+    ByteArrayOutputStream data = new ByteArrayOutputStream();
+    PrintWriter printWriter = new PrintWriter(data);
+    HtmlBlock html = new HtmlBlockForTest();
+    HtmlBlock.Block block = new BlockForTest(html, printWriter, 10, false);
+    aggregatedBlock.render(block);
 
+    block.getWriter().flush();
+    String out = data.toString();
+    assertTrue(out.contains("Hello stderr"));
+    assertTrue(out.contains("Hello stdout"));
+    assertTrue(out.contains("Hello syslog"));
+
+    aggregatedBlock = getAggregatedLogsBlockForTest(
+        configuration, "admin",
+        "container_1440536969523_0001_01_000002", "host2:2222");
+    data = new ByteArrayOutputStream();
+    printWriter = new PrintWriter(data);
+    html = new HtmlBlockForTest();
+    block = new BlockForTest(html, printWriter, 10, false);
+    aggregatedBlock.render(block);
+    block.getWriter().flush();
+    out = data.toString();
+    assertTrue(out.contains("Goodbye stderr"));
+    assertTrue(out.contains("Goodbye stdout"));
+    assertTrue(out.contains("Goodbye syslog"));
   }
+
   /**
    * Log files were deleted.
    * @throws Exception
@@ -188,14 +239,20 @@ public class TestAggregatedLogsBlock {
 
   private AggregatedLogsBlockForTest getAggregatedLogsBlockForTest(
       Configuration configuration, String user, String containerId) {
+    return getAggregatedLogsBlockForTest(configuration, user, containerId,
+        "localhost:1234");
+  }
+
+  private AggregatedLogsBlockForTest getAggregatedLogsBlockForTest(
+      Configuration configuration, String user, String containerId,
+      String nodeName) {
     HttpServletRequest request = mock(HttpServletRequest.class);
     when(request.getRemoteUser()).thenReturn(user);
     AggregatedLogsBlockForTest aggregatedBlock = new AggregatedLogsBlockForTest(
         configuration);
     aggregatedBlock.setRequest(request);
     aggregatedBlock.moreParams().put(YarnWebParams.CONTAINER_ID, containerId);
-    aggregatedBlock.moreParams().put(YarnWebParams.NM_NODENAME,
-        "localhost:1234");
+    aggregatedBlock.moreParams().put(YarnWebParams.NM_NODENAME, nodeName);
     aggregatedBlock.moreParams().put(YarnWebParams.APP_OWNER, user);
     aggregatedBlock.moreParams().put("start", "");
     aggregatedBlock.moreParams().put("end", "");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/_SUCCESS
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/_SUCCESS b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/_SUCCESS
new file mode 100644
index 0000000..e69de29

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/_index
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/_index b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/_index
new file mode 100644
index 0000000..92ee728
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/_index
@@ -0,0 +1,3 @@
+%2F dir 1440540845855+504+rkanter+supergroup 0 0 host1_1111 host2_2222 
+%2Fhost1_1111 file part-0 0 394 1440540845834+420+rkanter+supergroup 
+%2Fhost2_2222 file part-0 394 400 1440540845854+420+rkanter+supergroup 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/_masterindex
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/_masterindex b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/_masterindex
new file mode 100644
index 0000000..086d2b8
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/_masterindex
@@ -0,0 +1,2 @@
+3 
+0 1520266628 0 214 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6dd6ca44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/part-0
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/part-0 b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/part-0
new file mode 100644
index 0000000..fef262a
Binary files /dev/null and b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/resources/application_1440536969523_0001.har/part-0 differ


[03/50] [abbrv] hadoop git commit: HADOOP-12388. Fix components' version information in the web page About the Cluster. Contributed by Jun Gong.

Posted by ec...@apache.org.
HADOOP-12388. Fix components' version information in the web page About the Cluster. Contributed by Jun Gong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9c1fab2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9c1fab2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9c1fab2

Branch: refs/heads/HADOOP-11890
Commit: d9c1fab2ec2930a011b7cca4a393881d39b8f6ec
Parents: 16b9037
Author: Zhihai Xu <zx...@apache.org>
Authored: Tue Sep 8 13:06:36 2015 -0700
Committer: Zhihai Xu <zx...@apache.org>
Committed: Tue Sep 8 13:06:36 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../src/main/java/org/apache/hadoop/util/VersionInfo.java         | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9c1fab2/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 573b2de..95e58af 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1100,6 +1100,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-10318. Incorrect reference to nodeFile in RumenToSLSConverter
     error message. (Wei Yan via ozawa)
 
+    HADOOP-12388. Fix components' version information in the web page
+    'About the Cluster'. (Jun Gong via zxu)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9c1fab2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
index 1d96d99..324c57f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
@@ -86,7 +86,7 @@ public class VersionInfo {
   }
 
   protected String _getBuildVersion(){
-    return getVersion() +
+    return _getVersion() +
       " from " + _getRevision() +
       " by " + _getUser() +
       " source checksum " + _getSrcChecksum();

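Why swapping getVersion() for _getVersion() fixes the page: as I read VersionInfo, the static accessors always answer for a shared hadoop-common instance, while the underscore-prefixed instance methods read the calling component's own version properties. A stand-alone analogue of the bug (simplified names, not Hadoop code):

class Version {
  // Shared instance backing the static accessor, as in hadoop-common.
  private static final Version COMMON = new Version("2.8.0-common");
  private final String version;

  Version(String version) { this.version = version; }

  String _getVersion() { return version; }

  // Static accessor: always reports the shared "common" version.
  static String getVersion() { return COMMON._getVersion(); }

  String _getBuildVersion() {
    // Buggy form: "return getVersion() + ..." ignores `this`, so a
    // YARN/HDFS-style subcomponent instance reports common's version.
    return _getVersion() + " (build)";
  }
}

public class VersionDemo {
  public static void main(String[] args) {
    System.out.println(new Version("2.8.0-yarn")._getBuildVersion());
    // prints "2.8.0-yarn (build)"; with the buggy form it would print
    // "2.8.0-common (build)"
  }
}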

[48/50] [abbrv] hadoop git commit: YARN-3717. Expose app/am/queue's node-label-expression to RM web UI / CLI / REST-API. (Naganarasimha G R via wangda)

Posted by ec...@apache.org.
YARN-3717. Expose app/am/queue's node-label-expression to RM web UI / CLI / REST-API. (Naganarasimha G R via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae5308fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae5308fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae5308fe

Branch: refs/heads/HADOOP-11890
Commit: ae5308fe1d08479da0f3929cc6a57816411e9121
Parents: b2017d9
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Sep 15 11:40:50 2015 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Tue Sep 15 11:40:50 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../yarn/api/records/ApplicationReport.java     | 29 +++++++++++++-
 .../hadoop/yarn/api/records/NodeLabel.java      | 16 ++++++++
 .../src/main/proto/yarn_protos.proto            |  2 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  6 ++-
 .../apache/hadoop/yarn/client/cli/QueueCLI.java | 11 +++---
 .../hadoop/yarn/client/cli/TestYarnCLI.java     |  8 +++-
 .../impl/pb/ApplicationReportPBImpl.java        | 38 +++++++++++++++++++
 .../hadoop/yarn/api/TestApplicatonReport.java   |  2 +-
 ...pplicationHistoryManagerOnTimelineStore.java | 29 +++++++++++---
 .../metrics/ApplicationMetricsConstants.java    |  6 +++
 .../hadoop/yarn/server/webapp/AppBlock.java     |  6 +++
 .../hadoop/yarn/server/webapp/dao/AppInfo.java  | 12 ++++++
 .../metrics/ApplicationCreatedEvent.java        | 16 +++++++-
 .../metrics/SystemMetricsPublisher.java         | 14 ++++++-
 .../server/resourcemanager/rmapp/RMApp.java     |  6 +++
 .../server/resourcemanager/rmapp/RMAppImpl.java | 27 +++++++++++++
 .../webapp/CapacitySchedulerPage.java           | 15 +++++---
 .../resourcemanager/webapp/NodeLabelsPage.java  |  6 +--
 .../resourcemanager/webapp/dao/AppInfo.java     | 23 +++++++++--
 .../dao/CapacitySchedulerLeafQueueInfo.java     |  8 +++-
 .../resourcemanager/TestClientRMService.java    | 40 +++++++++++++++++---
 .../applicationsmanager/MockAsm.java            | 10 +++++
 .../metrics/TestSystemMetricsPublisher.java     | 20 +++++++++-
 .../server/resourcemanager/rmapp/MockRMApp.java | 13 +++++++
 .../rmapp/TestRMAppTransitions.java             |  7 ++--
 .../webapp/TestRMWebServicesApps.java           | 32 ++++++++++++++--
 .../src/site/markdown/ResourceManagerRest.md    | 32 +++++++++++-----
 .../src/site/markdown/TimelineServer.md         | 38 +++++++++++++++----
 29 files changed, 412 insertions(+), 63 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c2bee70..b4c5c5e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -436,6 +436,9 @@ Release 2.8.0 - UNRELEASED
 
     YARN-2005. Blacklisting support for scheduling AMs. (Anubhav Dhoot via kasha)
 
+    YARN-3717. Expose app/am/queue's node-label-expression to RM web UI / 
+    CLI / REST-API. (Naganarasimha G R via wangda)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
index 5de7858..33116a4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
@@ -91,8 +91,9 @@ public abstract class ApplicationReport {
       YarnApplicationState state, String diagnostics, String url,
       long startTime, long finishTime, FinalApplicationStatus finalStatus,
       ApplicationResourceUsageReport appResources, String origTrackingUrl,
-      float progress, String applicationType, Token amRmToken,
-      Set<String> tags, boolean unmanagedApplication, Priority priority) {
+      float progress, String applicationType, Token amRmToken, Set<String> tags,
+      boolean unmanagedApplication, Priority priority,
+      String appNodeLabelExpression, String amNodeLabelExpression) {
     ApplicationReport report =
         newInstance(applicationId, applicationAttemptId, user, queue, name,
           host, rpcPort, clientToAMToken, state, diagnostics, url, startTime,
@@ -101,6 +102,8 @@ public abstract class ApplicationReport {
     report.setApplicationTags(tags);
     report.setUnmanagedApp(unmanagedApplication);
     report.setPriority(priority);
+    report.setAppNodeLabelExpression(appNodeLabelExpression);
+    report.setAmNodeLabelExpression(amNodeLabelExpression);
     return report;
   }
 
@@ -422,4 +425,26 @@ public abstract class ApplicationReport {
   @Private
   @Unstable
   public abstract void setPriority(Priority priority);
+
+  /**
+   * Get the default Node Label expression for all the application's containers.
+   *
+   * @return Application's NodeLabelExpression
+   */
+  @Unstable
+  public abstract String getAppNodeLabelExpression();
+
+  @Unstable
+  public abstract void setAppNodeLabelExpression(String appNodeLabelExpression);
+
+  /**
+   * Get the Node Label expression for the application's AM container.
+   *
+   * @return AM container's NodeLabelExpression
+   */
+  @Unstable
+  public abstract String getAmNodeLabelExpression();
+
+  @Unstable
+  public abstract void setAmNodeLabelExpression(String amNodeLabelExpression);
 }

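A hypothetical consumer of the two new accessors (a sketch, not from the patch; assumes appId is an ApplicationId in scope, plus the usual imports from org.apache.hadoop.yarn.client.api and org.apache.hadoop.yarn.conf):

YarnClient client = YarnClient.createYarnClient();
client.init(new YarnConfiguration());
client.start();
try {
  ApplicationReport report = client.getApplicationReport(appId);
  // Both accessors return null when the expressions were never set.
  System.out.println("App node labels: " + report.getAppNodeLabelExpression());
  System.out.println("AM node labels : " + report.getAmNodeLabelExpression());
} finally {
  client.stop();
}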
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeLabel.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeLabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeLabel.java
index aba6962..af914d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeLabel.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeLabel.java
@@ -27,6 +27,22 @@ import org.apache.hadoop.yarn.util.Records;
 @Public
 @Unstable
 public abstract class NodeLabel implements Comparable<NodeLabel> {
+
+  /**
+   * Default node label partition.
+   */
+  @Private
+  @Unstable
+  public static final String DEFAULT_NODE_LABEL_PARTITION =
+      "<DEFAULT_PARTITION>";
+
+  /**
+   * Node Label expression not set.
+   */
+  @Private
+  @Unstable
+  public static final String NODE_LABEL_EXPRESSION_NOT_SET = "<Not set>";
+
   /**
    * By default, node label is exclusive or not
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
index 1bd3dda..0bccfc4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
@@ -197,6 +197,8 @@ message ApplicationReportProto {
   optional LogAggregationStatusProto log_aggregation_status = 21;
   optional bool unmanaged_application = 22 [default = false];
   optional PriorityProto priority = 23;
+  optional string appNodeLabelExpression = 24;
+  optional string amNodeLabelExpression = 25;
 }
 
 enum LogAggregationStatusProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
index 9c87eae..55692f1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
@@ -554,7 +554,11 @@ public class ApplicationCLI extends YarnCLI {
       appReportStr.print("\tDiagnostics : ");
       appReportStr.println(appReport.getDiagnostics());
       appReportStr.print("\tUnmanaged Application : ");
-      appReportStr.print(appReport.isUnmanagedApp());
+      appReportStr.println(appReport.isUnmanagedApp());
+      appReportStr.print("\tApplication Node Label Expression : ");
+      appReportStr.println(appReport.getAppNodeLabelExpression());
+      appReportStr.print("\tAM container Node Label Expression : ");
+      appReportStr.print(appReport.getAmNodeLabelExpression());
     } else {
       appReportStr.print("Application with id '" + applicationId
           + "' doesn't exist in RM.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
index 8a5521d..b5db536 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java
@@ -32,6 +32,7 @@ import org.apache.commons.cli.Options;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
@@ -135,11 +136,11 @@ public class QueueCLI extends YarnCLI {
     writer.print("\tMaximum Capacity : ");
     writer.println(df.format(queueInfo.getMaximumCapacity() * 100) + "%");
     writer.print("\tDefault Node Label expression : ");
-    if (null != queueInfo.getDefaultNodeLabelExpression()) {
-      writer.println(queueInfo.getDefaultNodeLabelExpression());
-    } else {
-      writer.println();
-    }
+    String nodeLabelExpression = queueInfo.getDefaultNodeLabelExpression();
+    nodeLabelExpression =
+        (nodeLabelExpression == null || nodeLabelExpression.trim().isEmpty())
+            ? NodeLabel.DEFAULT_NODE_LABEL_PARTITION : nodeLabelExpression;
+    writer.println(nodeLabelExpression);
 
     Set<String> nodeLabels = queueInfo.getAccessibleNodeLabels();
     StringBuilder labelList = new StringBuilder();

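Pieced together from the expected strings in the TestYarnCLI hunk below, the queue-status output now renders along these lines (illustrative values):

    Capacity : 40.0%
    Current Capacity : 50.0%
    Maximum Capacity : 80.0%
    Default Node Label expression : <DEFAULT_PARTITION>
    Accessible Node Labels :

i.e. a null or blank default expression is shown as the <DEFAULT_PARTITION> placeholder rather than an empty field.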
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
index f942a4d..de50467 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Priority;
@@ -104,7 +105,7 @@ public class TestYarnCLI {
           "user", "queue", "appname", "host", 124, null,
           YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
           FinalApplicationStatus.SUCCEEDED, usageReport, "N/A", 0.53789f, "YARN",
-          null, null, false, Priority.newInstance(0));
+          null, null, false, Priority.newInstance(0), "high-mem", "high-mem");
       newApplicationReport.setLogAggregationStatus(LogAggregationStatus.SUCCEEDED);
       newApplicationReport.setPriority(Priority.newInstance(0));
       when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
@@ -134,6 +135,8 @@ public class TestYarnCLI {
       pw.println("\tLog Aggregation Status : SUCCEEDED");
       pw.println("\tDiagnostics : diagnostics");
       pw.println("\tUnmanaged Application : false");
+      pw.println("\tApplication Node Label Expression : high-mem");
+      pw.println("\tAM container Node Label Expression : high-mem");
       pw.close();
       String appReportStr = baos.toString("UTF-8");
       Assert.assertEquals(appReportStr, sysOutStream.toString());
@@ -1311,7 +1314,8 @@ public class TestYarnCLI {
     pw.println("\tCapacity : " + "40.0%");
     pw.println("\tCurrent Capacity : " + "50.0%");
     pw.println("\tMaximum Capacity : " + "80.0%");
-    pw.println("\tDefault Node Label expression : ");
+    pw.println("\tDefault Node Label expression : "
+        + NodeLabel.DEFAULT_NODE_LABEL_PARTITION);
     pw.println("\tAccessible Node Labels : ");
     pw.close();
     String queueInfoStr = baos.toString("UTF-8");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
index 2e50e0d..1072815 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
@@ -630,4 +630,42 @@ public class ApplicationReportPBImpl extends ApplicationReport {
       builder.clearPriority();
     this.priority = priority;
   }
+
+  @Override
+  public String getAppNodeLabelExpression() {
+    ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasAppNodeLabelExpression()) {
+      return null;
+    }
+    return p.getAppNodeLabelExpression();
+  }
+
+  @Override
+  public void setAppNodeLabelExpression(String appNodeLabelExpression) {
+    maybeInitBuilder();
+    if (appNodeLabelExpression == null) {
+      builder.clearAppNodeLabelExpression();
+      return;
+    }
+    builder.setAppNodeLabelExpression((appNodeLabelExpression));
+  }
+
+  @Override
+  public String getAmNodeLabelExpression() {
+    ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+    if (!p.hasAmNodeLabelExpression()) {
+      return null;
+    }
+    return p.getAmNodeLabelExpression();
+  }
+
+  @Override
+  public void setAmNodeLabelExpression(String amNodeLabelExpression) {
+    maybeInitBuilder();
+    if (amNodeLabelExpression == null) {
+      builder.clearAmNodeLabelExpression();
+      return;
+    }
+    builder.setAmNodeLabelExpression((amNodeLabelExpression));
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
index d0d1d40..46fc4d5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
@@ -60,7 +60,7 @@ public class TestApplicatonReport {
           "appname", "host", 124, null, YarnApplicationState.FINISHED,
           "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null,
           "N/A", 0.53789f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null,
-          null, false, Priority.newInstance(0));
+          null, false, Priority.newInstance(0), "", "");
     return appReport;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index f02e83c..7dac716 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -260,6 +260,8 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
     Set<String> appTags = null;
     Map<ApplicationAccessType, String> appViewACLs =
         new HashMap<ApplicationAccessType, String>();
+    String appNodeLabelExpression = null;
+    String amNodeLabelExpression = null;
     Map<String, Object> entityInfo = entity.getOtherInfo();
     if (entityInfo != null) {
       if (entityInfo.containsKey(ApplicationMetricsConstants.USER_ENTITY_INFO)) {
@@ -280,8 +282,8 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
             latestApplicationAttemptId, user, queue, name, null, -1, null,
             state, diagnosticsInfo, null, createdTime, finishedTime,
             finalStatus, null, null, progress, type, null, appTags,
-            unmanagedApplication, Priority.newInstance(applicationPriority)),
-            appViewACLs);
+            unmanagedApplication, Priority.newInstance(applicationPriority),
+            appNodeLabelExpression, amNodeLabelExpression), appViewACLs);
       }
       if (entityInfo.containsKey(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)) {
         queue =
@@ -298,6 +300,11 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
             entityInfo.get(ApplicationMetricsConstants.TYPE_ENTITY_INFO)
                 .toString();
       }
+      if (entityInfo.containsKey(ApplicationMetricsConstants.TYPE_ENTITY_INFO)) {
+        type =
+            entityInfo.get(ApplicationMetricsConstants.TYPE_ENTITY_INFO)
+                .toString();
+      }
       if (entityInfo
           .containsKey(ApplicationMetricsConstants.UNMANAGED_APPLICATION_ENTITY_INFO)) {
         unmanagedApplication =
@@ -310,6 +317,18 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
         applicationPriority = Integer.parseInt(entityInfo.get(
             ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO).toString());
       }
+      if (entityInfo
+          .containsKey(ApplicationMetricsConstants.APP_NODE_LABEL_EXPRESSION)) {
+        appNodeLabelExpression = entityInfo
+            .get(ApplicationMetricsConstants.APP_NODE_LABEL_EXPRESSION).toString();
+      }
+      if (entityInfo
+          .containsKey(ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION)) {
+        amNodeLabelExpression =
+            entityInfo.get(ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION)
+                .toString();
+      }
+
       if (entityInfo.containsKey(ApplicationMetricsConstants.APP_CPU_METRICS)) {
         long vcoreSeconds=Long.parseLong(entityInfo.get(
                 ApplicationMetricsConstants.APP_CPU_METRICS).toString());
@@ -381,9 +400,9 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
         ConverterUtils.toApplicationId(entity.getEntityId()),
         latestApplicationAttemptId, user, queue, name, null, -1, null, state,
         diagnosticsInfo, null, createdTime, finishedTime, finalStatus,
-        appResources, null, progress, type, null, appTags,
-        unmanagedApplication, Priority.newInstance(applicationPriority)),
-        appViewACLs);
+        appResources, null, progress, type, null, appTags, unmanagedApplication,
+        Priority.newInstance(applicationPriority), appNodeLabelExpression,
+        amNodeLabelExpression), appViewACLs);
   }
 
   private static ApplicationAttemptReport convertToApplicationAttemptReport(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
index 481b84c..3cbcc1e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java
@@ -80,4 +80,10 @@ public class ApplicationMetricsConstants {
 
   public static final String APPLICATION_PRIORITY_INFO =
       "YARN_APPLICATION_PRIORITY";
+
+  public static final String APP_NODE_LABEL_EXPRESSION =
+      "YARN_APP_NODE_LABEL_EXPRESSION";
+
+  public static final String AM_NODE_LABEL_EXPRESSION =
+      "YARN_AM_NODE_LABEL_EXPRESSION";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
index 31a2c8a..44ed223 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
@@ -208,6 +208,12 @@ public class AppBlock extends HtmlBlock {
     overviewTable._("Diagnostics:",
         app.getDiagnosticsInfo() == null ? "" : app.getDiagnosticsInfo());
     overviewTable._("Unmanaged Application:", app.isUnmanagedApp());
+    overviewTable._("Application Node Label expression:",
+        app.getAppNodeLabelExpression() == null ? "<Not set>"
+            : app.getAppNodeLabelExpression());
+    overviewTable._("AM container Node Label expression:",
+        app.getAmNodeLabelExpression() == null ? "<Not set>"
+            : app.getAmNodeLabelExpression());
 
     Collection<ApplicationAttemptReport> attempts;
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
index 7efbcb9..cad3b2e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
@@ -62,6 +62,8 @@ public class AppInfo {
   private int allocatedCpuVcores;
   private int allocatedMemoryMB;
   protected boolean unmanagedApplication;
+  private String appNodeLabelExpression;
+  private String amNodeLabelExpression;
 
   public AppInfo() {
     // JAXB needs this
@@ -106,6 +108,8 @@ public class AppInfo {
       this.applicationTags = CSV_JOINER.join(app.getApplicationTags());
     }
     unmanagedApplication = app.isUnmanagedApp();
+    appNodeLabelExpression = app.getAppNodeLabelExpression();
+    amNodeLabelExpression = app.getAmNodeLabelExpression();
   }
 
   public String getAppId() {
@@ -203,4 +207,12 @@ public class AppInfo {
   public int getPriority() {
     return priority;
   }
+
+  public String getAppNodeLabelExpression() {
+    return appNodeLabelExpression;
+  }
+
+  public String getAmNodeLabelExpression() {
+    return amNodeLabelExpression;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java
index 3436413..a684dfc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/ApplicationCreatedEvent.java
@@ -35,6 +35,8 @@ public class ApplicationCreatedEvent extends
   private Set<String> appTags;
   private boolean unmanagedApplication;
   private Priority applicationPriority;
+  private String appNodeLabelsExpression;
+  private String amNodeLabelsExpression;
 
   public ApplicationCreatedEvent(ApplicationId appId,
       String name,
@@ -45,7 +47,9 @@ public class ApplicationCreatedEvent extends
       long createdTime,
       Set<String> appTags,
       boolean unmanagedApplication,
-      Priority applicationPriority) {
+      Priority applicationPriority,
+      String appNodeLabelsExpression,
+      String amNodeLabelsExpression) {
     super(SystemMetricsEventType.APP_CREATED, createdTime);
     this.appId = appId;
     this.name = name;
@@ -56,6 +60,8 @@ public class ApplicationCreatedEvent extends
     this.appTags = appTags;
     this.unmanagedApplication = unmanagedApplication;
     this.applicationPriority = applicationPriority;
+    this.appNodeLabelsExpression = appNodeLabelsExpression;
+    this.amNodeLabelsExpression = amNodeLabelsExpression;
   }
 
   @Override
@@ -98,4 +104,12 @@ public class ApplicationCreatedEvent extends
   public Priority getApplicationPriority() {
     return applicationPriority;
   }
+
+  public String getAppNodeLabelsExpression() {
+    return appNodeLabelsExpression;
+  }
+
+  public String getAmNodeLabelsExpression() {
+    return amNodeLabelsExpression;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
index 3d7ac9f..0852ff4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
@@ -45,6 +46,7 @@ import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
 import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants;
 import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -98,6 +100,8 @@ public class SystemMetricsPublisher extends CompositeService {
   @SuppressWarnings("unchecked")
   public void appCreated(RMApp app, long createdTime) {
     if (publishSystemMetrics) {
+      ApplicationSubmissionContext appSubmissionContext =
+          app.getApplicationSubmissionContext();
       dispatcher.getEventHandler().handle(
           new ApplicationCreatedEvent(
               app.getApplicationId(),
@@ -107,8 +111,10 @@ public class SystemMetricsPublisher extends CompositeService {
               app.getQueue(),
               app.getSubmitTime(),
               createdTime, app.getApplicationTags(),
-              app.getApplicationSubmissionContext().getUnmanagedAM(),
-              app.getApplicationSubmissionContext().getPriority()));
+              appSubmissionContext.getUnmanagedAM(),
+              appSubmissionContext.getPriority(),
+              app.getAppNodeLabelExpression(),
+              app.getAmNodeLabelExpression()));
     }
   }
 
@@ -260,6 +266,10 @@ public class SystemMetricsPublisher extends CompositeService {
         event.isUnmanagedApp());
     entityInfo.put(ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO,
         event.getApplicationPriority().getPriority());
+    entityInfo.put(ApplicationMetricsConstants.APP_NODE_LABEL_EXPRESSION,
+        event.getAppNodeLabelsExpression());
+    entityInfo.put(ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION,
+        event.getAmNodeLabelsExpression());
     entity.setOtherInfo(entityInfo);
     TimelineEvent tEvent = new TimelineEvent();
     tEvent.setEventType(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
index be9dfaf..720d863 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java
@@ -248,4 +248,13 @@ public interface RMApp extends EventHandler<RMAppEvent> {
   Map<NodeId, LogAggregationReport> getLogAggregationReportsForApp();
 
   LogAggregationStatus getLogAggregationStatusForAppReport();
+  /**
+   * Return the node label expression of the AM container.
+   */
+  String getAmNodeLabelExpression();
+
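+  /**
+   * Return the node label expression of the application.
+   */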
+  String getAppNodeLabelExpression();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 7cf39b8..ea9aa70 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -683,6 +684,8 @@ public class RMAppImpl implements RMApp, Recoverable {
           this.submissionContext.getPriority());
       report.setLogAggregationStatus(logAggregationStatus);
       report.setUnmanagedApp(submissionContext.getUnmanagedAM());
+      report.setAppNodeLabelExpression(getAppNodeLabelExpression());
+      report.setAmNodeLabelExpression(getAmNodeLabelExpression());
       return report;
     } finally {
       this.readLock.unlock();
@@ -1700,4 +1703,28 @@ public class RMAppImpl implements RMApp, Recoverable {
       this.readLock.unlock();
     }
   }
+
+  @Override
+  public String getAppNodeLabelExpression() {
+    String appNodeLabelExpression =
+        getApplicationSubmissionContext().getNodeLabelExpression();
+    appNodeLabelExpression = (appNodeLabelExpression == null)
+        ? NodeLabel.NODE_LABEL_EXPRESSION_NOT_SET : appNodeLabelExpression;
+    appNodeLabelExpression = (appNodeLabelExpression.trim().isEmpty())
+        ? NodeLabel.DEFAULT_NODE_LABEL_PARTITION : appNodeLabelExpression;
+    return appNodeLabelExpression;
+  }
+
+  @Override
+  public String getAmNodeLabelExpression() {
+    String amNodeLabelExpression = null;
+    if (!getApplicationSubmissionContext().getUnmanagedAM()) {
+      amNodeLabelExpression = getAMResourceRequest().getNodeLabelExpression();
+      amNodeLabelExpression = (amNodeLabelExpression == null)
+          ? NodeLabel.NODE_LABEL_EXPRESSION_NOT_SET : amNodeLabelExpression;
+      amNodeLabelExpression = (amNodeLabelExpression.trim().isEmpty())
+          ? NodeLabel.DEFAULT_NODE_LABEL_PARTITION : amNodeLabelExpression;
+    }
+    return amNodeLabelExpression;
+  }
 }
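
The two getters above share one normalization rule: a null expression maps to
NodeLabel.NODE_LABEL_EXPRESSION_NOT_SET and a blank one to
NodeLabel.DEFAULT_NODE_LABEL_PARTITION (an unmanaged AM simply reports null).
A minimal standalone sketch of that rule; the two constant values are
assumptions, mirrored from the strings this patch's tests and web UI display:

    public final class NodeLabelNormalization {
      // Stand-ins for NodeLabel.NODE_LABEL_EXPRESSION_NOT_SET and
      // NodeLabel.DEFAULT_NODE_LABEL_PARTITION; values assumed from the
      // "<Not set>" / "<DEFAULT_PARTITION>" strings used elsewhere in the patch.
      static final String NOT_SET = "<Not set>";
      static final String DEFAULT_PARTITION = "<DEFAULT_PARTITION>";

      // Mirrors the null/blank handling in RMAppImpl#getAppNodeLabelExpression.
      static String normalize(String expression) {
        if (expression == null) {
          return NOT_SET;
        }
        return expression.trim().isEmpty() ? DEFAULT_PARTITION : expression;
      }

      public static void main(String[] args) {
        System.out.println(normalize(null));        // <Not set>
        System.out.println(normalize("  "));        // <DEFAULT_PARTITION>
        System.out.println(normalize("high-mem"));  // high-mem
      }
    }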

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index d8971b7..9e27627 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -27,6 +27,7 @@ import java.util.Map;
 
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
@@ -91,7 +92,8 @@ class CapacitySchedulerPage extends RmView {
     }
 
     private void renderLeafQueueInfoWithPartition(Block html) {
-      nodeLabel = nodeLabel.length() == 0 ? "<DEFAULT_PARTITION>" : nodeLabel;
+      nodeLabel = nodeLabel.length() == 0
+          ? NodeLabel.DEFAULT_NODE_LABEL_PARTITION : nodeLabel;
       // first display the queue's label specific details :
       ResponseInfo ri =
           info("\'" + lqinfo.getQueuePath().substring(5)
@@ -152,7 +154,11 @@ class CapacitySchedulerPage extends RmView {
           "%.1f", lqinfo.getUserLimitFactor())).
       _("Accessible Node Labels:", StringUtils.join(",", lqinfo.getNodeLabels())).
       _("Ordering Policy: ", lqinfo.getOrderingPolicyInfo()).
-      _("Preemption:", lqinfo.getPreemptionDisabled() ? "disabled" : "enabled");
+      _("Preemption:", lqinfo.getPreemptionDisabled() ? "disabled" : "enabled").
+      _("Default Node Label Expression:",
+              lqinfo.getDefaultNodeLabelExpression() == null
+                  ? NodeLabel.DEFAULT_NODE_LABEL_PARTITION
+                  : lqinfo.getDefaultNodeLabelExpression());
     }
   }
 
@@ -363,9 +369,8 @@ class CapacitySchedulerPage extends RmView {
             csqinfo.csinfo = sinfo;
             csqinfo.qinfo = null;
             csqinfo.label = label.getLabelName();
-            String nodeLabel =
-                csqinfo.label.length() == 0 ? "<DEFAULT_PARTITION>"
-                    : csqinfo.label;
+            String nodeLabel = csqinfo.label.length() == 0
+                ? NodeLabel.DEFAULT_NODE_LABEL_PARTITION : csqinfo.label;
             QueueCapacities queueCapacities = root.getQueueCapacities();
             used = queueCapacities.getUsedCapacity(label.getLabelName());
             String partitionUiTag =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java
index b0b301a..ea85d13 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeLabelsPage.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
 
+import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.nodelabels.RMNodeLabel;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
@@ -58,9 +59,8 @@ public class NodeLabelsPage extends RmView {
       RMNodeLabelsManager nlm = rm.getRMContext().getNodeLabelManager();
       for (RMNodeLabel info : nlm.pullRMNodeLabelsInfo()) {
         TR<TBODY<TABLE<Hamlet>>> row =
-            tbody.tr().td(
-                info.getLabelName().isEmpty() ? "<DEFAULT_PARTITION>" : info
-                    .getLabelName());
+            tbody.tr().td(info.getLabelName().isEmpty()
+                ? NodeLabel.DEFAULT_NODE_LABEL_PARTITION : info.getLabelName());
         String type =
             (info.getIsExclusive()) ? "Exclusive Partition"
                 : "Non Exclusive Partition";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
index 46f0533..7f80315 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java
@@ -26,6 +26,7 @@ import javax.xml.bind.annotation.XmlTransient;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
+import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
@@ -98,6 +99,8 @@ public class AppInfo {
 
   protected LogAggregationStatus logAggregationStatus;
   protected boolean unmanagedApplication;
+  protected String appNodeLabelExpression;
+  protected String amNodeLabelExpression;
 
   public AppInfo() {
   } // JAXB needs this
@@ -132,8 +135,10 @@ public class AppInfo {
       this.name = app.getName().toString();
       this.queue = app.getQueue().toString();
       this.priority = 0;
-      if (app.getApplicationSubmissionContext().getPriority() != null) {
-        this.priority = app.getApplicationSubmissionContext().getPriority()
+      ApplicationSubmissionContext appSubmissionContext =
+          app.getApplicationSubmissionContext();
+      if (appSubmissionContext.getPriority() != null) {
+        this.priority = appSubmissionContext.getPriority()
             .getPriority();
       }
       this.progress = app.getProgress() * 100;
@@ -191,7 +196,11 @@ public class AppInfo {
       memorySeconds = appMetrics.getMemorySeconds();
       vcoreSeconds = appMetrics.getVcoreSeconds();
       unmanagedApplication =
-          app.getApplicationSubmissionContext().getUnmanagedAM();
+          appSubmissionContext.getUnmanagedAM();
+      appNodeLabelExpression =
+          app.getApplicationSubmissionContext().getNodeLabelExpression();
+      amNodeLabelExpression = (unmanagedApplication) ? null
+          : app.getAMResourceRequest().getNodeLabelExpression();
     }
   }
 
@@ -338,4 +347,12 @@ public class AppInfo {
   public int getPriority() {
     return this.priority;
   }
+
+  public String getAppNodeLabelExpression() {
+    return this.appNodeLabelExpression;
+  }
+
+  public String getAmNodeLabelExpression() {
+    return this.amNodeLabelExpression;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
index ae8d747..f31a256 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
@@ -40,7 +40,8 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
   protected ResourceInfo usedAMResource;
   protected ResourceInfo userAMResourceLimit;
   protected boolean preemptionDisabled;
-  
+  protected String defaultNodeLabelExpression;
+
   @XmlTransient
   protected String orderingPolicyInfo;
 
@@ -62,6 +63,7 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
     userAMResourceLimit = new ResourceInfo(q.getUserAMResourceLimit());
     preemptionDisabled = q.getPreemptionDisabled();
     orderingPolicyInfo = q.getOrderingPolicy().getInfo();
+    defaultNodeLabelExpression = q.getDefaultNodeLabelExpression();
   }
 
   public int getNumActiveApplications() {
@@ -116,4 +118,8 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
   public String getOrderingPolicyInfo() {
     return orderingPolicyInfo;
   }
+
+  public String getDefaultNodeLabelExpression() {
+    return defaultNodeLabelExpression;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
index 0be8bc2..39964da 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
@@ -300,6 +300,33 @@ public class TestClientRMService {
           report.getApplicationResourceUsageReport();
       Assert.assertEquals(10, usageReport.getMemorySeconds());
       Assert.assertEquals(3, usageReport.getVcoreSeconds());
+      Assert.assertEquals("<Not set>", report.getAmNodeLabelExpression());
+      Assert.assertEquals("<Not set>", report.getAppNodeLabelExpression());
+
+      // if application has app node label unset and am node label set to blank
+      ApplicationId appId2 = getApplicationId(2);
+      when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(),
+          ApplicationAccessType.VIEW_APP, null, appId2)).thenReturn(true);
+      request.setApplicationId(appId2);
+      response = rmService.getApplicationReport(request);
+      report = response.getApplicationReport();
+
+      Assert.assertEquals(NodeLabel.DEFAULT_NODE_LABEL_PARTITION,
+          report.getAmNodeLabelExpression());
+      Assert.assertEquals(NodeLabel.NODE_LABEL_EXPRESSION_NOT_SET,
+          report.getAppNodeLabelExpression());
+
+      // if application has app and am node labels set to "high-mem"
+      ApplicationId appId3 = getApplicationId(3);
+      when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(),
+          ApplicationAccessType.VIEW_APP, null, appId3)).thenReturn(true);
+
+      request.setApplicationId(appId3);
+      response = rmService.getApplicationReport(request);
+      report = response.getApplicationReport();
+
+      Assert.assertEquals("high-mem", report.getAmNodeLabelExpression());
+      Assert.assertEquals("high-mem", report.getAppNodeLabelExpression());
 
       // if application id is null
       GetApplicationReportRequest invalidRequest = recordFactory
@@ -951,11 +978,11 @@ public class TestClientRMService {
     ApplicationId applicationId3 = getApplicationId(3);
     YarnConfiguration config = new YarnConfiguration();
     apps.put(applicationId1, getRMApp(rmContext, yarnScheduler, applicationId1,
-        config, "testqueue", 10, 3));
+        config, "testqueue", 10, 3,null,null));
     apps.put(applicationId2, getRMApp(rmContext, yarnScheduler, applicationId2,
-        config, "a", 20, 2));
+        config, "a", 20, 2,null,""));
     apps.put(applicationId3, getRMApp(rmContext, yarnScheduler, applicationId3,
-        config, "testqueue", 40, 5));
+        config, "testqueue", 40, 5,"high-mem","high-mem"));
     return apps;
   }
   
@@ -978,10 +1005,11 @@ public class TestClientRMService {
 
   private RMAppImpl getRMApp(RMContext rmContext, YarnScheduler yarnScheduler,
       ApplicationId applicationId3, YarnConfiguration config, String queueName,
-      final long memorySeconds, final long vcoreSeconds) {
+      final long memorySeconds, final long vcoreSeconds,
+      String appNodeLabelExpression, String amNodeLabelExpression) {
     ApplicationSubmissionContext asContext = mock(ApplicationSubmissionContext.class);
     when(asContext.getMaxAppAttempts()).thenReturn(1);
-
+    when(asContext.getNodeLabelExpression()).thenReturn(appNodeLabelExpression);
     RMAppImpl app =
         spy(new RMAppImpl(applicationId3, rmContext, config, null, null,
             queueName, asContext, yarnScheduler, null,
@@ -1002,7 +1030,7 @@ public class TestClientRMService {
                     return report;
                   }
               });
-
+    app.getAMResourceRequest().setNodeLabelExpression(amNodeLabelExpression);
     ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
         ApplicationId.newInstance(123456, 1), 1);
     RMAppAttemptImpl rmAppAttemptImpl = spy(new RMAppAttemptImpl(attemptId,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
index a23c789..87f96eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -202,6 +202,16 @@ public abstract class MockAsm extends MockApps {
     public LogAggregationStatus getLogAggregationStatusForAppReport() {
       throw new UnsupportedOperationException("Not supported yet.");
     }
+
+    @Override
+    public String getAmNodeLabelExpression() {
+      throw new UnsupportedOperationException("Not supported yet.");
+    }
+
+    @Override
+    public String getAppNodeLabelExpression() {
+      throw new UnsupportedOperationException("Not supported yet.");
+    }
   }
 
   public static RMApp newApplication(int i) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
index eb48cc7..0498a4f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
@@ -47,6 +48,7 @@ import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants;
 import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants;
 import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
@@ -146,6 +148,14 @@ public class TestSystemMetricsPublisher {
           entity.getOtherInfo().get(
               ApplicationMetricsConstants.APPLICATION_PRIORITY_INFO));
 
+      Assert.assertEquals(app.getAmNodeLabelExpression(), entity.getOtherInfo()
+          .get(ApplicationMetricsConstants.AM_NODE_LABEL_EXPRESSION));
+
+      Assert.assertEquals(
+          app.getApplicationSubmissionContext().getNodeLabelExpression(),
+          entity.getOtherInfo()
+              .get(ApplicationMetricsConstants.APP_NODE_LABEL_EXPRESSION));
+
       Assert
           .assertEquals(
               app.getUser(),
@@ -351,7 +361,7 @@ public class TestSystemMetricsPublisher {
   }
 
   private static RMApp createRMApp(ApplicationId appId) {
-    RMApp app = mock(RMApp.class);
+    RMApp app = mock(RMAppImpl.class);
     when(app.getApplicationId()).thenReturn(appId);
     when(app.getName()).thenReturn("test app");
     when(app.getApplicationType()).thenReturn("test app type");
@@ -376,8 +386,14 @@ public class TestSystemMetricsPublisher {
     when(app.getApplicationTags()).thenReturn(appTags);
     ApplicationSubmissionContext asc = mock(ApplicationSubmissionContext.class);
     when(asc.getUnmanagedAM()).thenReturn(false);
-    when(asc.getPriority()).thenReturn(Priority.newInstance(0));
+    when(asc.getPriority()).thenReturn(Priority.newInstance(10));
+    when(asc.getNodeLabelExpression()).thenReturn("high-cpu");
     when(app.getApplicationSubmissionContext()).thenReturn(asc);
+    when(app.getAppNodeLabelExpression()).thenCallRealMethod();
+    ResourceRequest amReq = mock(ResourceRequest.class);
+    when(amReq.getNodeLabelExpression()).thenReturn("high-mem");
+    when(app.getAMResourceRequest()).thenReturn(amReq);
+    when(app.getAmNodeLabelExpression()).thenCallRealMethod();
     return app;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
index c6ee3ba..7d4e6fd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
@@ -64,6 +65,8 @@ public class MockRMApp implements RMApp {
     finish = time;
     id = MockApps.newAppID(newid);
     state = newState;
+    amReq = ResourceRequest.newInstance(Priority.UNDEFINED, "0.0.0.0",
+        Resource.newInstance(0, 0), 1);
   }
 
   public MockRMApp(int newid, long time, RMAppState newState, String userName) {
@@ -283,4 +286,14 @@ public class MockRMApp implements RMApp {
   public LogAggregationStatus getLogAggregationStatusForAppReport() {
     return null;
   }
+
+  @Override
+  public String getAmNodeLabelExpression() {
+    return null;
+  }
+
+  @Override
+  public String getAppNodeLabelExpression() {
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
index a5e3308..afe95cf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java
@@ -261,10 +261,9 @@ public class TestRMAppTransitions {
     // but applicationId is still set for safety
     submissionContext.setApplicationId(applicationId);
 
-    RMApp application =
-        new RMAppImpl(applicationId, rmContext, conf, name, user, queue,
-          submissionContext, scheduler, masterService,
-          System.currentTimeMillis(), "YARN", null, null);
+    RMApp application = new RMAppImpl(applicationId, rmContext, conf, name,
+        user, queue, submissionContext, scheduler, masterService,
+        System.currentTimeMillis(), "YARN", null, mock(ResourceRequest.class));
 
     testAppStartState(applicationId, user, name, queue, application);
     this.rmContext.getRMApps().putIfAbsent(application.getApplicationId(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index a784295..47b44d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -1310,14 +1310,29 @@ public class TestRMWebServicesApps extends JerseyTestBase {
           WebServicesTestUtils.getXmlInt(element, "numNonAMContainerPreempted"),
           WebServicesTestUtils.getXmlInt(element, "numAMContainerPreempted"),
           WebServicesTestUtils.getXmlString(element, "logAggregationStatus"),
-          WebServicesTestUtils.getXmlBoolean(element, "unmanagedApplication"));
+          WebServicesTestUtils.getXmlBoolean(element, "unmanagedApplication"),
+          WebServicesTestUtils.getXmlString(element, "appNodeLabelExpression"),
+          WebServicesTestUtils.getXmlString(element, "amNodeLabelExpression"));
     }
   }
 
   public void verifyAppInfo(JSONObject info, RMApp app) throws JSONException,
       Exception {
 
-    assertEquals("incorrect number of elements", 30, info.length());
+    int expectedNumberOfElements = 30;
+    String appNodeLabelExpression = null;
+    String amNodeLabelExpression = null;
+    if (app.getApplicationSubmissionContext()
+        .getNodeLabelExpression() != null) {
+      expectedNumberOfElements++;
+      appNodeLabelExpression = info.getString("appNodeLabelExpression");
+    }
+    if (app.getAMResourceRequest().getNodeLabelExpression() != null) {
+      expectedNumberOfElements++;
+      amNodeLabelExpression = info.getString("amNodeLabelExpression");
+    }
+    assertEquals("incorrect number of elements", expectedNumberOfElements,
+        info.length());
 
     verifyAppInfoGeneric(app, info.getString("id"), info.getString("user"),
         info.getString("name"), info.getString("applicationType"),
@@ -1334,7 +1349,9 @@ public class TestRMWebServicesApps extends JerseyTestBase {
         info.getInt("numNonAMContainerPreempted"),
         info.getInt("numAMContainerPreempted"),
         info.getString("logAggregationStatus"),
-        info.getBoolean("unmanagedApplication"));
+        info.getBoolean("unmanagedApplication"),
+        appNodeLabelExpression,
+        amNodeLabelExpression);
   }
 
   public void verifyAppInfoGeneric(RMApp app, String id, String user,
@@ -1345,7 +1362,8 @@ public class TestRMWebServicesApps extends JerseyTestBase {
       int allocatedMB, int allocatedVCores, int numContainers,
       int preemptedResourceMB, int preemptedResourceVCores,
       int numNonAMContainerPreempted, int numAMContainerPreempted,
-      String logAggregationStatus, boolean unmanagedApplication)
+      String logAggregationStatus, boolean unmanagedApplication,
+      String appNodeLabelExpression, String amNodeLabelExpression)
       throws JSONException,
       Exception {
 
@@ -1400,6 +1418,12 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("unmanagedApplication doesn't match", app
         .getApplicationSubmissionContext().getUnmanagedAM(),
         unmanagedApplication);
+    assertEquals("appNodeLabelExpression doesn't match",
+        app.getApplicationSubmissionContext().getNodeLabelExpression(),
+        appNodeLabelExpression);
+    assertEquals("amNodeLabelExpression doesn't match",
+        app.getAMResourceRequest().getNodeLabelExpression(),
+        amNodeLabelExpression);
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
index f8b8061..53df195 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md
@@ -1379,11 +1379,13 @@ Response Body:
           "allocatedVCores" : 0,
           "runningContainers" : 0,
 	  "applicationType" : "MAPREDUCE",
-	  "applicationTags" : ""
+	  "applicationTags" : "",
           "memorySeconds" : 151730,
           "vcoreSeconds" : 103,
-          "unmanagedApplication":"false"
-          "applicationPriority":0
+          "unmanagedApplication" : "false",
+          "applicationPriority" : 0,
+          "appNodeLabelExpression" : "",
+          "amnodeLabelExpression" : ""
        },
        {
           "finishedTime" : 1326815789546,
@@ -1406,11 +1408,13 @@ Response Body:
           "allocatedVCores" : 0,
           "runningContainers" : 1,
 	  "applicationType" : "YARN",
-	  "applicationTags" : "tag1"
+	  "applicationTags" : "tag1",
           "memorySeconds" : 640064,
-          "vcoreSeconds" : 442
-          "unmanagedApplication":"false"
-          "applicationPriority":0
+          "vcoreSeconds" : 442,
+          "unmanagedApplication" : "false",
+          "applicationPriority" : 0,
+          "appNodeLabelExpression" : "",
+          "amNodeLabelExpression" : ""
        }
     ]
   }
@@ -1462,6 +1466,8 @@ Response Body:
     <vcoreSeconds>103</vcoreSeconds>
     <unmanagedApplication>false</unmanagedApplication>
     <applicationPriority>0</applicationPriority>
+    <appNodeLabelExpression></appNodeLabelExpression>
+    <amNodeLabelExpression></amNodeLabelExpression>
   </app>
   <app>
     <id>application_1326815542473_0002</id>
@@ -1489,6 +1495,8 @@ Response Body:
     <vcoreSeconds>442</vcoreSeconds>
     <unmanagedApplication>false</unmanagedApplication>
     <applicationPriority>0</applicationPriority>
+    <appNodeLabelExpression></appNodeLabelExpression>
+    <amNodeLabelExpression></amNodeLabelExpression>
   </app>
 </apps>
 ```
@@ -1650,6 +1658,8 @@ Note that depending on security settings a user might not be able to see all the
 | vcoreSeconds | long | The amount of CPU resources the application has allocated (virtual core-seconds) |
 | unmanagedApplication | boolean | Is the application unmanaged. |
 | applicationPriority | int | priority of the submitted application |
+| appNodeLabelExpression | string | Node Label expression used to identify the nodes on which the application's containers are expected to run by default. |
+| amNodeLabelExpression | string | Node Label expression used to identify the node on which the application's AM container is expected to run. |
 
 ### Response Examples
 
@@ -1690,8 +1700,10 @@ Response Body:
       "queue" : "a1",
       "memorySeconds" : 151730,
       "vcoreSeconds" : 103,
-      "unmanagedApplication":"false"
-      "applicationPriority":0
+      "unmanagedApplication" : "false",
+      "applicationPriority" : 0,
+      "appNodeLabelExpression" : "",
+      "amNodeLabelExpression" : ""
    }
 }
 ```
@@ -1735,6 +1747,8 @@ Response Body:
   <vcoreSeconds>103</vcoreSeconds>
   <unmanagedApplication>false</unmanagedApplication>
   <applicationPriority>0</applicationPriority>
+  <appNodeLabelExpression></appNodeLabelExpression>
+  <amNodeLabelExpression></amNodeLabelExpression>
 </app>
 ```
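
For a client consuming the two new fields, a minimal sketch of fetching a
single application report over the RM web services; the host, port, and
application id are placeholders, and the JSON is printed raw rather than parsed:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class FetchAppNodeLabels {
      public static void main(String[] args) throws Exception {
        // Placeholder host and app id; 8088 is the default RM web port.
        URL url = new URL(
            "http://rm-host:8088/ws/v1/cluster/apps/application_1326815542473_0001");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
          // The app object now also carries appNodeLabelExpression and
          // amNodeLabelExpression alongside the existing fields.
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line);
          }
        } finally {
          conn.disconnect();
        }
      }
    }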
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5308fe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
index 72b813a..1f0388e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md
@@ -1089,7 +1089,10 @@ Response Body:
           "finishedTime":1430425008861,
           "elapsedTime":7857,
           "unmanagedApplication":"false",
-          "applicationPriority":0},
+          "applicationPriority":0,
+          "appNodeLabelExpression":"",
+          "amNodeLabelExpression":""
+          },
           {
           "appId":"application_1430424020775_0003",
           "currentAppAttemptId":"appattempt_1430424020775_0003_000001",
@@ -1110,7 +1113,10 @@ Response Body:
           "finishedTime":1430424963907,
           "elapsedTime":7257,
           "unmanagedApplication":"false",
-          "applicationPriority":0},
+          "applicationPriority":0,
+          "appNodeLabelExpression":"",
+          "amNodeLabelExpression":""
+          },
           {
           "appId":"application_1430424020775_0002",
           "currentAppAttemptId":"appattempt_1430424020775_0002_000001",
@@ -1131,7 +1137,10 @@ Response Body:
           "finishedTime":1430424776594,
           "elapsedTime":7199,
           "unmanagedApplication":"false",
-          "applicationPriority":0},
+          "applicationPriority":0,
+          "appNodeLabelExpression":"",
+          "amNodeLabelExpression":""
+          },
           {
           "appId":"application_1430424020775_0001",
           "currentAppAttemptId":"appattempt_1430424020775_0001_000001",
@@ -1153,7 +1162,9 @@ Response Body:
           "elapsedTime":18344,
           "applicationTags":"mrapplication,ta-example",
           "unmanagedApplication":"false",
-          "applicationPriority":0
+          "applicationPriority":0,
+          "appNodeLabelExpression":"",
+          "amNodeLabelExpression":""
           }
       ]
     }
@@ -1197,6 +1208,8 @@ Response Body:
         <elapsedTime>7857</elapsedTime>
         <unmanagedApplication>false</unmanagedApplication>
         <applicationPriority>0</applicationPriority>
+        <appNodeLabelExpression></appNodeLabelExpression>
+        <amNodeLabelExpression></amNodeLabelExpression>
       </app>
       <app>
         <appId>application_1430424020775_0003</appId>
@@ -1219,6 +1232,8 @@ Response Body:
         <elapsedTime>7257</elapsedTime>
         <unmanagedApplication>false</unmanagedApplication>
         <applicationPriority>0</applicationPriority>
+        <appNodeLabelExpression></appNodeLabelExpression>
+        <amNodeLabelExpression></amNodeLabelExpression>
       </app>
       <app>
         <appId>application_1430424020775_0002</appId>
@@ -1241,6 +1256,8 @@ Response Body:
         <elapsedTime>7199</elapsedTime>
         <unmanagedApplication>false</unmanagedApplication>
         <applicationPriority>0</applicationPriority>
+        <appNodeLabelExpression></appNodeLabelExpression>
+        <amNodeLabelExpression></amNodeLabelExpression>
       </app>
       <app>
         <appId>application_1430424020775_0001</appId>
@@ -1264,6 +1281,8 @@ Response Body:
         <applicationTags>mrapplication,ta-example</applicationTags>
         <unmanagedApplication>false</unmanagedApplication>
         <applicationPriority>0</applicationPriority>
+        <appNodeLabelExpression></appNodeLabelExpression>
+        <amNodeLabelExpression></amNodeLabelExpression>
       </app>
     </apps>
 
@@ -1316,7 +1335,9 @@ None
 | `applicationTags` | string | The application tags. |
 | `unmanagedApplication` | boolean | Is the application unmanaged. |
 | `applicationPriority` | int | Priority of the submitted application. |
 
+| `appNodeLabelExpression` | string | Node Label expression used to identify the nodes on which the application's containers are expected to run by default. |
+| `amNodeLabelExpression` | string | Node Label expression used to identify the node on which the application's AM container is expected to run. |
 ### Response Examples:
 
 #### JSON response
@@ -1353,8 +1373,10 @@ Response Body:
       "finishedTime": 1430424072153,
       "elapsedTime": 18344,
       "applicationTags": mrapplication,tag-example,
-      "unmanagedApplication":"false"
-      "applicationPriority":0
+      "unmanagedApplication": "false",
+      "applicationPriority": 0,
+      "appNodeLabelExpression": "",
+      "amNodeLabelExpression": ""
     }
 
 #### XML response
@@ -1395,6 +1417,8 @@ Response Body:
        <applicationTags>mrapplication,ta-example</applicationTags>
        <unmanagedApplication>false</unmanagedApplication>
        <applicationPriority>0</applicationPriority>
+       <appNodeLabelExpression></appNodeLabelExpression>
+       <amNodeLabelExpression></amNodeLabelExpression>
      </app>
 
 ## <a name="REST_API_APPLICATION_ATTEMPT_LIST"></a>Application Attempt List


[16/50] [abbrv] hadoop git commit: MAPREDUCE-6474. ShuffleHandler can possibly exhaust nodemanager file descriptors. Contributed by Kuhu Shukla

Posted by ec...@apache.org.
MAPREDUCE-6474. ShuffleHandler can possibly exhaust nodemanager file descriptors. Contributed by Kuhu Shukla
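
The fix caps how many map output files a single shuffle connection holds open
at once: up to mapreduce.shuffle.max.session-open-files sends are primed, and
each completion callback schedules the next map id. A minimal sketch of that
bounded-pipelining idea, rebuilt on java.util.concurrent instead of the patch's
Netty 3 channel pipeline (the class name, executor setup, and transfer stand-in
are assumptions for illustration):

    import java.util.List;
    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.atomic.AtomicInteger;

    public class BoundedSender {
      private final ExecutorService pool = Executors.newFixedThreadPool(4);
      private final AtomicInteger nextToSend = new AtomicInteger(0);
      private final AtomicInteger remaining;
      private final List<String> mapIds;

      BoundedSender(List<String> mapIds) {
        this.mapIds = mapIds;
        this.remaining = new AtomicInteger(mapIds.size());
      }

      // Prime at most maxOpen transfers, like messageReceived() does with
      // Math.min(maxSessionOpenFiles, mapIds.size()).
      void start(int maxOpen) {
        for (int i = 0; i < Math.min(maxOpen, mapIds.size()); i++) {
          sendNext();
        }
      }

      private void sendNext() {
        int idx = nextToSend.getAndIncrement();
        if (idx >= mapIds.size()) {
          return; // every map id is already scheduled
        }
        String mapId = mapIds.get(idx);
        CompletableFuture
            .runAsync(() -> transfer(mapId), pool)  // opens and streams one file
            .whenComplete((v, err) -> {
              if (remaining.decrementAndGet() == 0) {
                pool.shutdown();                    // analogous to closing the channel
              } else {
                sendNext();                         // keeps the open-file count bounded
              }
            });
      }

      private void transfer(String mapId) {
        System.out.println("sending " + mapId);     // stand-in for sendMapOutput(...)
      }

      public static void main(String[] args) {
        new BoundedSender(List.of("m0", "m1", "m2", "m3", "m4")).start(3);
      }
    }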


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e615588
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e615588
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e615588

Branch: refs/heads/HADOOP-11890
Commit: 8e615588d5216394d0251a9c97bd706537856c6d
Parents: a40342b
Author: Jason Lowe <jl...@apache.org>
Authored: Thu Sep 10 16:00:17 2015 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Thu Sep 10 16:00:17 2015 +0000

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |   3 +
 .../apache/hadoop/mapred/ShuffleHandler.java    | 177 +++++++++++++++++--
 .../hadoop/mapred/TestShuffleHandler.java       | 129 ++++++++++++++
 3 files changed, 293 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e615588/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 428d37e..02c1f1f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -590,6 +590,9 @@ Release 2.7.2 - UNRELEASED
     MAPREDUCE-6442. Stack trace is missing when error occurs in client protocol
     provider's constructor (Chang Li via ozawa)
 
+    MAPREDUCE-6474. ShuffleHandler can possibly exhaust nodemanager file
+    descriptors (Kuhu Shukla via jlowe)
+
 Release 2.7.1 - 2015-07-06 
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e615588/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
index ee1be23..7a078a8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
@@ -49,6 +49,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.regex.Pattern;
 
 import javax.crypto.SecretKey;
@@ -170,6 +171,7 @@ public class ShuffleHandler extends AuxiliaryService {
   private int maxShuffleConnections;
   private int shuffleBufferSize;
   private boolean shuffleTransferToAllowed;
+  private int maxSessionOpenFiles;
   private ReadaheadPool readaheadPool = ReadaheadPool.getInstance();
 
   private Map<String,String> userRsrc;
@@ -220,6 +222,13 @@ public class ShuffleHandler extends AuxiliaryService {
   public static final boolean WINDOWS_DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED = 
       false;
 
+  /* The maximum number of files a single GET request can
+   * open simultaneously during shuffle.
+   */
+  public static final String SHUFFLE_MAX_SESSION_OPEN_FILES =
+      "mapreduce.shuffle.max.session-open-files";
+  public static final int DEFAULT_SHUFFLE_MAX_SESSION_OPEN_FILES = 3;
+
   boolean connectionKeepAliveEnabled = false;
   int connectionKeepAliveTimeOut;
   int mapOutputMetaInfoCacheSize;
@@ -248,6 +257,104 @@ public class ShuffleHandler extends AuxiliaryService {
 
   final ShuffleMetrics metrics;
 
+  class ReduceMapFileCount implements ChannelFutureListener {
+
+    private ReduceContext reduceContext;
+
+    public ReduceMapFileCount(ReduceContext rc) {
+      this.reduceContext = rc;
+    }
+
+    @Override
+    public void operationComplete(ChannelFuture future) throws Exception {
+      if (!future.isSuccess()) {
+        future.getChannel().close();
+        return;
+      }
+      int waitCount = this.reduceContext.getMapsToWait().decrementAndGet();
+      if (waitCount == 0) {
+        metrics.operationComplete(future);
+        future.getChannel().close();
+      } else {
+        pipelineFact.getSHUFFLE().sendMap(reduceContext);
+      }
+    }
+  }
+
+  /**
+   * Maintain parameters per messageReceived() Netty context.
+   * Allows sendMapOutput calls from operationComplete().
+   */
+  private static class ReduceContext {
+
+    private List<String> mapIds;
+    private AtomicInteger mapsToWait;
+    private AtomicInteger mapsToSend;
+    private int reduceId;
+    private ChannelHandlerContext ctx;
+    private String user;
+    private Map<String, Shuffle.MapOutputInfo> infoMap;
+    private String outputBasePathStr;
+
+    public ReduceContext(List<String> mapIds, int rId,
+                         ChannelHandlerContext context, String usr,
+                         Map<String, Shuffle.MapOutputInfo> mapOutputInfoMap,
+                         String outputBasePath) {
+
+      this.mapIds = mapIds;
+      this.reduceId = rId;
+      /*
+       * Atomic count for tracking the no. of map outputs that are yet to
+       * complete. Multiple futureListeners' operationComplete() can decrement
+       * this value asynchronously. It is used to decide when the channel
+       * should be closed.
+       */
+      this.mapsToWait = new AtomicInteger(mapIds.size());
+      /*
+       * Atomic count for tracking the no. of map outputs that have been sent.
+       * Multiple sendMap() calls can increment this value asynchronously.
+       * Used to decide which mapId should be sent next.
+       */
+      this.mapsToSend = new AtomicInteger(0);
+      this.ctx = context;
+      this.user = usr;
+      this.infoMap = mapOutputInfoMap;
+      this.outputBasePathStr = outputBasePath;
+    }
+
+    public int getReduceId() {
+      return reduceId;
+    }
+
+    public ChannelHandlerContext getCtx() {
+      return ctx;
+    }
+
+    public String getUser() {
+      return user;
+    }
+
+    public Map<String, Shuffle.MapOutputInfo> getInfoMap() {
+      return infoMap;
+    }
+
+    public String getOutputBasePathStr() {
+      return outputBasePathStr;
+    }
+
+    public List<String> getMapIds() {
+      return mapIds;
+    }
+
+    public AtomicInteger getMapsToSend() {
+      return mapsToSend;
+    }
+
+    public AtomicInteger getMapsToWait() {
+      return mapsToWait;
+    }
+  }
+
   ShuffleHandler(MetricsSystem ms) {
     super("httpshuffle");
     metrics = ms.register(new ShuffleMetrics());
@@ -357,6 +464,9 @@ public class ShuffleHandler extends AuxiliaryService {
          (Shell.WINDOWS)?WINDOWS_DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED:
                          DEFAULT_SHUFFLE_TRANSFERTO_ALLOWED);
 
+    maxSessionOpenFiles = conf.getInt(SHUFFLE_MAX_SESSION_OPEN_FILES,
+        DEFAULT_SHUFFLE_MAX_SESSION_OPEN_FILES);
+
     ThreadFactory bossFactory = new ThreadFactoryBuilder()
       .setNameFormat("ShuffleHandler Netty Boss #%d")
       .build();
@@ -638,6 +748,10 @@ public class ShuffleHandler extends AuxiliaryService {
       }
     }
 
+    public Shuffle getSHUFFLE() {
+      return SHUFFLE;
+    }
+
     public void destroy() {
       if (sslFactory != null) {
         sslFactory.destroy();
@@ -809,31 +923,62 @@ public class ShuffleHandler extends AuxiliaryService {
         return;
       }
       ch.write(response);
-      // TODO refactor the following into the pipeline
-      ChannelFuture lastMap = null;
-      for (String mapId : mapIds) {
+      // Initialize one ReduceContext object per messageReceived() call
+      ReduceContext reduceContext = new ReduceContext(mapIds, reduceId, ctx,
+          user, mapOutputInfoMap, outputBasePathStr);
+      for (int i = 0; i < Math.min(maxSessionOpenFiles, mapIds.size()); i++) {
+        ChannelFuture nextMap = sendMap(reduceContext);
+        if (nextMap == null) {
+          return;
+        }
+      }
+    }
+
+    /**
+     * Calls sendMapOutput for the mapId pointed to by
+     * ReduceContext.mapsToSend and increments it. This method is first called
+     * by messageReceived() maxSessionOpenFiles times, then on completion of
+     * every sendMapOutput operation. This bounds the number of files open on
+     * a node, which could otherwise grow very large (exhausting file
+     * descriptors on the NM) if all sendMapOutputs were issued in one go, as
+     * was done prior to this change.
+     * @param reduceContext used to call sendMapOutput with correct params.
+     * @return the ChannelFuture of the sendMapOutput; may be null.
+     */
+    public ChannelFuture sendMap(ReduceContext reduceContext)
+        throws Exception {
+
+      ChannelFuture nextMap = null;
+      if (reduceContext.getMapsToSend().get() <
+          reduceContext.getMapIds().size()) {
+        int nextIndex = reduceContext.getMapsToSend().getAndIncrement();
+        String mapId = reduceContext.getMapIds().get(nextIndex);
+
         try {
-          MapOutputInfo info = mapOutputInfoMap.get(mapId);
+          MapOutputInfo info = reduceContext.getInfoMap().get(mapId);
           if (info == null) {
-            info = getMapOutputInfo(outputBasePathStr + mapId,
-                mapId, reduceId, user);
+            info = getMapOutputInfo(reduceContext.getOutputBasePathStr() +
+                       mapId, mapId, reduceContext.getReduceId(),
+                       reduceContext.getUser());
           }
-          lastMap =
-              sendMapOutput(ctx, ch, user, mapId,
-                reduceId, info);
-          if (null == lastMap) {
-            sendError(ctx, NOT_FOUND);
-            return;
+          nextMap = sendMapOutput(
+              reduceContext.getCtx(),
+              reduceContext.getCtx().getChannel(),
+              reduceContext.getUser(), mapId,
+              reduceContext.getReduceId(), info);
+          if (null == nextMap) {
+            sendError(reduceContext.getCtx(), NOT_FOUND);
+            return null;
           }
+          nextMap.addListener(new ReduceMapFileCount(reduceContext));
         } catch (IOException e) {
           LOG.error("Shuffle error :", e);
           String errorMessage = getErrorMessage(e);
-          sendError(ctx,errorMessage , INTERNAL_SERVER_ERROR);
-          return;
+          sendError(reduceContext.getCtx(), errorMessage,
+              INTERNAL_SERVER_ERROR);
+          return null;
         }
       }
-      lastMap.addListener(metrics);
-      lastMap.addListener(ChannelFutureListener.CLOSE);
+      return nextMap;
     }
 
     private String getErrorMessage(Throwable t) {

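The new sendMap()/ReduceMapFileCount pair implements a simple bounded-in-flight
pattern: prime at most maxSessionOpenFiles transfers, then start each further
transfer from a completion callback. A minimal standalone sketch of the same
idea follows; BoundedSender and send() are hypothetical stand-ins, not the
actual ShuffleHandler/Netty API:

    import java.util.List;
    import java.util.concurrent.atomic.AtomicInteger;

    // Illustrative only: bounds in-flight sends the way sendMap() and
    // ReduceMapFileCount do. 'items' stands in for the map-output ids,
    // send() for sendMapOutput().
    class BoundedSender {
      private final List<String> items;                 // cf. ReduceContext.mapIds
      private final AtomicInteger toSend = new AtomicInteger(0); // cf. mapsToSend
      private final AtomicInteger toWait;                        // cf. mapsToWait
      private final int maxInFlight;       // cf. maxSessionOpenFiles (default 3)

      BoundedSender(List<String> items, int maxInFlight) {
        this.items = items;
        this.toWait = new AtomicInteger(items.size());
        this.maxInFlight = maxInFlight;
      }

      void start() {
        // Prime at most maxInFlight sends, as messageReceived() does.
        for (int i = 0; i < Math.min(maxInFlight, items.size()); i++) {
          sendNext();
        }
      }

      private void sendNext() {
        int next = toSend.getAndIncrement();
        if (next < items.size()) {
          send(items.get(next), this::onComplete);
        }
      }

      private void onComplete() {
        // cf. ReduceMapFileCount.operationComplete(): close the channel when
        // everything has completed, otherwise start exactly one more send.
        if (toWait.decrementAndGet() > 0) {
          sendNext();
        }
      }

      private void send(String item, Runnable whenDone) {
        // Hypothetical async transfer; the real code writes to a Netty channel
        // and registers whenDone as a ChannelFutureListener.
        new Thread(() -> { /* transfer 'item' */ whenDone.run(); }).start();
      }
    }

Each completion either finishes the session (when the outstanding count reaches
zero) or starts exactly one more transfer, so the number of simultaneously open
transfers never exceeds the cap.
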
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e615588/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
index bad9b2d..f16fe5a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
@@ -22,6 +22,7 @@ import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.apache.hadoop.test.MockitoMaker.make;
 import static org.apache.hadoop.test.MockitoMaker.stub;
+import static org.junit.Assert.assertTrue;
 import static org.jboss.netty.buffer.ChannelBuffers.wrappedBuffer;
 import static org.jboss.netty.handler.codec.http.HttpResponseStatus.OK;
 import static org.jboss.netty.handler.codec.http.HttpVersion.HTTP_1_1;
@@ -79,18 +80,66 @@ import org.apache.hadoop.yarn.server.records.Version;
 import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelFuture;
 import org.jboss.netty.channel.ChannelHandlerContext;
+import org.jboss.netty.channel.MessageEvent;
+import org.jboss.netty.channel.AbstractChannel;
 import org.jboss.netty.handler.codec.http.DefaultHttpResponse;
 import org.jboss.netty.handler.codec.http.HttpRequest;
 import org.jboss.netty.handler.codec.http.HttpResponse;
 import org.jboss.netty.handler.codec.http.HttpResponseStatus;
+import org.jboss.netty.handler.codec.http.HttpMethod;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.mockito.Mockito;
 import org.mortbay.jetty.HttpHeaders;
 
 public class TestShuffleHandler {
   static final long MiB = 1024 * 1024; 
   private static final Log LOG = LogFactory.getLog(TestShuffleHandler.class);
 
+  class MockShuffleHandler extends org.apache.hadoop.mapred.ShuffleHandler {
+    @Override
+    protected Shuffle getShuffle(final Configuration conf) {
+      return new Shuffle(conf) {
+        @Override
+        protected void verifyRequest(String appid, ChannelHandlerContext ctx,
+            HttpRequest request, HttpResponse response, URL requestUri)
+            throws IOException {
+        }
+        @Override
+        protected MapOutputInfo getMapOutputInfo(String base, String mapId,
+            int reduce, String user) throws IOException {
+          // Do nothing.
+          return null;
+        }
+        @Override
+        protected void populateHeaders(List<String> mapIds, String jobId,
+            String user, int reduce, HttpRequest request,
+            HttpResponse response, boolean keepAliveParam,
+            Map<String, MapOutputInfo> infoMap) throws IOException {
+          // Do nothing.
+        }
+        @Override
+        protected ChannelFuture sendMapOutput(ChannelHandlerContext ctx,
+            Channel ch, String user, String mapId, int reduce,
+            MapOutputInfo info) throws IOException {
+
+          ShuffleHeader header =
+              new ShuffleHeader("attempt_12345_1_m_1_0", 5678, 5678, 1);
+          DataOutputBuffer dob = new DataOutputBuffer();
+          header.write(dob);
+          ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
+          dob = new DataOutputBuffer();
+          for (int i = 0; i < 100; ++i) {
+            header.write(dob);
+          }
+          return ch.write(wrappedBuffer(dob.getData(), 0, dob.getLength()));
+        }
+      };
+    }
+  }
+
   /**
    * Test the validation of ShuffleHandler's meta-data's serialization and
    * de-serialization.
@@ -934,4 +983,84 @@ public class TestShuffleHandler {
       FileUtil.fullyDelete(absLogDir);
     }
   }
+
+  @Test(timeout = 4000)
+  public void testSendMapCount() throws Exception {
+    final List<ShuffleHandler.ReduceMapFileCount> listenerList =
+        new ArrayList<ShuffleHandler.ReduceMapFileCount>();
+
+    final ChannelHandlerContext mockCtx =
+        Mockito.mock(ChannelHandlerContext.class);
+    final MessageEvent mockEvt = Mockito.mock(MessageEvent.class);
+    final Channel mockCh = Mockito.mock(AbstractChannel.class);
+
+    // Mock HttpRequest and ChannelFuture
+    final HttpRequest mockHttpRequest = createMockHttpRequest();
+    final ChannelFuture mockFuture = createMockChannelFuture(mockCh,
+        listenerList);
+
+    // Mock Netty Channel Context and Channel behavior
+    Mockito.doReturn(mockCh).when(mockCtx).getChannel();
+    Mockito.when(mockCtx.getChannel()).thenReturn(mockCh);
+    Mockito.doReturn(mockFuture).when(mockCh).write(Mockito.any(Object.class));
+    Mockito.when(mockCh.write(Object.class)).thenReturn(mockFuture);
+
+    // Mock MessageEvent behavior
+    Mockito.doReturn(mockCh).when(mockEvt).getChannel();
+    Mockito.when(mockEvt.getChannel()).thenReturn(mockCh);
+    Mockito.doReturn(mockHttpRequest).when(mockEvt).getMessage();
+
+    final ShuffleHandler sh = new MockShuffleHandler();
+    Configuration conf = new Configuration();
+    sh.init(conf);
+    sh.start();
+    int maxOpenFiles = conf.getInt(ShuffleHandler.SHUFFLE_MAX_SESSION_OPEN_FILES,
+        ShuffleHandler.DEFAULT_SHUFFLE_MAX_SESSION_OPEN_FILES);
+    sh.getShuffle(conf).messageReceived(mockCtx, mockEvt);
+    assertTrue("Number of open files should not exceed the configured " +
+            "maximum",
+        listenerList.size() <= maxOpenFiles);
+    while (!listenerList.isEmpty()) {
+      listenerList.remove(0).operationComplete(mockFuture);
+      assertTrue("Number of open files should not exceed the configured " +
+              "maximum",
+          listenerList.size() <= maxOpenFiles);
+    }
+    sh.close();
+  }
+
+  public ChannelFuture createMockChannelFuture(Channel mockCh,
+      final List<ShuffleHandler.ReduceMapFileCount> listenerList) {
+    final ChannelFuture mockFuture = Mockito.mock(ChannelFuture.class);
+    Mockito.when(mockFuture.getChannel()).thenReturn(mockCh);
+    Mockito.doReturn(true).when(mockFuture).isSuccess();
+    Mockito.doAnswer(new Answer() {
+      @Override
+      public Object answer(InvocationOnMock invocation) throws Throwable {
+        //Add ReduceMapFileCount listener to a list
+        if (invocation.getArguments()[0].getClass() ==
+            ShuffleHandler.ReduceMapFileCount.class)
+          listenerList.add((ShuffleHandler.ReduceMapFileCount)
+              invocation.getArguments()[0]);
+        return null;
+      }
+    }).when(mockFuture).addListener(Mockito.any(
+        ShuffleHandler.ReduceMapFileCount.class));
+    return mockFuture;
+  }
+
+  public HttpRequest createMockHttpRequest() {
+    HttpRequest mockHttpRequest = Mockito.mock(HttpRequest.class);
+    Mockito.doReturn(HttpMethod.GET).when(mockHttpRequest).getMethod();
+    Mockito.doAnswer(new Answer() {
+      @Override
+      public Object answer(InvocationOnMock invocation) throws Throwable {
+        String uri = "/mapOutput?job=job_12345_1&reduce=1";
+        for (int i = 0; i < 100; i++)
+          uri = uri.concat("&map=attempt_12345_1_m_" + i + "_0");
+        return uri;
+      }
+    }).when(mockHttpRequest).getUri();
+    return mockHttpRequest;
+  }
 }

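Operationally, the cap exercised by this test is an ordinary shuffle
configuration key read at service init, so it can be tuned per NodeManager
(typically via mapred-site.xml). A hypothetical programmatic override, assuming
the handler's default public constructor:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.ShuffleHandler;

    public class ShuffleOpenFilesConfig {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Allow up to 10 map-output files open per shuffle GET (default is 3).
        conf.setInt(ShuffleHandler.SHUFFLE_MAX_SESSION_OPEN_FILES, 10);
        ShuffleHandler handler = new ShuffleHandler();
        handler.init(conf);
        handler.start();
        // ... serve shuffle traffic, then shut down ...
        handler.stop();
      }
    }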

[27/50] [abbrv] hadoop git commit: HDFS-9042. Update document for the Storage policy name (Contributed by J.Andreina)

Posted by ec...@apache.org.
HDFS-9042. Update document for the Storage policy name (Contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8455479
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8455479
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8455479

Branch: refs/heads/HADOOP-11890
Commit: d8455479b83e369fe9b5fe305c7beece93722fed
Parents: 9538af0
Author: Vinayakumar B <vi...@apache.org>
Authored: Sat Sep 12 12:36:12 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Sat Sep 12 12:36:12 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                      | 3 +++
 .../hadoop-hdfs/src/site/markdown/ArchivalStorage.md             | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8455479/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6051807..5a42499 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1356,6 +1356,9 @@ Release 2.7.2 - UNRELEASED
     HDFS-9033. dfsadmin -metasave prints "NaN" for cache used%.
     (Brahma Reddy Battula via aajisaka)
 
+    HDFS-9042. Update document for the Storage policy name
+    (J.Andreina via vinayakumarb)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8455479/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index bde4e7b..cbfbaa1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -73,14 +73,14 @@ The following is a typical storage policy table.
 
 | **Policy** **ID** | **Policy** **Name** | **Block Placement** **(n  replicas)** | **Fallback storages** **for creation** | **Fallback storages** **for replication** |
 |:---- |:---- |:---- |:---- |:---- |
-| 15 | Lasy\_Persist | RAM\_DISK: 1, DISK: *n*-1 | DISK | DISK |
+| 15 | Lazy\_Persist | RAM\_DISK: 1, DISK: *n*-1 | DISK | DISK |
 | 12 | All\_SSD | SSD: *n* | DISK | DISK |
 | 10 | One\_SSD | SSD: 1, DISK: *n*-1 | SSD, DISK | SSD, DISK |
 | 7 | Hot (default) | DISK: *n* | \<none\> | ARCHIVE |
 | 5 | Warm | DISK: 1, ARCHIVE: *n*-1 | ARCHIVE, DISK | ARCHIVE, DISK |
 | 2 | Cold | ARCHIVE: *n* | \<none\> | \<none\> |
 
-Note that the Lasy\_Persist policy is useful only for single replica blocks. For blocks with more than one replicas, all the replicas will be written to DISK since writing only one of the replicas to RAM\_DISK does not improve the overall performance.
+Note that the Lazy\_Persist policy is useful only for single-replica blocks. For blocks with more than one replica, all replicas will be written to DISK, since writing only one of the replicas to RAM\_DISK does not improve the overall performance.
 
 ### Storage Policy Resolution
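
As a usage sketch, the policy named in the corrected row can also be applied
programmatically through DistributedFileSystem; the path below is hypothetical,
and the cluster is assumed to have RAM_DISK storage configured:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LazyPersistExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at an HDFS cluster.
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(conf);
        // New single-replica blocks under this directory go to RAM_DISK first
        // and are lazily persisted to DISK.
        dfs.setStoragePolicy(new Path("/data/scratch"), "LAZY_PERSIST");
      }
    }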
 


[17/50] [abbrv] hadoop git commit: YARN-4106. NodeLabels for NM in distributed mode is not updated even after clusterNodelabel addition in RM. (Bibin A Chundatt via wangda)

Posted by ec...@apache.org.
YARN-4106. NodeLabels for NM in distributed mode is not updated even after clusterNodelabel addition in RM. (Bibin A Chundatt via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77666105
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77666105
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77666105

Branch: refs/heads/HADOOP-11890
Commit: 77666105b4557d5706e5844a4ca286917d966c5f
Parents: 8e61558
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Sep 10 09:30:09 2015 -0700
Committer: Wangda Tan <wa...@apache.org>
Committed: Thu Sep 10 09:30:09 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../nodemanager/NodeStatusUpdaterImpl.java      | 25 ++++++-
 .../nodelabels/AbstractNodeLabelsProvider.java  |  7 +-
 .../ConfigurationNodeLabelsProvider.java        | 11 ----
 .../TestConfigurationNodeLabelsProvider.java    | 69 +++++++++++++-------
 5 files changed, 74 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77666105/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index aef0d31..4815be3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -824,6 +824,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3591. Resource localization on a bad disk causes subsequent containers failure.
     (Lavkesh Lahngir via vvasudev)
 
+    YARN-4106. NodeLabels for NM in distributed mode is not updated even after
+    clusterNodelabel addition in RM. (Bibin A Chundatt via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77666105/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
index 05efc69..aa51e5c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java
@@ -897,6 +897,9 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
     private final NodeLabelsProvider nodeLabelsProvider;
     private Set<NodeLabel> previousNodeLabels;
     private boolean updatedLabelsSentToRM;
+    private long lastNodeLabelSendFailMills = 0L;
+    // TODO: Need to check which conf to use. Currently set to 1 min.
+    private static final long FAILEDLABELRESENDINTERVAL = 60000;
 
     @Override
     public Set<NodeLabel> getNodeLabelsForRegistration() {
@@ -938,12 +941,15 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
       // take some action only on modification of labels
       boolean areNodeLabelsUpdated =
           nodeLabelsForHeartbeat.size() != previousNodeLabels.size()
-              || !previousNodeLabels.containsAll(nodeLabelsForHeartbeat);
+              || !previousNodeLabels.containsAll(nodeLabelsForHeartbeat)
+              || checkResendLabelOnFailure();
 
       updatedLabelsSentToRM = false;
       if (areNodeLabelsUpdated) {
         previousNodeLabels = nodeLabelsForHeartbeat;
         try {
+          LOG.info("Modified labels from provider: "
+              + StringUtils.join(",", previousNodeLabels));
           validateNodeLabels(nodeLabelsForHeartbeat);
           updatedLabelsSentToRM = true;
         } catch (IOException e) {
@@ -980,16 +986,33 @@ public class NodeStatusUpdaterImpl extends AbstractService implements
       }
     }
 
+    /*
+     * When the RM doesn't accept the labels, they need to be resent. This
+     * method checks whether enough time has passed to warrant a resend.
+     */
+    public boolean checkResendLabelOnFailure() {
+      if (lastNodeLabelSendFailMills > 0L) {
+        long lastFailTimePassed =
+            System.currentTimeMillis() - lastNodeLabelSendFailMills;
+        if (lastFailTimePassed > FAILEDLABELRESENDINTERVAL) {
+          return true;
+        }
+      }
+      return false;
+    }
+
     @Override
     public void verifyRMHeartbeatResponseForNodeLabels(
         NodeHeartbeatResponse response) {
       if (updatedLabelsSentToRM) {
         if (response.getAreNodeLabelsAcceptedByRM()) {
+          lastNodeLabelSendFailMills = 0L;
           LOG.info("Node Labels {" + StringUtils.join(",", previousNodeLabels)
               + "} were Accepted by RM ");
         } else {
           // case where updated labels from NodeLabelsProvider is sent to RM and
           // RM rejected the labels
+          lastNodeLabelSendFailMills = System.currentTimeMillis();
           LOG.error(
               "NM node labels {" + StringUtils.join(",", previousNodeLabels)
                   + "} were not accepted by RM and message from RM : "

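The resend logic added above is a simple time-gated retry: record when the RM
last rejected the labels and report them again only after a fixed interval has
elapsed. The same pattern in isolation; class and method names are illustrative,
not the NodeStatusUpdaterImpl API:

    import java.util.concurrent.atomic.AtomicLong;

    // Illustrative retry gate mirroring checkResendLabelOnFailure(): remember
    // the failure time and report "retry due" once the interval has passed.
    class RetryGate {
      private final long intervalMillis;
      private final AtomicLong lastFailureMillis = new AtomicLong(0L);

      RetryGate(long intervalMillis) {
        this.intervalMillis = intervalMillis;
      }

      void recordFailure() {   // cf. the RM rejecting the labels
        lastFailureMillis.set(System.currentTimeMillis());
      }

      void recordSuccess() {   // cf. the RM accepting the labels
        lastFailureMillis.set(0L);
      }

      boolean retryDue() {
        long failedAt = lastFailureMillis.get();
        return failedAt > 0L
            && System.currentTimeMillis() - failedAt > intervalMillis;
      }
    }
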
http://git-wip-us.apache.org/repos/asf/hadoop/blob/77666105/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeLabelsProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeLabelsProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeLabelsProvider.java
index bbc6710..dac0b09 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeLabelsProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/AbstractNodeLabelsProvider.java
@@ -30,8 +30,6 @@ import org.apache.hadoop.yarn.api.records.NodeLabel;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * Provides base implementation of NodeLabelsProvider with Timer and expects
  * subclass to provide TimerTask which can fetch NodeLabels
@@ -55,8 +53,6 @@ public abstract class AbstractNodeLabelsProvider extends NodeLabelsProvider {
   protected Set<NodeLabel> nodeLabels =
       CommonNodeLabelsManager.EMPTY_NODELABEL_SET;
 
-  @VisibleForTesting
-  long startTime = 0;
 
   public AbstractNodeLabelsProvider(String name) {
     super(name);
@@ -77,12 +73,13 @@ public abstract class AbstractNodeLabelsProvider extends NodeLabelsProvider {
   @Override
   protected void serviceStart() throws Exception {
     timerTask = createTimerTask();
+    timerTask.run();
     if (intervalTime != DISABLE_NODE_LABELS_PROVIDER_FETCH_TIMER) {
       nodeLabelsScheduler =
           new Timer("DistributedNodeLabelsRunner-Timer", true);
       // Start the timer task and then periodically at the configured interval
       // time. Illegal values for intervalTime is handled by timer api
-      nodeLabelsScheduler.scheduleAtFixedRate(timerTask, startTime,
+      nodeLabelsScheduler.scheduleAtFixedRate(timerTask, intervalTime,
           intervalTime);
     }
     super.serviceStart();

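The serviceStart() change runs the fetch task once synchronously before
scheduling it, so labels are available as soon as the service starts even when
the periodic timer is disabled. A standalone sketch of that startup pattern,
with illustrative names:

    import java.util.Timer;
    import java.util.TimerTask;

    class PeriodicFetcher {
      static final long DISABLED = -1;

      void start(final Runnable fetch, long intervalMillis) {
        TimerTask task = new TimerTask() {
          @Override
          public void run() {
            fetch.run();
          }
        };
        // Run once up front so consumers see fresh data immediately; calling
        // run() directly does not mark the task as scheduled.
        task.run();
        if (intervalMillis != DISABLED) {
          Timer timer = new Timer("fetcher-timer", true);  // daemon thread
          // First scheduled run after one full interval; t=0 was covered above.
          timer.scheduleAtFixedRate(task, intervalMillis, intervalMillis);
        }
      }
    }
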
http://git-wip-us.apache.org/repos/asf/hadoop/blob/77666105/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java
index f549d1a..fc78de3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/ConfigurationNodeLabelsProvider.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.yarn.server.nodemanager.nodelabels;
 
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.Date;
 import java.util.HashSet;
 import java.util.TimerTask;
 
@@ -41,16 +40,6 @@ public class ConfigurationNodeLabelsProvider extends AbstractNodeLabelsProvider
   public ConfigurationNodeLabelsProvider() {
     super("Configuration Based NodeLabels Provider");
   }
-  @Override
-  protected void serviceInit(Configuration conf) throws Exception {
-    super.serviceInit(conf);
-    // In case timer is not configured avoid calling timertask.run thus avoiding
-    // unnecessary creation of YarnConfiguration Object
-    updateNodeLabelsFromConfig(conf);
-    if (intervalTime != DISABLE_NODE_LABELS_PROVIDER_FETCH_TIMER) {
-      startTime = new Date().getTime() + intervalTime;
-    }
-  }
 
   private void updateNodeLabelsFromConfig(Configuration conf)
       throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77666105/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/TestConfigurationNodeLabelsProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/TestConfigurationNodeLabelsProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/TestConfigurationNodeLabelsProvider.java
index 27fd4cb..18f6a7f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/TestConfigurationNodeLabelsProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/nodelabels/TestConfigurationNodeLabelsProvider.java
@@ -25,14 +25,17 @@ import java.net.MalformedURLException;
 import java.net.URL;
 import java.util.TimerTask;
 
+import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.nodelabels.NodeLabelTestBase;
 import org.junit.After;
+import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 public class TestConfigurationNodeLabelsProvider extends NodeLabelTestBase {
@@ -48,13 +51,17 @@ public class TestConfigurationNodeLabelsProvider extends NodeLabelTestBase {
 
   private ConfigurationNodeLabelsProvider nodeLabelsProvider;
 
-  @Before
-  public void setup() {
+  @BeforeClass
+  public static void create() {
     loader =
         new XMLPathClassLoader(
             TestConfigurationNodeLabelsProvider.class.getClassLoader());
     testRootDir.mkdirs();
+    Thread.currentThread().setContextClassLoader(loader);
+  }
 
+  @Before
+  public void setup() {
     nodeLabelsProvider = new ConfigurationNodeLabelsProvider();
   }
 
@@ -62,44 +69,43 @@ public class TestConfigurationNodeLabelsProvider extends NodeLabelTestBase {
   public void tearDown() throws Exception {
     if (nodeLabelsProvider != null) {
       nodeLabelsProvider.close();
+      nodeLabelsProvider.stop();
     }
+  }
+
+  @AfterClass
+  public static void remove() throws Exception {
     if (testRootDir.exists()) {
       FileContext.getLocalFSFileContext().delete(
           new Path(testRootDir.getAbsolutePath()), true);
     }
   }
 
-  private Configuration getConfForNodeLabels() {
-    Configuration conf = new Configuration();
-    conf.set(YarnConfiguration.NM_PROVIDER_CONFIGURED_NODE_LABELS, "A,B,CX");
-    return conf;
-  }
-
   @Test
   public void testNodeLabelsFromConfig() throws IOException,
       InterruptedException {
-    Configuration conf = getConfForNodeLabels();
+    Configuration conf = new Configuration();
+    modifyConf("A,B,CX");
     nodeLabelsProvider.init(conf);
     // test for ensuring labels are set during initialization of the class
     nodeLabelsProvider.start();
-    Thread.sleep(1000l); // sleep so that timer has run once during
-                         // initialization
     assertNLCollectionEquals(toNodeLabelSet("A", "B", "CX"),
         nodeLabelsProvider.getNodeLabels());
 
     // test for valid Modification
     TimerTask timerTask = nodeLabelsProvider.getTimerTask();
-    modifyConfAndCallTimer(timerTask, "X,y,Z");
+    modifyConf("X,y,Z");
+    timerTask.run();
     assertNLCollectionEquals(toNodeLabelSet("X", "y", "Z"),
         nodeLabelsProvider.getNodeLabels());
   }
 
   @Test
   public void testConfigForNoTimer() throws Exception {
-    Configuration conf = getConfForNodeLabels();
+    Configuration conf = new Configuration();
+    modifyConf("A,B,CX");
     conf.setLong(YarnConfiguration.NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS,
         AbstractNodeLabelsProvider.DISABLE_NODE_LABELS_PROVIDER_FETCH_TIMER);
-
     nodeLabelsProvider.init(conf);
     nodeLabelsProvider.start();
     Assert
@@ -112,18 +118,33 @@ public class TestConfigurationNodeLabelsProvider extends NodeLabelTestBase {
         nodeLabelsProvider.getNodeLabels());
   }
 
-  private static void modifyConfAndCallTimer(TimerTask timerTask,
-      String nodeLabels) throws FileNotFoundException, IOException {
+  @Test
+  public void testConfigTimer() throws Exception {
+    Configuration conf = new Configuration();
+    modifyConf("A,B,CX");
+    conf.setLong(YarnConfiguration.NM_NODE_LABELS_PROVIDER_FETCH_INTERVAL_MS,
+        1000);
+    nodeLabelsProvider.init(conf);
+    nodeLabelsProvider.start();
+    // Ensure that even though the timer has not run yet, node labels are
+    // fetched at least once so that the NM registers/updates labels with
+    // the RM.
+    assertNLCollectionEquals(toNodeLabelSet("A", "B", "CX"),
+        nodeLabelsProvider.getNodeLabels());
+    modifyConf("X,y,Z");
+    Thread.sleep(1500);
+    assertNLCollectionEquals(toNodeLabelSet("X", "y", "Z"),
+        nodeLabelsProvider.getNodeLabels());
+
+  }
+
+  private static void modifyConf(String nodeLabels)
+      throws FileNotFoundException, IOException {
     Configuration conf = new Configuration();
     conf.set(YarnConfiguration.NM_PROVIDER_CONFIGURED_NODE_LABELS, nodeLabels);
-    conf.writeXml(new FileOutputStream(nodeLabelsConfigFile));
-    ClassLoader actualLoader = Thread.currentThread().getContextClassLoader();
-    try {
-      Thread.currentThread().setContextClassLoader(loader);
-      timerTask.run();
-    } finally {
-      Thread.currentThread().setContextClassLoader(actualLoader);
-    }
+    FileOutputStream confStream = new FileOutputStream(nodeLabelsConfigFile);
+    conf.writeXml(confStream);
+    IOUtils.closeQuietly(confStream);
   }
 
   private static class XMLPathClassLoader extends ClassLoader {


[18/50] [abbrv] hadoop git commit: Updating all CHANGES.txt files to move entries from future releases into 2.6.1 section given the large number of backports to 2.6.1.

Posted by ec...@apache.org.
Updating all CHANGES.txt files to move entries from future releases into the 2.6.1
section given the large number of backports to 2.6.1.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbbb7ff1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbbb7ff1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbbb7ff1

Branch: refs/heads/HADOOP-11890
Commit: fbbb7ff1ed111884d0375c177239073153139848
Parents: 7766610
Author: Vinod Kumar Vavilapalli (I am also known as @tshooter.) <vi...@apache.org>
Authored: Thu Sep 10 13:45:57 2015 -0700
Committer: Vinod Kumar Vavilapalli (I am also known as @tshooter.) <vi...@apache.org>
Committed: Thu Sep 10 13:45:57 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 115 +++---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     | 260 ++++++-------
 hadoop-mapreduce-project/CHANGES.txt            |  56 +--
 hadoop-yarn-project/CHANGES.txt                 | 389 +++++++++----------
 4 files changed, 408 insertions(+), 412 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbbb7ff1/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 95e58af..c04bfd0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -342,9 +342,6 @@ Trunk (Unreleased)
     HADOOP-7256. Resource leak during failure scenario of closing
     of resources. (Ramkrishna S. Vasudevan via harsh)
 
-    HADOOP-8151. Error handling in snappy decompressor throws invalid
-    exceptions. (Matt Foley via harsh)
-
     HADOOP-8813. Add InterfaceAudience and InterfaceStability annotations
     to RPC Server and Client classes. (Brandon Li via suresh)
 
@@ -742,9 +739,6 @@ Release 2.8.0 - UNRELEASED
     command-line arguments passed by the user (Masatake Iwasaki via Colin P.
     McCabe)
 
-    HADOOP-12280. Skip unit tests based on maven profile rather than
-    NativeCodeLoader.isNativeCodeLoaded (Masatake Iwasaki via Colin P. McCabe)
-
     HADOOP-12318. Expose underlying LDAP exceptions in SaslPlainServer. (Mike
     Yoder via atm)
 
@@ -776,9 +770,6 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11785. Reduce the number of listStatus operation in distcp
     buildListing (Zoran Dimitrijevic via Colin P. McCabe)
 
-    HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up
-    split calculation (gera)
-
     HADOOP-11970. Replace uses of ThreadLocal<Random> with JDK7
     ThreadLocalRandom.  (Sean Busbey via Colin P. McCabe)
 
@@ -814,9 +805,6 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals
     before deleting (Casey Brotherton via harsh)
 
-    HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
-    is an I/O error during requestShortCircuitShm (cmccabe)
-
     HADOOP-10027. *Compressor_deflateBytesDirect passes instance instead of
     jclass to GetStaticObjectField. (Hui Zheng via cnauroth)
 
@@ -865,9 +853,6 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11866. increase readability and reliability of checkstyle,
     shellcheck, and whitespace reports (aw)
 
-    HADOOP-11491. HarFs incorrectly declared as requiring an authority.
-    (Brahma Reddy Battula via gera)
-
     HADOOP-11889. Make checkstyle runnable from root project
     (Gera Shegalov via jeagles)
 
@@ -1129,9 +1114,6 @@ Release 2.7.2 - UNRELEASED
     HADOOP-12304. Applications using FileContext fail with the default file
     system configured to be wasb/s3/etc. (cnauroth)
 
-    HADOOP-11932. MetricsSinkAdapter may hang  when being stopped.
-    (Brahma Reddy Battula via jianhe)
-
     HADOOP-12061. Incorrect command in single cluster setup document.
     (Kengo Seki via aajisaka)
 
@@ -1162,12 +1144,6 @@ Release 2.7.1 - 2015-07-06
     HADOOP-11868. Invalid user logins trigger large backtraces in server log
     (Chang Li via jlowe)
 
-    HADOOP-11730. Regression: s3n read failure recovery broken.
-    (Takenori Sato via stevel)
-
-    HADOOP-11802. DomainSocketWatcher thread terminates sometimes after there
-    is an I/O error during requestShortCircuitShm (cmccabe)
-
     HADOOP-11891. OsSecureRandom should lazily fill its reservoir (asuresh)
 
     HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when
@@ -1182,9 +1158,6 @@ Release 2.7.1 - 2015-07-06
     HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get
     created with ACLs. (Gregory Chanan via asuresh)
 
-    HADOOP-11934. Use of JavaKeyStoreProvider in LdapGroupsMapping causes
-    infinite loop. (Larry McCay via cnauroth)
-
     HADOOP-12058. Fix dead links to DistCp and Hadoop Archives pages.
     (Kazuho Fujii via aajisaka)
 
@@ -1448,18 +1421,12 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.
     (Wilfred Spiegelenburg via wang)
 
-    HADOOP-11238. Update the NameNode's Group Cache in the background when
-    possible (Chris Li via Colin P. McCabe)
-
     HADOOP-10809. hadoop-azure: page blob support. (Dexter Bradshaw,
     Mostafa Elhemali, Eric Hanson, and Mike Liddell via cnauroth)
 
     HADOOP-11188. hadoop-azure: automatically expand page blobs when they become
     full. (Eric Hanson via cnauroth)
 
-    HADOOP-11506. Configuration variable expansion regex expensive for long
-    values. (Gera Shegalov via gera)
-
     HADOOP-11620. Add support for load balancing across a group of KMS for HA.
     (Arun Suresh via wang)
 
@@ -1566,9 +1533,6 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11355. When accessing data in HDFS and the key has been deleted,
     a Null Pointer Exception is shown. (Arun Suresh via wang)
 
-    HADOOP-11343. Overflow is not properly handled in caclulating final iv for
-    AES CTR. (Jerry Chen via wang)
-
     HADOOP-11354. ThrottledInputStream doesn't perform effective throttling.
     (Ted Yu via jing9)
 
@@ -1584,9 +1548,6 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11369. Fix new findbugs warnings in hadoop-mapreduce-client,
     non-core directories. (Li Lu via wheat9)
 
-    HADOOP-11368. Fix SSLFactory truststore reloader thread leak in
-    KMSClientProvider. (Arun Suresh via wang)
-
     HADOOP-11372. Fix new findbugs warnings in mapreduce-examples.
     (Li Lu via wheat9)
 
@@ -1682,9 +1643,6 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11462. TestSocketIOWithTimeout needs change for PowerPC platform.
     (Ayappan via cnauroth)
 
-    HADOOP-11350. The size of header buffer of HttpServer is too small when
-    HTTPS is enabled. (Benoy Antony via wheat9)
-
     HADOOP-10542 Potential null pointer dereference in Jets3tFileSystemStore
     retrieveBlock(). (Ted Yu via stevel)	
 
@@ -1709,9 +1667,6 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11507 Hadoop RPC Authentication problem with different user locale.
     (Talat UYARER via stevel)
 
-    HADOOP-11482. Use correct UGI when KMSClientProvider is called by a proxy
-    user. Contributed by Arun Suresh.
-
     HADOOP-11499. Check of executorThreadsStarted in
     ValueQueue#submitRefillTask() evades lock acquisition (Ted Yu via jlowe)
 
@@ -1780,9 +1735,6 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11570. S3AInputStream.close() downloads the remaining bytes of
     the object from S3. (Dan Hecht via stevel).
 
-    HADOOP-11295. RPC Server Reader thread can't shutdown if RPCCallQueue is
-    full. (Ming Ma via kihwal)
-
     HADOOP-11599. Client#getTimeout should use IPC_CLIENT_PING_DEFAULT when 
     IPC_CLIENT_PING_KEY is not configured. (zhihai xu via ozawa)
 
@@ -1795,9 +1747,6 @@ Release 2.7.0 - 2015-04-20
     HADOOP-9087. Queue size metric for metric sinks isn't actually maintained
     (Akira AJISAKA via jlowe)
 
-    HADOOP-11604. Prevent ConcurrentModificationException while closing domain
-    sockets during shutdown of DomainSocketWatcher thread. (cnauroth)
-
     HADOOP-11612. Workaround for Curator's ChildReaper requiring Guava 15+.
     (rkanter)
 
@@ -1831,9 +1780,6 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11605. FilterFileSystem#create with ChecksumOpt should propagate it
     to wrapped FS. (gera)
 
-    HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
-    should be non static. (Sean Busbey via yliu)
-
     HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)
 
     HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
@@ -1847,9 +1793,6 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11693. Azure Storage FileSystem rename operations are throttled too
     aggressively to complete HBase WAL archiving. (Duo Xu via cnauroth)
 
-    HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt
-    synchronization. (Sean Busbey via yliu)
-
     HADOOP-11558. Fix dead links to doc of hadoop-tools. (Jean-Pierre 
     Matsumoto via ozawa)
 
@@ -1925,7 +1868,7 @@ Release 2.6.2 - UNRELEASED
 
   BUG FIXES
 
-Release 2.6.1 - UNRELEASED
+Release 2.6.1 - 2015-09-09
 
   INCOMPATIBLE CHANGES
 
@@ -1936,11 +1879,20 @@ Release 2.6.1 - UNRELEASED
     HADOOP-7139. Allow appending to existing SequenceFiles
     (kanaka kumar avvaru via vinayakumarb)
 
+    HADOOP-12280. Skip unit tests based on maven profile rather than
+    NativeCodeLoader.isNativeCodeLoaded (Masatake Iwasaki via Colin P. McCabe)
+
   OPTIMIZATIONS
 
+    HADOOP-11238. Update the NameNode's Group Cache in the background when
+    possible (Chris Li via Colin P. McCabe)
+
+    HADOOP-11506. Configuration variable expansion regex expensive for long
+    values. (Gera Shegalov via gera)
+
   BUG FIXES
 
-    HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC
+    HADOOP-11466: FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC
     architecture because it is slower there (Suman Somasundar via Colin P.
     McCabe)
 
@@ -1949,6 +1901,51 @@ Release 2.6.1 - UNRELEASED
     HADOOP-11333. Fix deadlock in DomainSocketWatcher when the notification
     pipe is full (zhaoyunjiong via cmccabe)
 
+    HADOOP-11343. Overflow is not properly handled in caclulating final iv for
+    AES CTR. (Jerry Chen via wang)
+
+    HADOOP-11368. Fix SSLFactory truststore reloader thread leak in
+    KMSClientProvider. (Arun Suresh via wang)
+
+    HADOOP-11350. The size of header buffer of HttpServer is too small when
+    HTTPS is enabled. (Benoy Antony via wheat9)
+
+    HADOOP-11482. Use correct UGI when KMSClientProvider is called by a proxy
+    user. Contributed by Arun Suresh.
+
+    HADOOP-11295. RPC Server Reader thread can't shutdown if RPCCallQueue is
+    full. (Ming Ma via kihwal)
+
+    HADOOP-11604. Prevent ConcurrentModificationException while closing domain
+    sockets during shutdown of DomainSocketWatcher thread. (cnauroth)
+
+    HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
+    should be non static. (Sean Busbey via yliu)
+
+    HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt
+    synchronization. (Sean Busbey via yliu)
+
+    HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up
+    split calculation (gera)
+
+    HADOOP-11730. Regression: s3n read failure recovery broken.
+    (Takenori Sato via stevel)
+
+    HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
+    is an I/O error during requestShortCircuitShm (cmccabe)
+
+    HADOOP-11491. HarFs incorrectly declared as requiring an authority.
+    (Brahma Reddy Battula via gera)
+
+    HADOOP-8151. Error handling in snappy decompressor throws invalid
+    exceptions. (Matt Foley via harsh)
+
+    HADOOP-11932. MetricsSinkAdapter may hang  when being stopped.
+    (Brahma Reddy Battula via jianhe)
+
+    HADOOP-11934. Use of JavaKeyStoreProvider in LdapGroupsMapping causes
+    infinite loop. (Larry McCay via cnauroth)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbbb7ff1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e241460..0c2645d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -441,8 +441,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8085. Move CorruptFileBlockIterator to a new hdfs.client.impl package.
     (szetszwo)
 
-    HDFS-8046. Allow better control of getContentSummary (kihwal)
-
     HDFS-8076. Code cleanup for DFSInputStream: use offset instead of
     LocatedBlock when possible. (Zhe Zhang via wang)
 
@@ -843,9 +841,6 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
 
-    HDFS-8846. Add a unit test for INotify functionality across a layout
-    version upgrade (Zhe Zhang via Colin P. McCabe)
-
     HDFS-8951. Move the shortcircuit package to hdfs-client.
     (Mingliang Liu via wheat9)
 
@@ -894,9 +889,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-9012. Move o.a.h.hdfs.protocol.datatransfer.PipelineAck class to
     hadoop-hdfs-client module. (Mingliang Liu via wheat9)
 
-    HDFS-8384. Allow NN to startup if there are files having a lease but are not
-    under construction. (jing9)
-
     HDFS-8984. Move replication queues related methods in FSNamesystem to
     BlockManager. (wheat9)
 
@@ -1041,9 +1033,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8290. WebHDFS calls before namesystem initialization can cause
     NullPointerException. (cnauroth)
 
-    HDFS-8219. setStoragePolicy with folder behavior is different after cluster restart.
-    (surendra singh lilhore via Xiaoyu Yao)
-
     HDFS-8310. Fix TestCLI.testAll "help: help for find" on Windows.
     (Kiran Kumar M R via Xiaoyu Yao)
 
@@ -1150,18 +1139,12 @@ Release 2.8.0 - UNRELEASED
     HDFS-8268. Port conflict log for data node server is not sufficient
     (Mohammad Shahid Khan via vinayakumarb)
 
-    HDFS-8431. hdfs crypto class not found in Windows.
-    (Anu Engineer via cnauroth)
-
     HDFS-8407. hdfsListDirectory must set errno to 0 on success (Masatake
     Iwasaki via Colin P. McCabe)
 
     HDFS-7401. Add block info to DFSInputStream' WARN message when it adds
     node to deadNodes (Arshad Mohammad via vinayakumarb)
 
-    HDFS-7609. Avoid retry cache collision when Standby NameNode loading edits.
-    (Ming Ma via jing9)
-
     HDFS-8490. Typo in trace enabled log in ExceptionHandler of WebHDFS.
     (Archana T via ozawa)
 
@@ -1365,9 +1348,6 @@ Release 2.7.2 - UNRELEASED
 
     HDFS-8867. Enable optimized block reports. (Daryn Sharp via jing9)
 
-    HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is
-    flawed. (Kihwal Lee via yliu)
-
     HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
 
     HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect.
@@ -1412,15 +1392,9 @@ Release 2.7.1 - 2015-07-06
     (Surendra Singh Lilhore via szetszwo)
 
   OPTIMIZATIONS
-    HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using
-    hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang
-    via Colin P. McCabe)
 
   BUG FIXES
 
-    HDFS-8127. NameNode Failover during HA upgrade can cause DataNode to
-    finalize upgrade. (jing9)
-
     HDFS-8151. Always use snapshot path as source when invalid snapshot names
     are used for diff based distcp. (jing9)
 
@@ -1442,9 +1416,6 @@ Release 2.7.1 - 2015-07-06
     HDFS-8147. StorageGroup in Dispatcher should override equals nad hashCode.
     (surendra singh lilhore via szetszwo)
 
-    HDFS-8070. Pre-HDFS-7915 DFSClient cannot use short circuit on
-    post-HDFS-7915 DataNode (cmccabe)
-
     HDFS-8273. FSNamesystem#Delete() should not call logSync() when holding the
     lock. (wheat9)
 
@@ -1457,17 +1428,8 @@ Release 2.7.1 - 2015-07-06
     HDFS-8305: HDFS INotify: the destination field of RenameOp should always
     end with the file name (cmccabe)
 
-    HDFS-7980. Incremental BlockReport will dramatically slow down namenode
-    startup.  (Walter Su via szetszwo)
-
     HDFS-8226. Non-HA rollback compatibility broken (J.Andreina via vinayakumarb)
 
-    HDFS-7894. Rolling upgrade readiness is not updated in jmx until query
-    command is issued. (Brahma Reddy Battula  via kihwal)
-
-    HDFS-8254. Standby namenode doesn't process DELETED_BLOCK if the add block
-    request is in edit log. (Rushabh S Shah via kihwal)
-
     HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
     goes for infinite loop (Rushabh S Shah  via kihwal)
 
@@ -1476,9 +1438,6 @@ Release 2.7.1 - 2015-07-06
 
     HDFS-8405. Fix a typo in NamenodeFsck.  (Takanobu Asanuma via szetszwo)
 
-    HDFS-8404. Pending block replication can get stuck using older genstamp
-    (Nathan Roberts via kihwal)
-
     HDFS-8451. DFSClient probe for encryption testing interprets empty URI
     property for "enabled". (Steve Loughran via xyao)
 
@@ -1527,9 +1486,6 @@ Release 2.7.0 - 2015-04-20
 
   NEW FEATURES
     
-    HDFS-7278. Add a command that allows sysadmins to manually trigger full
-    block reports from a DN (cmccabe)
-
     HDFS-6663. Admin command to track file and locations from block id.
     (Chen He via kihwal)
 
@@ -1623,9 +1579,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-3342. SocketTimeoutException in BlockSender.sendChunks could
     have a better error message. (Yongjun Zhang via wang)
 
-    HDFS-7035. Make adding a new data directory to the DataNode an atomic
-    operation and improve error handling (Lei Xu via Colin P. McCabe)
-    
     HDFS-6917. Add an hdfs debug command to validate blocks, call recoverlease,
     etc. (cmccabe)
 
@@ -1711,9 +1664,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7462. Consolidate implementation of mkdirs() into a single class.
     (wheat9)
 
-    HDFS-7446. HDFS inotify should have the ability to determine what txid it
-    has read up to (cmccabe)
-
     HDFS-6735. A minor optimization to avoid pread() be blocked by read()
     inside the same DFSInputStream (Lars Hofhansl via stack)
     
@@ -1756,9 +1706,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7528. Consolidate symlink-related implementation into a single class.
     (wheat9)
 
-    HDFS-7531. Improve the concurrent access on FsVolumeList (Lei Xu via Colin
-    P. McCabe)
-
     HDFS-7373. Clean up temporary files after fsimage transfer failures.
     (kihwal)
 
@@ -1776,8 +1723,6 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11470. Remove some uses of obsolete guava APIs from the hadoop
     codebase. (Sangjin Lee via Colin P. McCabe)
 
-    HDFS-7182. JMX metrics aren't accessible when NN is busy. (Ming Ma via jing9)
-
     HDFS-7323. Move the get/setStoragePolicy commands out from dfsadmin.
     (jing9 via yliu)
 
@@ -1971,9 +1916,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7410. Support CreateFlags with append() to support hsync() for
     appending streams (Vinayakumar B via Colin P. McCabe)
 
-    HDFS-7742. Favoring decommissioning node for replication can cause a block 
-    to stay underreplicated for long periods (Nathan Roberts via kihwal)
-
     HDFS-8008. Support client-side back off when the datanodes are congested.
     (wheat9)
 
@@ -2121,9 +2063,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7431. log message for InvalidMagicNumberException may be incorrect.
     (Yi Liu via cnauroth)
 
-    HDFS-7552. Change FsVolumeList toString() to fix
-    TestDataNodeVolumeFailureToleration (Liang Xie via Colin P. McCabe)
-
     HDFS-7557. Fix spacing for a few keys in DFSConfigKeys.java 
     (Colin P.McCabe)
 
@@ -2150,21 +2089,9 @@ Release 2.7.0 - 2015-04-20
     HDFS-7589. Break the dependency between libnative_mini_dfs and libhdfs.
     (Zhanwei Wang via cnauroth)
 
-    HDFS-7579. Improve log reporting during block report rpc failure.
-    (Charles Lamb via cnauroth)
-
-    HDFS-7596. NameNode should prune dead storages from storageMap.
-    (Arpit Agarwal via cnauroth)
-
-    HDFS-7533. Datanode sometimes does not shutdown on receiving upgrade
-    shutdown command (Eric Payne via kihwal)
-
     HDFS-5445. PacketReceiver populates the packetLen field in PacketHeader
     incorrectly (Jonathan Mace via Colin P. McCabe)
 
-    HDFS-7470. SecondaryNameNode need twice memory when calling
-    reloadFromImageFile. (zhaoyunjiong via cnauroth)
-
     HDFS-7585. Get TestEnhancedByteBufferAccess working on CPU architectures
     with page sizes other than 4096 (Sam Liu via Colin P. McCabe)
 
@@ -2182,15 +2109,9 @@ Release 2.7.0 - 2015-04-20
     HDFS-7496. Fix FsVolume removal race conditions on the DataNode by
     reference-counting the volume instances (lei via cmccabe)
 
-    HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via
-    Colin P. McCabe)
-
     HDFS-7548. Corrupt block reporting delayed until datablock scanner thread
     detects it (Rushabh Shah via kihwal)
 
-    HDFS-7575. Upgrade should generate a unique storage ID for each
-    volume. (Arpit Agarwal)
-
     HDFS-3519. Checkpoint upload may interfere with a concurrent saveNamespace.
     (Ming Ma via cnauroth)
 
@@ -2235,9 +2156,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-6651. Deletion failure can leak inodes permanently.
     (Jing Zhao via wheat9)
 
-    HDFS-7707. Edit log corruption due to delayed block removal again.
-    (Yongjun Zhang via kihwal)
-
     HDFS-7734. Class cast exception in NameNode#main. (yliu via wang)
 
     HDFS-7719. BlockPoolSliceStorage#removeVolumes fails to remove some
@@ -2264,9 +2182,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7718. Store KeyProvider in ClientContext to avoid leaking key provider
     threads when using FileContext (Arun Suresh via Colin P. McCabe)
 
-    HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause
-    DataNode to register successfully with only one NameNode.(vinayakumarb)
-
     HDFS-7769. TestHDFSCLI should not create files in hdfs project root dir.
     (szetszwo)
 
@@ -2300,15 +2215,9 @@ Release 2.7.0 - 2015-04-20
     HDFS-6662. WebHDFS cannot open a file if its path contains "%".
     (Gerson Carlos via wheat9)
 
-    HDFS-7788. Post-2.6 namenode may not start up with an image containing
-    inodes created with an old release. (Rushabh Shah via kihwal)
-
     HDFS-7814. Fix usage string of storageType parameter for
     "dfsadmin -setSpaceQuota/clrSpaceQuota". (Xiaoyu Yao via cnauroth)
 
-    HDFS-7009. Active NN and standby NN have different live nodes.
-    (Ming Ma via cnauroth)
-
     HDFS-7807. libhdfs htable.c: fix htable resizing, add unit test (cmccabe)
 
     HDFS-7805. NameNode recovery prompt should be printed on console (Surendra
@@ -2320,9 +2229,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7831. Fix the starting index and end condition of the loop in
     FileDiffList.findEarlierSnapshotBlocks(). (Konstantin Shvachko via jing9)
 
-    HDFS-7763. fix zkfc hung issue due to not catching exception in a corner
-    case. (Liang Xie via wang)
-
     HDFS-7843. A truncated file is corrupted after rollback from a rolling
     upgrade.  (szetszwo)
 
@@ -2335,9 +2241,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7785. Improve diagnostics information for HttpPutFailedException.
     (Chengbing Liu via wheat9)
 
-    HDFS-7871. NameNodeEditLogRoller can keep printing "Swallowing exception"
-    message. (jing9)
-
     HDFS-7757. Misleading error messages in FSImage.java. (Brahma Reddy Battula
     via Arpit Agarwal)
 
@@ -2355,15 +2258,9 @@ Release 2.7.0 - 2015-04-20
 
     HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
 
-    HDFS-7885. Datanode should not trust the generation stamp provided by
-    client. (Tsz Wo Nicholas Sze via jing9)
-
     HDFS-7818. OffsetParam should return the default value instead of throwing
     NPE when the value is unspecified. (Eric Payne via wheat9)
 
-    HDFS-7830. DataNode does not release the volume lock when adding a volume
-    fails. (Lei Xu via Colin P. McCabe)
-
     HDFS-6833.  DirectoryScanner should not register a deleting block with
     memory of DataNode.  (Shinichi Yamashita via szetszwo)
 
@@ -2373,9 +2270,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7903. Cannot recover block after truncate and delete snapshot.
     (Plamen Jeliazkov via shv)
 
-    HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and
-    fail to tell the DFSClient about it because of a network error (cmccabe)
-
     HDFS-7886. Fix TestFileTruncate failures. (Plamen Jeliazkov and shv)
 
     HDFS-7946. TestDataNodeVolumeFailureReporting NPE on Windows. (Xiaoyu Yao
@@ -2409,18 +2303,10 @@ Release 2.7.0 - 2015-04-20
     HDFS-7943. Append cannot handle the last block with length greater than
     the preferred block size. (jing9)
 
-    HDFS-7929. inotify unable to fetch pre-upgrade edit log segments once upgrade
-    starts (Zhe Zhang via Colin P. McCabe)
-
-    HDFS-7587. Edit log corruption can happen if append fails with a quota
-    violation. (jing9)
-
     HDFS-7816. Unable to open webhdfs paths with "+". (wheat9 via kihwal)
 
     HDFS-7932. Speed up the shutdown of datanode during rolling upgrade.(kihwal)
 
-    HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)
-
     HDFS-7957. Truncate should verify quota before making changes. (jing9)
 
     HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now()
@@ -2428,13 +2314,6 @@ Release 2.7.0 - 2015-04-20
 
     HDFS-7942. NFS: support regexp grouping in nfs.exports.allowed.hosts (brandonli)
 
-    HDFS-7884. Fix NullPointerException in BlockSender when the generation stamp
-    provided by the client is larger than the one stored in the datanode.
-    (Brahma Reddy Battula via szetszwo)
-
-    HDFS-7960. The full block report should prune zombie storages even if
-    they're not empty. (cmccabe and Eddy Xu via wang)
-
     HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via wang)
 
     HDFS-7977. NFS couldn't take percentile intervals (brandonli)
@@ -2467,15 +2346,9 @@ Release 2.7.0 - 2015-04-20
     HDFS-8051. FsVolumeList#addVolume should release the volume reference if it
     is not put into BlockScanner. (Lei (Eddy) Xu via Colin P. McCabe)
 
-    HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl
-    lock for a very long time (sinago via cmccabe)
-
     HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in
     platform-specific format. (Xiaoyu Yao via cnauroth)
 
-    HDFS-8072. Reserved RBW space is not released if client terminates while
-    writing block. (Arpit Agarwal)
-
     HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki
     via Colin P. McCabe)
 
@@ -2517,16 +2390,42 @@ Release 2.6.2 - UNRELEASED
 
   BUG FIXES
 
-Release 2.6.1 - UNRELEASED
+Release 2.6.1 - 2015-09-09
 
   INCOMPATIBLE CHANGES
 
   NEW FEATURES
 
+    HDFS-7278. Add a command that allows sysadmins to manually trigger full
+    block reports from a DN (cmccabe)
+
   IMPROVEMENTS
 
+    HDFS-7035. Make adding a new data directory to the DataNode an atomic
+    operation and improve error handling (Lei Xu via Colin P. McCabe)
+
+    HDFS-7531. Improve the concurrent access on FsVolumeList (Lei Xu via Colin
+    P. McCabe)
+
+    HDFS-7579. Improve log reporting during block report rpc failure.
+    (Charles Lamb via cnauroth)
+
+    HDFS-7182. JMX metrics aren't accessible when NN is busy. (Ming Ma via jing9)
+
+    HDFS-7596. NameNode should prune dead storages from storageMap.
+    (Arpit Agarwal via cnauroth)
+
+    HDFS-8046. Allow better control of getContentSummary (kihwal)
+
+    HDFS-8384. Allow NN to startup if there are files having a lease but are not
+    under construction. (jing9)
+
   OPTIMIZATIONS
 
+    HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using
+    hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang
+    via Colin P. McCabe)
+
   BUG FIXES
 
     HDFS-7425. NameNode block deletion logging uses incorrect appender.
@@ -2550,8 +2449,7 @@ Release 2.6.1 - UNRELEASED
     HDFS-7733. NFS: readdir/readdirplus return null directory
     attribute on failure. (Arpit Agarwal)
 
-    HDFS-8486. DN startup may cause severe data loss (Daryn Sharp via Colin P.
-    McCabe)
+    HDFS-8486. DN startup may cause severe data loss. (daryn via cmccabe)
 
     HDFS-7213. processIncrementalBlockReport performance degradation.
     (Eric Payne via kihwal)
@@ -2565,9 +2463,111 @@ Release 2.6.1 - UNRELEASED
     HDFS-7225. Remove stale block invalidation work when DN re-registers with
     different UUID. (Zhe Zhang and Andrew Wang)
 
+    HDFS-7533. Datanode sometimes does not shutdown on receiving upgrade
+    shutdown command (Eric Payne via kihwal)
+
+    HDFS-7575. Upgrade should generate a unique storage ID for each
+    volume. (Arpit Agarwal)
+
+    HDFS-7707. Edit log corruption due to delayed block removal again.
+    (Yongjun Zhang via kihwal)
+
+    HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause
+    DataNode to register successfully with only one NameNode. (vinayakumarb)
+
+    HDFS-7788. Post-2.6 namenode may not start up with an image containing
+    inodes created with an old release. (Rushabh Shah via kihwal)
+
+    HDFS-7009. Active NN and standby NN have different live nodes.
+    (Ming Ma via cnauroth)
+
+    HDFS-7763. fix zkfc hung issue due to not catching exception in a corner
+    case. (Liang Xie via wang)
+
+    HDFS-7871. NameNodeEditLogRoller can keep printing "Swallowing exception"
+    message. (jing9)
+
+    HDFS-7885. Datanode should not trust the generation stamp provided by
+    client. (Tsz Wo Nicholas Sze via jing9)
+
+    HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via
+    Colin P. McCabe)
+
+    HDFS-7830. DataNode does not release the volume lock when adding a volume
+    fails. (Lei Xu via Colin P. McCabe)
+
+    HDFS-7587. Edit log corruption can happen if append fails with a quota
+    violation. (jing9)
+
+    HDFS-7929. inotify unable to fetch pre-upgrade edit log segments once upgrade
+    starts (Zhe Zhang via Colin P. McCabe)
+
+    HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)
+
+    HDFS-7884. Fix NullPointerException in BlockSender when the generation stamp
+    provided by the client is larger than the one stored in the datanode.
+    (Brahma Reddy Battula via szetszwo)
+
+    HDFS-7960. The full block report should prune zombie storages even if
+    they're not empty. (cmccabe and Eddy Xu via wang)
+
+    HDFS-7742. Favoring decommissioning node for replication can cause a block
+    to stay underreplicated for long periods (Nathan Roberts via kihwal)
+
+    HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl
+    lock for a very long time (sinago via cmccabe)
+
+    HDFS-8072. Reserved RBW space is not released if client terminates while
+    writing block. (Arpit Agarwal)
+
+    HDFS-8127. NameNode Failover during HA upgrade can cause DataNode to
+    finalize upgrade. (jing9)
+
+    HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and
+    fail to tell the DFSClient about it because of a network error (cmccabe)
+
+    HDFS-8070. Pre-HDFS-7915 DFSClient cannot use short circuit on
+    post-HDFS-7915 DataNode (cmccabe)
+
+    HDFS-8219. setStoragePolicy with folder behavior is different after cluster restart.
+    (surendra singh lilhore via Xiaoyu Yao)
+
+    HDFS-7894. Rolling upgrade readiness is not updated in jmx until query
+    command is issued. (Brahma Reddy Battula via kihwal)
+
+    HDFS-8254. Standby namenode doesn't process DELETED_BLOCK if the add block
+    request is in edit log. (Rushabh S Shah via kihwal)
+
+    HDFS-8404. Pending block replication can get stuck using older genstamp
+    (Nathan Roberts via kihwal)
+
+    HDFS-8431. hdfs crypto class not found in Windows.
+    (Anu Engineer via cnauroth)
+
+    HDFS-7609. Avoid retry cache collision when Standby NameNode loading edits.
+    (Ming Ma via jing9)
+
     HDFS-8270. create() always retried with hardcoded timeout when file already
     exists with open lease (J.Andreina via vinayakumarb)
 
+    HDFS-7980. Incremental BlockReport will dramatically slow down namenode
+    startup.  (Walter Su via szetszwo)
+
+    HDFS-7446. HDFS inotify should have the ability to determine what txid it
+    has read up to (cmccabe)
+
+    HDFS-8846. Add a unit test for INotify functionality across a layout
+    version upgrade (Zhe Zhang via Colin P. McCabe)
+
+    HDFS-7470. SecondaryNameNode need twice memory when calling
+    reloadFromImageFile. (zhaoyunjiong via cnauroth)
+
+    HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is
+    flawed. (Kihwal Lee via yliu)
+
+    HDFS-7552. Change FsVolumeList toString() to fix
+    TestDataNodeVolumeFailureToleration (Liang Xie via Colin P. McCabe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbbb7ff1/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 02c1f1f..8c64738 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -448,9 +448,6 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6349. Fix typo in property org.apache.hadoop.mapreduce.
     lib.chain.Chain.REDUCER_INPUT_VALUE_CLASS. (Ray Chiang via ozawa)
 
-    MAPREDUCE-5649. Reduce cannot use more than 2G memory for the final merge
-    (Gera Shegalov via jlowe)
-
     MAPREDUCE-6165. [JDK8] TestCombineFileInputFormat failed on JDK8.
     (Akira AJISAKA via ozawa)
 
@@ -605,14 +602,6 @@ Release 2.7.1 - 2015-07-06
 
   BUG FIXES
 
-    MAPREDUCE-6300. Task list sort by task id broken. (Siqi Li via aajisaka)
-
-    MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
-    which is a regression from MR1 (zxu via rkanter)
-
-    MAPREDUCE-6324. Fixed MapReduce uber jobs to not fail the update of AM-RM
-    tokens when they roll-over. (Jason Lowe via vinodkv)
-
     MAPREDUCE-6252. JobHistoryServer should not fail when encountering a 
     missing directory. (Craig Welch via devaraj)
 
@@ -629,10 +618,6 @@ Release 2.7.1 - 2015-07-06
     that they don't fail on history-server backed by DFSes with not so strong
     guarantees. (Craig Welch via vinodkv)
 
-    MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between
-    copySucceeded() in one thread and copyFailed() in another thread on the
-    same host. (Junping Du via ozawa)
-
     MAPREDUCE-6387. Serialize the recently added Task#encryptedSpillKey field at 
     the end. (Arun Suresh via kasha)
 
@@ -714,9 +699,6 @@ Release 2.7.0 - 2015-04-20
     MAPREDUCE-6248. Exposed the internal MapReduce job's information as a public
     API in DistCp. (Jing Zhao via vinodkv)
 
-    MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into its own
-    class. (Chris Trezzo via kasha)
-
     MAPREDUCE-6263. Configurable timeout between YARNRunner terminate the 
     application and forcefully kill. (Eric Payne via junping_du)
 
@@ -766,9 +748,6 @@ Release 2.7.0 - 2015-04-20
     MAPREDUCE-4879. TeraOutputFormat may overwrite an existing output
     directory. (gera)
 
-    MAPREDUCE-6166. Reducers do not validate checksum of map outputs when
-    fetching directly to disk. (Eric Payne via gera)
-
     MAPREDUCE-6045. need close the DataInputStream after open it in
     TestMapReduce.java (zxu via rkanter)
 
@@ -778,9 +757,6 @@ Release 2.7.0 - 2015-04-20
     MAPREDUCE-3283. mapred classpath CLI does not display the complete classpath
     (Varun Saxena via cnauroth)
 
-    MAPREDUCE-6230. Fixed RMContainerAllocator to update the new AMRMToken
-    service name properly. (Jason Lowe via jianhe)
-
     MAPREDUCE-6231. Grep example job is not working on a fully-distributed
     cluster. (aajisaka)
 
@@ -831,9 +807,6 @@ Release 2.7.0 - 2015-04-20
     MAPREDUCE-6285. ClientServiceDelegate should not retry upon
     AuthenticationException. (Jonathan Eagles via ozawa)
 
-    MAPREDUCE-6303. Read timeout when retrying a fetch error can be fatal 
-    to a reducer. (Jason Lowe via junping_du)
-
 Release 2.6.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -846,7 +819,7 @@ Release 2.6.2 - UNRELEASED
 
   BUG FIXES
 
-Release 2.6.1 - UNRELEASED
+Release 2.6.1 - 2015-09-09
 
   INCOMPATIBLE CHANGES
 
@@ -861,6 +834,33 @@ Release 2.6.1 - UNRELEASED
     MAPREDUCE-6237. Multiple mappers with DBInputFormat don't work because of
     reusing connections. (Kannan Rajah via ozawa)
 
+    MAPREDUCE-6166. Reducers do not validate checksum of map outputs when
+    fetching directly to disk. (Eric Payne via gera)
+
+    MAPREDUCE-6230. Fixed RMContainerAllocator to update the new AMRMToken
+    service name properly. (Jason Lowe via jianhe)
+
+    MAPREDUCE-6303. Read timeout when retrying a fetch error can be fatal 
+    to a reducer. (Jason Lowe via junping_du)
+
+    MAPREDUCE-6300. Task list sort by task id broken. (Siqi Li via aajisaka)
+
+    MAPREDUCE-6267. Refactor JobSubmitter#copyAndConfigureFiles into its own
+    class. (Chris Trezzo via kasha)
+
+    MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
+    which is a regression from MR1 (zxu via rkanter)
+
+    MAPREDUCE-6324. Fixed MapReduce uber jobs to not fail the update of AM-RM
+    tokens when they roll-over. (Jason Lowe via vinodkv)
+
+    MAPREDUCE-5649. Reduce cannot use more than 2G memory for the final merge
+    (Gera Shegalov via jlowe)
+
+    MAPREDUCE-6361. NPE issue in shuffle caused by concurrent issue between
+    copySucceeded() in one thread and copyFailed() in another thread on the
+    same host. (Junping Du via ozawa)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbbb7ff1/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 4815be3..5a706a3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -338,9 +338,6 @@ Release 2.8.0 - UNRELEASED
     YARN-3581. Deprecate -directlyAccessNodeLabelStore in RMAdminCLI. 
     (Naganarasimha G R via wangda)
 
-    YARN-3700. Made generic history service load a number of latest applications
-    according to the parameter or the configuration. (Xuan Gong via zjshen)
-
     YARN-3722. Merge multiple TestWebAppUtils into o.a.h.yarn.webapp.util.TestWebAppUtils.
     (Masatake Iwasaki via devaraj)
 
@@ -498,9 +495,6 @@ Release 2.8.0 - UNRELEASED
     YARN-3459. Fix failure of TestLog4jWarningErrorMetricsAppender.
     (Varun Vasudev via wangda)
 
-    YARN-2890. MiniYarnCluster should turn on timeline service if
-    configured to do so. (Mit Desai via hitesh)
-
     YARN-3266. RMContext#inactiveNodes should have NodeId as map key.
     (Chengbing Liu via jianhe)
 
@@ -797,9 +791,6 @@ Release 2.8.0 - UNRELEASED
     YARN-4005. Completed container whose app is finished is possibly not
     removed from NMStateStore. (Jun Gong via jianhe)
 
-    YARN-4047. ClientRMService getApplications has high scheduler lock contention.
-    (Jason Lowe via jianhe)
-
     YARN-3987. Send AM container completed msg to NM once AM finishes.
     (sandflee via jianhe)
 
@@ -844,9 +835,6 @@ Release 2.7.2 - UNRELEASED
     YARN-3967. Fetch the application report from the AHS if the RM does not know about it.
     (Mit Desai via xgong)
 
-    YARN-3978. Configurably turn off the saving of container info in Generic AHS
-    (Eric Payne via jeagles)
-
     YARN-4092. Fixed UI redirection to print useful messages when both RMs are
     in standby mode. (Xuan Gong via jianhe)
 
@@ -880,11 +868,6 @@ Release 2.7.2 - UNRELEASED
     YARN-3925. ContainerLogsUtils#getContainerLogFile fails to read container
     log files from full disks. (zhihai xu via jlowe)
 
-    YARN-3990. AsyncDispatcher may be overloaded with RMAppNodeUpdateEvent when
-    Node is connected/disconnected (Bibin A Chundatt via jlowe)
-
-    YARN-3999. RM hangs on draining events. (Jian He via xgong)
-
     YARN-3857: Memory leak in ResourceManager with SIMPLE mode.
     (mujunchao via zxu)
 
@@ -933,9 +916,6 @@ Release 2.7.1 - 2015-07-06
 
   BUG FIXES
 
-    YARN-3487. CapacityScheduler scheduler lock obtained unnecessarily when 
-    calling getQueue (Jason Lowe via wangda)
-
     YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf
     without making a copy. (Jason Lowe via jianhe)
 
@@ -959,9 +939,6 @@ Release 2.7.1 - 2015-07-06
     YARN-3516. killing ContainerLocalizer action doesn't take effect when
     private localizer receives FETCH_FAILURE status.(zhihai xu via xgong)
 
-    YARN-3464. Race condition in LocalizerRunner kills localizer before 
-    localizing all resources. (Zhihai Xu via kasha)
-    
     YARN-3485. FairScheduler headroom calculation doesn't consider 
     maxResources for Fifo and FairShare policies. (kasha)
 
@@ -986,9 +963,6 @@ Release 2.7.1 - 2015-07-06
     YARN-3434. Interaction between reservations and userlimit can result in 
     significant ULF violation (tgraves)
 
-    YARN-3493. RM fails to come up with error "Failed to load/recover state"
-    when mem settings are changed. (Jian He via wangda)
-
     YARN-3626. On Windows localized resources are not moved to the front
     of the classpath when they should be. (Craig Welch via xgong)
 
@@ -998,20 +972,11 @@ Release 2.7.1 - 2015-07-06
     YARN-3537. NPE when NodeManager.serviceInit fails and stopRecoveryStore
     invoked (Brahma Reddy Battula via jlowe)
 
-    YARN-3641. NodeManager: stopRecoveryStore() shouldn't be skipped when
-    exceptions happen in stopping NM's sub-services. (Junping Du via jlowe)
-
-    YARN-3526. ApplicationMaster tracking URL is incorrectly redirected
-    on a QJM cluster. (Weiwei Yang via xgong)
-
     YARN-3601. Fix UT TestRMFailover.testRMWebAppRedirect. (Weiwei Yang via xgong)
 
     YARN-3677. Fix findbugs warnings in yarn-server-resourcemanager.
     (Vinod Kumar Vavilapalli via ozawa)
 
-    YARN-2918. RM should not fail on startup if queue's configured labels do
-    not exist in cluster-node-labels. (Wangda Tan via jianhe)
-
     YARN-3681. yarn cmd says "could not find main class 'queue'" in windows.
     (Craig Welch and Varun Saxena via xgong)
 
@@ -1036,18 +1001,6 @@ Release 2.7.1 - 2015-07-06
     YARN-3686. CapacityScheduler should trim default_node_label_expression. 
     (Sunil G via wangda)
 
-    YARN-2900. Application (Attempt and Container) Not Found in AHS results
-    in Internal Server Error (500). (Zhijie Shen and Mit Desai via xgong)
-
-    YARN-3725. App submission via REST API is broken in secure mode due to 
-    Timeline DT service address is empty. (Zhijie Shen via wangda)
-
-    YARN-3585. NodeManager cannot exit on SHUTDOWN event triggered and NM
-    recovery is enabled (Rohith Sharmaks via jlowe)
-
-    YARN-3733. Fix DominantRC#compare() does not work as expected if 
-    cluster resource is empty. (Rohith Sharmaks via wangda)
-
     YARN-3764. CapacityScheduler should forbid moving LeafQueue from one parent
     to another. (Wangda Tan via jianhe)
 
@@ -1060,13 +1013,6 @@ Release 2.7.1 - 2015-07-06
     YARN-3809. Failed to launch new attempts because
     ApplicationMasterLauncher's threads all hang (Jun Gong via jlowe)
 
-    YARN-3832. Resource Localization fails on a cluster due to existing cache
-    directories (Brahma Reddy Battula via jlowe)
-
-    YARN-3850. NM fails to read files from full disks which can lead to
-    container logs being lost and other issues (Varun Saxena via jlowe)
-
-
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
@@ -1116,9 +1062,6 @@ Release 2.7.0 - 2015-04-20
 
     YARN-2994. Document work-preserving RM restart. (Jian He via ozawa)
 
-    YARN-3249. Add a 'kill application' button to Resource Manager's Web UI.
-    (Ryu Kobayashi via ozawa)
-
     YARN-2786. Created a yarn cluster CLI and seeded with one command for listing
     node-labels collection. (Wangda Tan via vinodkv)
 
@@ -1189,8 +1132,6 @@ Release 2.7.0 - 2015-04-20
     YARN-1156. Enhance NodeManager AllocatedGB and AvailableGB metrics 
     for aggregation of decimal values. (Tsuyoshi OZAWA via junping_du)
 
-    YARN-2301. Improved yarn container command. (Naganarasimha G R via jianhe)
-
     YARN-2056. Disable preemption at Queue level (Eric Payne via jlowe)
 
     YARN-2762. Fixed RMAdminCLI to trim and check node-label related arguments
@@ -1244,12 +1185,6 @@ Release 2.7.0 - 2015-04-20
     YARN-2800. Remove MemoryNodeLabelsStore and add a way to enable/disable
     node labels feature. (Wangda Tan via ozawa)
 
-    YARN-3024. LocalizerRunner should give DIE action when all resources are
-    localized. (Chengbing Liu via xgong)
-
-    YARN-3092. Created a common ResourceUsage class to track labeled resource
-    usages in Capacity Scheduler. (Wangda Tan via jianhe)
-
     YARN-3086. Make NodeManager memory configurable in MiniYARNCluster.
     (Robert Metzger via ozawa)
 
@@ -1275,9 +1210,6 @@ Release 2.7.0 - 2015-04-20
     YARN-3022. Expose Container resource information from NodeManager for
     monitoring (adhoot via ranter)
 
-    YARN-3098. Created common QueueCapacities class in Capacity Scheduler to
-    track capacities-by-labels of queues. (Wangda Tan via jianhe)
-
     YARN-3075. NodeLabelsManager implementation to retrieve label to node 
     mapping (Varun Saxena via wangda)
 
@@ -1318,9 +1250,6 @@ Release 2.7.0 - 2015-04-20
     YARN-2079. Recover NonAggregatingLogHandler state upon nodemanager
     restart. (Jason Lowe via junping_du) 
 
-    YARN-3124. Fixed CS LeafQueue/ParentQueue to use QueueCapacities to track
-    capacities-by-label. (Wangda Tan via jianhe)
-
     YARN-3158. Correct log messages in ResourceTrackerService.
     (Varun Saxena via xgong)
 
@@ -1345,8 +1274,6 @@ Release 2.7.0 - 2015-04-20
     YARN-2799. Cleanup TestLogAggregationService based on the change in YARN-90.
     (Zhihai Xu via junping_du)
 
-    YARN-3230. Clarify application states on the web UI. (Jian He via wangda)
-
     YARN-3237. AppLogAggregatorImpl fails to log error cause.
     (Rushabh S Shah via xgong)
 
@@ -1380,9 +1307,6 @@ Release 2.7.0 - 2015-04-20
     YARN-3122. Metrics for container's actual CPU usage. 
     (Anubhav Dhoot via kasha)
 
-    YARN-1809. Synchronize RM and TimeLineServer Web-UIs. (Zhijie Shen and
-    Xuan Gong via jianhe)
-
     YARN-2190. Added CPU and memory limit options to the default container
     executor for Windows containers. (Chuan Liu via jianhe)
 
@@ -1417,56 +1341,26 @@ Release 2.7.0 - 2015-04-20
     YARN-2713. "RM Home" link in NM should point to one of the RMs in an 
     HA setup. (kasha)
 
-    YARN-2766. Made ApplicationHistoryManager return a sorted list of apps,
-    attempts and containers. (Robert Kanter via zjshen)
-
-    YARN-2856. Fixed RMAppImpl to handle ATTEMPT_KILLED event at ACCEPTED state
-    on app recovery. (Rohith Sharmaks via jianhe)
-
     YARN-2857. ConcurrentModificationException in ContainerLogAppender
     (Mohammad Kamrul Islam via jlowe)
 
-    YARN-2816. NM fail to start with NPE during container recovery (Zhihai Xu
-    via jlowe)
-
     YARN-2432. RMStateStore should process the pending events before close.
     (Varun Saxena via jianhe)
 
     YARN-1703. Fixed ResourceManager web-proxy to close connections correctly.
     (Rohith Sharma via vinodkv)
 
-    YARN-2414. RM web UI: app page will crash if app is failed before any
-    attempt has been created (Wangda Tan via jlowe)
-
     YARN-2870. Updated the command to run the timeline server in the document.
     (Masatake Iwasaki via zjshen)
 
     YARN-2878. Fix DockerContainerExecutor.apt.vm formatting. (Abin Shahab via
     jianhe)
 
-    YARN-2865. Fixed RM to always create a new RMContext when it transitions from
-    StandBy to Active. (Rohith Sharmaks via jianhe)
-
     YARN-2315. FairScheduler: Set current capacity in addition to capacity.
     (Zhihai Xu via kasha)
 
-    YARN-1984. LeveldbTimelineStore does not handle db exceptions properly
-    (Varun Saxena via jlowe)
-
     YARN-2697. Remove useless RMAuthenticationHandler. (Haosong Huang via zjshen)
 
-    YARN-2906. CapacitySchedulerPage shows HTML tags for a queue's Active Users.
-    (Jason Lowe via jianhe)
-
-    YARN-2905. AggregatedLogsBlock page can infinitely loop if the aggregated
-    log file is corrupted (Varun Saxena via jlowe)
-
-    YARN-2894. Fixed a bug regarding application view acl when RM fails over.
-    (Rohith Sharmaks via jianhe)
-
-    YARN-2874. Dead lock in "DelegationTokenRenewer" which blocks RM to execute
-    any further apps. (Naganarasimha G R via kasha)
-
     YARN-2461. Fix PROCFS_USE_SMAPS_BASED_RSS_ENABLED property in
     YarnConfiguration. (rchiang via rkanter)
 
@@ -1479,18 +1373,12 @@ Release 2.7.0 - 2015-04-20
     YARN-2931. PublicLocalizer may fail until directory is initialized by
     LocalizeRunner. (Anubhav Dhoot via kasha)
 
-    YARN-2910. FSLeafQueue can throw ConcurrentModificationException. 
-    (Wilfred Spiegelenburg via kasha)
-
     YARN-2930. Fixed TestRMRestart#testRMRestartRecoveringNodeLabelManager
     intermittent failure. (Wangda Tan via jianhe)
 
     YARN-2924. Fixed RMAdminCLI to not convert node labels to lower case.
     (Wangda Tan via jianhe)
 
-    YARN-2917. Fixed potential deadlock when system.exit is called in AsyncDispatcher
-    (Rohith Sharmaks via jianhe)
-
     YARN-2243. Order of arguments for Preconditions.checkNotNull() is wrong in 
     SchedulerApplicationAttempt ctor. (devaraj)
 
@@ -1508,15 +1396,9 @@ Release 2.7.0 - 2015-04-20
     YARN-2944. InMemorySCMStore can not be instantiated with ReflectionUtils#newInstance.
     (Chris Trezzo via kasha)
 
-    YARN-2964. RM prematurely cancels tokens for jobs that submit jobs (oozie)
-    (Jian He via jlowe)
-
     YARN-2675. containersKilled metrics is not updated when the container is killed 
     during localization. (Zhihai Xu via kasha)
 
-    YARN-2952. Fixed incorrect version check in StateStore. (Rohith Sharmaks
-    via jianhe)
-
     YARN-2975. FSLeafQueue app lists are accessed without required locks. (kasha)
 
     YARN-2977. Fixed intermittent TestNMClient failure.
@@ -1524,12 +1406,6 @@ Release 2.7.0 - 2015-04-20
 
     YARN-2939. Fix new findbugs warnings in hadoop-yarn-common. (Li Lu via junping_du)
 
-    YARN-2920. Changed CapacityScheduler to kill containers on nodes where
-    node labels are changed. (Wangda Tan via jianhe)
-
-    YARN-2340. Fixed NPE when queue is stopped during RM restart.
-    (Rohith Sharmaks via jianhe)
-
     YARN-2940. Fix new findbugs warnings in rest of the hadoop-yarn components. (Li Lu 
     via junping_du)
 
@@ -1541,9 +1417,6 @@ Release 2.7.0 - 2015-04-20
 
     YARN-2988. Graph#save() may leak file descriptors. (Ted Yu via ozawa)
 
-    YARN-2992. ZKRMStateStore crashes due to session expiry. (Karthik Kambatla
-    via jianhe)
-
     YARN-2938. Fixed new findbugs warnings in hadoop-yarn-resourcemanager and
     hadoop-yarn-applicationhistoryservice. (Varun Saxena via zjshen)
 
@@ -1553,15 +1426,9 @@ Release 2.7.0 - 2015-04-20
     YARN-2991. Fixed DrainDispatcher to reuse the draining code path in
     AsyncDispatcher. (Rohith Sharmaks via zjshen)
 
-    YARN-2922. ConcurrentModificationException in CapacityScheduler's LeafQueue.
-    (Rohith Sharmaks via ozawa)
-
     YARN-2958. Made RMStateStore not update the last sequence number when updating the
     delegation token. (Varun Saxena via zjshen)
 
-    YARN-2978. Fixed potential NPE while getting queue info. (Varun Saxena via
-    jianhe)
-
     YARN-2230. Fixed few configs description in yarn-default.xml. (Vijay Bhat
     via jianhe)
 
@@ -1571,18 +1438,12 @@ Release 2.7.0 - 2015-04-20
     YARN-2936. Changed YARNDelegationTokenIdentifier to set proto fields on
     getProto method. (Varun Saxena via jianhe)
 
-    YARN-2997. Fixed NodeStatusUpdater to not send already-sent completed
-    container statuses on heartbeat. (Chengbing Liu via jianhe)
-
     YARN-3014. Replaces labels on a host should update all NM's labels on that
     host. (Wangda Tan via jianhe)
 
     YARN-3027. Scheduler should use totalAvailable resource from node instead of
     availableResource for maxAllocation. (adhoot via rkanter)
 
-    YARN-2637. Fixed max-am-resource-percent calculation in CapacityScheduler
-    when activating applications. (Craig Welch via jianhe)
-
     YARN-2861. Fixed Timeline DT secret manager to not reuse RM's configs.
     (Zhijie Shen via jianhe)
 
@@ -1613,21 +1474,12 @@ Release 2.7.0 - 2015-04-20
     YARN-3088. LinuxContainerExecutor.deleteAsUser can throw NPE if native
     executor returns an error (Eric Payne via jlowe)
 
-    YARN-3011. Possible IllegalArgumentException in ResourceLocalizationService
-    might lead NM to crash. (Varun Saxena via jianhe)
-
-    YARN-3103. AMRMClientImpl does not update AMRM token properly. (Jason Lowe
-    via jianhe)
-
     YARN-3079. Scheduler should also update maximumAllocation when updateNodeResource.
     (Zhihai Xu via wangda)
 
     YARN-3029. FSDownload.unpack() uses local locale for FS case conversion, may not
     work everywhere. (Varun Saxena via ozawa)
 
-    YARN-3099. Capacity Scheduler LeafQueue/ParentQueue should use ResourceUsage
-    to track used-resources-by-label. (Wangda Tan via jianhe)
-
     YARN-3077. Fixed RM to create zk root path recursively. (Chun Chen via jianhe)
 
     YARN-3113. Release audit warning for Sorting icons.psd. (stevel via kihwal)
@@ -1653,18 +1505,12 @@ Release 2.7.0 - 2015-04-20
     YARN-1537. Fix race condition in
     TestLocalResourcesTrackerImpl.testLocalResourceCache. (xgong via acmurthy)
 
-    YARN-2694. Ensure only single node label specified in ResourceRequest.
-    (Wangda Tan via jianhe)
-
     YARN-3089. LinuxContainerExecutor does not handle file arguments to
     deleteAsUser (Eric Payne via jlowe)
 
     YARN-3143. RM Apps REST API can return NPE or entries missing id and other
     fields (jlowe)
 
-    YARN-3094. Reset timer for liveness monitors after RM recovery. (Jun Gong
-    via jianhe)
-
     YARN-2971. RM uses conf instead of token service address to renew timeline
     delegation tokens (jeagles)
 
@@ -1674,10 +1520,6 @@ Release 2.7.0 - 2015-04-20
     YARN-2809. Implement workaround for linux kernel panic when removing
     cgroup (Nathan Roberts via jlowe)
 
-    YARN-2246. Made the proxy tracking URL always be
-    http(s)://proxy addr:port/proxy/<appId> to avoid duplicate sections. (Devaraj
-    K via zjshen)
-
     YARN-3160. Fix non-atomic operation on nodeUpdateQueue in RMNodeImpl. 
     (Chengbing Liu via junping_du)
 
@@ -1708,9 +1550,6 @@ Release 2.7.0 - 2015-04-20
     YARN-2749. Fix some testcases from TestLogAggregationService fails in trunk. 
     (Xuan Gong via junping_du)
 
-    YARN-3207. Secondary filter matches entities which do not have the key being
-    filtered for. (Zhijie Shen via xgong)
-
     YARN-3132. RMNodeLabelsManager should remove node from node-to-label mapping
     when node becomes deactivated. (Wangda Tan via jianhe)
 
@@ -1723,15 +1562,9 @@ Release 2.7.0 - 2015-04-20
     YARN-3194. RM should handle NMContainerStatuses sent by NM while
     registering if NM is Reconnected node (Rohith via jlowe)
 
-    YARN-3238. Connection timeouts to nodemanagers are retried at
-    multiple levels (Jason Lowe via xgong)
-
     YARN-3247. TestQueueMappings should use CapacityScheduler explicitly.
     (Zhihai Xu via ozawa)
 
-    YARN-3239. WebAppProxy does not support a final tracking url which has
-    query fragments and params (Jian He via jlowe)
-
     YARN-3256. TestClientToAMTokens#testClientTokenRace is not running against 
     all Schedulers even when using ParameterizedSchedulerTestBase. 
     (Anubhav Dhoot via devaraj)
@@ -1742,27 +1575,12 @@ Release 2.7.0 - 2015-04-20
     YARN-3265. Fixed a deadlock in CapacityScheduler by always passing a queue's
     available resource-limit from the parent queue. (Wangda Tan via vinodkv)
 
-    YARN-3222. Fixed RMNode to send scheduler events in sequential order when a
-    node reconnects. (Rohith Sharma K S via jianhe)
-
     YARN-3131. YarnClientImpl should check FAILED and KILLED state in
     submitApplication (Chang Li via jlowe)
     
-    YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending 
-    jobs. (Siqi Li via kasha)
-
-    YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving 
-    events for old client. (Zhihai Xu via kasha)
-
-    YARN-3227. Timeline renew delegation token fails when RM user's TGT is expired
-    (Zhijie Shen via xgong)
-
     YARN-3275. CapacityScheduler: Preemption happening on non-preemptable
     queues (Eric Payne via jlowe)
 
-    YARN-3287. Made TimelineClient put methods do as the correct login context.
-    (Daryn Sharp and Jonathan Eagles via zjshen)
-
     YARN-3300. Outstanding_resource_requests table should not be shown in AHS.
     (Xuan Gong via jianhe)
 
@@ -1777,9 +1595,6 @@ Release 2.7.0 - 2015-04-20
     YARN-3154. Added additional APIs in LogAggregationContext to avoid aggregating
     running logs of application when rolling is enabled. (Xuan Gong via vinodkv)
 
-    YARN-3267. Timelineserver applies the ACL rules after applying the limit on
-    the number of records (Chang Li via jeagles)
-
     YARN-3171. Sort by Application id, AppAttempt and ContainerID doesn't work
     in ATS / RM web ui. (Naganarasimha G R via xgong)
 
@@ -1792,18 +1607,12 @@ Release 2.7.0 - 2015-04-20
     YARN-3379. Fixed missing data in localityTable and ResourceRequests table
     in RM WebUI. (Xuan Gong via jianhe)
 
-    YARN-3369. Missing NullPointer check in AppSchedulingInfo causes RM to die.
-    (Brahma Reddy Battula via wangda)
-
     YARN-3384. TestLogAggregationService.verifyContainerLogs fails after
     YARN-2777. (Naganarasimha G R via ozawa)
 
     YARN-3336. FileSystem memory leak in DelegationTokenRenewer.
     (Zhihai Xu via cnauroth)
 
-    YARN-3393. Getting application(s) goes wrong when app finishes before
-    starting the attempt. (Zhijie Shen via xgong)
-
     YARN-2213. Change proxy-user cookie log in AmIpFilter to DEBUG.
     (Varun Saxena via xgong)
 
@@ -1817,9 +1626,6 @@ Release 2.7.0 - 2015-04-20
     YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers 
     and node-label column (Jason Lowe via wangda)
 
-    YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token
-    renewal of applications part of a bigger workflow. (Daryn Sharp via vinodkv)
-
 Release 2.6.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -1832,18 +1638,211 @@ Release 2.6.2 - UNRELEASED
 
   BUG FIXES
 
-Release 2.6.1 - UNRELEASED
+Release 2.6.1 - 2015-09-09
 
   INCOMPATIBLE CHANGES
 
   NEW FEATURES
 
+    YARN-3249. Add a 'kill application' button to Resource Manager's Web UI.
+    (Ryu Kobayashi via ozawa)
+
   IMPROVEMENTS
 
+    YARN-3230. Clarify application states on the web UI. (Jian He via wangda)
+
+    YARN-1809. Synchronize RM and TimeLineServer Web-UIs. (Zhijie Shen and
+    Xuan Gong via jianhe)
+
+    YARN-3092. Created a common ResourceUsage class to track labeled resource
+    usages in Capacity Scheduler. (Wangda Tan via jianhe)
+
+    YARN-3098. Created common QueueCapacities class in Capacity Scheduler to
+    track capacities-by-labels of queues. (Wangda Tan via jianhe)
+
+    YARN-2301. Improved yarn container command. (Naganarasimha G R via jianhe)
+
+    YARN-3978. Configurably turn off the saving of container info in Generic AHS
+    (Eric Payne via jeagles)
+
   OPTIMIZATIONS
 
   BUG FIXES
 
+    YARN-2856. Fixed RMAppImpl to handle ATTEMPT_KILLED event at ACCEPTED state
+    on app recovery. (Rohith Sharmaks via jianhe)
+
+    YARN-2816. NM fail to start with NPE during container recovery (Zhihai Xu
+    via jlowe)
+
+    YARN-2414. RM web UI: app page will crash if app is failed before any
+    attempt has been created (Wangda Tan via jlowe)
+
+    YARN-2865. Fixed RM to always create a new RMContext when it transitions from
+    StandBy to Active. (Rohith Sharmaks via jianhe)
+
+    YARN-2906. CapacitySchedulerPage shows HTML tags for a queue's Active Users.
+    (Jason Lowe via jianhe)
+
+    YARN-2905. AggregatedLogsBlock page can infinitely loop if the aggregated
+    log file is corrupted (Varun Saxena via jlowe)
+
+    YARN-2890. MiniYARNCluster should start the timeline server based on the
+    configuration. (Mit Desai via zjshen)
+
+    YARN-2894. Fixed a bug regarding application view acl when RM fails over.
+    (Rohith Sharmaks via jianhe)
+
+    YARN-2874. Dead lock in "DelegationTokenRenewer" which blocks RM to execute
+    any further apps. (Naganarasimha G R via kasha)
+
+    YARN-2910. FSLeafQueue can throw ConcurrentModificationException. 
+    (Wilfred Spiegelenburg via kasha)
+
+    YARN-2917. Fixed potential deadlock when system.exit is called in AsyncDispatcher
+    (Rohith Sharmaks via jianhe)
+
+    YARN-2964. RM prematurely cancels tokens for jobs that submit jobs (oozie)
+    (Jian He via jlowe)
+
+    YARN-1984. LeveldbTimelineStore does not handle db exceptions properly
+    (Varun Saxena via jlowe)
+
+    YARN-2952. Fixed incorrect version check in StateStore. (Rohith Sharmaks
+    via jianhe)
+
+    YARN-2340. Fixed NPE when queue is stopped during RM restart.
+    (Rohith Sharmaks via jianhe)
+
+    YARN-2992. ZKRMStateStore crashes due to session expiry. (Karthik Kambatla
+    via jianhe)
+
+    YARN-2922. ConcurrentModificationException in CapacityScheduler's LeafQueue.
+    (Rohith Sharmaks via ozawa)
+
+    YARN-2997. Fixed NodeStatusUpdater to not send already-sent completed
+    container statuses on heartbeat. (Chengbing Liu via jianhe)
+
+    YARN-3011. Possible IllegalArgumentException in ResourceLocalizationService
+    might lead NM to crash. (Varun Saxena via jianhe)
+
+    YARN-3103. AMRMClientImpl does not update AMRM token properly. (Jason Lowe
+    via jianhe)
+
+    YARN-3094. Reset timer for liveness monitors after RM recovery. (Jun Gong
+    via jianhe)
+
+    YARN-2246. Made the proxy tracking URL always be
+    http(s)://proxy addr:port/proxy/<appId> to avoid duplicate sections. (Devaraj
+    K via zjshen)
+
+    YARN-3207. Secondary filter matches entities which do not have the key being
+    filtered for. (Zhijie Shen via xgong)
+
+    YARN-3238. Connection timeouts to nodemanagers are retried at
+    multiple levels (Jason Lowe via xgong)
+
+    YARN-3239. WebAppProxy does not support a final tracking url which has
+    query fragments and params (Jian He via jlowe)
+
+    YARN-3222. Fixed RMNode to send scheduler events in sequential order when a
+    node reconnects. (Rohith Sharma K S via jianhe)
+
+    YARN-3231. FairScheduler: Changing queueMaxRunningApps interferes with pending 
+    jobs. (Siqi Li via kasha)
+
+    YARN-3242. Asynchrony in ZK-close can lead to ZKRMStateStore watcher receiving 
+    events for old client. (Zhihai Xu via kasha)
+
+    YARN-3227. Timeline renew delegation token fails when RM user's TGT is expired
+    (Zhijie Shen via xgong)
+
+    YARN-3287. Made TimelineClient put methods do as the correct login context.
+    (Daryn Sharp and Jonathan Eagles via zjshen)
+
+    YARN-3267. Timelineserver applies the ACL rules after applying the limit on
+    the number of records (Chang Li via jeagles)
+
+    YARN-3369. Missing NullPointer check in AppSchedulingInfo causes RM to die.
+    (Brahma Reddy Battula via wangda)
+
+    YARN-3393. Getting application(s) goes wrong when app finishes before
+    starting the attempt. (Zhijie Shen via xgong)
+
+    YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token
+    renewal of applications part of a bigger workflow. (Daryn Sharp via vinodkv)
+
+    YARN-3493. RM fails to come up with error "Failed to load/recover state" 
+    when mem settings are changed. (Jian He via wangda)
+
+    YARN-3487. CapacityScheduler scheduler lock obtained unnecessarily when 
+    calling getQueue (Jason Lowe via wangda)
+
+    YARN-3024. LocalizerRunner should give DIE action when all resources are
+    localized. (Chengbing Liu via xgong)
+
+    YARN-3464. Race condition in LocalizerRunner kills localizer before 
+    localizing all resources. (Zhihai Xu via kasha)
+
+    YARN-3641. NodeManager: stopRecoveryStore() shouldn't be skipped when
+    exceptions happen in stopping NM's sub-services. (Junping Du via jlowe)
+
+    YARN-3526. ApplicationMaster tracking URL is incorrectly redirected
+    on a QJM cluster. (Weiwei Yang via xgong)
+
+    YARN-2766. Made ApplicationHistoryManager return a sorted list of apps,
+    attempts and containers. (Robert Kanter via zjshen)
+
+    YARN-3700. Made generic history service load a number of latest applications
+    according to the parameter or the configuration. (Xuan Gong via zjshen)
+
+    YARN-2900. Application (Attempt and Container) Not Found in AHS results
+    in Internal Server Error (500). (Zhijie Shen and Mit Desai via xgong)
+
+    YARN-3725. App submission via REST API is broken in secure mode due to
+    Timeline DT service address is empty. (Zhijie Shen via wangda)
+
+    YARN-3585. NodeManager cannot exit on SHUTDOWN event triggered and NM
+    recovery is enabled (Rohith Sharmaks via jlowe)
+
+    YARN-3832. Resource Localization fails on a cluster due to existing cache
+    directories (Brahma Reddy Battula via jlowe)
+
+    YARN-3850. NM fails to read files from full disks which can lead to
+    container logs being lost and other issues (Varun Saxena via jlowe)
+
+    YARN-3990. AsyncDispatcher may be overloaded with RMAppNodeUpdateEvent when
+    Node is connected/disconnected (Bibin A Chundatt via jlowe)
+
+    YARN-2637. Fixed max-am-resource-percent calculation in CapacityScheduler
+    when activating applications. (Craig Welch via jianhe)
+
+    YARN-3733. Fix DominantRC#compare() does not work as expected if
+    cluster resource is empty. (Rohith Sharmaks via wangda)
+
+    YARN-2920. Changed CapacityScheduler to kill containers on nodes where
+    node labels are changed. (Wangda Tan via jianhe)
+
+    YARN-2978. Fixed potential NPE while getting queue info. (Varun Saxena via
+    jianhe)
+
+    YARN-3099. Capacity Scheduler LeafQueue/ParentQueue should use ResourceUsage
+    to track used-resources-by-label. (Wangda Tan via jianhe)
+
+    YARN-2694. Ensure only single node label specified in ResourceRequest.
+    (Wangda Tan via jianhe)
+
+    YARN-3124. Fixed CS LeafQueue/ParentQueue to use QueueCapacities to track
+    capacities-by-label. (Wangda Tan via jianhe)
+
+    YARN-2918. RM should not fail on startup if queue's configured labels do
+    not exist in cluster-node-labels. (Wangda Tan via jianhe)
+
+    YARN-3999. RM hangs on draining events. (Jian He via xgong)
+
+    YARN-4047. ClientRMService getApplications has high scheduler lock contention.
+    (Jason Lowe via jianhe)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES


[47/50] [abbrv] hadoop git commit: HADOOP-12413. AccessControlList should avoid calling getGroupNames in isUserInList with empty groups. Contributed by Zhihai Xu.

Posted by ec...@apache.org.
HADOOP-12413. AccessControlList should avoid calling getGroupNames in isUserInList with empty groups. Contributed by Zhihai Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2017d9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2017d9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2017d9b

Branch: refs/heads/HADOOP-11890
Commit: b2017d9b032af20044fdf60ddbd1575a554ccb79
Parents: 083b44c
Author: cnauroth <cn...@apache.org>
Authored: Tue Sep 15 10:41:50 2015 -0700
Committer: cnauroth <cn...@apache.org>
Committed: Tue Sep 15 10:41:50 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt             | 3 +++
 .../apache/hadoop/security/authorize/AccessControlList.java | 2 +-
 .../hadoop/security/authorize/TestAccessControlList.java    | 9 +++++++++
 3 files changed, 13 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2017d9b/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a7ea0aa..fe09120 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -776,6 +776,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12324. Better exception reporting in SaslPlainServer.
     (Mike Yoder via stevel)
 
+    HADOOP-12413. AccessControlList should avoid calling getGroupNames in
+    isUserInList with empty groups. (Zhihai Xu via cnauroth)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2017d9b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
index f19776f..b1b474b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
@@ -230,7 +230,7 @@ public class AccessControlList implements Writable {
   public final boolean isUserInList(UserGroupInformation ugi) {
     if (allAllowed || users.contains(ugi.getShortUserName())) {
       return true;
-    } else {
+    } else if (!groups.isEmpty()) {
       for(String group: ugi.getGroupNames()) {
         if (groups.contains(group)) {
           return true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2017d9b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
index 75b944d..ddf74d1 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
@@ -37,6 +37,10 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.junit.Test;
 
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public class TestAccessControlList {
@@ -449,6 +453,11 @@ public class TestAccessControlList {
     assertUserAllowed(susan, acl);
     assertUserAllowed(barbara, acl);
     assertUserAllowed(ian, acl);
+
+    acl = new AccessControlList("");
+    UserGroupInformation spyUser = spy(drwho);
+    acl.isUserAllowed(spyUser);
+    verify(spyUser, never()).getGroupNames();
   }
 
   private void assertUserAllowed(UserGroupInformation ugi,


[14/50] [abbrv] hadoop git commit: HDFS-8974. Convert docs in xdoc format to markdown. Contributed by Masatake Iwasaki.

Posted by ec...@apache.org.
HDFS-8974. Convert docs in xdoc format to markdown. Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b5b2c58
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b5b2c58
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b5b2c58

Branch: refs/heads/HADOOP-11890
Commit: 7b5b2c5822ac722893ef5db753144f18d5056f5b
Parents: f153710
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Sep 10 16:45:27 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Thu Sep 10 16:45:27 2015 +0900

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../src/site/markdown/HdfsRollingUpgrade.md     | 293 +++++++++++++++++
 .../src/site/markdown/HdfsSnapshots.md          | 301 +++++++++++++++++
 .../src/site/xdoc/HdfsRollingUpgrade.xml        | 329 -------------------
 .../hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml | 303 -----------------
 5 files changed, 597 insertions(+), 632 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5b2c58/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index bbb6066..445c50f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -911,6 +911,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7116. Add a command to get the balancer bandwidth
     (Rakesh R via vinayakumarb)
 
+    HDFS-8974. Convert docs in xdoc format to markdown.
+    (Masatake Iwasaki via aajisaka)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5b2c58/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md
new file mode 100644
index 0000000..5415912
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md
@@ -0,0 +1,293 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+HDFS Rolling Upgrade
+====================
+
+* [Introduction](#Introduction)
+* [Upgrade](#Upgrade)
+    * [Upgrade without Downtime](#Upgrade_without_Downtime)
+        * [Upgrading Non-Federated Clusters](#Upgrading_Non-Federated_Clusters)
+        * [Upgrading Federated Clusters](#Upgrading_Federated_Clusters)
+    * [Upgrade with Downtime](#Upgrade_with_Downtime)
+        * [Upgrading Non-HA Clusters](#Upgrading_Non-HA_Clusters)
+* [Downgrade and Rollback](#Downgrade_and_Rollback)
+* [Downgrade](#Downgrade)
+* [Rollback](#Rollback)
+* [Commands and Startup Options for Rolling Upgrade](#Commands_and_Startup_Options_for_Rolling_Upgrade)
+    * [DFSAdmin Commands](#DFSAdmin_Commands)
+        * [dfsadmin -rollingUpgrade](#dfsadmin_-rollingUpgrade)
+        * [dfsadmin -getDatanodeInfo](#dfsadmin_-getDatanodeInfo)
+        * [dfsadmin -shutdownDatanode](#dfsadmin_-shutdownDatanode)
+    * [NameNode Startup Options](#NameNode_Startup_Options)
+        * [namenode -rollingUpgrade](#namenode_-rollingUpgrade)
+
+
+Introduction
+------------
+
+*HDFS rolling upgrade* allows upgrading individual HDFS daemons.
+For example, the datanodes can be upgraded independently of the namenodes.
+A namenode can be upgraded independently of the other namenodes.
+The namenodes can be upgraded independently of datanodes and journal nodes.
+
+
+Upgrade
+-------
+
+In Hadoop v2, HDFS supports highly-available (HA) namenode services and wire compatibility.
+These two capabilities make it feasible to upgrade HDFS without incurring HDFS downtime.
+In order to upgrade an HDFS cluster without downtime, the cluster must be set up with HA.
+
+Any new feature enabled in the new software release may not work with the old software release after the upgrade.
+In such cases, the upgrade should be done using the following steps.
+
+1. Disable new feature.
+2. Upgrade the cluster.
+3. Enable the new feature.
+
+Note that rolling upgrade is supported only from Hadoop-2.4.0 onwards.
+
+
+### Upgrade without Downtime
+
+In an HA cluster, there are two or more *NameNodes (NNs)*, many *DataNodes (DNs)*,
+a few *JournalNodes (JNs)* and a few *ZooKeeperNodes (ZKNs)*.
+*JNs* are relatively stable and do not require an upgrade when upgrading HDFS in most cases.
+In the rolling upgrade procedure described here,
+only *NNs* and *DNs* are considered but *JNs* and *ZKNs* are not.
+Upgrading *JNs* and *ZKNs* may incur cluster downtime.
+
+#### Upgrading Non-Federated Clusters
+
+Suppose there are two namenodes *NN1* and *NN2*,
+where *NN1* and *NN2* are respectively in active and standby states.
+The following are the steps for upgrading an HA cluster (a command sketch follows the steps):
+
+1. Prepare Rolling Upgrade
+    1. Run "[`hdfs dfsadmin -rollingUpgrade prepare`](#dfsadmin_-rollingUpgrade)"
+       to create a fsimage for rollback.
+    1. Run "[`hdfs dfsadmin -rollingUpgrade query`](#dfsadmin_-rollingUpgrade)"
+       to check the status of the rollback image.
+       Wait and re-run the command until
+       the "`Proceed with rolling upgrade`" message is shown.
+1. Upgrade Active and Standby *NNs*
+    1. Shutdown and upgrade *NN2*.
+    1. Start *NN2* as standby with the
+       "[`-rollingUpgrade started`](#namenode_-rollingUpgrade)" option.
+    1. Failover from *NN1* to *NN2*
+       so that *NN2* becomes active and *NN1* becomes standby.
+    1. Shutdown and upgrade *NN1*.
+    1. Start *NN1* as standby with the
+       "[`-rollingUpgrade started`](#namenode_-rollingUpgrade)" option.
+1. Upgrade *DNs*
+    1. Choose a small subset of datanodes (e.g. all datanodes under a particular rack).
+        1. Run "[`hdfs dfsadmin -shutdownDatanode <DATANODE_HOST:IPC_PORT> upgrade`](#dfsadmin_-shutdownDatanode)"
+           to shutdown one of the chosen datanodes.
+        1. Run "[`hdfs dfsadmin -getDatanodeInfo <DATANODE_HOST:IPC_PORT>`](#dfsadmin_-getDatanodeInfo)"
+           to check and wait for the datanode to shutdown.
+        1. Upgrade and restart the datanode.
+        1. Perform the above steps for all the chosen datanodes in the subset in parallel.
+    1. Repeat the above steps until all datanodes in the cluster are upgraded.
+1. Finalize Rolling Upgrade
+    1. Run "[`hdfs dfsadmin -rollingUpgrade finalize`](#dfsadmin_-rollingUpgrade)"
+       to finalize the rolling upgrade.
+
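+The prepare/query handshake in Step 1 and the per-datanode cycle in Step 3
+lend themselves to scripting. The following is a minimal sketch under stated
+assumptions: the host list, the IPC port value, the polling intervals and the
+`upgrade_datanode.sh` helper are illustrative placeholders, and the loop runs
+sequentially although the procedure allows upgrading a small subset of
+datanodes in parallel.
+
+```bash
+#!/usr/bin/env bash
+# Step 1: create the rollback fsimage and wait until it is ready.
+hdfs dfsadmin -rollingUpgrade prepare
+until hdfs dfsadmin -rollingUpgrade query | grep -q 'Proceed with rolling upgrade'; do
+  sleep 10
+done
+
+# Step 2 (upgrading the active and standby NNs) is performed here, manually.
+
+# Step 3: upgrade datanodes one small subset (e.g. a rack) at a time.
+DN_HOSTS=(dn1.rack1 dn2.rack1 dn3.rack1)   # illustrative host list
+IPC_PORT=50020                             # dfs.datanode.ipc.address port
+for host in "${DN_HOSTS[@]}"; do
+  # Ask the datanode to finish in-flight clients and shut down.
+  hdfs dfsadmin -shutdownDatanode "${host}:${IPC_PORT}" upgrade
+  # -getDatanodeInfo fails once the datanode is down; poll until then.
+  while hdfs dfsadmin -getDatanodeInfo "${host}:${IPC_PORT}" >/dev/null 2>&1; do
+    sleep 5
+  done
+  ssh "${host}" /opt/ops/upgrade_datanode.sh  # site-specific upgrade and restart
+done
+
+# Step 4: finalize once every datanode runs the new software.
+hdfs dfsadmin -rollingUpgrade finalize
+```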
+
+#### Upgrading Federated Clusters
+
+In a federated cluster, there are multiple namespaces
+and a pair of active and standby *NNs* for each namespace.
+The procedure for upgrading a federated cluster is similar to upgrading a non-federated cluster
+except that Step 1 and Step 4 are performed on each namespace
+and Step 2 is performed on each pair of active and standby *NNs*, i.e.
+
+1. Prepare Rolling Upgrade for Each Namespace
+1. Upgrade Active and Standby *NN* pairs for Each Namespace
+1. Upgrade *DNs*
+1. Finalize Rolling Upgrade for Each Namespace
+
+
+### Upgrade with Downtime
+
+For non-HA clusters,
+it is impossible to upgrade HDFS without downtime since it requires restarting the namenodes.
+However, datanodes can still be upgraded in a rolling manner.
+
+
+#### Upgrading Non-HA Clusters
+
+A non-HA cluster has a *NameNode (NN)*, a *SecondaryNameNode (SNN)*,
+and many *DataNodes (DNs)*.
+The procedure for upgrading a non-HA cluster is similar to upgrading an HA cluster,
+except that Step 2 "Upgrade Active and Standby *NNs*" is replaced by the steps below (a command sketch follows them):
+
+* Upgrade *NN* and *SNN*
+    1. Shutdown *SNN*
+    1. Shutdown and upgrade *NN*.
+    1. Start *NN* with the
+       "[`-rollingUpgrade started`](#namenode_-rollingUpgrade)" option.
+    1. Upgrade and restart *SNN*.
+
+
+Downgrade and Rollback
+----------------------
+
+When the upgraded release is undesirable
+or if, in some unlikely case, the upgrade fails (due to bugs in the newer release),
+administrators may choose to downgrade HDFS back to the pre-upgrade release,
+or roll back HDFS to the pre-upgrade release and the pre-upgrade state.
+
+Note that downgrade can be done in a rolling fashion but rollback cannot.
+Rollback requires cluster downtime.
+
+Note also that downgrade and rollback are possible only after a rolling upgrade is started and
+before the upgrade is terminated.
+An upgrade can be terminated by either finalize, downgrade or rollback.
+Therefore, it may not be possible to perform rollback after finalize or downgrade,
+or to perform downgrade after finalize.
+
+
+Downgrade
+---------
+
+*Downgrade* restores the software back to the pre-upgrade release
+and preserves the user data.
+Suppose time *T* is the rolling upgrade start time and the upgrade is terminated by downgrade.
+Then, the files created before or after *T* remain available in HDFS.
+The files deleted before or after *T* remain deleted in HDFS.
+
+A newer release is downgradable to the pre-upgrade release
+only if both the namenode layout version and the datanode layout version
+are not changed between these two releases.
+
+In a HA cluster,
+when a rolling upgrade from an old software release to a new software release is in progress,
+it is possible to downgrade, in a rolling fashion, the upgraded machines back to the old software release.
+Same as before, suppose *NN1* and *NN2* are respectively in active and standby states.
+Below are the steps for rolling downgrade without downtime:
+
+1. Downgrade *DNs*
+    1. Choose a small subset of datanodes (e.g. all datanodes under a particular rack).
+        1. Run "[`hdfs dfsadmin -shutdownDatanode <DATANODE_HOST:IPC_PORT> upgrade`](#dfsadmin_-shutdownDatanode)"
+           to shut down one of the chosen datanodes.
+        1. Run "[`hdfs dfsadmin -getDatanodeInfo <DATANODE_HOST:IPC_PORT>`](#dfsadmin_-getDatanodeInfo)"
+           to check and wait for the datanode to shut down.
+        1. Downgrade and restart the datanode.
+        1. Perform the above steps for all the chosen datanodes in the subset in parallel.
+    1. Repeat the above steps until all upgraded datanodes in the cluster are downgraded.
+1. Downgrade Active and Standby *NNs*
+    1. Shutdown and downgrade *NN2*.
+    1. Start *NN2* as standby normally.
+    1. Failover from *NN1* to *NN2*
+       so that *NN2* becomes active and *NN1* becomes standby.
+    1. Shutdown and downgrade *NN1*.
+    1. Start *NN1* as standby normally.
+1. Finalize Rolling Downgrade
+    1. Run "[`hdfs dfsadmin -rollingUpgrade finalize`](#dfsadmin_-rollingUpgrade)"
+       to finalize the rolling downgrade.
+
+Note that the datanodes must be downgraded before downgrading the namenodes
+since protocols may be changed in a backward-compatible but not forward-compatible manner,
+i.e. old datanodes can talk to the new namenodes but not vice versa.
+
+
+Rollback
+--------
+
+*Rollback* restores the software back to the pre-upgrade release
+but also reverts the user data back to the pre-upgrade state.
+Suppose time *T* is the rolling upgrade start time and the upgrade is terminated by rollback.
+The files created before *T* remain available in HDFS but the files created after *T* become unavailable.
+The files deleted before *T* remain deleted in HDFS but the files deleted after *T* are restored.
+
+Rollback from a newer release to the pre-upgrade release is always supported.
+However, it cannot be done in a rolling fashion.  It requires cluster downtime.
+Suppose *NN1* and *NN2* are respectively in active and standby states.
+Below are the steps for rollback:
+
+* Rollback HDFS
+    1. Shutdown all *NNs* and *DNs*.
+    1. Restore the pre-upgrade release in all machines.
+    1. Start *NN1* as Active with the
+       "[`-rollingUpgrade rollback`](#namenode_-rollingUpgrade)" option.
+    1. Run "`-bootstrapStandby`" on *NN2* and start it normally as standby.
+    1. Start *DNs* with the "`-rollback`" option.
+
+
+Commands and Startup Options for Rolling Upgrade
+------------------------------------------------
+
+### DFSAdmin Commands
+
+#### `dfsadmin -rollingUpgrade`
+
+    hdfs dfsadmin -rollingUpgrade <query|prepare|finalize>
+
+Execute a rolling upgrade action.
+
+* Options:
+
+    | --- | --- |
+    | `query` | Query the current rolling upgrade status. |
+    | `prepare` | Prepare a new rolling upgrade. |
+    | `finalize` | Finalize the current rolling upgrade. |
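+
+These actions are also available programmatically. Below is a minimal,
+illustrative sketch, assuming the `DistributedFileSystem#rollingUpgrade`
+method that backs these dfsadmin actions; the namenode URI, class name and
+polling interval are placeholders, not part of this document:
+
+    import java.net.URI;
+
+    import org.apache.hadoop.conf.Configuration;
+    import org.apache.hadoop.fs.FileSystem;
+    import org.apache.hadoop.hdfs.DistributedFileSystem;
+    import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+    import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
+
+    public class RollingUpgradeSketch {
+      public static void main(String[] args) throws Exception {
+        Configuration conf = new Configuration();
+        // Placeholder URI; use the actual namenode address.
+        FileSystem fs = FileSystem.get(URI.create("hdfs://nn-host:8020"), conf);
+        DistributedFileSystem dfs = (DistributedFileSystem) fs;
+
+        // Equivalent to "hdfs dfsadmin -rollingUpgrade prepare".
+        dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
+
+        // Equivalent to re-running "hdfs dfsadmin -rollingUpgrade query"
+        // until the rollback fsimage has been created.
+        RollingUpgradeInfo info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
+        while (info != null && !info.createdRollbackImages()) {
+          Thread.sleep(10000L);
+          info = dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
+        }
+
+        // After all NNs and DNs are upgraded, the equivalent of
+        // "hdfs dfsadmin -rollingUpgrade finalize" would be:
+        // dfs.rollingUpgrade(RollingUpgradeAction.FINALIZE);
+      }
+    }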
+
+
+#### `dfsadmin -getDatanodeInfo`
+
+    hdfs dfsadmin -getDatanodeInfo <DATANODE_HOST:IPC_PORT>
+
+Get the information about the given datanode.
+Like the Unix `ping` command, this command can be used to
+check whether a datanode is alive.
+
+
+#### `dfsadmin -shutdownDatanode`
+
+    hdfs dfsadmin -shutdownDatanode <DATANODE_HOST:IPC_PORT> [upgrade]
+
+Submit a shutdown request for the given datanode.
+If the optional `upgrade` argument is specified,
+clients accessing the datanode will be advised to wait for it to restart
+and the fast start-up mode will be enabled.
+When the restart does not happen in time, clients will time out and ignore the datanode.
+In that case, the fast start-up mode will also be disabled.
+
+Note that the command does not wait for the datanode shutdown to complete.
+The "[`dfsadmin -getDatanodeInfo`](#dfsadmin_-getDatanodeInfo)"
+command can be used to check whether the datanode shutdown is complete.
+
+
+### NameNode Startup Options
+
+#### `namenode -rollingUpgrade`
+
+    hdfs namenode -rollingUpgrade <rollback|started>
+
+When a rolling upgrade is in progress,
+the `-rollingUpgrade` namenode startup option is used to specify
+various rolling upgrade options.
+
+* Options:
+
+    | --- | --- |
+    | `rollback` | Restores the namenode back to the pre-upgrade release but also reverts the user data back to the pre-upgrade state. |
+    | `started` | Specifies a rolling upgrade already started so that the namenode should allow image directories with different layout versions during startup. |
+
+**WARN: the `downgrade` option is obsolete.**
+It is not necessary to start the namenode with the `downgrade` option explicitly.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5b2c58/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md
new file mode 100644
index 0000000..94a37cd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsSnapshots.md
@@ -0,0 +1,301 @@
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+HDFS Snapshots
+==============
+
+* [HDFS Snapshots](#HDFS_Snapshots)
+    * [Overview](#Overview)
+        * [Snapshottable Directories](#Snapshottable_Directories)
+        * [Snapshot Paths](#Snapshot_Paths)
+    * [Upgrading to a version of HDFS with snapshots](#Upgrading_to_a_version_of_HDFS_with_snapshots)
+    * [Snapshot Operations](#Snapshot_Operations)
+        * [Administrator Operations](#Administrator_Operations)
+            * [Allow Snapshots](#Allow_Snapshots)
+            * [Disallow Snapshots](#Disallow_Snapshots)
+        * [User Operations](#User_Operations)
+            * [Create Snapshots](#Create_Snapshots)
+            * [Delete Snapshots](#Delete_Snapshots)
+            * [Rename Snapshots](#Rename_Snapshots)
+            * [Get Snapshottable Directory Listing](#Get_Snapshottable_Directory_Listing)
+            * [Get Snapshots Difference Report](#Get_Snapshots_Difference_Report)
+
+
+Overview
+--------
+
+HDFS Snapshots are read-only point-in-time copies of the file system.
+Snapshots can be taken on a subtree of the file system or the entire file system.
+Some common use cases of snapshots are data backup, protection against user errors
+and disaster recovery.
+
+The implementation of HDFS Snapshots is efficient:
+
+
+* Snapshot creation is instantaneous:
+  the cost is *O(1)* excluding the inode lookup time.
+
+* Additional memory is used only when modifications are made relative to a snapshot:
+  memory usage is *O(M)*,
+  where *M* is the number of modified files/directories.
+
+* Blocks in datanodes are not copied:
+  the snapshot files record the block list and the file size.
+  There is no data copying.
+
+* Snapshots do not adversely affect regular HDFS operations:
+  modifications are recorded in reverse chronological order
+  so that the current data can be accessed directly.
+  The snapshot data is computed by subtracting the modifications
+  from the current data.
+
+
+### Snapshottable Directories
+
+Snapshots can be taken on any directory once the directory has been set as
+*snapshottable*.
+A snapshottable directory is able to accommodate 65,536 simultaneous snapshots.
+There is no limit on the number of snapshottable directories.
+Administrators may set any directory to be snapshottable.
+If there are snapshots in a snapshottable directory,
+the directory can be neither deleted nor renamed
+before all the snapshots are deleted.
+
+Nested snapshottable directories are currently not allowed.
+In other words, a directory cannot be set to snapshottable
+if one of its ancestors/descendants is a snapshottable directory.
+
+
+### Snapshot Paths
+
+For a snapshottable directory,
+the path component *".snapshot"* is used for accessing its snapshots.
+Suppose `/foo` is a snapshottable directory,
+`/foo/bar` is a file/directory in `/foo`,
+and `/foo` has a snapshot `s0`.
+Then, the path `/foo/.snapshot/s0/bar`
+refers to the snapshot copy of `/foo/bar`.
+The usual API and CLI can work with the ".snapshot" paths.
+The following are some examples.
+
+* Listing all the snapshots under a snapshottable directory:
+
+        hdfs dfs -ls /foo/.snapshot
+
+* Listing the files in snapshot `s0`:
+
+        hdfs dfs -ls /foo/.snapshot/s0
+
+* Copying a file from snapshot `s0`:
+
+        hdfs dfs -cp -ptopax /foo/.snapshot/s0/bar /tmp
+
+    Note that this example uses the preserve option to preserve
+    timestamps, ownership, permission, ACLs and XAttrs.
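+
+Since ".snapshot" paths behave like ordinary paths, the Java API can read
+through them as well. A minimal sketch, assuming the default file system is
+the cluster holding `/foo` (the class name is illustrative):
+
+    import org.apache.hadoop.conf.Configuration;
+    import org.apache.hadoop.fs.FSDataInputStream;
+    import org.apache.hadoop.fs.FileSystem;
+    import org.apache.hadoop.fs.Path;
+
+    public class SnapshotReadSketch {
+      public static void main(String[] args) throws Exception {
+        FileSystem fs = FileSystem.get(new Configuration());
+        // Open the snapshot copy of /foo/bar through the usual API.
+        try (FSDataInputStream in = fs.open(new Path("/foo/.snapshot/s0/bar"))) {
+          System.out.println("first byte: " + in.read());
+        }
+      }
+    }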
+
+
+Upgrading to a version of HDFS with snapshots
+---------------------------------------------
+
+The HDFS snapshot feature introduces a new reserved path name used to
+interact with snapshots: `.snapshot`. When upgrading from an
+older version of HDFS, existing paths named `.snapshot` need
+to first be renamed or deleted to avoid conflicting with the reserved path.
+See the upgrade section in
+[the HDFS user guide](HdfsUserGuide.html#Upgrade_and_Rollback)
+for more information.
+
+
+Snapshot Operations
+-------------------
+
+
+### Administrator Operations
+
+The operations described in this section require superuser privilege.
+
+
+#### Allow Snapshots
+
+
+Allow snapshots of a directory to be created.
+If the operation completes successfully, the directory becomes snapshottable.
+
+* Command:
+
+        hdfs dfsadmin -allowSnapshot <path>
+
+* Arguments:
+
+    | --- | --- |
+    | path | The path of the snapshottable directory. |
+
+See also the corresponding Java API
+`void allowSnapshot(Path path)` in `HdfsAdmin`.
+
+
+#### Disallow Snapshots
+
+Disallow snapshots of a directory from being created.
+All snapshots of the directory must be deleted before disallowing snapshots.
+
+* Command:
+
+        hdfs dfsadmin -disallowSnapshot <path>
+
+* Arguments:
+
+    | --- | --- |
+    | path | The path of the snapshottable directory. |
+
+See also the corresponding Java API
+`void disallowSnapshot(Path path)` in `HdfsAdmin`.
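+
+A minimal sketch of both administrator operations through `HdfsAdmin`;
+the namenode URI and class name are placeholders, and the code must run
+as the superuser:
+
+    import java.net.URI;
+
+    import org.apache.hadoop.conf.Configuration;
+    import org.apache.hadoop.fs.Path;
+    import org.apache.hadoop.hdfs.client.HdfsAdmin;
+
+    public class SnapshotAdminSketch {
+      public static void main(String[] args) throws Exception {
+        Configuration conf = new Configuration();
+        // Placeholder URI; use the actual namenode address.
+        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://nn-host:8020"), conf);
+        Path dir = new Path("/foo");
+
+        admin.allowSnapshot(dir);     // /foo becomes snapshottable
+        // ... any snapshots taken here must be deleted before the next call:
+        admin.disallowSnapshot(dir);  // fails if /foo still has snapshots
+      }
+    }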
+
+
+### User Operations
+
+This section describes user operations.
+Note that the HDFS superuser can perform all of these operations
+without satisfying the permission requirements stated for the individual operations.
+
+
+#### Create Snapshots
+
+Create a snapshot of a snapshottable directory.
+This operation requires owner privilege of the snapshottable directory.
+
+* Command:
+
+        hdfs dfs -createSnapshot <path> [<snapshotName>]
+
+* Arguments:
+
+    | --- | --- |
+    | path | The path of the snapshottable directory. |
+    | snapshotName | The snapshot name, which is an optional argument. When it is omitted, a default name is generated using a timestamp with the format `"'s'yyyyMMdd-HHmmss.SSS"`, e.g. `"s20130412-151029.033"`. |
+
+See also the corresponding Java API
+`Path createSnapshot(Path path)` and
+`Path createSnapshot(Path path, String snapshotName)`
+in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).
+The snapshot path is returned by these methods.
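+
+For illustration, a minimal sketch using the `FileSystem` API above
+(the class name is illustrative and the default file system is assumed
+to be the cluster holding `/foo`):
+
+    import org.apache.hadoop.conf.Configuration;
+    import org.apache.hadoop.fs.FileSystem;
+    import org.apache.hadoop.fs.Path;
+
+    public class CreateSnapshotSketch {
+      public static void main(String[] args) throws Exception {
+        FileSystem fs = FileSystem.get(new Configuration());
+        // With an explicit name; omitting the name generates the
+        // timestamp-based default described in the table above.
+        Path snapshotPath = fs.createSnapshot(new Path("/foo"), "s0");
+        System.out.println("created " + snapshotPath); // e.g. /foo/.snapshot/s0
+      }
+    }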
+
+
+#### Delete Snapshots
+
+Delete a snapshot from a snapshottable directory.
+This operation requires owner privilege of the snapshottable directory.
+
+* Command:
+
+        hdfs dfs -deleteSnapshot <path> <snapshotName>
+
+* Arguments:
+
+    | --- | --- |
+    | path | The path of the snapshottable directory. |
+    | snapshotName | The snapshot name. |
+
+See also the corresponding Java API
+`void deleteSnapshot(Path path, String snapshotName)`
+in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).
+
+
+#### Rename Snapshots
+
+Rename a snapshot.
+This operation requires owner privilege of the snapshottable directory.
+
+* Command:
+
+        hdfs dfs -renameSnapshot <path> <oldName> <newName>
+
+* Arguments:
+
+    | --- | --- |
+    | path | The path of the snapshottable directory. |
+    | oldName | The old snapshot name. |
+    | newName | The new snapshot name. |
+
+See also the corresponding Java API
+`void renameSnapshot(Path path, String oldName, String newName)`
+in [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).
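+
+A minimal sketch combining the two previous operations, renaming a snapshot
+and then deleting it (the class name is illustrative and the default file
+system is assumed to be the cluster holding `/foo`):
+
+    import org.apache.hadoop.conf.Configuration;
+    import org.apache.hadoop.fs.FileSystem;
+    import org.apache.hadoop.fs.Path;
+
+    public class SnapshotLifecycleSketch {
+      public static void main(String[] args) throws Exception {
+        FileSystem fs = FileSystem.get(new Configuration());
+        Path dir = new Path("/foo");
+
+        fs.renameSnapshot(dir, "s0", "s0-archived"); // rename snapshot s0
+        fs.deleteSnapshot(dir, "s0-archived");       // then delete it
+      }
+    }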
+
+
+#### Get Snapshottable Directory Listing
+
+Get all the snapshottable directories where the current user has permission to take snapshots.
+
+* Command:
+
+        hdfs lsSnapshottableDir
+
+* Arguments: none
+
+See also the corresponding Java API
+`SnapshottableDirectoryStatus[] getSnapshottableDirListing()`
+in `DistributedFileSystem`.
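+
+A minimal sketch of the listing call, assuming the
+`getSnapshottableDirListing()` method on `DistributedFileSystem`
+(the class name is illustrative; the default file system is assumed
+to be the HDFS cluster):
+
+    import org.apache.hadoop.conf.Configuration;
+    import org.apache.hadoop.fs.FileSystem;
+    import org.apache.hadoop.hdfs.DistributedFileSystem;
+    import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+
+    public class ListSnapshottableSketch {
+      public static void main(String[] args) throws Exception {
+        DistributedFileSystem dfs =
+            (DistributedFileSystem) FileSystem.get(new Configuration());
+        SnapshottableDirectoryStatus[] dirs = dfs.getSnapshottableDirListing();
+        if (dirs != null) {                  // null when there are none
+          for (SnapshottableDirectoryStatus s : dirs) {
+            System.out.println(s.getFullPath());
+          }
+        }
+      }
+    }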
+
+
+#### Get Snapshots Difference Report
+
+Get the differences between two snapshots.
+This operation requires read access privilege for all files/directories in both snapshots.
+
+* Command:
+
+        hdfs snapshotDiff <path> <fromSnapshot> <toSnapshot>
+
+* Arguments:
+
+    | --- | --- |
+    | path | The path of the snapshottable directory. |
+    | fromSnapshot | The name of the starting snapshot. |
+    | toSnapshot | The name of the ending snapshot. |
+
+    Note that snapshotDiff can be used to get the difference report between two snapshots, or between
+    a snapshot and the current status of a directory. Users can use "." to represent the current status.
+
+* Results:
+
+    | --- | --- |
+    | \+  | The file/directory has been created. |
+    | \-  | The file/directory has been deleted. |
+    | M   | The file/directory has been modified. |
+    | R   | The file/directory has been renamed. |
+
+A *RENAME* entry indicates a file/directory has been renamed but
+is still under the same snapshottable directory. A file/directory is
+reported as deleted if it was renamed to outside of the snapshottable directory.
+A file/directory renamed from outside of the snapshottable directory is
+reported as newly created.
+
+The entries in the snapshot difference report do not necessarily follow the
+order of the actual operations. For example, if we rename the directory
+*"/foo"* to *"/foo2"*, and then append new data to the file *"/foo2/bar"*,
+the difference report will be:
+
+    R. /foo -> /foo2
+    M. /foo/bar
+
+That is, the changes on the files/directories under a renamed directory are
+reported using the original path before the rename (*"/foo/bar"* in
+the above example).
+
+See also the corresponding Java API
+`SnapshotDiffReport getSnapshotDiffReport(Path path, String fromSnapshot, String toSnapshot)`
+in `DistributedFileSystem`.
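+
+A minimal sketch of the difference-report call (the class name is
+illustrative; the default file system is assumed to be the HDFS cluster):
+
+    import org.apache.hadoop.conf.Configuration;
+    import org.apache.hadoop.fs.FileSystem;
+    import org.apache.hadoop.fs.Path;
+    import org.apache.hadoop.hdfs.DistributedFileSystem;
+    import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+
+    public class SnapshotDiffSketch {
+      public static void main(String[] args) throws Exception {
+        DistributedFileSystem dfs =
+            (DistributedFileSystem) FileSystem.get(new Configuration());
+        // "." may be used in place of a snapshot name for the current state.
+        SnapshotDiffReport report =
+            dfs.getSnapshotDiffReport(new Path("/foo"), "s0", "s1");
+        System.out.println(report); // prints +/-/M/R entries as in the table above
+      }
+    }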

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5b2c58/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
deleted file mode 100644
index f0b0ccf..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsRollingUpgrade.xml
+++ /dev/null
@@ -1,329 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<document xmlns="http://maven.apache.org/XDOC/2.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
-
-  <properties>
-    <title>HDFS Rolling Upgrade</title>
-  </properties>
-
-  <body>
-
-  <h1>HDFS Rolling Upgrade</h1>
-  <macro name="toc">
-    <param name="section" value="0"/>
-    <param name="fromDepth" value="0"/>
-    <param name="toDepth" value="4"/>
-  </macro>
-
-  <section name="Introduction" id="Introduction">
-  <p>
-    <em>HDFS rolling upgrade</em> allows upgrading individual HDFS daemons.
-    For examples, the datanodes can be upgraded independent of the namenodes.
-    A namenode can be upgraded independent of the other namenodes.
-    The namenodes can be upgraded independent of datanods and journal nodes.
-  </p>
-  </section>
-
-  <section name="Upgrade" id="Upgrade">
-  <p>
-    In Hadoop v2, HDFS supports highly-available (HA) namenode services and wire compatibility.
-    These two capabilities make it feasible to upgrade HDFS without incurring HDFS downtime.
-    In order to upgrade a HDFS cluster without downtime, the cluster must be setup with HA.
-  </p>
-  <p>
-    If there is any new feature which is enabled in new software release, may not work with old software release after upgrade.
-    In such cases upgrade should be done by following steps.
-  </p>
-  <ol>
-    <li>Disable new feature.</li>
-    <li>Upgrade the cluster.</li>
-    <li>Enable the new feature.</li>
-  </ol>
-  <p>
-    Note that rolling upgrade is supported only from Hadoop-2.4.0 onwards.
-  </p>
-  <subsection name="Upgrade without Downtime" id="UpgradeWithoutDowntime">
-  <p>
-    In a HA cluster, there are two or more <em>NameNodes (NNs)</em>, many <em>DataNodes (DNs)</em>,
-    a few <em>JournalNodes (JNs)</em> and a few <em>ZooKeeperNodes (ZKNs)</em>.
-    <em>JNs</em> is relatively stable and does not require upgrade when upgrading HDFS in most of the cases.
-    In the rolling upgrade procedure described here,
-    only <em>NNs</em> and <em>DNs</em> are considered but <em>JNs</em> and <em>ZKNs</em> are not.
-    Upgrading <em>JNs</em> and <em>ZKNs</em> may incur cluster downtime.
-  </p>
-
-  <h4>Upgrading Non-Federated Clusters</h4>
-  <p>
-    Suppose there are two namenodes <em>NN1</em> and <em>NN2</em>,
-    where <em>NN1</em> and <em>NN2</em> are respectively in active and standby states.
-    The following are the steps for upgrading a HA cluster:
-  </p>
-  <ol>
-    <li>Prepare Rolling Upgrade<ol>
-      <li>Run "<code><a href="#dfsadmin_-rollingUpgrade">hdfs dfsadmin -rollingUpgrade prepare</a></code>"
-        to create a fsimage for rollback.
-      </li>
-      <li>Run "<code><a href="#dfsadmin_-rollingUpgrade">hdfs dfsadmin -rollingUpgrade query</a></code>"
-        to check the status of the rollback image.
-        Wait and re-run the command until
-        the "<tt>Proceed with rolling upgrade</tt>" message is shown.
-      </li>
-    </ol></li>
-    <li>Upgrade Active and Standby <em>NNs</em><ol>
-      <li>Shutdown and upgrade <em>NN2</em>.</li>
-      <li>Start <em>NN2</em> as standby with the
-        "<a href="#namenode_-rollingUpgrade"><code>-rollingUpgrade started</code></a>" option.</li>
-      <li>Failover from <em>NN1</em> to <em>NN2</em>
-               so that <em>NN2</em> becomes active and <em>NN1</em> becomes standby.</li>
-      <li>Shutdown and upgrade <em>NN1</em>.</li>
-      <li>Start <em>NN1</em> as standby with the
-        "<a href="#namenode_-rollingUpgrade"><code>-rollingUpgrade started</code></a>" option.</li>
-    </ol></li>
-    <li>Upgrade <em>DNs</em><ol>
-      <li>Choose a small subset of datanodes (e.g. all datanodes under a particular rack).</li>
-      <ol>
-        <li>Run "<code><a href="#dfsadmin_-shutdownDatanode">hdfs dfsadmin -shutdownDatanode &lt;DATANODE_HOST:IPC_PORT&gt; upgrade</a></code>"
-          to shutdown one of the chosen datanodes.</li>
-        <li>Run "<code><a href="#dfsadmin_-getDatanodeInfo">hdfs dfsadmin -getDatanodeInfo &lt;DATANODE_HOST:IPC_PORT&gt;</a></code>"
-          to check and wait for the datanode to shutdown.</li>
-        <li>Upgrade and restart the datanode.</li>
-        <li>Perform the above steps for all the chosen datanodes in the subset in parallel.</li>
-      </ol>
-      <li>Repeat the above steps until all datanodes in the cluster are upgraded.</li>
-    </ol></li>
-    <li>Finalize Rolling Upgrade<ul>
-      <li>Run "<code><a href="#dfsadmin_-rollingUpgrade">hdfs dfsadmin -rollingUpgrade finalize</a></code>"
-       to finalize the rolling upgrade.</li>
-    </ul></li>
-  </ol>
-
-  <h4>Upgrading Federated Clusters</h4>
-  <p>
-    In a federated cluster, there are multiple namespaces
-    and a pair of active and standby <em>NNs</em> for each namespace.
-    The procedure for upgrading a federated cluster is similar to upgrading a non-federated cluster
-    except that Step 1 and Step 4 are performed on each namespace
-    and Step 2 is performed on each pair of active and standby <em>NNs</em>, i.e.
-  </p>
-  <ol>
-    <li>Prepare Rolling Upgrade for Each Namespace</li>
-    <li>Upgrade Active and Standby <em>NN</em> pairs for Each Namespace</li>
-    <li>Upgrade <em>DNs</em></li>
-    <li>Finalize Rolling Upgrade for Each Namespace</li>
-  </ol>
-
-  </subsection>
-
-  <subsection name="Upgrade with Downtime" id="UpgradeWithDowntime">
-  <p>
-    For non-HA clusters,
-    it is impossible to upgrade HDFS without downtime since it requires restarting the namenodes.
-    However, datanodes can still be upgraded in a rolling manner.
-  </p>
-
-  <h4>Upgrading Non-HA Clusters</h4>
-  <p>
-    In a non-HA cluster, there are a <em>NameNode (NN)</em>, a <em>SecondaryNameNode (SNN)</em>
-    and many <em>DataNodes (DNs)</em>.
-    The procedure for upgrading a non-HA cluster is similar to upgrading a HA cluster
-    except that Step 2 "Upgrade Active and Standby <em>NNs</em>" is changed to below:
-  </p>
-  <ul>
-    <li>Upgrade <em>NN</em> and <em>SNN</em><ol>
-      <li>Shutdown <em>SNN</em></li>
-      <li>Shutdown and upgrade <em>NN</em>.</li>
-      <li>Start <em>NN</em> with the
-        "<a href="#namenode_-rollingUpgrade"><code>-rollingUpgrade started</code></a>" option.</li>
-      <li>Upgrade and restart <em>SNN</em></li>
-    </ol></li>
-  </ul>
-  </subsection>
-  </section>
-
-  <section name="Downgrade and Rollback" id="DowngradeAndRollback">
-  <p>
-    When the upgraded release is undesirable
-    or, in some unlikely case, the upgrade fails (due to bugs in the newer release),
-    administrators may choose to downgrade HDFS back to the pre-upgrade release,
-    or rollback HDFS to the pre-upgrade release and the pre-upgrade state.
-  </p>
-  <p>
-    Note that downgrade can be done in a rolling fashion but rollback cannot.
-    Rollback requires cluster downtime.
-  </p>
-  <p>
-    Note also that downgrade and rollback are possible only after a rolling upgrade is started and
-    before the upgrade is terminated.
-    An upgrade can be terminated by either finalize, downgrade or rollback.
-    Therefore, it may not be possible to perform rollback after finalize or downgrade,
-    or to perform downgrade after finalize.
-  </p>
-  </section>
-
-  <section name="Downgrade" id="Downgrade">
-  <p>
-    <em>Downgrade</em> restores the software back to the pre-upgrade release
-    and preserves the user data.
-    Suppose time <em>T</em> is the rolling upgrade start time and the upgrade is terminated by downgrade.
-    Then, the files created before or after <em>T</em> remain available in HDFS.
-    The files deleted before or after <em>T</em> remain deleted in HDFS.
-  </p>
-  <p>
-    A newer release is downgradable to the pre-upgrade release
-    only if both the namenode layout version and the datenode layout version
-    are not changed between these two releases.
-  </p>
-  <p>
-    In a HA cluster,
-    when a rolling upgrade from an old software release to a new software release is in progress,
-    it is possible to downgrade, in a rolling fashion, the upgraded machines back to the old software release.
-    Same as before, suppose <em>NN1</em> and <em>NN2</em> are respectively in active and standby states.
-    Below are the steps for rolling downgrade without downtime:
-  </p>
-  <ol>
-    <li>Downgrade <em>DNs</em><ol>
-      <li>Choose a small subset of datanodes (e.g. all datanodes under a particular rack).</li>
-      <ol>
-        <li>Run "<code><a href="#dfsadmin_-shutdownDatanode">hdfs dfsadmin -shutdownDatanode &lt;DATANODE_HOST:IPC_PORT&gt; upgrade</a></code>"
-          to shutdown one of the chosen datanodes.</li>
-        <li>Run "<code><a href="#dfsadmin_-getDatanodeInfo">hdfs dfsadmin -getDatanodeInfo &lt;DATANODE_HOST:IPC_PORT&gt;</a></code>"
-          to check and wait for the datanode to shutdown.</li>
-        <li>Downgrade and restart the datanode.</li>
-        <li>Perform the above steps for all the chosen datanodes in the subset in parallel.</li>
-      </ol>
-      <li>Repeat the above steps until all upgraded datanodes in the cluster are downgraded.</li>
-    </ol></li>
-    <li>Downgrade Active and Standby <em>NNs</em><ol>
-      <li>Shutdown and downgrade <em>NN2</em>.</li>
-      <li>Start <em>NN2</em> as standby normally.
-      </li>
-      <li>Failover from <em>NN1</em> to <em>NN2</em>
-        so that <em>NN2</em> becomes active and <em>NN1</em> becomes standby.</li>
-      <li>Shutdown and upgrade <em>NN1</em>.</li>
-      <li>Start <em>NN1</em> as standby normally.
-      </li>
-    </ol></li>
-    <li>Finalize Rolling Downgrade<ul>
-      <li>Run "<code><a href="#dfsadmin_-rollingUpgrade">hdfs dfsadmin -rollingUpgrade finalize</a></code>"
-       to finalize the rolling downgrade.</li>
-    </ul></li>
-  </ol>
-  <p>
-    Note that the datanodes must be downgraded before downgrading the namenodes
-    since protocols may be changed in a backward compatible manner but not forward compatible,
-    i.e. old datanodes can talk to the new namenodes but not vice versa.
-  </p>
-  </section>
-
-  <section name="Rollback" id="Rollback">
-  <p>
-    <em>Rollback</em> restores the software back to the pre-upgrade release
-    but also reverts the user data back to the pre-upgrade state.
-    Suppose time <em>T</em> is the rolling upgrade start time and the upgrade is terminated by rollback.
-    The files created before <em>T</em> remain available in HDFS but the files created after <em>T</em> become unavailable.
-    The files deleted before <em>T</em> remain deleted in HDFS but the files deleted after <em>T</em> are restored.
-  </p>
-  <p>
-    Rollback from a newer release to the pre-upgrade release is always supported.
-    However, it cannot be done in a rolling fashion.  It requires cluster downtime.
-    Suppose <em>NN1</em> and <em>NN2</em> are respectively in active and standby states.
-    Below are the steps for rollback:
-  </p>
-  <ul>
-    <li>Rollback HDFS<ol>
-      <li>Shutdown all <em>NNs</em> and <em>DNs</em>.</li>
-      <li>Restore the pre-upgrade release in all machines.</li>
-      <li>Start <em>NN1</em> as Active with the
-        "<a href="#namenode_-rollingUpgrade"><code>-rollingUpgrade rollback</code></a>" option.</li>
-      <li>Run `-bootstrapStandby' on NN2 and start it normally as standby.</li>
-      <li>Start <em>DNs</em> with the "<code>-rollback</code>" option.</li>
-    </ol></li>
-  </ul>
-
-  </section>
-  
-  <section name="Commands and Startup Options for Rolling Upgrade" id="dfsadminCommands">
-    
-  <subsection name="DFSAdmin Commands" id="dfsadminCommands">
-  <h4><code>dfsadmin -rollingUpgrade</code></h4>
-  <source>hdfs dfsadmin -rollingUpgrade &lt;query|prepare|finalize&gt;</source>
-  <p>
-    Execute a rolling upgrade action.
-    <ul><li>Options:<table>
-      <tr><td><code>query</code></td><td>Query the current rolling upgrade status.</td></tr>
-      <tr><td><code>prepare</code></td><td>Prepare a new rolling upgrade.</td></tr>
-      <tr><td><code>finalize</code></td><td>Finalize the current rolling upgrade.</td></tr>
-    </table></li></ul>
-  </p>
-
-  <h4><code>dfsadmin -getDatanodeInfo</code></h4>
-  <source>hdfs dfsadmin -getDatanodeInfo &lt;DATANODE_HOST:IPC_PORT&gt;</source>
-  <p>
-    Get the information about the given datanode.
-    This command can be used for checking if a datanode is alive
-    like the Unix <code>ping</code> command.
-  </p>
-
-  <h4><code>dfsadmin -shutdownDatanode</code></h4>
-  <source>hdfs dfsadmin -shutdownDatanode &lt;DATANODE_HOST:IPC_PORT&gt; [upgrade]</source>
-  <p>
-    Submit a shutdown request for the given datanode.
-    If the optional <code>upgrade</code> argument is specified,
-    clients accessing the datanode will be advised to wait for it to restart
-    and the fast start-up mode will be enabled.
-    When the restart does not happen in time, clients will timeout and ignore the datanode.
-    In such case, the fast start-up mode will also be disabled.
-  </p>
-  <p>
-    Note that the command does not wait for the datanode shutdown to complete.
-    The "<a href="#dfsadmin_-getDatanodeInfo">dfsadmin -getDatanodeInfo</a>"
-   command can be used for checking if the datanode shutdown is completed.
-  </p>
-  </subsection>
-
-  <subsection name="NameNode Startup Options" id="dfsadminCommands">
-
-  <h4><code>namenode -rollingUpgrade</code></h4>
-  <source>hdfs namenode -rollingUpgrade &lt;rollback|started&gt;</source>
-  <p>
-    When a rolling upgrade is in progress,
-    the <code>-rollingUpgrade</code> namenode startup option is used to specify
-    various rolling upgrade options.
-  </p>
-    <ul><li>Options:<table>
-      <tr><td><code>rollback</code></td>
-        <td>Restores the namenode back to the pre-upgrade release
-            but also reverts the user data back to the pre-upgrade state.</td>
-      </tr>
-      <tr><td><code>started</code></td>
-        <td>Specifies a rolling upgrade already started
-          so that the namenode should allow image directories
-          with different layout versions during startup.</td>
-      </tr>
-    </table></li></ul>
-  <p>
-    <b>WARN: downgrade options is obsolete.</b>
-      It is not necessary to start namenode with downgrade options explicitly.
-  </p>
-  </subsection>
-
-  </section>
-  </body>
-</document>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b5b2c58/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml b/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml
deleted file mode 100644
index 330d00f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml
+++ /dev/null
@@ -1,303 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-      http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-<document xmlns="http://maven.apache.org/XDOC/2.0"
-  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-  xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
-
-  <properties>
-    <title>HDFS Snapshots</title>
-  </properties>
-
-  <body>
-
-  <h1>HDFS Snapshots</h1>
-  <macro name="toc">
-    <param name="section" value="0"/>
-    <param name="fromDepth" value="0"/>
-    <param name="toDepth" value="4"/>
-  </macro>
-
-  <section name="Overview" id="Overview">
-  <p>
-    HDFS Snapshots are read-only point-in-time copies of the file system.
-    Snapshots can be taken on a subtree of the file system or the entire file system.
-    Some common use cases of snapshots are data backup, protection against user errors
-    and disaster recovery.
-  </p>
-
-  <p>
-    The implementation of HDFS Snapshots is efficient:
-  </p>
-  <ul>
-    <li>Snapshot creation is instantaneous:
-        the cost is <em>O(1)</em> excluding the inode lookup time.</li>
-    <li>Additional memory is used only when modifications are made relative to a snapshot:
-        memory usage is <em>O(M)</em>,
-        where <em>M</em> is the number of modified files/directories.</li>
-    <li>Blocks in datanodes are not copied:
-        the snapshot files record the block list and the file size.
-        There is no data copying.</li>
-    <li>Snapshots do not adversely affect regular HDFS operations:
-        modifications are recorded in reverse chronological order
-        so that the current data can be accessed directly.
-        The snapshot data is computed by subtracting the modifications
-        from the current data.</li>
-  </ul>
-
-  <subsection name="Snapshottable Directories" id="SnapshottableDirectories">
-  <p>
-    Snapshots can be taken on any directory once the directory has been set as
-    <em>snapshottable</em>.
-    A snapshottable directory is able to accommodate 65,536 simultaneous snapshots.
-    There is no limit on the number of snapshottable directories.
-    Administrators may set any directory to be snapshottable.
-    If there are snapshots in a snapshottable directory,
-    the directory can be neither deleted nor renamed
-    before all the snapshots are deleted.
-  </p>
-
-  <p>
-    Nested snapshottable directories are currently not allowed.
-    In other words, a directory cannot be set to snapshottable
-    if one of its ancestors/descendants is a snapshottable directory.
-  </p>
-
-  </subsection>
-
-  <subsection name="Snapshot Paths" id="SnapshotPaths">
-  <p>
-    For a snapshottable directory,
-    the path component <em>".snapshot"</em> is used for accessing its snapshots.
-    Suppose <code>/foo</code> is a snapshottable directory,
-    <code>/foo/bar</code> is a file/directory in <code>/foo</code>,
-    and <code>/foo</code> has a snapshot <code>s0</code>.
-    Then, the path <source>/foo/.snapshot/s0/bar</source>
-    refers to the snapshot copy of <code>/foo/bar</code>.
-    The usual API and CLI can work with the ".snapshot" paths.
-    The following are some examples.
-  </p>
-  <ul>
-    <li>Listing all the snapshots under a snapshottable directory:
-      <source>hdfs dfs -ls /foo/.snapshot</source></li>
-    <li>Listing the files in snapshot <code>s0</code>:
-      <source>hdfs dfs -ls /foo/.snapshot/s0</source></li>
-    <li>Copying a file from snapshot <code>s0</code>:
-      <source>hdfs dfs -cp -ptopax /foo/.snapshot/s0/bar /tmp</source>
-      <p>Note that this example uses the preserve option to preserve
-         timestamps, ownership, permission, ACLs and XAttrs.</p></li>
-  </ul>
-  </subsection>
-  </section>
-
-  <section name="Upgrading to a version of HDFS with snapshots" id="Upgrade">
-
-  <p>
-    The HDFS snapshot feature introduces a new reserved path name used to
-    interact with snapshots: <tt>.snapshot</tt>. When upgrading from an
-    older version of HDFS, existing paths named <tt>.snapshot</tt> need
-    to first be renamed or deleted to avoid conflicting with the reserved path.
-    See the upgrade section in
-    <a href="HdfsUserGuide.html#Upgrade_and_Rollback">the HDFS user guide</a>
-    for more information.  </p>
-
-  </section>
-
-  <section name="Snapshot Operations" id="SnapshotOperations">
-  <subsection name="Administrator Operations" id="AdministratorOperations">
-  <p>
-    The operations described in this section require superuser privilege.
-  </p>
-
-  <h4>Allow Snapshots</h4>
-  <p>
-    Allowing snapshots of a directory to be created.
-    If the operation completes successfully, the directory becomes snapshottable.
-  </p>
-  <ul>
-    <li>Command:
-      <source>hdfs dfsadmin -allowSnapshot &lt;path&gt;</source></li>
-    <li>Arguments:<table>
-      <tr><td>path</td><td>The path of the snapshottable directory.</td></tr>
-    </table></li>
-  </ul>
-  <p>
-    See also the corresponding Java API
-    <code>void allowSnapshot(Path path)</code> in <code>HdfsAdmin</code>.
-  </p>
-
-  <h4>Disallow Snapshots</h4>
-  <p>
-    Disallowing snapshots of a directory to be created.
-    All snapshots of the directory must be deleted before disallowing snapshots.
-  </p>
-  <ul>
-    <li>Command:
-      <source>hdfs dfsadmin -disallowSnapshot &lt;path&gt;</source></li>
-    <li>Arguments:<table>
-      <tr><td>path</td><td>The path of the snapshottable directory.</td></tr>
-    </table></li>
-  </ul>
-  <p>
-    See also the corresponding Java API
-    <code>void disallowSnapshot(Path path)</code> in <code>HdfsAdmin</code>.
-  </p>
-  </subsection>
-
-  <subsection name="User Operations" id="UserOperations">
-  <p>
-    The section describes user operations.
-    Note that HDFS superuser can perform all the operations
-    without satisfying the permission requirement in the individual operations.
-  </p>
-
-  <h4>Create Snapshots</h4>
-  <p>
-    Create a snapshot of a snapshottable directory.
-    This operation requires owner privilege of the snapshottable directory.
-  </p>
-  <ul>
-    <li>Command:
-      <source>hdfs dfs -createSnapshot &lt;path&gt; [&lt;snapshotName&gt;]</source></li>
-    <li>Arguments:<table>
-      <tr><td>path</td><td>The path of the snapshottable directory.</td></tr>
-      <tr><td>snapshotName</td><td>
-        The snapshot name, which is an optional argument.
-        When it is omitted, a default name is generated using a timestamp with the format
-        <code>"'s'yyyyMMdd-HHmmss.SSS"</code>, e.g. "s20130412-151029.033".
-      </td></tr>
-    </table></li>
-  </ul>
-  <p>
-    See also the corresponding Java API
-    <code>Path createSnapshot(Path path)</code> and
-    <code>Path createSnapshot(Path path, String snapshotName)</code>
-    in <a href="../../api/org/apache/hadoop/fs/FileSystem.html"><code>FileSystem</code></a>.
-    The snapshot path is returned in these methods.
-  </p>
-
-  <h4>Delete Snapshots</h4>
-  <p>
-    Delete a snapshot of from a snapshottable directory.
-    This operation requires owner privilege of the snapshottable directory.
-  </p>
-  <ul>
-    <li>Command:
-      <source>hdfs dfs -deleteSnapshot &lt;path&gt; &lt;snapshotName&gt;</source></li>
-    <li>Arguments:<table>
-      <tr><td>path</td><td>The path of the snapshottable directory.</td></tr>
-      <tr><td>snapshotName</td><td>The snapshot name.</td></tr>
-    </table></li>
-  </ul>
-  <p>
-    See also the corresponding Java API
-    <code>void deleteSnapshot(Path path, String snapshotName)</code>
-    in <a href="../../api/org/apache/hadoop/fs/FileSystem.html"><code>FileSystem</code></a>.
-  </p>
-
-  <h4>Rename Snapshots</h4>
-  <p>
-    Rename a snapshot.
-    This operation requires owner privilege of the snapshottable directory.
-  </p>
-  <ul>
-    <li>Command:
-      <source>hdfs dfs -renameSnapshot &lt;path&gt; &lt;oldName&gt; &lt;newName&gt;</source></li>
-    <li>Arguments:<table>
-      <tr><td>path</td><td>The path of the snapshottable directory.</td></tr>
-      <tr><td>oldName</td><td>The old snapshot name.</td></tr>
-      <tr><td>newName</td><td>The new snapshot name.</td></tr>
-    </table></li>
-  </ul>
-  <p>
-    See also the corresponding Java API
-    <code>void renameSnapshot(Path path, String oldName, String newName)</code>
-    in <a href="../../api/org/apache/hadoop/fs/FileSystem.html"><code>FileSystem</code></a>.
-  </p>
-
-  <h4>Get Snapshottable Directory Listing</h4>
-  <p>
-    Get all the snapshottable directories where the current user has permission to take snapshtos.
-  </p>
-  <ul>
-    <li>Command:
-      <source>hdfs lsSnapshottableDir</source></li>
-    <li>Arguments: none</li>
-  </ul>
-  <p>
-    See also the corresponding Java API
-    <code>SnapshottableDirectoryStatus[] getSnapshottableDirectoryListing()</code>
-    in <code>DistributedFileSystem</code>.
-  </p>
-
-  <h4>Get Snapshots Difference Report</h4>
-  <p>
-    Get the differences between two snapshots.
-    This operation requires read access privilege for all files/directories in both snapshots.
-  </p>
-  <ul>
-    <li>Command:
-      <source>hdfs snapshotDiff &lt;path&gt; &lt;fromSnapshot&gt; &lt;toSnapshot&gt;</source></li>
-    <li>Arguments:<table>
-      <tr><td>path</td><td>The path of the snapshottable directory.</td></tr>
-      <tr><td>fromSnapshot</td><td>The name of the starting snapshot.</td></tr>
-      <tr><td>toSnapshot</td><td>The name of the ending snapshot.</td></tr>
-    </table></li>
-    <p>
-      Note that snapshotDiff can be used to get the difference report between two snapshots, or between
-      a snapshot and the current status of a directory.Users can use "." to represent the current status.
-    </p>
-    <li>Results:
-      <table>
-        <tr><td>+</td><td>The file/directory has been created.</td></tr>
-        <tr><td>-</td><td>The file/directory has been deleted.</td></tr>
-        <tr><td>M</td><td>The file/directory has been modified.</td></tr>
-        <tr><td>R</td><td>The file/directory has been renamed.</td></tr>
-      </table>
-    </li>
-  </ul>
-  <p>
-    A <em>RENAME</em> entry indicates a file/directory has been renamed but
-    is still under the same snapshottable directory. A file/directory is
-    reported as deleted if it was renamed to outside of the snapshottble directory.
-    A file/directory renamed from outside of the snapshottble directory is
-    reported as newly created.
-  </p>
-  <p>
-    The snapshot difference report does not guarantee the same operation sequence.
-    For example, if we rename the directory <em>"/foo"</em> to <em>"/foo2"</em>, and
-    then append new data to the file <em>"/foo2/bar"</em>, the difference report will
-    be:
-    <source>
-    R. /foo -> /foo2
-    M. /foo/bar
-    </source>
-    I.e., the changes on the files/directories under a renamed directory is
-    reported using the original path before the rename (<em>"/foo/bar"</em> in
-    the above example).
-  </p>
-  <p>
-    See also the corresponding Java API
-    <code>SnapshotDiffReport getSnapshotDiffReport(Path path, String fromSnapshot, String toSnapshot)</code>
-    in <code>DistributedFileSystem</code>.
-  </p>
-
-  </subsection>
-  </section>
-
-  </body>
-</document>


[19/50] [abbrv] hadoop git commit: HDFS-7314. Moving to 2.6.1 CHANGES.txt section.

Posted by ec...@apache.org.
HDFS-7314. Moving to 2.6.1 CHANGES.txt section.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f103a70a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f103a70a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f103a70a

Branch: refs/heads/HADOOP-11890
Commit: f103a70af5c5b01931b5cd2e5782eac5aeeb31cd
Parents: fbbb7ff
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Thu Sep 10 16:46:16 2015 -0700
Committer: Vinod Kumar Vavilapalli <vi...@apache.org>
Committed: Thu Sep 10 16:46:16 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f103a70a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0c2645d..b5be944 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -731,9 +731,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8742. Inotify: Support event for OP_TRUNCATE.
     (Surendra Singh Lilhore via aajisaka)
 
-    HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
-    files rather than the entire DFSClient. (mingma)
-
     HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
 
     HDFS-7483. Display information per tier on the Namenode UI.
@@ -2454,6 +2451,9 @@ Release 2.6.1 - 2015-09-09
     HDFS-7213. processIncrementalBlockReport performance degradation.
     (Eric Payne via kihwal)
 
+    HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
+    files rather than the entire DFSClient. (mingma)
+
     HDFS-7235. DataNode#transferBlock should report blocks that don't exist
     using reportBadBlock (yzhang via cmccabe)
 


[04/50] [abbrv] hadoop git commit: HDFS-8860. Remove unused Replica copyOnWrite code (Lei (Eddy) Xu via Colin P. McCabe)

Posted by ec...@apache.org.
HDFS-8860. Remove unused Replica copyOnWrite code (Lei (Eddy) Xu via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a153b960
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a153b960
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a153b960

Branch: refs/heads/HADOOP-11890
Commit: a153b9601ad8628fdd608d8696310ca8c1f58ff0
Parents: d9c1fab
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Tue Sep 8 18:12:47 2015 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Tue Sep 8 18:37:22 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../hdfs/server/datanode/FinalizedReplica.java  | 15 +---
 .../hdfs/server/datanode/ReplicaInfo.java       | 82 --------------------
 .../server/datanode/ReplicaUnderRecovery.java   | 10 ---
 .../datanode/ReplicaWaitingToBeRecovered.java   | 15 +---
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  3 -
 .../org/apache/hadoop/hdfs/TestFileAppend.java  | 72 -----------------
 .../hdfs/server/datanode/DataNodeTestUtils.java |  5 --
 .../fsdataset/impl/FsDatasetTestUtil.java       |  6 --
 .../fsdataset/impl/TestDatanodeRestart.java     | 72 -----------------
 10 files changed, 4 insertions(+), 278 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8b50065..cfedb0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -903,6 +903,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-9019. Adding informative message to sticky bit permission denied
     exception. (xyao)
 
+    HDFS-8860. Remove unused Replica copyOnWrite code (Lei (Eddy) Xu via Colin P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
index cc32874..8daeb51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FinalizedReplica.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
  * This class describes a replica that has been finalized.
  */
 public class FinalizedReplica extends ReplicaInfo {
-  private boolean unlinked;      // copy-on-write done for block
 
   /**
    * Constructor
@@ -58,7 +57,6 @@ public class FinalizedReplica extends ReplicaInfo {
    */
   public FinalizedReplica(FinalizedReplica from) {
     super(from);
-    this.unlinked = from.isUnlinked();
   }
 
   @Override  // ReplicaInfo
@@ -66,16 +64,6 @@ public class FinalizedReplica extends ReplicaInfo {
     return ReplicaState.FINALIZED;
   }
   
-  @Override // ReplicaInfo
-  public boolean isUnlinked() {
-    return unlinked;
-  }
-
-  @Override  // ReplicaInfo
-  public void setUnlinked() {
-    unlinked = true;
-  }
-  
   @Override
   public long getVisibleLength() {
     return getNumBytes();       // all bytes are visible
@@ -98,7 +86,6 @@ public class FinalizedReplica extends ReplicaInfo {
   
   @Override
   public String toString() {
-    return super.toString()
-        + "\n  unlinked          =" + unlinked;
+    return super.toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index 136d8a9..31b14fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -197,22 +197,6 @@ abstract public class ReplicaInfo extends Block implements Replica {
   }
 
   /**
-   * check if this replica has already been unlinked.
-   * @return true if the replica has already been unlinked 
-   *         or no need to be detached; false otherwise
-   */
-  public boolean isUnlinked() {
-    return true;                // no need to be unlinked
-  }
-
-  /**
-   * set that this replica is unlinked
-   */
-  public void setUnlinked() {
-    // no need to be unlinked
-  }
-
-  /**
    * Number of bytes reserved for this replica on disk.
    */
   public long getBytesReserved() {
@@ -229,72 +213,6 @@ abstract public class ReplicaInfo extends Block implements Replica {
     return 0;
   }
 
-   /**
-   * Copy specified file into a temporary file. Then rename the
-   * temporary file to the original name. This will cause any
-   * hardlinks to the original file to be removed. The temporary
-   * files are created in the same directory. The temporary files will
-   * be recovered (especially on Windows) on datanode restart.
-   */
-  private void unlinkFile(File file, Block b) throws IOException {
-    File tmpFile = DatanodeUtil.createTmpFile(b, DatanodeUtil.getUnlinkTmpFile(file));
-    try {
-      FileInputStream in = new FileInputStream(file);
-      try {
-        FileOutputStream out = new FileOutputStream(tmpFile);
-        try {
-          IOUtils.copyBytes(in, out, 16*1024);
-        } finally {
-          out.close();
-        }
-      } finally {
-        in.close();
-      }
-      if (file.length() != tmpFile.length()) {
-        throw new IOException("Copy of file " + file + " size " + file.length()+
-                              " into file " + tmpFile +
-                              " resulted in a size of " + tmpFile.length());
-      }
-      FileUtil.replaceFile(tmpFile, file);
-    } catch (IOException e) {
-      boolean done = tmpFile.delete();
-      if (!done) {
-        DataNode.LOG.info("detachFile failed to delete temporary file " +
-                          tmpFile);
-      }
-      throw e;
-    }
-  }
-
-  /**
-   * Remove a hard link by copying the block to a temporary place and 
-   * then moving it back
-   * @param numLinks number of hard links
-   * @return true if copy is successful; 
-   *         false if it is already detached or no need to be detached
-   * @throws IOException if there is any copy error
-   */
-  public boolean unlinkBlock(int numLinks) throws IOException {
-    if (isUnlinked()) {
-      return false;
-    }
-    File file = getBlockFile();
-    if (file == null || getVolume() == null) {
-      throw new IOException("detachBlock:Block not found. " + this);
-    }
-    File meta = getMetaFile();
-
-    if (HardLink.getLinkCount(file) > numLinks) {
-      DataNode.LOG.info("CopyOnWrite for block " + this);
-      unlinkFile(file, this);
-    }
-    if (HardLink.getLinkCount(meta) > numLinks) {
-      unlinkFile(meta, this);
-    }
-    setUnlinked();
-    return true;
-  }
-
   @Override  //Object
   public String toString() {
     return getClass().getSimpleName()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
index 2cd8a01..558ee21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaUnderRecovery.java
@@ -85,16 +85,6 @@ public class ReplicaUnderRecovery extends ReplicaInfo {
   public ReplicaInfo getOriginalReplica() {
     return original;
   }
-
-  @Override //ReplicaInfo
-  public boolean isUnlinked() {
-    return original.isUnlinked();
-  }
-
-  @Override //ReplicaInfo
-  public void setUnlinked() {
-    original.setUnlinked();
-  }
   
   @Override //ReplicaInfo
   public ReplicaState getState() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
index 26ab3db..220649d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaWaitingToBeRecovered.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
  * lease recovery.
  */
 public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
-  private boolean unlinked;      // copy-on-write done for block
 
   /**
    * Constructor
@@ -64,7 +63,6 @@ public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
    */
   public ReplicaWaitingToBeRecovered(ReplicaWaitingToBeRecovered from) {
     super(from);
-    this.unlinked = from.isUnlinked();
   }
 
   @Override //ReplicaInfo
@@ -73,16 +71,6 @@ public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
   }
   
   @Override //ReplicaInfo
-  public boolean isUnlinked() {
-    return unlinked;
-  }
-
-  @Override //ReplicaInfo
-  public void setUnlinked() {
-    unlinked = true;
-  }
-  
-  @Override //ReplicaInfo
   public long getVisibleLength() {
     return -1;  //no bytes are visible
   }
@@ -104,7 +92,6 @@ public class ReplicaWaitingToBeRecovered extends ReplicaInfo {
 
   @Override
   public String toString() {
-    return super.toString()
-        + "\n  unlinked=" + unlinked;
+    return super.toString();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index e981ccb..8722d35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1109,8 +1109,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       throws IOException {
     // If the block is cached, start uncaching it.
     cacheManager.uncacheBlock(bpid, replicaInfo.getBlockId());
-    // unlink the finalized replica
-    replicaInfo.unlinkBlock(1);
     
     // construct a RBW replica with the new GS
     File blkfile = replicaInfo.getBlockFile();
@@ -2480,7 +2478,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           + ", rur=" + rur);
     }
     if (rur.getNumBytes() > newlength) {
-      rur.unlinkBlock(1);
       truncateBlock(blockFile, metaFile, rur.getNumBytes(), newlength);
       if(!copyOnTruncate) {
         // update RUR with the new length

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 85d92c9..7b7f415 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -110,78 +110,6 @@ public class TestFileAppend{
   }
 
   /**
-   * Test that copy on write for blocks works correctly
-   * @throws IOException an exception might be thrown
-   */
-  @Test
-  public void testCopyOnWrite() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    if (simulatedStorage) {
-      SimulatedFSDataset.setFactory(conf);
-    }
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    FileSystem fs = cluster.getFileSystem();
-    InetSocketAddress addr = new InetSocketAddress("localhost",
-                                                   cluster.getNameNodePort());
-    DFSClient client = new DFSClient(addr, conf);
-    try {
-
-      // create a new file, write to it and close it.
-      //
-      Path file1 = new Path("/filestatus.dat");
-      FSDataOutputStream stm = AppendTestUtil.createFile(fs, file1, 1);
-      writeFile(stm);
-      stm.close();
-
-      // Get a handle to the datanode
-      DataNode[] dn = cluster.listDataNodes();
-      assertTrue("There should be only one datanode but found " + dn.length,
-                  dn.length == 1);
-
-      LocatedBlocks locations = client.getNamenode().getBlockLocations(
-                                  file1.toString(), 0, Long.MAX_VALUE);
-      List<LocatedBlock> blocks = locations.getLocatedBlocks();
-
-      //
-      // Create hard links for a few of the blocks
-      //
-      for (int i = 0; i < blocks.size(); i = i + 2) {
-        ExtendedBlock b = blocks.get(i).getBlock();
-        final File f = DataNodeTestUtils.getFile(dn[0],
-            b.getBlockPoolId(), b.getLocalBlock().getBlockId());
-        File link = new File(f.toString() + ".link");
-        System.out.println("Creating hardlink for File " + f + " to " + link);
-        HardLink.createHardLink(f, link);
-      }
-
-      //
-      // Detach all blocks. This should remove hardlinks (if any)
-      //
-      for (int i = 0; i < blocks.size(); i++) {
-        ExtendedBlock b = blocks.get(i).getBlock();
-        System.out.println("testCopyOnWrite detaching block " + b);
-        assertTrue("Detaching block " + b + " should have returned true",
-            DataNodeTestUtils.unlinkBlock(dn[0], b, 1));
-      }
-
-      // Since the blocks were already detached earlier, these calls should
-      // return false
-      //
-      for (int i = 0; i < blocks.size(); i++) {
-        ExtendedBlock b = blocks.get(i).getBlock();
-        System.out.println("testCopyOnWrite detaching block " + b);
-        assertTrue("Detaching block " + b + " should have returned false",
-            !DataNodeTestUtils.unlinkBlock(dn[0], b, 1));
-      }
-
-    } finally {
-      client.close();
-      fs.close();
-      cluster.shutdown();
-    }
-  }
-
-  /**
    * Test a simple flush on a simple HDFS file.
    * @throws IOException an exception might be thrown
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index 2f9a3e5..b4071de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -151,11 +151,6 @@ public class DataNodeTestUtils {
       throws IOException {
     return FsDatasetTestUtil.getMetaFile(dn.getFSDataset(), bpid, b);
   }
-  
-  public static boolean unlinkBlock(DataNode dn, ExtendedBlock bk, int numLinks
-      ) throws IOException {
-    return FsDatasetTestUtil.unlinkBlock(dn.getFSDataset(), bk, numLinks);
-  }
 
   public static long getPendingAsyncDeletions(DataNode dn) {
     return FsDatasetTestUtil.getPendingAsyncDeletions(dn.getFSDataset());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
index 7ac9b65..164385e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetTestUtil.java
@@ -54,12 +54,6 @@ public class FsDatasetTestUtil {
     return FsDatasetUtil.getMetaFile(getBlockFile(fsd, bpid, b), b
         .getGenerationStamp());
   }
-  
-  public static boolean unlinkBlock(FsDatasetSpi<?> fsd,
-      ExtendedBlock block, int numLinks) throws IOException {
-    final ReplicaInfo info = ((FsDatasetImpl)fsd).getReplicaInfo(block);
-    return info.unlinkBlock(numLinks);
-  }
 
   public static ReplicaInfo fetchReplicaInfo (final FsDatasetSpi<?> fsd,
       final String bpid, final long blockId) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a153b960/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
index 4516696..8bbac9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
@@ -143,79 +143,7 @@ public class TestDatanodeRestart {
     }      
   }
 
-  // test recovering unlinked tmp replicas
-  @Test public void testRecoverReplicas() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
-    conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    cluster.waitActive();
-    try {
-      FileSystem fs = cluster.getFileSystem();
-      for (int i=0; i<4; i++) {
-        Path fileName = new Path("/test"+i);
-        DFSTestUtil.createFile(fs, fileName, 1, (short)1, 0L);
-        DFSTestUtil.waitReplication(fs, fileName, (short)1);
-      }
-      String bpid = cluster.getNamesystem().getBlockPoolId();
-      DataNode dn = cluster.getDataNodes().get(0);
-      Iterator<ReplicaInfo> replicasItor = 
-          dataset(dn).volumeMap.replicas(bpid).iterator();
-      ReplicaInfo replica = replicasItor.next();
-      createUnlinkTmpFile(replica, true, true); // rename block file
-      createUnlinkTmpFile(replica, false, true); // rename meta file
-      replica = replicasItor.next();
-      createUnlinkTmpFile(replica, true, false); // copy block file
-      createUnlinkTmpFile(replica, false, false); // copy meta file
-      replica = replicasItor.next();
-      createUnlinkTmpFile(replica, true, true); // rename block file
-      createUnlinkTmpFile(replica, false, false); // copy meta file
-
-      cluster.restartDataNodes();
-      cluster.waitActive();
-      dn = cluster.getDataNodes().get(0);
-
-      // check volumeMap: 4 finalized replica
-      Collection<ReplicaInfo> replicas = dataset(dn).volumeMap.replicas(bpid);
-      Assert.assertEquals(4, replicas.size());
-      replicasItor = replicas.iterator();
-      while (replicasItor.hasNext()) {
-        Assert.assertEquals(ReplicaState.FINALIZED, 
-            replicasItor.next().getState());
-      }
-    } finally {
-      cluster.shutdown();
-    }
-  }
-
   private static FsDatasetImpl dataset(DataNode dn) {
     return (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);
   }
-
-  private static void createUnlinkTmpFile(ReplicaInfo replicaInfo, 
-      boolean changeBlockFile, 
-      boolean isRename) throws IOException {
-    File src;
-    if (changeBlockFile) {
-      src = replicaInfo.getBlockFile();
-    } else {
-      src = replicaInfo.getMetaFile();
-    }
-    File dst = DatanodeUtil.getUnlinkTmpFile(src);
-    if (isRename) {
-      src.renameTo(dst);
-    } else {
-      FileInputStream in = new FileInputStream(src);
-      try {
-        FileOutputStream out = new FileOutputStream(dst);
-        try {
-          IOUtils.copyBytes(in, out, 1);
-        } finally {
-          out.close();
-        }
-      } finally {
-        in.close();
-      }
-    }
-  }
 }


[41/50] [abbrv] hadoop git commit: HDFS-9010. Replace NameNode.DEFAULT_PORT with HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT config key. Contributed by Mingliang Liu.

Posted by ec...@apache.org.
HDFS-9010. Replace NameNode.DEFAULT_PORT with HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT config key. Contributed by Mingliang Liu.
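
For readers following along, here is a minimal sketch of the replacement pattern in client code. The class name and host are hypothetical illustrations; NetUtils.createSocketAddr and the config key are the same calls the diff below switches to, and the default value (8020) is taken from the deprecated constant's definition.

    import java.net.InetSocketAddress;

    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
    import org.apache.hadoop.net.NetUtils;

    public class DefaultPortSketch {
      public static void main(String[] args) {
        // Default the port from the client-side config key (8020) instead
        // of the now-deprecated server-side NameNode.DEFAULT_PORT constant.
        InetSocketAddress addr = NetUtils.createSocketAddr(
            "nn.example.com",  // hypothetical host, for illustration only
            HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
        System.out.println(addr);
      }
    }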


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76957a48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76957a48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76957a48

Branch: refs/heads/HADOOP-11890
Commit: 76957a485b526468498f93e443544131a88b5684
Parents: 7b5cf53
Author: Haohui Mai <wh...@apache.org>
Authored: Mon Sep 14 18:22:52 2015 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Mon Sep 14 18:22:52 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt         |  3 +++
 .../src/main/java/org/apache/hadoop/fs/Hdfs.java    |  7 ++++---
 .../apache/hadoop/hdfs/DistributedFileSystem.java   |  4 ++--
 .../org/apache/hadoop/hdfs/NameNodeProxies.java     |  3 ++-
 .../hadoop/hdfs/server/namenode/NameNode.java       | 15 +++++++++++----
 .../apache/hadoop/hdfs/tools/NNHAServiceTarget.java |  3 ++-
 .../hadoop/hdfs/TestAppendSnapshotTruncate.java     |  3 ++-
 .../apache/hadoop/hdfs/TestDFSClientFailover.java   |  3 ++-
 .../apache/hadoop/hdfs/TestDefaultNameNodePort.java | 16 ++++++++++------
 .../balancer/TestBalancerWithHANameNodes.java       |  4 ++--
 .../hdfs/server/namenode/TestFileTruncate.java      |  5 +++--
 11 files changed, 43 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 270f30b..35dcd80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -909,6 +909,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8996. Consolidate validateLog and scanLog in FJM#EditLogFile (Zhe
     Zhang via Colin P. McCabe)
 
+    HDFS-9010. Replace NameNode.DEFAULT_PORT with HdfsClientConfigKeys.
+    DFS_NAMENODE_RPC_PORT_DEFAULT config key. (Mingliang Liu via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index ba5687c..1d37aa9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
@@ -49,7 +50,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -77,7 +77,8 @@ public class Hdfs extends AbstractFileSystem {
    * @throws IOException
    */
   Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
-    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
+    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true,
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
 
     if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
       throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
@@ -92,7 +93,7 @@ public class Hdfs extends AbstractFileSystem {
 
   @Override
   public int getUriDefaultPort() {
-    return NameNode.DEFAULT_PORT;
+    return HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index ebed3c2..f4cf4c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -84,7 +85,6 @@ import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
@@ -1497,7 +1497,7 @@ public class DistributedFileSystem extends FileSystem {
 
   @Override
   protected int getDefaultPort() {
-    return NameNode.DEFAULT_PORT;
+    return HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index d873526..7a4ec4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -509,7 +509,8 @@ public class NameNodeProxies {
     // Check the port in the URI, if it is logical.
     if (checkPort && providerNN.useLogicalURI()) {
       int port = nameNodeUri.getPort();
-      if (port > 0 && port != NameNode.DEFAULT_PORT) {
+      if (port > 0 &&
+          port != HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
         // Throwing here without any cleanup is fine since we have not
         // actually created the underlying proxies yet.
         throw new IOException("Port " + port + " specified in URI "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 6e32066..683112b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -39,9 +39,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -111,6 +111,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY;
@@ -314,7 +315,12 @@ public class NameNode implements NameNodeStatusMXBean {
     }
   }
     
-  public static final int DEFAULT_PORT = 8020;
+  /**
+   * @deprecated Use {@link HdfsClientConfigKeys#DFS_NAMENODE_RPC_PORT_DEFAULT}
+   *             instead.
+   */
+  @Deprecated
+  public static final int DEFAULT_PORT = DFS_NAMENODE_RPC_PORT_DEFAULT;
   public static final Logger LOG =
       LoggerFactory.getLogger(NameNode.class.getName());
   public static final Logger stateChangeLog =
@@ -452,7 +458,7 @@ public class NameNode implements NameNodeStatusMXBean {
   }
 
   public static InetSocketAddress getAddress(String address) {
-    return NetUtils.createSocketAddr(address, DEFAULT_PORT);
+    return NetUtils.createSocketAddr(address, DFS_NAMENODE_RPC_PORT_DEFAULT);
   }
   
   /**
@@ -509,7 +515,8 @@ public class NameNode implements NameNodeStatusMXBean {
 
   public static URI getUri(InetSocketAddress namenode) {
     int port = namenode.getPort();
-    String portString = port == DEFAULT_PORT ? "" : (":"+port);
+    String portString = (port == DFS_NAMENODE_RPC_PORT_DEFAULT) ?
+        "" : (":" + port);
     return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
         + namenode.getHostName()+portString);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
index 38f5123..6615a4f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.ha.NodeFencer;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetUtils;
 
@@ -77,7 +78,7 @@ public class NNHAServiceTarget extends HAServiceTarget {
           "Unable to determine service address for namenode '" + nnId + "'");
     }
     this.addr = NetUtils.createSocketAddr(serviceAddr,
-        NameNode.DEFAULT_PORT);
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
 
     this.autoFailoverEnabled = targetConf.getBoolean(
         DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
index 9a09987..a1169c3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite.ReplaceDatanodeOnFailure;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TestFileTruncate;
@@ -89,7 +90,7 @@ public class TestAppendSnapshotTruncate {
     cluster = new MiniDFSCluster.Builder(conf)
         .format(true)
         .numDataNodes(DATANODE_NUM)
-        .nameNodePort(NameNode.DEFAULT_PORT)
+        .nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
         .waitSafeMode(true)
         .build();
     dfs = cluster.getFileSystem();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
index 644d66d..ff5554a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
@@ -115,7 +115,8 @@ public class TestDFSClientFailover {
     // to include a port number.
     Path withPort = new Path("hdfs://" +
         HATestUtil.getLogicalHostname(cluster) + ":" +
-        NameNode.DEFAULT_PORT + "/" + TEST_FILE.toUri().getPath());
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT + "/" +
+        TEST_FILE.toUri().getPath());
     FileSystem fs2 = withPort.getFileSystem(fs.getConf());
     assertTrue(fs2.exists(withPort));
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
index 27f13e3..38be3c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
@@ -24,7 +24,9 @@ import java.net.URI;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+
 import org.junit.Test;
 
 /** Test NameNode port defaulting code. */
@@ -33,9 +35,9 @@ public class TestDefaultNameNodePort {
   @Test
   public void testGetAddressFromString() throws Exception {
     assertEquals(NameNode.getAddress("foo").getPort(),
-                 NameNode.DEFAULT_PORT);
+                 HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
-                 NameNode.DEFAULT_PORT);
+                 HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
                  555);
     assertEquals(NameNode.getAddress("foo:555").getPort(),
@@ -46,11 +48,13 @@ public class TestDefaultNameNodePort {
   public void testGetAddressFromConf() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://foo/");
-    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress(conf).getPort(),
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
     assertEquals(NameNode.getAddress(conf).getPort(), 555);
     FileSystem.setDefaultUri(conf, "foo");
-    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress(conf).getPort(),
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
   }
 
   @Test
@@ -58,7 +62,7 @@ public class TestDefaultNameNodePort {
     assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
                  URI.create("hdfs://foo:555"));
     assertEquals(NameNode.getUri(new InetSocketAddress("foo",
-                                                       NameNode.DEFAULT_PORT)),
-                 URI.create("hdfs://foo"));
+            HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)),
+        URI.create("hdfs://foo"));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
index bd91366..7559de4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
@@ -31,8 +31,8 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
 import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.junit.Test;
 
@@ -67,7 +67,7 @@ public class TestBalancerWithHANameNodes {
     assertEquals(capacities.length, racks.length);
     int numOfDatanodes = capacities.length;
     NNConf nn1Conf = new MiniDFSNNTopology.NNConf("nn1");
-    nn1Conf.setIpcPort(NameNode.DEFAULT_PORT);
+    nn1Conf.setIpcPort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
     Configuration copiedConf = new Configuration(conf);
     cluster = new MiniDFSCluster.Builder(copiedConf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76957a48/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 70fa222..711db2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -99,7 +100,7 @@ public class TestFileTruncate {
     cluster = new MiniDFSCluster.Builder(conf)
         .format(true)
         .numDataNodes(DATANODE_NUM)
-        .nameNodePort(NameNode.DEFAULT_PORT)
+        .nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
         .waitSafeMode(true)
         .build();
     fs = cluster.getFileSystem();
@@ -1224,7 +1225,7 @@ public class TestFileTruncate {
       NameNode.doRollback(conf, false);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM)
         .format(false)
-        .nameNodePort(NameNode.DEFAULT_PORT)
+        .nameNodePort(HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT)
         .startupOption(o==StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
         .dnStartupOption(o!=StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
         .build();


[05/50] [abbrv] hadoop git commit: HDFS-8929. Add a metric to expose the timestamp of the last journal (Contributed by surendra singh lilhore)

Posted by ec...@apache.org.
HDFS-8929. Add a metric to expose the timestamp of the last journal (Contributed by surendra singh lilhore)
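
As a hedged sketch of how the new gauge behaves, the fragment below follows the TestJournalNode pattern from the diff: write edits through an IPCLoggerChannel, re-fetch the JournalNode metrics record, and check that the timestamp advanced. It is a fragment, not a standalone test; `ch` and `metrics` are assumed to be set up exactly as in the existing test.

    // Sketch only: `ch` is the test's IPCLoggerChannel and `metrics` is
    // assumed to be re-fetched after the write, as TestJournalNode does.
    long begin = System.currentTimeMillis();
    ch.sendEdits(1L, 1, 1, "hello".getBytes(Charsets.UTF_8)).get();
    long last = MetricsAsserts.getLongGauge("LastJournalTimestamp", metrics);
    assertTrue(last > begin);  // gauge tracks the last successful write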


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94cf7ab9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94cf7ab9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94cf7ab9

Branch: refs/heads/HADOOP-11890
Commit: 94cf7ab9d28a885181afeb2c181dfe857d158254
Parents: a153b96
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Sep 9 11:03:57 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Wed Sep 9 11:03:57 2015 +0530

----------------------------------------------------------------------
 .../hadoop-common/src/site/markdown/Metrics.md            |  1 +
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt               |  3 +++
 .../org/apache/hadoop/hdfs/qjournal/server/Journal.java   | 10 +++++++++-
 .../hadoop/hdfs/qjournal/server/JournalMetrics.java       |  7 ++++++-
 .../hadoop/hdfs/qjournal/server/TestJournalNode.java      |  9 +++++++++
 5 files changed, 28 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94cf7ab9/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index 8722968..efe1f2f 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -275,6 +275,7 @@ The server-side metrics for a journal from the JournalNode's perspective. Each m
 | `CurrentLagTxns` | The number of transactions that this JournalNode is lagging |
 | `LastWrittenTxId` | The highest transaction id stored on this JournalNode |
 | `LastPromisedEpoch` | The last epoch number which this node has promised not to accept any lower epoch, or 0 if no promises have been made |
+| `LastJournalTimestamp` | The timestamp of last successfully written transaction |
 
 datanode
 --------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94cf7ab9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cfedb0a..14666dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -921,6 +921,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)
 
+    HDFS-8929. Add a metric to expose the timestamp of the last journal
+    (surendra singh lilhore via vinayakumarb)
+
   BUG FIXES
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94cf7ab9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 813f267..b94cd8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StopWatch;
+import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Charsets;
@@ -132,6 +133,8 @@ public class Journal implements Closeable {
 
   private final JournalMetrics metrics;
 
+  private long lastJournalTimestamp = 0;
+
   /**
    * Time threshold for sync calls, beyond which a warning should be logged to the console.
    */
@@ -253,7 +256,11 @@ public class Journal implements Closeable {
   synchronized long getCommittedTxnIdForTests() throws IOException {
     return committedTxnId.get();
   }
-  
+
+  synchronized long getLastJournalTimestamp() {
+    return lastJournalTimestamp;
+  }
+
   synchronized long getCurrentLagTxns() throws IOException {
     long committed = committedTxnId.get();
     if (committed == 0) {
@@ -411,6 +418,7 @@ public class Journal implements Closeable {
     
     updateHighestWrittenTxId(lastTxnId);
     nextTxId = lastTxnId + 1;
+    lastJournalTimestamp = Time.now();
   }
 
   public void heartbeat(RequestInfo reqInfo) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94cf7ab9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
index 40c0bff..cffe2c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
@@ -109,7 +109,12 @@ class JournalMetrics {
       return -1L;
     }
   }
-  
+
+  @Metric("The timestamp of last successfully written transaction")
+  public long getLastJournalTimestamp() {
+    return journal.getLastJournalTimestamp();
+  }
+
   void addSync(long us) {
     for (MutableQuantiles q : syncsQuantiles) {
       q.add(us);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94cf7ab9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
index 2115671..9dd6846 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
@@ -106,7 +106,9 @@ public class TestJournalNode {
     MetricsAsserts.assertCounter("BatchesWritten", 0L, metrics);
     MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
     MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);
+    MetricsAsserts.assertGauge("LastJournalTimestamp", 0L, metrics);
 
+    long beginTimestamp = System.currentTimeMillis();
     IPCLoggerChannel ch = new IPCLoggerChannel(
         conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
     ch.newEpoch(1).get();
@@ -119,6 +121,10 @@ public class TestJournalNode {
     MetricsAsserts.assertCounter("BatchesWritten", 1L, metrics);
     MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 0L, metrics);
     MetricsAsserts.assertGauge("CurrentLagTxns", 0L, metrics);
+    long lastJournalTimestamp = MetricsAsserts.getLongGauge(
+        "LastJournalTimestamp", metrics);
+    assertTrue(lastJournalTimestamp > beginTimestamp);
+    beginTimestamp = lastJournalTimestamp;
 
     ch.setCommittedTxId(100L);
     ch.sendEdits(1L, 2, 1, "goodbye".getBytes(Charsets.UTF_8)).get();
@@ -128,6 +134,9 @@ public class TestJournalNode {
     MetricsAsserts.assertCounter("BatchesWritten", 2L, metrics);
     MetricsAsserts.assertCounter("BatchesWrittenWhileLagging", 1L, metrics);
     MetricsAsserts.assertGauge("CurrentLagTxns", 98L, metrics);
+    lastJournalTimestamp = MetricsAsserts.getLongGauge(
+        "LastJournalTimestamp", metrics);
+    assertTrue(lastJournalTimestamp > beginTimestamp);
 
   }
   


[28/50] [abbrv] hadoop git commit: HDFS-9036. In BlockPlacementPolicyWithNodeGroup#chooseLocalStorage, a random node is selected even though fallbackToLocalRack is true. (Contributed by J.Andreina)

Posted by ec...@apache.org.
HDFS-9036. In BlockPlacementPolicyWithNodeGroup#chooseLocalStorage, a random node is selected even though fallbackToLocalRack is true. (Contributed by J.Andreina)
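
In effect, chooseLocalNodeGroup now reports failure by returning null rather than silently falling back to a random node, which lets the caller honor fallbackToLocalRack and place the first replica on the writer's local rack, as the new testChooseTargetForLocalStorage below verifies.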


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7156503
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7156503
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7156503

Branch: refs/heads/HADOOP-11890
Commit: c7156503856e24faf844c5c647157b310d8b537f
Parents: d845547
Author: Vinayakumar B <vi...@apache.org>
Authored: Sat Sep 12 17:40:16 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Sat Sep 12 17:40:16 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |  4 ++++
 .../BlockPlacementPolicyWithNodeGroup.java       | 14 ++++++--------
 .../TestReplicationPolicyWithNodeGroup.java      | 19 +++++++++++++++++++
 3 files changed, 29 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7156503/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5a42499..0c891ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1315,6 +1315,10 @@ Release 2.8.0 - UNRELEASED
     HDFS-8581. ContentSummary on / skips further counts on yielding lock
     (J.Andreina via vinayakumarb)
 
+    HDFS-9036. In BlockPlacementPolicyWithNodeGroup#chooseLocalStorage , random
+    node is selected eventhough fallbackToLocalRack is true.
+    (J.Andreina via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7156503/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
index b1c4b78..89f47ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.net.NodeBase;
  * for placing block replicas on environment with node-group layer.
  * The replica placement strategy is adjusted to:
  * If the writer is on a datanode, the 1st replica is placed on the local 
- *     node (or local node-group), otherwise a random datanode. 
+ *     node(or local node-group or on local rack), otherwise a random datanode.
  * The 2nd replica is placed on a datanode that is on a different rack with 1st
  *     replica node. 
  * The 3rd replica is placed on a datanode which is on a different node-group
@@ -165,7 +165,7 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
   /* choose one node from the nodegroup that <i>localMachine</i> is on.
    * if no such node is available, choose one node from the nodegroup where
    * a second replica is on.
-   * if still no such node is available, choose a random node in the cluster.
+   * if still no such node is available, return null.
    * @return the chosen node
    */
   private DatanodeStorageInfo chooseLocalNodeGroup(
@@ -195,14 +195,12 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
               excludedNodes, blocksize, maxNodesPerRack, results,
               avoidStaleNodes, storageTypes);
         } catch(NotEnoughReplicasException e2) {
-          //otherwise randomly choose one from the network
-          return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
-              maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+          //otherwise return null
+          return null;
         }
       } else {
-        //otherwise randomly choose one from the network
-        return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
-            maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+        //otherwise return null
+        return null;
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7156503/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index ed54aeb..e973925 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -487,6 +487,25 @@ public class TestReplicationPolicyWithNodeGroup {
   }
 
   /**
+   * In this testcase, client is dataNodes[7], but it is not qualified
+   * to be chosen. And there is no other node available on client Node group.
+   * So the 1st replica should be placed on client local rack dataNodes[6]
+   * @throws Exception
+   */
+  @Test
+  public void testChooseTargetForLocalStorage() throws Exception {
+    updateHeartbeatWithUsage(dataNodes[7],
+        2* HdfsServerConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
+        (HdfsServerConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L,
+        0L, 0L, 0, 0); // no space
+
+    DatanodeStorageInfo[] targets;
+    targets = chooseTarget(1, dataNodes[7]);
+    assertEquals(targets.length, 1);
+    assertTrue(targets[0].getDatanodeDescriptor().equals(dataNodes[6]));
+  }
+
+  /**
    * This testcase tests re-replication, when dataNodes[0] is already chosen.
    * So the 1st replica can be placed on random rack. 
    * the 2nd replica should be placed on different node and nodegroup by same rack as 


[23/50] [abbrv] hadoop git commit: YARN-4145. Make RMHATestBase abstract so it's not run when running all tests under that namespace (adhoot via rkanter)

Posted by ec...@apache.org.
YARN-4145. Make RMHATestBase abstract so it's not run when running all tests under that namespace (adhoot via rkanter)
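
Declaring the base class abstract prevents JUnit from instantiating and running it as a test class in its own right; only its concrete subclasses execute.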


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea4bb274
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea4bb274
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea4bb274

Branch: refs/heads/HADOOP-11890
Commit: ea4bb2749f966a5eaf712d1dbb2c845df0f5ca67
Parents: b84fb41
Author: Robert Kanter <rk...@apache.org>
Authored: Fri Sep 11 11:42:50 2015 -0700
Committer: Robert Kanter <rk...@apache.org>
Committed: Fri Sep 11 11:46:10 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea4bb274/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bf753f1..3246946 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -427,6 +427,9 @@ Release 2.8.0 - UNRELEASED
 
     YARN-4086. Allow Aggregated Log readers to handle HAR files (rkanter)
 
+    YARN-4145. Make RMHATestBase abstract so its not run when running all
+    tests under that namespace (adhoot via rkanter)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea4bb274/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java
index 6f3666f..40b59ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/RMHATestBase.java
@@ -45,7 +45,7 @@ import org.junit.Assert;
 import org.junit.Before;
 
 
-public class RMHATestBase extends ClientBaseWithFixes{
+public abstract class RMHATestBase extends ClientBaseWithFixes{
 
   private static final int ZK_TIMEOUT_MS = 5000;
   private static StateChangeRequestInfo requestInfo =


[02/50] [abbrv] hadoop git commit: YARN-4096. App local logs are leaked if log aggregation fails to initialize for the app. Contributed by Jason Lowe.

Posted by ec...@apache.org.
YARN-4096. App local logs are leaked if log aggregation fails to initialize for the app. Contributed by Jason Lowe.
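
The fix keeps the failed app's aggregator scheduled (with aggregation disabled) so its cleanup path can still remove the local logs, and rethrows the directory-creation failure only after the aggregator has been handed to the thread pool.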


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16b9037d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16b9037d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16b9037d

Branch: refs/heads/HADOOP-11890
Commit: 16b9037dc1300b8bdbe54ba7cd47c53fe16e93d8
Parents: 970daaa
Author: Zhihai Xu <zx...@apache.org>
Authored: Tue Sep 8 12:29:54 2015 -0700
Committer: Zhihai Xu <zx...@apache.org>
Committed: Tue Sep 8 12:29:54 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                    |  3 +++
 .../logaggregation/AppLogAggregator.java           |  2 ++
 .../logaggregation/AppLogAggregatorImpl.java       |  5 +++++
 .../logaggregation/LogAggregationService.java      | 14 +++++++++-----
 .../logaggregation/TestLogAggregationService.java  | 17 ++++++++++++-----
 5 files changed, 31 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16b9037d/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6d3796a..7308075 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -898,6 +898,9 @@ Release 2.7.2 - UNRELEASED
     YARN-4087. Followup fixes after YARN-2019 regarding RM behavior when
     state-store error occurs. (Jian He via xgong)
 
+    YARN-4096. App local logs are leaked if log aggregation fails to initialize
+    for the app. (Jason Lowe via zxu)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16b9037d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java
index 83c5d5a..0178699 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregator.java
@@ -27,4 +27,6 @@ public interface AppLogAggregator extends Runnable {
   void abortLogAggregation();
 
   void finishLogAggregation();
+
+  void disableLogAggregation();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16b9037d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index 742b8a9..b2342c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -596,6 +596,11 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
     this.notifyAll();
   }
 
+  @Override
+  public void disableLogAggregation() {
+    this.logAggregationDisabled = true;
+  }
+
   @Private
   @VisibleForTesting
   // This is only used for testing.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16b9037d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
index 259e9ae..6a6f101 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java
@@ -363,19 +363,19 @@ public class LogAggregationService extends AbstractService implements
       throw new YarnRuntimeException("Duplicate initApp for " + appId);
     }
     // wait until check for existing aggregator to create dirs
+    YarnRuntimeException appDirException = null;
     try {
       // Create the app dir
       createAppDir(user, appId, userUgi);
     } catch (Exception e) {
-      appLogAggregators.remove(appId);
-      closeFileSystems(userUgi);
+      appLogAggregator.disableLogAggregation();
       if (!(e instanceof YarnRuntimeException)) {
-        e = new YarnRuntimeException(e);
+        appDirException = new YarnRuntimeException(e);
+      } else {
+        appDirException = (YarnRuntimeException)e;
       }
-      throw (YarnRuntimeException)e;
     }
 
-
     // TODO Get the user configuration for the list of containers that need log
     // aggregation.
 
@@ -391,6 +391,10 @@ public class LogAggregationService extends AbstractService implements
       }
     };
     this.threadPool.execute(aggregatorWrapper);
+
+    if (appDirException != null) {
+      throw appDirException;
+    }
   }
 
   protected void closeFileSystems(final UserGroupInformation userUgi) {

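The LogAggregationService change above defers the createAppDir failure instead of
removing the aggregator: the wrapper is always handed to the thread pool first, so
the aggregator's run() can still delete the app's local logs through the
DeletionService, and only then is the exception rethrown to the caller. A minimal,
self-contained sketch of that defer-and-rethrow pattern (all names here are
illustrative stand-ins, not the Hadoop classes):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    public class DeferredInitFailure {
      public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        RuntimeException deferred = null;
        try {
          throw new RuntimeException("KABOOM!"); // stands in for createAppDir failing
        } catch (RuntimeException e) {
          deferred = e;                          // remember the failure, do not throw yet
        }
        // Schedule the worker unconditionally so its cleanup path still runs.
        pool.execute(() -> System.out.println("cleaning up local logs"));
        pool.shutdown();
        if (deferred != null) {
          throw deferred;                        // surface the failure last
        }
      }
    }
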
http://git-wip-us.apache.org/repos/asf/hadoop/blob/16b9037d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index 77d75ca..77c6e3c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -731,9 +731,10 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
     this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
         this.remoteRootLogDir.getAbsolutePath());
-        
+
+    DeletionService spyDelSrvc = spy(this.delSrvc);
     LogAggregationService logAggregationService = spy(
-        new LogAggregationService(dispatcher, this.context, this.delSrvc,
+        new LogAggregationService(dispatcher, this.context, spyDelSrvc,
                                   super.dirsHandler));
     logAggregationService.init(this.conf);
     logAggregationService.start();
@@ -741,6 +742,11 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     ApplicationId appId =
         BuilderUtils.newApplicationId(System.currentTimeMillis(),
           (int) (Math.random() * 1000));
+
+    File appLogDir =
+        new File(localLogDir, ConverterUtils.toString(appId));
+    appLogDir.mkdir();
+
     Exception e = new RuntimeException("KABOOM!");
     doThrow(e)
       .when(logAggregationService).createAppDir(any(String.class),
@@ -759,9 +765,6 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     };
     checkEvents(appEventHandler, expectedEvents, false,
         "getType", "getApplicationID", "getDiagnostic");
-    // filesystems may have been instantiated
-    verify(logAggregationService).closeFileSystems(
-        any(UserGroupInformation.class));
 
     // verify trying to collect logs for containers/apps we don't know about
     // doesn't blow up and tear down the NM
@@ -774,6 +777,10 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
 
     logAggregationService.stop();
     assertEquals(0, logAggregationService.getNumAggregators());
+    verify(spyDelSrvc).delete(eq(user), any(Path.class),
+        Mockito.<Path>anyVararg());
+    verify(logAggregationService).closeFileSystems(
+        any(UserGroupInformation.class));
   }
 
   private void writeContainerLogs(File appLogDir, ContainerId containerId,


[45/50] [abbrv] hadoop git commit: YARN-313. Add Admin API for supporting node resource configuration in command line. (Contributed by Inigo Goiri, Kenji Kikushima and Junping Du)

Posted by ec...@apache.org.
YARN-313. Add Admin API for supporting node resource configuration in command line. (Contributed by Inigo Goiri, Kenji Kikushima and Junping Du)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73e3a49e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73e3a49e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73e3a49e

Branch: refs/heads/HADOOP-11890
Commit: 73e3a49eb0d58f08549dc6b7054eb5c1f62819dc
Parents: a440567
Author: Junping Du <ju...@apache.org>
Authored: Tue Sep 15 07:55:59 2015 -0700
Committer: Junping Du <ju...@apache.org>
Committed: Tue Sep 15 07:56:47 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   4 +
 .../hadoop/yarn/api/records/ResourceOption.java |   3 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |   4 +
 .../ResourceManagerAdministrationProtocol.java  |  16 +-
 .../RefreshNodesResourcesRequest.java           |  39 +++++
 .../RefreshNodesResourcesResponse.java          |  39 +++++
 ...esourcemanager_administration_protocol.proto |   3 +-
 ..._server_resourcemanager_service_protos.proto |   6 +
 .../hadoop/yarn/client/cli/RMAdminCLI.java      |  61 +++++++-
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java  |  31 ++--
 ...nagerAdministrationProtocolPBClientImpl.java |  19 +++
 ...agerAdministrationProtocolPBServiceImpl.java |  22 +++
 .../pb/RefreshNodesResourcesRequestPBImpl.java  |  72 +++++++++
 .../pb/RefreshNodesResourcesResponsePBImpl.java |  72 +++++++++
 .../hadoop/yarn/api/TestPBImplRecords.java      |  16 ++
 .../server/resourcemanager/AdminService.java    |  52 +++++++
 .../resource/DynamicResourceConfiguration.java  | 149 +++++++++++++++++++
 .../server/resourcemanager/rmnode/RMNode.java   |   3 -
 .../resourcemanager/TestRMAdminService.java     |  46 ++++++
 .../resourcemanager/TestRMNodeTransitions.java  |   6 +-
 .../scheduler/fifo/TestFifoScheduler.java       |   2 +-
 21 files changed, 642 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index cff5205..c2bee70 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -190,6 +190,10 @@ Release 2.8.0 - UNRELEASED
     YARN-2884. Added a proxy service in NM to proxy the communication
     between AM and RM. (Kishore Chaliparambil via jianhe)
 
+    YARN-313. Add Admin API for supporting node resource configuration in
+    command line. (Inigo Goiri, Kenji Kikushima and Junping Du 
+    via junping_du)
+
   IMPROVEMENTS
 
     YARN-644. Basic null check is not performed on passed in arguments before

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java
index 1ca90cc..e9de052 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceOption.java
@@ -36,6 +36,9 @@ public abstract class ResourceOption {
     return resourceOption;
   }
 
+  /** Negative value means no timeout. */
+  public static final int OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT = -1;
+
   /**
    * Get the <em>resource</em> of the ResourceOption.
    * @return <em>resource</em> of the ResourceOption

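The new constant is the sentinel the RMAdminCLI change below passes when the user
omits the timeout argument; per the javadoc, a negative value means no over-commit
timeout. A hedged fragment showing the intended call site (the values are
placeholders; memory is in MB):

    Resource resource = Resources.createResource(4096, 8); // 4096 MB, 8 vcores
    ResourceOption option = ResourceOption.newInstance(
        resource, ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT);
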
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index cc4f5de..33e8a1f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -39,6 +39,9 @@ import org.apache.hadoop.yarn.api.ApplicationConstants;
 public class YarnConfiguration extends Configuration {
 
   @Private
+  public static final String DR_CONFIGURATION_FILE= "dynamic-resources.xml";
+
+  @Private
   public static final String CS_CONFIGURATION_FILE= "capacity-scheduler.xml";
 
   @Private
@@ -57,6 +60,7 @@ public class YarnConfiguration extends Configuration {
   @Private
   public static final List<String> RM_CONFIGURATION_FILES =
       Collections.unmodifiableList(Arrays.asList(
+          DR_CONFIGURATION_FILE,
           CS_CONFIGURATION_FILE,
           HADOOP_POLICY_CONFIGURATION_FILE,
           YARN_SITE_CONFIGURATION_FILE,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
index 08a258c..8523342 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ResourceManagerAdministrationProtocol.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.api;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.io.retry.Idempotent;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
@@ -51,6 +52,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequ
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse;
 
 @Private
 public interface ResourceManagerAdministrationProtocol extends GetUserMappingsProtocol {
@@ -74,7 +77,7 @@ public interface ResourceManagerAdministrationProtocol extends GetUserMappingsPr
 
   @Private
   @Idempotent
-  public RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
+  RefreshUserToGroupsMappingsResponse refreshUserToGroupsMappings(
       RefreshUserToGroupsMappingsRequest request)
   throws StandbyException, YarnException, IOException;
 
@@ -107,9 +110,14 @@ public interface ResourceManagerAdministrationProtocol extends GetUserMappingsPr
   @Private
   @Idempotent
   public UpdateNodeResourceResponse updateNodeResource(
-      UpdateNodeResourceRequest request) 
-  throws YarnException, IOException;
-   
+      UpdateNodeResourceRequest request) throws YarnException, IOException;
+
+  @Private
+  @Evolving
+  @Idempotent
+  public RefreshNodesResourcesResponse refreshNodesResources(
+      RefreshNodesResourcesRequest request) throws YarnException, IOException;
+
   @Private
   @Idempotent
   public AddToClusterNodeLabelsResponse addToClusterNodeLabels(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesResourcesRequest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesResourcesRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesResourcesRequest.java
new file mode 100644
index 0000000..f8c91f6
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesResourcesRequest.java
@@ -0,0 +1,39 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * Request to refresh the resources of the nodes at the ResourceManager.
+ */
+@Private
+@Evolving
+public abstract class RefreshNodesResourcesRequest {
+  @Public
+  @Evolving
+  public static RefreshNodesResourcesRequest newInstance() {
+    RefreshNodesResourcesRequest request =
+        Records.newRecord(RefreshNodesResourcesRequest.class);
+    return request;
+  }
+}

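As with other protocol records, the class is abstract and callers obtain the
protobuf-backed implementation through the records factory. A hedged fragment
(assuming an adminProtocol proxy such as the one the RMAdminCLI change creates
later in this commit):

    RefreshNodesResourcesRequest request = RefreshNodesResourcesRequest.newInstance();
    adminProtocol.refreshNodesResources(request);
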
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesResourcesResponse.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesResourcesResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesResourcesResponse.java
new file mode 100644
index 0000000..9d7bb21
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RefreshNodesResourcesResponse.java
@@ -0,0 +1,39 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.util.Records;
+
+/**
+ * Response to a request to refresh the resources of the nodes at the ResourceManager.
+ */
+@Private
+@Evolving
+public abstract class RefreshNodesResourcesResponse {
+  @Private
+  @Unstable
+  public static RefreshNodesResourcesResponse newInstance() {
+    RefreshNodesResourcesResponse response =
+        Records.newRecord(RefreshNodesResourcesResponse.class);
+    return response;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
index da8f34f..1134623 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/resourcemanager_administration_protocol.proto
@@ -38,7 +38,8 @@ service ResourceManagerAdministrationProtocolService {
   rpc refreshAdminAcls(RefreshAdminAclsRequestProto) returns (RefreshAdminAclsResponseProto);
   rpc refreshServiceAcls(RefreshServiceAclsRequestProto) returns (RefreshServiceAclsResponseProto);
   rpc getGroupsForUser(GetGroupsForUserRequestProto) returns (GetGroupsForUserResponseProto);
-  rpc updateNodeResource (UpdateNodeResourceRequestProto) returns (UpdateNodeResourceResponseProto);
+  rpc updateNodeResource(UpdateNodeResourceRequestProto) returns (UpdateNodeResourceResponseProto);
+  rpc refreshNodesResources(RefreshNodesResourcesRequestProto) returns (RefreshNodesResourcesResponseProto);
   rpc addToClusterNodeLabels(AddToClusterNodeLabelsRequestProto) returns (AddToClusterNodeLabelsResponseProto);
   rpc removeFromClusterNodeLabels(RemoveFromClusterNodeLabelsRequestProto) returns (RemoveFromClusterNodeLabelsResponseProto);
   rpc replaceLabelsOnNodes(ReplaceLabelsOnNodeRequestProto) returns (ReplaceLabelsOnNodeResponseProto);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
index 037f78c..eaf658f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/server/yarn_server_resourcemanager_service_protos.proto
@@ -76,6 +76,12 @@ message UpdateNodeResourceRequestProto {
 message UpdateNodeResourceResponseProto {
 }
 
+message RefreshNodesResourcesRequestProto {
+}
+
+message RefreshNodesResourcesResponseProto {
+}
+
 message AddToClusterNodeLabelsRequestProto {
   repeated NodeLabelProto nodeLabels = 1;
 }

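Both new protobuf messages are deliberately empty: fields can be added to them in
later releases without breaking wire compatibility, so the RPC signature is fixed
now while its payload stays extensible.
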
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 8278bd9..92cb934 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.yarn.api.records.DecommissionType;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeLabel;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceOption;
 import org.apache.hadoop.yarn.client.ClientRMProxy;
 import org.apache.hadoop.yarn.client.RMHAServiceTarget;
 import org.apache.hadoop.yarn.conf.HAUtil;
@@ -58,12 +60,15 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
 import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.resource.Resources;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableMap;
@@ -98,6 +103,8 @@ public class RMAdminCLI extends HAAdmin {
               + "[-g [timeout in seconds] is optional, if we specify the "
               + "timeout then ResourceManager will wait for timeout before "
               + "marking the NodeManager as decommissioned."))
+          .put("-refreshNodesResources", new UsageInfo("",
+              "Refresh resources of NodeManagers at the ResourceManager."))
           .put("-refreshSuperUserGroupsConfiguration", new UsageInfo("",
               "Refresh superuser proxy groups mappings"))
           .put("-refreshUserToGroupsMappings", new UsageInfo("",
@@ -136,7 +143,10 @@ public class RMAdminCLI extends HAAdmin {
           .put("-refreshClusterMaxPriority",
               new UsageInfo("",
                   "Refresh cluster max priority"))
-              .build();
+          .put("-updateNodeResource",
+              new UsageInfo("[NodeID] [MemSize] [vCores] ([OvercommitTimeout])",
+                  "Update the resource of a specific node."))
+          .build();
 
   public RMAdminCLI() {
     super();
@@ -221,6 +231,7 @@ public class RMAdminCLI extends HAAdmin {
     "yarn rmadmin" +
       " [-refreshQueues]" +
       " [-refreshNodes [-g [timeout in seconds]]]" +
+      " [-refreshNodesResources]" +
       " [-refreshSuperUserGroupsConfiguration]" +
       " [-refreshUserToGroupsMappings]" +
       " [-refreshAdminAcls]" +
@@ -230,7 +241,8 @@ public class RMAdminCLI extends HAAdmin {
                   + "label2(exclusive=false),label3\">]" +
       " [-removeFromClusterNodeLabels <label1,label2,label3>]" +
       " [-replaceLabelsOnNode <\"node1[:port]=label1,label2 node2[:port]=label1\">]" +
-      " [-directlyAccessNodeLabelStore]]");
+      " [-directlyAccessNodeLabelStore]]" +
+      " [-updateNodeResource [NodeID] [MemSize] [vCores] ([OvercommitTimeout])]");
     if (isHAEnabled) {
       appendHAUsage(summary);
     }
@@ -348,6 +360,15 @@ public class RMAdminCLI extends HAAdmin {
     return 0;
   }
 
+  private int refreshNodesResources() throws IOException, YarnException {
+    // Refresh the resources of the NodeManagers at the ResourceManager
+    ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol();
+    RefreshNodesResourcesRequest request =
+        recordFactory.newRecordInstance(RefreshNodesResourcesRequest.class);
+    adminProtocol.refreshNodesResources(request);
+    return 0;
+  }
+
   private int refreshUserToGroupsMappings() throws IOException,
       YarnException {
     // Refresh the user-to-groups mappings
@@ -395,6 +416,22 @@ public class RMAdminCLI extends HAAdmin {
     return 0;
   }
   
+  private int updateNodeResource(String nodeIdStr, int memSize,
+      int cores, int overCommitTimeout) throws IOException, YarnException {
+    // Update the resource of the given node
+    ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol();
+    NodeId nodeId = ConverterUtils.toNodeId(nodeIdStr);
+    Resource resource = Resources.createResource(memSize, cores);
+    Map<NodeId, ResourceOption> resourceMap =
+        new HashMap<NodeId, ResourceOption>();
+    resourceMap.put(
+        nodeId, ResourceOption.newInstance(resource, overCommitTimeout));
+    UpdateNodeResourceRequest request =
+        UpdateNodeResourceRequest.newInstance(resourceMap);
+    adminProtocol.updateNodeResource(request);
+    return 0;
+  }
+
   private int getGroups(String[] usernames) throws IOException {
     // Get groups users belongs to
     ResourceManagerAdministrationProtocol adminProtocol = createAdminProtocol();
@@ -653,6 +690,7 @@ public class RMAdminCLI extends HAAdmin {
     // verify that we have enough command line parameters
     //
     if ("-refreshAdminAcls".equals(cmd) || "-refreshQueues".equals(cmd) ||
+        "-refreshNodesResources".equals(cmd) ||
         "-refreshServiceAcl".equals(cmd) ||
         "-refreshUserToGroupsMappings".equals(cmd) ||
         "-refreshSuperUserGroupsConfiguration".equals(cmd)) {
@@ -681,6 +719,8 @@ public class RMAdminCLI extends HAAdmin {
           printUsage(cmd, isHAEnabled);
           return -1;
         }
+      } else if ("-refreshNodesResources".equals(cmd)) {
+        exitCode = refreshNodesResources();
       } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
         exitCode = refreshUserToGroupsMappings();
       } else if ("-refreshSuperUserGroupsConfiguration".equals(cmd)) {
@@ -694,6 +734,23 @@ public class RMAdminCLI extends HAAdmin {
       } else if ("-getGroups".equals(cmd)) {
         String[] usernames = Arrays.copyOfRange(args, i, args.length);
         exitCode = getGroups(usernames);
+      } else if ("-updateNodeResource".equals(cmd)) {
+        if (args.length < 4 || args.length > 5) {
+          System.err.println("Number of parameters specified for " +
+              "updateNodeResource is wrong.");
+          printUsage(cmd, isHAEnabled);
+          exitCode = -1;
+        } else {
+          String nodeID = args[i++];
+          String memSize = args[i++];
+          String cores = args[i++];
+          int overCommitTimeout = ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT;
+          if (i == args.length - 1) {
+            overCommitTimeout = Integer.parseInt(args[i]);
+          }
+          exitCode = updateNodeResource(nodeID, Integer.parseInt(memSize),
+              Integer.parseInt(cores), overCommitTimeout);
+        }
       } else if ("-addToClusterNodeLabels".equals(cmd)) {
         if (i >= args.length) {
           System.err.println(NO_LABEL_ERR_MSG);

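Taken together, the two new admin commands would be invoked as follows (hedged
examples; the NodeID, memory size in MB, vcore count, and the optional
over-commit timeout are all placeholders):

    yarn rmadmin -updateNodeResource node1.example.com:8041 4096 8
    yarn rmadmin -refreshNodesResources
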
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
index a52225b..74ed848 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
@@ -204,7 +204,6 @@ public class TestRMAdminCLI {
     when(admin.checkForDecommissioningNodes(any(
         CheckForDecommissioningNodesRequest.class))).thenReturn(response);
     assertEquals(0, rmAdminCLI.run(args));
-//    verify(admin).refreshNodes(any(RefreshNodesRequest.class));
     verify(admin).refreshNodes(
         RefreshNodesRequest.newInstance(DecommissionType.GRACEFUL));
 
@@ -343,12 +342,17 @@ public class TestRMAdminCLI {
       assertTrue(dataOut
           .toString()
           .contains(
-              "yarn rmadmin [-refreshQueues] [-refreshNodes [-g [timeout in seconds]]] [-refreshSuper" +
-              "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " +
-              "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup" +
-              " [username]] [-addToClusterNodeLabels <\"label1(exclusive=true),label2(exclusive=false),label3\">]" +
-              " [-removeFromClusterNodeLabels <label1,label2,label3>] [-replaceLabelsOnNode " +
-              "<\"node1[:port]=label1,label2 node2[:port]=label1\">] [-directlyAccessNodeLabelStore]] " +
+              "yarn rmadmin [-refreshQueues] [-refreshNodes [-g [timeout in " +
+              "seconds]]] [-refreshNodesResources] [-refreshSuperUserGroups" +
+              "Configuration] [-refreshUserToGroupsMappings] " +
+              "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup " +
+              "[username]] [-addToClusterNodeLabels " +
+              "<\"label1(exclusive=true),label2(exclusive=false),label3\">] " +
+              "[-removeFromClusterNodeLabels <label1,label2,label3>] " +
+              "[-replaceLabelsOnNode " +
+              "<\"node1[:port]=label1,label2 node2[:port]=label1\">] " +
+              "[-directlyAccessNodeLabelStore]] [-updateNodeResource " +
+              "[NodeID] [MemSize] [vCores] ([OvercommitTimeout])] " +
               "[-help [cmd]]"));
       assertTrue(dataOut
           .toString()
@@ -360,6 +364,11 @@ public class TestRMAdminCLI {
           .contains(
               "-refreshNodes [-g [timeout in seconds]]: Refresh the hosts information at the " +
               "ResourceManager."));
+      assertTrue(dataOut
+          .toString()
+          .contains(
+              "-refreshNodesResources: Refresh resources of NodeManagers at the " +
+              "ResourceManager."));
       assertTrue(dataOut.toString().contains(
           "-refreshUserToGroupsMappings: Refresh user-to-groups mappings"));
       assertTrue(dataOut
@@ -387,6 +396,8 @@ public class TestRMAdminCLI {
           "Usage: yarn rmadmin [-refreshQueues]", dataErr, 0);
       testError(new String[] { "-help", "-refreshNodes" },
           "Usage: yarn rmadmin [-refreshNodes [-g [timeout in seconds]]]", dataErr, 0);
+      testError(new String[] { "-help", "-refreshNodesResources" },
+          "Usage: yarn rmadmin [-refreshNodesResources]", dataErr, 0);
       testError(new String[] { "-help", "-refreshUserToGroupsMappings" },
           "Usage: yarn rmadmin [-refreshUserToGroupsMappings]", dataErr, 0);
       testError(
@@ -423,13 +434,15 @@ public class TestRMAdminCLI {
       assertEquals(0, rmAdminCLIWithHAEnabled.run(args));
       oldOutPrintStream.println(dataOut);
       String expectedHelpMsg = 
-          "yarn rmadmin [-refreshQueues] [-refreshNodes [-g [timeout in seconds]]] [-refreshSuper"
-              + "UserGroupsConfiguration] [-refreshUserToGroupsMappings] "
+          "yarn rmadmin [-refreshQueues] [-refreshNodes [-g [timeout in seconds]]] "
+              + "[-refreshNodesResources] [-refreshSuperUserGroupsConfiguration] "
+              + "[-refreshUserToGroupsMappings] "
               + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup"
               + " [username]] [-addToClusterNodeLabels <\"label1(exclusive=true),"
                   + "label2(exclusive=false),label3\">]"
               + " [-removeFromClusterNodeLabels <label1,label2,label3>] [-replaceLabelsOnNode "
               + "<\"node1[:port]=label1,label2 node2[:port]=label1\">] [-directlyAccessNodeLabelStore]] "
+              + "[-updateNodeResource [NodeID] [MemSize] [vCores] ([OvercommitTimeout])] "
               + "[-transitionToActive [--forceactive] <serviceId>] "
               + "[-transitionToStandby <serviceId>] "
               + "[-getServiceState <serviceId>] [-checkHealth <serviceId>] [-help [cmd]]";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
index 44ef654..077edf3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/client/ResourceManagerAdministrationProtocolPBClientImpl.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.Refre
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RemoveFromClusterNodeLabelsRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
 import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsRequest;
@@ -54,6 +55,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRespons
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse;
@@ -93,6 +96,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOn
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
 
 import com.google.protobuf.ServiceException;
 
@@ -232,6 +237,20 @@ public class ResourceManagerAdministrationProtocolPBClientImpl implements Resour
   }
 
   @Override
+  public RefreshNodesResourcesResponse refreshNodesResources(
+      RefreshNodesResourcesRequest request) throws YarnException, IOException {
+    RefreshNodesResourcesRequestProto requestProto =
+      ((RefreshNodesResourcesRequestPBImpl)request).getProto();
+    try {
+      return new RefreshNodesResourcesResponsePBImpl(
+          proxy.refreshNodesResources(null, requestProto));
+    } catch (ServiceException e) {
+      RPCUtil.unwrapAndThrowException(e);
+      return null;
+    }
+  }
+
+  @Override
   public AddToClusterNodeLabelsResponse addToClusterNodeLabels(
       AddToClusterNodeLabelsRequest request) throws YarnException, IOException {
     AddToClusterNodeLabelsRequestProto requestProto =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
index ec734f3..aafce08 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/impl/pb/service/ResourceManagerAdministrationProtocolPBServiceImpl.java
@@ -48,6 +48,8 @@ import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.Repla
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.ReplaceLabelsOnNodeResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.UpdateNodeResourceResponseProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesResponseProto;
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocolPB;
 import org.apache.hadoop.yarn.server.api.protocolrecords.AddToClusterNodeLabelsResponse;
@@ -56,6 +58,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioning
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshClusterMaxPriorityResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse;
@@ -88,6 +91,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOn
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.UpdateNodeResourceResponsePBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -232,6 +237,23 @@ public class ResourceManagerAdministrationProtocolPBServiceImpl implements Resou
   }
 
   @Override
+  public RefreshNodesResourcesResponseProto refreshNodesResources(
+      RpcController controller, RefreshNodesResourcesRequestProto proto)
+          throws ServiceException {
+    RefreshNodesResourcesRequestPBImpl request =
+        new RefreshNodesResourcesRequestPBImpl(proto);
+    try {
+      RefreshNodesResourcesResponse response =
+          real.refreshNodesResources(request);
+      return ((RefreshNodesResourcesResponsePBImpl)response).getProto();
+    } catch (YarnException e) {
+      throw new ServiceException(e);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
   public AddToClusterNodeLabelsResponseProto addToClusterNodeLabels(
       RpcController controller, AddToClusterNodeLabelsRequestProto proto)
       throws ServiceException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResourcesRequestPBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResourcesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResourcesRequestPBImpl.java
new file mode 100644
index 0000000..5742ebd
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResourcesRequestPBImpl.java
@@ -0,0 +1,72 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest;
+
+import com.google.protobuf.TextFormat;
+
+@Private
+@Unstable
+public class RefreshNodesResourcesRequestPBImpl extends RefreshNodesResourcesRequest {
+
+  RefreshNodesResourcesRequestProto proto =
+      RefreshNodesResourcesRequestProto.getDefaultInstance();
+  RefreshNodesResourcesRequestProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public RefreshNodesResourcesRequestPBImpl() {
+    builder = RefreshNodesResourcesRequestProto.newBuilder();
+  }
+
+  public RefreshNodesResourcesRequestPBImpl(
+      RefreshNodesResourcesRequestProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public RefreshNodesResourcesRequestProto getProto() {
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+}

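The proto/builder/viaProto idiom above builds the proto at most once and then
reuses it for every call, so a record survives a round trip through its wire
form. A hedged fragment showing the invariant that TestPBImplRecords's
validatePBImplRecord (extended later in this commit) relies on:

    RefreshNodesResourcesRequestPBImpl original =
        new RefreshNodesResourcesRequestPBImpl();
    RefreshNodesResourcesRequestPBImpl copy =
        new RefreshNodesResourcesRequestPBImpl(original.getProto());
    assert original.equals(copy) && original.hashCode() == copy.hashCode();
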
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResourcesResponsePBImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResourcesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResourcesResponsePBImpl.java
new file mode 100644
index 0000000..f700539
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResourcesResponsePBImpl.java
@@ -0,0 +1,72 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesResponseProto;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse;
+
+import com.google.protobuf.TextFormat;
+
+@Private
+@Unstable
+public class RefreshNodesResourcesResponsePBImpl extends RefreshNodesResourcesResponse {
+
+  RefreshNodesResourcesResponseProto proto =
+      RefreshNodesResourcesResponseProto.getDefaultInstance();
+  RefreshNodesResourcesResponseProto.Builder builder = null;
+  boolean viaProto = false;
+
+  public RefreshNodesResourcesResponsePBImpl() {
+    builder = RefreshNodesResourcesResponseProto.newBuilder();
+  }
+
+  public RefreshNodesResourcesResponsePBImpl(
+      RefreshNodesResourcesResponseProto proto) {
+    this.proto = proto;
+    viaProto = true;
+  }
+
+  public RefreshNodesResourcesResponseProto getProto() {
+    proto = viaProto ? proto : builder.build();
+    viaProto = true;
+    return proto;
+  }
+
+  @Override
+  public int hashCode() {
+    return getProto().hashCode();
+  }
+
+  @Override
+  public boolean equals(Object other) {
+    if (other == null)
+      return false;
+    if (other.getClass().isAssignableFrom(this.getClass())) {
+      return this.getProto().equals(this.getClass().cast(other).getProto());
+    }
+    return false;
+  }
+
+  @Override
+  public String toString() {
+    return TextFormat.shortDebugString(getProto());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
index 8dacd3b..6357c36 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestPBImplRecords.java
@@ -220,6 +220,8 @@ import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.Check
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesRequestProto;
+import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResourcesResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto;
 import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto;
@@ -296,6 +298,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.CheckForDecommi
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshAdminAclsResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesRequestPBImpl;
+import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResourcesResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshNodesResponsePBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesRequestPBImpl;
 import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RefreshQueuesResponsePBImpl;
@@ -1127,6 +1131,18 @@ public class TestPBImplRecords {
   }
 
   @Test
+  public void testRefreshNodesResourcesRequestPBImpl() throws Exception {
+    validatePBImplRecord(RefreshNodesResourcesRequestPBImpl.class,
+        RefreshNodesResourcesRequestProto.class);
+  }
+
+  @Test
+  public void testRefreshNodesResourcesResponsePBImpl() throws Exception {
+    validatePBImplRecord(RefreshNodesResourcesResponsePBImpl.class,
+        RefreshNodesResourcesResponseProto.class);
+  }
+
+  @Test
   public void testRefreshServiceAclsRequestPBImpl() throws Exception {
     validatePBImplRecord(RefreshServiceAclsRequestPBImpl.class,
         RefreshServiceAclsRequestProto.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index a95e22c..ab46419 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -72,6 +72,8 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
@@ -85,6 +87,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeResp
 import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceResponse;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.DynamicResourceConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
@@ -602,6 +605,55 @@ public class AdminService extends CompositeService implements
     return response;
   }
 
+  @Override
+  public RefreshNodesResourcesResponse refreshNodesResources(
+      RefreshNodesResourcesRequest request)
+      throws YarnException, StandbyException {
+    String argName = "refreshNodesResources";
+    UserGroupInformation user = checkAcls(argName);
+    final String msg = "refresh nodes' resources.";
+
+    checkRMStatus(user.getShortUserName(), argName, msg);
+
+    RefreshNodesResourcesResponse response =
+        recordFactory.newRecordInstance(RefreshNodesResourcesResponse.class);
+
+    try {
+      Configuration conf = getConfig();
+      Configuration configuration = new Configuration(conf);
+      DynamicResourceConfiguration newconf;
+
+      InputStream drInputStream =
+        this.rmContext.getConfigurationProvider()
+        .getConfigurationInputStream(configuration,
+          YarnConfiguration.DR_CONFIGURATION_FILE);
+      if (drInputStream != null) {
+        configuration.addResource(drInputStream);
+        newconf = new DynamicResourceConfiguration(configuration, false);
+      } else {
+        newconf = new DynamicResourceConfiguration(configuration, true);
+      }
+
+      if (newconf.getNodes().length == 0) {
+        RMAuditLogger.logSuccess(user.getShortUserName(), argName,
+            "AdminService");
+        return response;
+      } else {
+        Map<NodeId, ResourceOption> nodeResourceMap =
+          newconf.getNodeResourceMap();
+
+        UpdateNodeResourceRequest updateRequest =
+          UpdateNodeResourceRequest.newInstance(nodeResourceMap);
+        updateNodeResource(updateRequest);
+        RMAuditLogger.logSuccess(user.getShortUserName(), argName,
+          "AdminService");
+        return response;
+      }
+    } catch (IOException ioe) {
+      throw logAndWrapException(ioe, user.getShortUserName(), argName, msg);
+    }
+  }
+
   private synchronized Configuration getConfiguration(Configuration conf,
       String... confFileNames) throws YarnException, IOException {
     for (String confFileName : confFileNames) {
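
On the wire, the new admin call takes an empty request record; a minimal
client-side sketch, assuming an already-obtained
ResourceManagerAdministrationProtocol proxy named adminProtocol (the proxy
setup is elided):

    // Ask the active RM to re-read dynamic-resources.xml and push any
    // per-node overrides through updateNodeResource().
    RefreshNodesResourcesResponse response =
        adminProtocol.refreshNodesResources(
            RefreshNodesResourcesRequest.newInstance());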

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/DynamicResourceConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/DynamicResourceConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/DynamicResourceConfiguration.java
new file mode 100644
index 0000000..dd37801
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/DynamicResourceConfiguration.java
@@ -0,0 +1,149 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.yarn.server.resourcemanager.resource;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceOption;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.resource.Resources;
+
+public class DynamicResourceConfiguration extends Configuration {
+
+  private static final Log LOG =
+    LogFactory.getLog(DynamicResourceConfiguration.class);
+
+  private static final String DR_CONFIGURATION_FILE = "dynamic-resources.xml";
+
+  @Private
+  public static final String PREFIX = "yarn.resource.dynamic.";
+
+  @Private
+  public static final String DOT = ".";
+
+  @Private
+  public static final String NODES = "nodes";
+
+  @Private
+  public static final String VCORES = "vcores";
+
+  @Private
+  public static final String MEMORY = "memory";
+
+  @Private
+  public static final String OVERCOMMIT_TIMEOUT = "overcommittimeout";
+
+  public DynamicResourceConfiguration() {
+    this(new Configuration());
+  }
+
+  public DynamicResourceConfiguration(Configuration configuration) {
+    this(configuration, true);
+  }
+
+  public DynamicResourceConfiguration(Configuration configuration,
+      boolean useLocalConfigurationProvider) {
+    super(configuration);
+    if (useLocalConfigurationProvider) {
+      addResource(DR_CONFIGURATION_FILE);
+    }
+  }
+
+  private String getNodePrefix(String node) {
+    String nodeName = PREFIX + node + DOT;
+    return nodeName;
+  }
+
+  public int getVcoresPerNode(String node) {
+    int vcoresPerNode =
+      getInt(getNodePrefix(node) + VCORES,
+        YarnConfiguration.DEFAULT_NM_VCORES);
+    return vcoresPerNode;
+  }
+
+  public void setVcoresPerNode(String node, int vcores) {
+    setInt(getNodePrefix(node) + VCORES, vcores);
+    LOG.debug("DRConf - setVcoresPerNode: nodePrefix=" + getNodePrefix(node) +
+      ", vcores=" + vcores);
+  }
+
+  public int getMemoryPerNode(String node) {
+    int memoryPerNode =
+      getInt(getNodePrefix(node) + MEMORY,
+        YarnConfiguration.DEFAULT_NM_PMEM_MB);
+    return memoryPerNode;
+  }
+
+  public void setMemoryPerNode(String node, int memory) {
+    setInt(getNodePrefix(node) + MEMORY, memory);
+    LOG.debug("DRConf - setMemoryPerNode: nodePrefix=" + getNodePrefix(node) +
+      ", memory=" + memory);
+  }
+
+  public int getOverCommitTimeoutPerNode(String node) {
+    int overCommitTimeoutPerNode =
+      getInt(getNodePrefix(node) + OVERCOMMIT_TIMEOUT,
+      ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT);
+    return overCommitTimeoutPerNode;
+  }
+
+  public void setOverCommitTimeoutPerNode(String node, int overCommitTimeout) {
+    setInt(getNodePrefix(node) + OVERCOMMIT_TIMEOUT, overCommitTimeout);
+    LOG.debug("DRConf - setOverCommitTimeoutPerNode: nodePrefix=" +
+      getNodePrefix(node) +
+        ", overCommitTimeout=" + overCommitTimeout);
+  }
+
+  public String[] getNodes() {
+    String[] nodes = getStrings(PREFIX + NODES);
+    // getStrings() returns null when the property is absent; normalize so
+    // callers can test length or iterate without a null check.
+    return (nodes == null) ? new String[0] : nodes;
+  }
+
+  public void setNodes(String[] nodes) {
+    set(PREFIX + NODES, StringUtils.arrayToString(nodes));
+  }
+
+  public Map<NodeId, ResourceOption> getNodeResourceMap() {
+    String[] nodes = getNodes();
+    Map<NodeId, ResourceOption> resourceOptions
+      = new HashMap<NodeId, ResourceOption> ();
+
+    for (String node : nodes) {
+      NodeId nid = ConverterUtils.toNodeId(node);
+      int vcores = getVcoresPerNode(node);
+      int memory = getMemoryPerNode(node);
+      int overCommitTimeout = getOverCommitTimeoutPerNode(node);
+      Resource resource = Resources.createResource(memory, vcores);
+      ResourceOption resourceOption =
+          ResourceOption.newInstance(resource, overCommitTimeout);
+      resourceOptions.put(nid, resourceOption);
+    }
+
+    return resourceOptions;
+  }
+}
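
The property keys above translate into a dynamic-resources.xml such as the
following; the host and values are illustrative only (they mirror the test
change below), and a per-node overcommittimeout key may be added the same
way:

    <configuration>
      <property>
        <name>yarn.resource.dynamic.nodes</name>
        <value>h1:1234</value>
      </property>
      <property>
        <name>yarn.resource.dynamic.h1:1234.vcores</name>
        <value>4</value>
      </property>
      <property>
        <name>yarn.resource.dynamic.h1:1234.memory</name>
        <value>4096</value>
      </property>
    </configuration>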

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
index 00cd3b6..6bb0971 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
@@ -37,9 +37,6 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
  */
 public interface RMNode {
 
-  /** negative value means no timeout */
-  public static final int OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT = -1;
-  
   /**
   * the node id of this node.
    * @return the node id of this node.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
index f62559a..7e75cfa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
@@ -58,11 +58,17 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
+import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResourcesRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.RemoveFromClusterNodeLabelsRequest;
 import org.apache.hadoop.yarn.server.api.protocolrecords.ReplaceLabelsOnNodeRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.DynamicResourceConfiguration;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -82,6 +88,8 @@ public class TestRMAdminService {
   static {
     YarnConfiguration.addDefaultResource(
         YarnConfiguration.CS_CONFIGURATION_FILE);
+    YarnConfiguration.addDefaultResource(
+        YarnConfiguration.DR_CONFIGURATION_FILE);
   }
 
   @Before
@@ -169,6 +177,44 @@ public class TestRMAdminService {
   }
 
   @Test
+  public void testAdminRefreshNodesResourcesWithFileSystemBasedConfigurationProvider()
+      throws IOException, YarnException {
+    configuration.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+        "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
+
+    // upload default configurations
+    uploadDefaultConfiguration();
+
+    try {
+      rm = new MockRM(configuration);
+      rm.init(configuration);
+      rm.start();
+      rm.registerNode("h1:1234", 5120);
+    } catch (Exception ex) {
+      fail("Should not get any exceptions");
+    }
+
+    NodeId nid = ConverterUtils.toNodeId("h1:1234");
+    RMNode ni = rm.getRMContext().getRMNodes().get(nid);
+    Resource resource = ni.getTotalCapability();
+    Assert.assertEquals("<memory:5120, vCores:5>", resource.toString());
+
+    DynamicResourceConfiguration drConf =
+        new DynamicResourceConfiguration();
+    drConf.set("yarn.resource.dynamic.nodes", "h1:1234");
+    drConf.set("yarn.resource.dynamic.h1:1234.vcores", "4");
+    drConf.set("yarn.resource.dynamic.h1:1234.memory", "4096");
+    uploadConfiguration(drConf, "dynamic-resources.xml");
+
+    rm.adminService.refreshNodesResources(
+        RefreshNodesResourcesRequest.newInstance());
+
+    RMNode niAfter = rm.getRMContext().getRMNodes().get(nid);
+    Resource resourceAfter = niAfter.getTotalCapability();
+    Assert.assertEquals("<memory:4096, vCores:4>", resourceAfter.toString());
+  }
+
+  @Test
   public void testAdminAclsWithLocalConfigurationProvider() {
     rm = new MockRM(configuration);
     rm.init(configuration);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
index 4964c59..61c6166 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java
@@ -659,7 +659,7 @@ public class TestRMNodeTransitions {
     assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
     node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
         ResourceOption.newInstance(Resource.newInstance(2048, 2), 
-            RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
+            ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
     Resource newCapacity = node.getTotalCapability();
     assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
     assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);
@@ -678,7 +678,7 @@ public class TestRMNodeTransitions {
     assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
     node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
         ResourceOption.newInstance(Resource.newInstance(2048, 2), 
-            RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
+            ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
     Resource newCapacity = node.getTotalCapability();
     assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
     assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);
@@ -694,7 +694,7 @@ public class TestRMNodeTransitions {
     assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4);
     node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(),
         ResourceOption.newInstance(Resource.newInstance(2048, 2), 
-            RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
+            ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT)));
     Resource newCapacity = node.getTotalCapability();
     assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048);
     assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73e3a49e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index 6607211..5b5c5ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -327,7 +327,7 @@ public class TestFifoScheduler {
     
     NodeResourceUpdateSchedulerEvent node0ResourceUpdate = new 
         NodeResourceUpdateSchedulerEvent(node0, ResourceOption.newInstance(
-            newResource, RMNode.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT));
+            newResource, ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT));
     scheduler.handle(node0ResourceUpdate);
     
     // SchedulerNode's total resource and available resource are changed.


[50/50] [abbrv] hadoop git commit: HADOOP-11630 Allow HDFS to bind to ipv6 conditionally

Posted by ec...@apache.org.
HADOOP-11630 Allow HDFS to bind to ipv6 conditionally

Signed-off-by: Elliott Clark <el...@fb.com>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3d952b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3d952b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3d952b4

Branch: refs/heads/HADOOP-11890
Commit: a3d952b4d27016d079e264e211169b22c1c59693
Parents: ce69c9b
Author: Elliott Clark <el...@fb.com>
Authored: Tue Feb 24 13:08:28 2015 -0800
Committer: Elliott Clark <el...@fb.com>
Committed: Tue Sep 15 12:09:53 2015 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/main/bin/hadoop-functions.sh             | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3d952b4/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index b9b7919..208edae 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -269,7 +269,11 @@ function hadoop_bootstrap
   export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
 
   # defaults
-  export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
+  if [[ "${HADOOP_ALLOW_IPV6}" -ne "yes" ]]; then
+    export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
+  else
+    export HADOOP_OPTS=${HADOOP_OPTS:-""}
+  fi
   hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
 }
 


[06/50] [abbrv] hadoop git commit: HDFS-9033. dfsadmin -metasave prints "NaN" for cache used%. Contributed by Brahma Reddy Battula.

Posted by ec...@apache.org.
HDFS-9033. dfsadmin -metasave prints "NaN" for cache used%. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0113e452
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0113e452
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0113e452

Branch: refs/heads/HADOOP-11890
Commit: 0113e4528deda7563b62a29745fbf209ab31b81a
Parents: 94cf7ab
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Sep 9 17:20:46 2015 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Wed Sep 9 17:20:46 2015 +0900

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/protocol/DatanodeInfo.java    |  6 ++++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt              |  3 +++
 .../apache/hadoop/hdfs/server/namenode/TestMetaSave.java | 11 +++++++++++
 3 files changed, 18 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0113e452/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 3555add..2ef40d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -371,9 +371,11 @@ public class DatanodeInfo extends DatanodeID implements Node {
     long c = getCapacity();
     long r = getRemaining();
     long u = getDfsUsed();
+    float usedPercent = getDfsUsedPercent();
     long cc = getCacheCapacity();
     long cr = getCacheRemaining();
     long cu = getCacheUsed();
+    float cacheUsedPercent = getCacheUsedPercent();
     buffer.append(getName());
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append(" "+location);
@@ -387,11 +389,11 @@ public class DatanodeInfo extends DatanodeID implements Node {
     }
     buffer.append(" " + c + "(" + StringUtils.byteDesc(c)+")");
     buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")");
-    buffer.append(" " + percent2String(u/(double)c));
+    buffer.append(" " + percent2String(usedPercent));
     buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")");
     buffer.append(" " + cc + "(" + StringUtils.byteDesc(cc)+")");
     buffer.append(" " + cu + "(" + StringUtils.byteDesc(cu)+")");
-    buffer.append(" " + percent2String(cu/(double)cc));
+    buffer.append(" " + percent2String(cacheUsedPercent));
     buffer.append(" " + cr + "(" + StringUtils.byteDesc(cr)+")");
     buffer.append(" " + new Date(lastUpdate));
     return buffer.toString();
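
The "NaN" came from promoting a zero-by-zero long division to double when a
datanode has no cache configured. A minimal illustration of the failure
mode, with a guard in the spirit of the getDfsUsedPercent() and
getCacheUsedPercent() helpers (the exact fallback those helpers return for
zero capacity is an assumption here):

    long cu = 0, cc = 0;                     // cache not configured
    double raw = cu / (double) cc;           // 0.0 / 0.0 == Double.NaN
    float guarded = (cc <= 0) ? 100.0f : (cu * 100.0f) / cc;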

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0113e452/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 14666dd..a07fca2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1358,6 +1358,9 @@ Release 2.7.2 - UNRELEASED
     HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect.
     (Kihwal Lee via yliu)
 
+    HDFS-9033. dfsadmin -metasave prints "NaN" for cache used%.
+    (Brahma Reddy Battula via aajisaka)
+
 Release 2.7.1 - 2015-07-06
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0113e452/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
index f1d3104..f818987 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -149,6 +150,16 @@ public class TestMetaSave {
       assertTrue(line.equals("Mis-replicated blocks that have been postponed:"));
       line = reader.readLine();
       assertTrue(line.equals("Metasave: Blocks being replicated: 0"));
+      line = reader.readLine();
+      assertTrue(line.equals("Metasave: Blocks 2 waiting deletion from 1 datanodes."));
+      // skip 2 lines to reach the HDFS-9033 scenario.
+      line = reader.readLine();
+      line = reader.readLine();
+      line = reader.readLine();
+      assertTrue(line.equals("Metasave: Number of datanodes: 2"));
+      line = reader.readLine();
+      assertFalse(line.contains("NaN"));
+
     } finally {
       if (reader != null)
         reader.close();


[37/50] [abbrv] hadoop git commit: HDFS-9069. TestNameNodeMetricsLogger failing -port in use. (stevel)

Posted by ec...@apache.org.
HDFS-9069. TestNameNodeMetricsLogger failing -port in use. (stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69557712
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69557712
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69557712

Branch: refs/heads/HADOOP-11890
Commit: 6955771261ad0047056eb7c2505ba9915fce60d3
Parents: e1b1d7e
Author: Steve Loughran <st...@apache.org>
Authored: Mon Sep 14 10:36:34 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Sep 14 10:36:44 2015 +0100

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                     | 3 +++
 .../hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java  | 5 +++--
 2 files changed, 6 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69557712/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b1ba39b..cb0fae9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1322,6 +1322,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9041. Move entries in META-INF/services/o.a.h.fs.FileSystem to
     hdfs-client. (Mingliang Liu via wheat9)
 
+    HDFS-9069. TestNameNodeMetricsLogger failing -port in use.
+    (stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69557712/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
index c8dc6ec..6968bc4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Appender;
 import org.apache.log4j.AppenderSkeleton;
@@ -118,8 +119,8 @@ public class TestNameNodeMetricsLogger {
     return new TestNameNode(conf);
   }
 
-  private int getRandomPort() {
-    return 10000 + random.nextInt(50000);
+  private int getRandomPort() throws IOException {
+    return ServerSocketUtil.getPort(0, 10);
   }
 
   private void addAppender(Log log, Appender appender) {
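
ServerSocketUtil.getPort hands back a port it has verified it can bind,
retrying on collisions, instead of hoping a random number in
[10000, 60000) happens to be free. The underlying idea is the OS-assigned
ephemeral port; a minimal java.net sketch of that idea (not the utility's
exact implementation):

    import java.io.IOException;
    import java.net.ServerSocket;

    final class FreePort {
      static int anyFreePort() throws IOException {
        // Port 0 asks the OS for any free port; closing the socket frees
        // it for the caller to re-bind (with a small race window).
        try (ServerSocket s = new ServerSocket(0)) {
          return s.getLocalPort();
        }
      }
    }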


[31/50] [abbrv] hadoop git commit: HADOOP-12407. Test failing: hadoop.ipc.TestSaslRPC. (stevel)

Posted by ec...@apache.org.
HADOOP-12407. Test failing: hadoop.ipc.TestSaslRPC. (stevel)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f685cd5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f685cd5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f685cd5

Branch: refs/heads/HADOOP-11890
Commit: 3f685cd5714b1dba44ed33f40683c7ea4895790d
Parents: 4992f07
Author: Steve Loughran <st...@apache.org>
Authored: Sat Sep 12 18:55:42 2015 +0100
Committer: Steve Loughran <st...@apache.org>
Committed: Sat Sep 12 18:56:42 2015 +0100

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt             | 2 ++
 .../src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java    | 9 ++++++++-
 2 files changed, 10 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f685cd5/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index fffd561..db671ae 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1115,6 +1115,8 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12388. Fix components' version information in the web page
     'About the Cluster'. (Jun Gong via zxu)
 
+    HADOOP-12407. Test failing: hadoop.ipc.TestSaslRPC. (stevel)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f685cd5/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
index f6ab380..754b811 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
@@ -558,9 +558,16 @@ public class TestSaslRPC {
       e = se;
     }
     assertNotNull(e);
-    assertEquals("PLAIN auth failed: wrong password", e.getMessage());
+    String message = e.getMessage();
+    assertContains("PLAIN auth failed", message);
+    assertContains("wrong password", message);
   }
 
+  private void assertContains(String expected, String text) {
+    assertNotNull("null text", text );
+    assertTrue("No {" + expected + "} in {" + text + "}",
+        text.contains(expected));
+  }
 
   private void runNegotiation(CallbackHandler clientCbh,
                               CallbackHandler serverCbh)