Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2017/06/08 20:05:43 UTC

[01/50] [abbrv] hadoop git commit: HADOOP-14458. Add missing imports to TestAliyunOSSFileSystemContract.java. Contributed by Mingliang Liu.

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 058129bca -> 2ec6464a1


HADOOP-14458. Add missing imports to TestAliyunOSSFileSystemContract.java. Contributed by Mingliang Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d86fbcf0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d86fbcf0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d86fbcf0

Branch: refs/heads/HDFS-7240
Commit: d86fbcf0840dc1e29b2d678149bdfb5cc61ba85d
Parents: f006cf9
Author: Akira Ajisaka <aa...@apache.org>
Authored: Tue May 30 15:11:10 2017 +0900
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:48 2017 -0700

----------------------------------------------------------------------
 .../fs/aliyun/oss/TestAliyunOSSFileSystemContract.java   | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d86fbcf0/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
index 419ddee..321e958 100644
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
+++ b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
@@ -20,17 +20,22 @@ package org.apache.hadoop.fs.aliyun.oss;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
+
 import org.junit.Before;
 import org.junit.Test;
 
-import static org.junit.Assume.*;
-import org.apache.hadoop.fs.FileStatus;
-
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeNotNull;
+import static org.junit.Assume.assumeTrue;
+
 /**
  * Tests a live Aliyun OSS system.
  *




[14/50] [abbrv] hadoop git commit: HADOOP-14466. Remove useless document from TestAliyunOSSFileSystemContract.java. Contributed by Chen Liang.

Posted by xy...@apache.org.
HADOOP-14466. Remove useless document from TestAliyunOSSFileSystemContract.java. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0618f490
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0618f490
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0618f490

Branch: refs/heads/HDFS-7240
Commit: 0618f490ddadbf50bdd4532747df775105d2385e
Parents: 83b97f8
Author: Akira Ajisaka <aa...@apache.org>
Authored: Thu Jun 1 13:08:01 2017 +0900
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:49 2017 -0700

----------------------------------------------------------------------
 .../hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java   | 5 -----
 1 file changed, 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0618f490/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
index 321e958..46ab339 100644
--- a/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
+++ b/hadoop-tools/hadoop-aliyun/src/test/java/org/apache/hadoop/fs/aliyun/oss/TestAliyunOSSFileSystemContract.java
@@ -38,11 +38,6 @@ import static org.junit.Assume.assumeTrue;
 
 /**
  * Tests a live Aliyun OSS system.
- *
- * This uses BlockJUnit4ClassRunner because FileSystemContractBaseTest from
- * TestCase which uses the old Junit3 runner that doesn't ignore assumptions
- * properly making it impossible to skip the tests if we don't have a valid
- * bucket.
  */
 public class TestAliyunOSSFileSystemContract
     extends FileSystemContractBaseTest {
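
The comment removed here described a JUnit 3 vs. JUnit 4 runner quirk: FileSystemContractBaseTest used to extend TestCase, whose JUnit 3 runner treated failed assumptions as errors, so the tests could not be skipped cleanly when no live OSS bucket was configured. Under a JUnit 4 runner that caveat no longer applies; a minimal sketch of the skip behavior (the helper below is hypothetical, not part of this patch):

    @Before
    public void setUp() throws Exception {
      // Under a JUnit 4 runner, a failed assumption marks the test as
      // skipped rather than failed; JUnit 3's TestCase runner had no
      // such notion, which is what the removed comment warned about.
      fs = obtainTestFileSystem();  // hypothetical: returns null when
                                    // no test bucket is configured
      assumeNotNull(fs);            // skips every test in that case
    }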




[10/50] [abbrv] hadoop git commit: YARN-6649. RollingLevelDBTimelineServer throws a RuntimeException if object decoding ever fails at runtime. Contributed by Jon Eagles.

Posted by xy...@apache.org.
YARN-6649. RollingLevelDBTimelineServer throws a RuntimeException if object decoding ever fails at runtime. Contributed by Jon Eagles.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/177c0c15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/177c0c15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/177c0c15

Branch: refs/heads/HDFS-7240
Commit: 177c0c1523ad8b1004070f16807ee225fa577523
Parents: 5a81e70
Author: Nathan Roberts <nr...@apache.org>
Authored: Tue May 30 16:10:33 2017 -0500
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:49 2017 -0700

----------------------------------------------------------------------
 .../timeline/RollingLevelDBTimelineStore.java   | 29 ++++++++++++++++----
 1 file changed, 23 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/177c0c15/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index 20e0379..d139346 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -473,9 +473,16 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
         }
       } else if (key[prefixlen] == OTHER_INFO_COLUMN[0]) {
         if (otherInfo) {
-          entity.addOtherInfo(
-              parseRemainingKey(key, prefixlen + OTHER_INFO_COLUMN.length),
-              fstConf.asObject(iterator.peekNext().getValue()));
+          Object o = null;
+          String keyStr = parseRemainingKey(key,
+              prefixlen + OTHER_INFO_COLUMN.length);
+          try {
+            o = fstConf.asObject(iterator.peekNext().getValue());
+            entity.addOtherInfo(keyStr, o);
+          } catch (Exception e) {
+            LOG.warn("Error while decoding "
+                + entityId + ":otherInfo:" + keyStr, e);
+          }
         }
       } else if (key[prefixlen] == RELATED_ENTITIES_COLUMN[0]) {
         if (relatedEntities) {
@@ -1338,7 +1345,12 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
       TimelineEvent event = new TimelineEvent();
       event.setTimestamp(ts);
       event.setEventType(tstype);
-      Object o = fstConf.asObject(value);
+      Object o = null;
+      try {
+        o = fstConf.asObject(value);
+      } catch (Exception e) {
+        LOG.warn("Error while decoding " + tstype, e);
+      }
       if (o == null) {
         event.setEventInfo(null);
       } else if (o instanceof Map) {
@@ -1362,8 +1374,13 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
     KeyParser kp = new KeyParser(key, offset);
     String name = kp.getNextString();
     byte[] bytes = kp.getRemainingBytes();
-    Object value = fstConf.asObject(bytes);
-    entity.addPrimaryFilter(name, value);
+    Object value = null;
+    try {
+      value = fstConf.asObject(bytes);
+      entity.addPrimaryFilter(name, value);
+    } catch (Exception e) {
+      LOG.warn("Error while decoding " + name, e);
+    }
   }
 
   /**
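
All three hunks apply the same pattern: wrap the FST deserialization in a try/catch, log a warning, and skip the corrupt entry instead of letting a RuntimeException abort the whole timeline read. A condensed sketch of that pattern (the helper method is illustrative, not part of the patch; fstConf and LOG are the fields used above):

    // Decode a stored value defensively: one corrupt entry should be
    // logged and skipped, not kill the entire scan.
    private Object decodeOrNull(byte[] bytes, String what) {
      try {
        return fstConf.asObject(bytes);        // FST deserialization
      } catch (Exception e) {
        LOG.warn("Error while decoding " + what, e);
        return null;                           // caller skips the entry
      }
    }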




[42/50] [abbrv] hadoop git commit: YARN-6604. Allow metric TTL for Application table to be specified through the command line (Haibo Chen via Varun Saxena)

Posted by xy...@apache.org.
YARN-6604. Allow metric TTL for Application table to be specified through the command line (Haibo Chen via Varun Saxena)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0887355d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0887355d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0887355d

Branch: refs/heads/HDFS-7240
Commit: 0887355d9cd85f909df66a2a73ba7db2768ef54f
Parents: 7d1d496
Author: Varun Saxena <va...@apache.org>
Authored: Wed Jun 7 21:51:07 2017 +0530
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:52 2017 -0700

----------------------------------------------------------------------
 .../storage/TimelineSchemaCreator.java          | 36 ++++++++++++++------
 1 file changed, 26 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0887355d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
index b436eec..b3b749e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -60,9 +60,10 @@ public final class TimelineSchemaCreator {
   final static String NAME = TimelineSchemaCreator.class.getSimpleName();
   private static final Log LOG = LogFactory.getLog(TimelineSchemaCreator.class);
   private static final String SKIP_EXISTING_TABLE_OPTION_SHORT = "s";
+  private static final String APP_METRICS_TTL_OPTION_SHORT = "ma";
   private static final String APP_TABLE_NAME_SHORT = "a";
   private static final String APP_TO_FLOW_TABLE_NAME_SHORT = "a2f";
-  private static final String TTL_OPTION_SHORT = "m";
+  private static final String ENTITY_METRICS_TTL_OPTION_SHORT = "me";
   private static final String ENTITY_TABLE_NAME_SHORT = "e";
   private static final String HELP_SHORT = "h";
   private static final String CREATE_TABLES_SHORT = "c";
@@ -87,12 +88,12 @@ public final class TimelineSchemaCreator {
       if (StringUtils.isNotBlank(entityTableName)) {
         hbaseConf.set(EntityTable.TABLE_NAME_CONF_NAME, entityTableName);
       }
-      // Grab the TTL argument
-      String entityTableTTLMetrics =commandLine.getOptionValue(
-          TTL_OPTION_SHORT);
-      if (StringUtils.isNotBlank(entityTableTTLMetrics)) {
-        int metricsTTL = Integer.parseInt(entityTableTTLMetrics);
-        new EntityTable().setMetricsTTL(metricsTTL, hbaseConf);
+      // Grab the entity metrics TTL
+      String entityTableMetricsTTL = commandLine.getOptionValue(
+          ENTITY_METRICS_TTL_OPTION_SHORT);
+      if (StringUtils.isNotBlank(entityTableMetricsTTL)) {
+        int entityMetricsTTL = Integer.parseInt(entityTableMetricsTTL);
+        new EntityTable().setMetricsTTL(entityMetricsTTL, hbaseConf);
       }
       // Grab the appToflowTableName argument
       String appToflowTableName = commandLine.getOptionValue(
@@ -107,6 +108,13 @@ public final class TimelineSchemaCreator {
         hbaseConf.set(ApplicationTable.TABLE_NAME_CONF_NAME,
             applicationTableName);
       }
+      // Grab the application metrics TTL
+      String applicationTableMetricsTTL = commandLine.getOptionValue(
+          APP_METRICS_TTL_OPTION_SHORT);
+      if (StringUtils.isNotBlank(applicationTableMetricsTTL)) {
+        int appMetricsTTL = Integer.parseInt(applicationTableMetricsTTL);
+        new ApplicationTable().setMetricsTTL(appMetricsTTL, hbaseConf);
+      }
 
       // create all table schemas in hbase
       final boolean skipExisting = commandLine.hasOption(
@@ -145,9 +153,9 @@ public final class TimelineSchemaCreator {
     o.setRequired(false);
     options.addOption(o);
 
-    o = new Option(TTL_OPTION_SHORT, "metricsTTL", true,
+    o = new Option(ENTITY_METRICS_TTL_OPTION_SHORT, "entityMetricsTTL", true,
         "TTL for metrics column family");
-    o.setArgName("metricsTTL");
+    o.setArgName("entityMetricsTTL");
     o.setRequired(false);
     options.addOption(o);
 
@@ -163,6 +171,12 @@ public final class TimelineSchemaCreator {
     o.setRequired(false);
     options.addOption(o);
 
+    o = new Option(APP_METRICS_TTL_OPTION_SHORT, "applicationMetricsTTL", true,
+        "TTL for metrics column family");
+    o.setArgName("applicationMetricsTTL");
+    o.setRequired(false);
+    options.addOption(o);
+
     // Options without an argument
     // No need to set arg name since we do not need an argument here
     o = new Option(SKIP_EXISTING_TABLE_OPTION_SHORT, "skipExistingTable",
@@ -193,12 +207,14 @@ public final class TimelineSchemaCreator {
     usage.append("The Optional options for creating tables include: \n");
     usage.append("[-entityTableName <Entity Table Name>] " +
         "The name of the Entity table\n");
-    usage.append("[-metricsTTL <Entity Table Metrics TTL>]" +
+    usage.append("[-entityMetricsTTL <Entity Table Metrics TTL>]" +
         " TTL for metrics in the Entity table\n");
     usage.append("[-appToflowTableName <AppToflow Table Name>]" +
         " The name of the AppToFlow table\n");
     usage.append("[-applicationTableName <Application Table Name>]" +
         " The name of the Application table\n");
+    usage.append("[-applicationMetricsTTL <Application Table Metrics TTL>]" +
+        " TTL for metrics in the Application table\n");
     usage.append("[-skipExistingTable] Whether to skip existing" +
         " hbase tables\n");
     System.out.println(usage.toString());
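
After this change the entity and application tables take independent metrics TTLs: -entityMetricsTTL (short form -me) replaces the old -metricsTTL (-m), and -applicationMetricsTTL (short form -ma) is new. A hedged usage sketch, with TTLs in seconds and option names taken from the diff (the launcher invocation itself may vary by deployment):

    hadoop org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator \
        -entityMetricsTTL 259200 -applicationMetricsTTL 604800 -skipExistingTable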




[02/50] [abbrv] hadoop git commit: HDFS-11901. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.

Posted by xy...@apache.org.
HDFS-11901. Modifier 'static' is redundant for inner enums. Contributed by ZhangBing Lin.
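
A nested enum type is implicitly static (JLS 8.9), so the explicit modifier adds nothing; a minimal illustration:

    public class Outer {
      // These two declarations are identical: nested enums are
      // implicitly static, making the modifier below redundant.
      public static enum WithModifier { A, B }
      public enum WithoutModifier { A, B }
    }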


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c48a7a0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c48a7a0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c48a7a0e

Branch: refs/heads/HDFS-7240
Commit: c48a7a0e9e9d9ea7f1ca3eeda6e402c226e61a00
Parents: 3c3685a
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed May 31 23:09:08 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:48 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java  | 4 ++--
 .../src/main/java/org/apache/hadoop/lib/server/Server.java       | 2 +-
 .../main/java/org/apache/hadoop/lib/server/ServerException.java  | 2 +-
 .../src/test/java/org/apache/hadoop/lib/lang/TestXException.java | 2 +-
 .../src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java      | 2 +-
 .../main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java  | 2 +-
 .../main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java | 2 +-
 .../hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java   | 2 +-
 .../hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java  | 2 +-
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java   | 2 +-
 .../org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java    | 2 +-
 .../hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java       | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/Content.java     | 2 +-
 .../hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java   | 2 +-
 .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java    | 2 +-
 .../hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java       | 2 +-
 .../hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java    | 2 +-
 .../hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java     | 2 +-
 .../src/main/java/org/apache/hadoop/hdfs/util/Diff.java          | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java   | 2 +-
 .../hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java   | 2 +-
 23 files changed, 24 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 055a57e..5922958 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -139,7 +139,7 @@ public class HttpFSFileSystem extends FileSystem
 
   public static final String UPLOAD_CONTENT_TYPE= "application/octet-stream";
 
-  public static enum FILE_TYPE {
+  public enum FILE_TYPE {
     FILE, DIRECTORY, SYMLINK;
 
     public static FILE_TYPE getType(FileStatus fileStatus) {
@@ -210,7 +210,7 @@ public class HttpFSFileSystem extends FileSystem
   private static final String HTTP_DELETE = "DELETE";
 
   @InterfaceAudience.Private
-  public static enum Operation {
+  public enum Operation {
     OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
     GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
     GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
index 82be027..57f651a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/Server.java
@@ -101,7 +101,7 @@ public class Server {
    * Enumeration that defines the server status.
    */
   @InterfaceAudience.Private
-  public static enum Status {
+  public enum Status {
     UNDEF(false, false),
     BOOTING(false, true),
     HALTED(true, true),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
index e3759ce..22bfa5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ServerException.java
@@ -31,7 +31,7 @@ public class ServerException extends XException {
    * Error codes use by the {@link Server} class.
    */
   @InterfaceAudience.Private
-  public static enum ERROR implements XException.ERROR {
+  public enum ERROR implements XException.ERROR {
     S01("Dir [{0}] does not exist"),
     S02("[{0}] is not a directory"),
     S03("Could not load file from classpath [{0}], {1}"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
index 59d02e3..2869d47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/lang/TestXException.java
@@ -27,7 +27,7 @@ import org.junit.Test;
 
 public class TestXException extends HTestCase {
 
-  public static enum TestERROR implements XException.ERROR {
+  public enum TestERROR implements XException.ERROR {
     TC;
 
     @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
index 92719db..553ce9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
@@ -100,7 +100,7 @@ public class TestParam {
     test(param, "L", "a long", 1L, 2L, "x", null);
   }
 
-  public static enum ENUM {
+  public enum ENUM {
     FOO, BAR
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
index 9371a72..2617019 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
@@ -72,7 +72,7 @@ class OpenFileCtx {
   // Pending writes water mark for dump, 1MB
   private static long DUMP_WRITE_WATER_MARK = 1024 * 1024;
 
-  static enum COMMIT_STATUS {
+  enum COMMIT_STATUS {
     COMMIT_FINISHED,
     COMMIT_WAIT,
     COMMIT_INACTIVE_CTX,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
index f4c32f6..f89679f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java
@@ -44,7 +44,7 @@ class WriteCtx {
    * wait for prerequisite writes. NO_DUMP: sequential write, no need to dump
    * since it will be written to HDFS soon. DUMPED: already dumped to a file.
    */
-  public static enum DataState {
+  public enum DataState {
     ALLOW_DUMP,
     NO_DUMP,
     DUMPED

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
index 1750790..671971d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutVersion.java
@@ -74,7 +74,7 @@ public class LayoutVersion {
    * </li>
    * </ul>
    */
-  public static enum Feature implements LayoutFeature {
+  public enum Feature implements LayoutFeature {
     NAMESPACE_QUOTA(-16, "Support for namespace quotas"),
     FILE_ACCESS_TIME(-17, "Support for access time on files"),
     DISKSPACE_QUOTA(-18, "Support for disk space quotas"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
index 8a097a5..0442588 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
@@ -46,7 +46,7 @@ import com.google.common.annotations.VisibleForTesting;
 public class CorruptReplicasMap{
 
   /** The corruption reason code */
-  public static enum Reason {
+  public enum Reason {
     NONE,                // not specified.
     ANY,                 // wildcard reason
     GENSTAMP_MISMATCH,   // mismatch in generation stamps

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index 8af86d3..b1ccea2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -398,7 +398,7 @@ public class DatanodeStorageInfo {
     this.remaining = remaining;
   }
 
-  static enum AddBlockResult {
+  enum AddBlockResult {
     ADDED, REPLACED, ALREADY_EXIST
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index c1cda57..fcf7d5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -96,7 +96,7 @@ class BPServiceActor implements Runnable {
   Thread bpThread;
   DatanodeProtocolClientSideTranslatorPB bpNamenode;
 
-  static enum RunningState {
+  enum RunningState {
     CONNECTING, INIT_FAILED, RUNNING, EXITED, FAILED;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index c5462a9..a0e646d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -1185,7 +1185,7 @@ class BlockReceiver implements Closeable {
     return handler;
   }
 
-  private static enum PacketResponderType {
+  private enum PacketResponderType {
     NON_PIPELINE, LAST_IN_PIPELINE, HAS_DOWNSTREAM_IN_PIPELINE
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
index 609a740..abe7b74 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
@@ -61,7 +61,7 @@ public class DataNodeLayoutVersion {
    * </li>
    * </ul>
    */
-  public static enum Feature implements LayoutFeature {
+  public enum Feature implements LayoutFeature {
     FIRST_LAYOUT(-55, -53, "First datanode layout", false),
     BLOCKID_BASED_LAYOUT(-56,
         "The block ID of a finalized block uniquely determines its position " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
index 450ddee..0737824 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
@@ -55,7 +55,7 @@ public class BackupImage extends FSImage {
    *   stopApplyingOnNextRoll is true.
    */
   volatile BNState bnState;
-  static enum BNState {
+  enum BNState {
     /**
      * Edits from the NN should be dropped. On the next log roll,
      * transition to JOURNAL_ONLY state

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
index c1caae5..cb5a2f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
@@ -63,7 +63,7 @@ public enum Content {
   public static class CountsMap
       extends EnumCounters.Map<CountsMap.Key, Content, Counts> {
     /** The key type of the map. */
-    public static enum Key { CURRENT, SNAPSHOT }
+    public enum Key { CURRENT, SNAPSHOT }
 
     CountsMap() {
       super(FACTORY);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
index 8ff15a8..fe58577 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java
@@ -33,7 +33,7 @@ import com.google.common.base.Preconditions;
 @InterfaceAudience.Private
 public abstract class INodeWithAdditionalFields extends INode
     implements LinkedElement {
-  static enum PermissionStatusFormat {
+  enum PermissionStatusFormat {
     MODE(null, 16),
     GROUP(MODE.BITS, 25),
     USER(GROUP.BITS, 23);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 32d268a..2bc3642 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -209,7 +209,7 @@ public class NameNode extends ReconfigurableBase implements
   /**
    * Categories of operations supported by the namenode.
    */
-  public static enum OperationCategory {
+  public enum OperationCategory {
     /** Operations that are state agnostic */
     UNCHECKED,
     /** Read operation that does not change the namespace state */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
index 2943fc2..182cc08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
@@ -77,7 +77,7 @@ public class NameNodeLayoutVersion {
    * support downgrade.
    * </ul>
    */
-  public static enum Feature implements LayoutFeature {
+  public enum Feature implements LayoutFeature {
     ROLLING_UPGRADE(-55, -53, -55, "Support rolling upgrade", false),
     EDITLOG_LENGTH(-56, -56, "Add length field to every edit log op"),
     XATTRS(-57, -57, "Extended attributes"),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
index 740036a..355f09f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
@@ -28,7 +28,7 @@ public class ReceivedDeletedBlockInfo {
   BlockStatus status;
   String delHints;
 
-  public static enum BlockStatus {
+  public enum BlockStatus {
     RECEIVING_BLOCK(1),
     RECEIVED_BLOCK(2),
     DELETED_BLOCK(3);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
index 1882e58..a6901f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
@@ -61,7 +61,7 @@ class OfflineEditsXmlLoader
   private long nextTxId;
   private final OpInstanceCache opCache = new OpInstanceCache();
   
-  static enum ParseState {
+  enum ParseState {
     EXPECT_EDITS_TAG,
     EXPECT_VERSION,
     EXPECT_RECORD,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
index 54e7103..26d1fb5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java
@@ -73,7 +73,7 @@ import com.google.common.base.Preconditions;
  * @param <E> The element type, which must implement {@link Element} interface.
  */
 public class Diff<K, E extends Diff.Element<K>> {
-  public static enum ListType {
+  public enum ListType {
     CREATED, DELETED
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index 9dff529..e191414 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -1123,7 +1123,7 @@ public class TestFileCreation {
     doCreateTest(CreationMethod.PATH_FROM_URI);
   }
   
-  private static enum CreationMethod {
+  private enum CreationMethod {
     DIRECT_NN_RPC,
     PATH_FROM_URI,
     PATH_FROM_STRING

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c48a7a0e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
index 079038c..38c2b2d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
@@ -82,7 +82,7 @@ public class TestFailureToReadEdits {
   private NameNode nn1;
   private FileSystem fs;
   
-  private static enum TestType {
+  private enum TestType {
     SHARED_DIR_HA,
     QJM_HA;
   };




[37/50] [abbrv] hadoop git commit: HADOOP-14431. ModifyTime of FileStatus returned by SFTPFileSystem's getFileStatus method is wrong. Contributed by Hongyuan Li.

Posted by xy...@apache.org.
HADOOP-14431. ModifyTime of FileStatus returned by SFTPFileSystem's getFileStatus method is wrong. Contributed by Hongyuan Li.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c06897a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c06897a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c06897a

Branch: refs/heads/HDFS-7240
Commit: 4c06897a3637e60e481b6537e21c6d0d13415d6a
Parents: 23b15b2
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Jun 6 12:31:40 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:51 2017 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java | 2 +-
 .../java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java  | 9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c06897a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
index d91d391..6de69fa 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/sftp/SFTPFileSystem.java
@@ -277,7 +277,7 @@ public class SFTPFileSystem extends FileSystem {
     // Using default block size since there is no way in SFTP channel to know of
     // block sizes on server. The assumption could be less than ideal.
     long blockSize = DEFAULT_BLOCK_SIZE;
-    long modTime = attr.getMTime() * 1000; // convert to milliseconds
+    long modTime = attr.getMTime() * 1000L; // convert to milliseconds
     long accessTime = attr.getATime() * 1000L;
     FsPermission permission = getPermissions(sftpFile);
     // not be able to get the real user group name, just use the user and group
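
The one-character fix matters because SftpATTRS.getMTime() returns the modification time as an int count of seconds since the epoch. With the int literal 1000 the multiplication is performed in 32-bit arithmetic and overflows before the result is widened to long; the L suffix forces a 64-bit multiply, matching the accessTime line below it. A self-contained illustration:

    int seconds = 1496707900;      // an mtime from June 2017, as int
    long wrong = seconds * 1000;   // 32-bit multiply wraps: 2059280992
    long right = seconds * 1000L;  // 64-bit multiply: 1496707900000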

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c06897a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
index 9b514e1..3d57dab 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/sftp/TestSFTPFileSystem.java
@@ -319,4 +319,13 @@ public class TestSFTPFileSystem {
     assertEquals(accessTime1, accessTime2);
   }
 
+  @Test
+  public void testGetModifyTime() throws IOException {
+    Path file = touch(localFs, name.getMethodName().toLowerCase() + "1");
+    java.io.File localFile = ((LocalFileSystem) localFs).pathToFile(file);
+    long modifyTime1 = localFile.lastModified();
+    long modifyTime2 = sftpFs.getFileStatus(file).getModificationTime();
+    assertEquals(modifyTime1, modifyTime2);
+  }
+
 }




[39/50] [abbrv] hadoop git commit: YARN-6634. [API] Refactor ResourceManager WebServices to make API explicit. (Giovanni Matteo Fumarola via curino)

Posted by xy...@apache.org.
YARN-6634. [API] Refactor ResourceManager WebServices to make API explicit. (Giovanni Matteo Fumarola via curino)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5c15bca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5c15bca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5c15bca

Branch: refs/heads/HDFS-7240
Commit: a5c15bca30d82196edff185267614ccc4a99cc67
Parents: bea02d2
Author: Carlo Curino <cu...@apache.org>
Authored: Wed Jun 7 13:41:06 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:52 2017 -0700

----------------------------------------------------------------------
 .../resourcemanager/webapp/RMWSConsts.java      | 202 ++++
 .../webapp/RMWebServiceProtocol.java            | 635 +++++++++++++
 .../resourcemanager/webapp/RMWebServices.java   | 937 +++++++++----------
 3 files changed, 1279 insertions(+), 495 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c15bca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
new file mode 100644
index 0000000..23d4bb1
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java
@@ -0,0 +1,202 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+/**
+ * Constants for {@code RMWebServiceProtocol}.
+ */
+public final class RMWSConsts {
+
+  public static final String EMPTY = "";
+  public static final String ANY = "*";
+
+  public static final String FORWARDED_FOR = "X-Forwarded-For";
+
+  // ----------------Paths for RMWebServiceProtocol----------------
+
+  /** Path for {@code RMWebServiceProtocol}. */
+  public static final String RM_WEB_SERVICE_PATH = "/ws/v1/cluster";
+
+  /** Path for {@code RMWebServiceProtocol#getClusterInfo}. */
+  public static final String INFO = "/info";
+
+  /** Path for {@code RMWebServiceProtocol#getClusterMetricsInfo}. */
+  public static final String METRICS = "/metrics";
+
+  /** Path for {@code RMWebServiceProtocol#getSchedulerInfo}. */
+  public static final String SCHEDULER = "/scheduler";
+
+  /** Path for {@code RMWebServiceProtocol#dumpSchedulerLogs}. */
+  public static final String SCHEDULER_LOGS = "/scheduler/logs";
+
+  /** Path for {@code RMWebServiceProtocol#getNodes}. */
+  public static final String NODES = "/nodes";
+
+  /** Path for {@code RMWebServiceProtocol#getNode}. */
+  public static final String NODES_NODEID = "/nodes/{nodeId}";
+
+  /**
+   * Path for {@code RMWebServiceProtocol#getApps} and
+   * {@code RMWebServiceProtocol#getApp}.
+   */
+  public static final String APPS = "/apps";
+
+  /** Path for {@code RMWebServiceProtocol#getActivities}. */
+  public static final String SCHEDULER_ACTIVITIES = "/scheduler/activities";
+
+  /** Path for {@code RMWebServiceProtocol#getAppActivities}. */
+  public static final String SCHEDULER_APP_ACTIVITIES =
+      "/scheduler/app-activities";
+
+  /** Path for {@code RMWebServiceProtocol#getAppStatistics}. */
+  public static final String APP_STATISTICS = "/appstatistics";
+
+  /** Path for {@code RMWebServiceProtocol#getApp}. */
+  public static final String APPS_APPID = "/apps/{appid}";
+
+  /** Path for {@code RMWebServiceProtocol#getAppAttempts}. */
+  public static final String APPS_APPID_APPATTEMPTS =
+      "/apps/{appid}/appattempts";
+
+  /** Path for {@code WebServices#getAppAttempt}. */
+  public static final String APPS_APPID_APPATTEMPTS_APPATTEMPTID =
+      "/apps/{appid}/appattempts/{appattemptid}";
+
+  /** Path for {@code WebServices#getContainers}. */
+  public static final String APPS_APPID_APPATTEMPTS_APPATTEMPTID_CONTAINERS =
+      "/apps/{appid}/appattempts/{appattemptid}/containers";
+
+  /** Path for {@code RMWebServiceProtocol#getNodeToLabels}. */
+  public static final String GET_NODE_TO_LABELS = "/get-node-to-labels";
+
+  /** Path for {@code RMWebServiceProtocol#getLabelsToNodes}. */
+  public static final String LABEL_MAPPINGS = "/label-mappings";
+
+  /** Path for {@code RMWebServiceProtocol#replaceLabelsOnNodes}. */
+  public static final String REPLACE_NODE_TO_LABELS = "/replace-node-to-labels";
+
+  /** Path for {@code RMWebServiceProtocol#replaceLabelsOnNode}. */
+  public static final String NODES_NODEID_REPLACE_LABELS =
+      "/nodes/{nodeId}/replace-labels";
+
+  /** Path for {@code RMWebServiceProtocol#getClusterNodeLabels}. */
+  public static final String GET_NODE_LABELS = "/get-node-labels";
+
+  /** Path for {@code RMWebServiceProtocol#addToClusterNodeLabels}. */
+  public static final String ADD_NODE_LABELS = "/add-node-labels";
+
+  /** Path for {@code RMWebServiceProtocol#removeFromCluserNodeLabels}. */
+  public static final String REMOVE_NODE_LABELS = "/remove-node-labels";
+
+  /** Path for {@code RMWebServiceProtocol#getLabelsOnNode}. */
+  public static final String NODES_NODEID_GETLABELS =
+      "/nodes/{nodeId}/get-labels";
+
+  /**
+   * Path for {@code RMWebServiceProtocol#getAppPriority} and
+   * {@code RMWebServiceProtocol#updateApplicationPriority}.
+   */
+  public static final String APPS_APPID_PRIORITY = "/apps/{appid}/priority";
+
+  /**
+   * Path for {@code RMWebServiceProtocol#getAppQueue} and
+   * {@code RMWebServiceProtocol#updateAppQueue}.
+   */
+  public static final String APPS_APPID_QUEUE = "/apps/{appid}/queue";
+
+  /** Path for {@code RMWebServiceProtocol#createNewApplication}. */
+  public static final String APPS_NEW_APPLICATION = "/apps/new-application";
+
+  /**
+   * Path for {@code RMWebServiceProtocol#getAppState} and
+   * {@code RMWebServiceProtocol#updateAppState}.
+   */
+  public static final String APPS_APPID_STATE = "/apps/{appid}/state";
+
+  /**
+   * Path for {@code RMWebServiceProtocol#postDelegationToken} and
+   * {@code RMWebServiceProtocol#cancelDelegationToken}.
+   */
+  public static final String DELEGATION_TOKEN = "/delegation-token";
+
+  /** Path for {@code RMWebServiceProtocol#postDelegationTokenExpiration}. */
+  public static final String DELEGATION_TOKEN_EXPIRATION =
+      "/delegation-token/expiration";
+
+  /** Path for {@code RMWebServiceProtocol#createNewReservation}. */
+  public static final String RESERVATION_NEW = "/reservation/new-reservation";
+
+  /** Path for {@code RMWebServiceProtocol#submitReservation}. */
+  public static final String RESERVATION_SUBMIT = "/reservation/submit";
+
+  /** Path for {@code RMWebServiceProtocol#updateReservation}. */
+  public static final String RESERVATION_UPDATE = "/reservation/update";
+
+  /** Path for {@code RMWebServiceProtocol#deleteReservation}. */
+  public static final String RESERVATION_DELETE = "/reservation/delete";
+
+  /** Path for {@code RMWebServiceProtocol#listReservation}. */
+  public static final String RESERVATION_LIST = "/reservation/list";
+
+  /** Path for {@code RMWebServiceProtocol#getAppTimeout}. */
+  public static final String APPS_TIMEOUTS_TYPE =
+      "/apps/{appid}/timeouts/{type}";
+
+  /**
+   * Path for {@code RMWebServiceProtocol#getAppTimeouts}.
+   */
+  public static final String APPS_TIMEOUTS = "/apps/{appid}/timeouts";
+
+  /**
+   * Path for {@code RMWebServiceProtocol#updateApplicationTimeout}.
+   */
+  public static final String APPS_TIMEOUT = "/apps/{appid}/timeout";
+
+  // ----------------QueryParams for RMWebServiceProtocol----------------
+
+  public static final String TIME = "time";
+  public static final String STATES = "states";
+  public static final String NODEID = "nodeId";
+  public static final String STATE = "state";
+  public static final String FINAL_STATUS = "finalStatus";
+  public static final String USER = "user";
+  public static final String QUEUE = "queue";
+  public static final String LIMIT = "limit";
+  public static final String STARTED_TIME_BEGIN = "startedTimeBegin";
+  public static final String STARTED_TIME_END = "startedTimeEnd";
+  public static final String FINISHED_TIME_BEGIN = "finishedTimeBegin";
+  public static final String FINISHED_TIME_END = "finishedTimeEnd";
+  public static final String APPLICATION_TYPES = "applicationTypes";
+  public static final String APPLICATION_TAGS = "applicationTags";
+  public static final String APP_ID = "appId";
+  public static final String MAX_TIME = "maxTime";
+  public static final String APPATTEMPTID = "appattemptid";
+  public static final String APPID = "appid";
+  public static final String LABELS = "labels";
+  public static final String RESERVATION_ID = "reservation-id";
+  public static final String START_TIME = "start-time";
+  public static final String END_TIME = "end-time";
+  public static final String INCLUDE_RESOURCE = "include-resource-allocations";
+  public static final String TYPE = "type";
+
+  private RMWSConsts() {
+    // not called
+  }
+
+}
\ No newline at end of file
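
These path and query-parameter constants give REST clients and the RM web services a single set of agreed names. As a rough client-side illustration (not part of this patch), a request URL can be composed from such constants; the host/port, the "/ws/v1/cluster" root, and the constant values below are assumptions that merely mirror RMWSConsts:

    // Hypothetical sketch of a client composing an RM REST call; the constant
    // values here only mirror RMWSConsts and are assumptions of this example.
    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class RmRestClientSketch {
      static final String RM_WEB_SERVICE_PATH = "/ws/v1/cluster";
      static final String APPS = "/apps";
      static final String STATES = "states";
      static final String LIMIT = "limit";

      public static void main(String[] args) throws Exception {
        // GET /ws/v1/cluster/apps?states=RUNNING&limit=10 against a local RM.
        URL url = new URL("http://localhost:8088" + RM_WEB_SERVICE_PATH + APPS
            + "?" + STATES + "=RUNNING&" + LIMIT + "=10");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json");
        try (BufferedReader in = new BufferedReader(new InputStreamReader(
            conn.getInputStream(), StandardCharsets.UTF_8))) {
          String line;
          while ((line = in.readLine()) != null) {
            System.out.println(line); // JSON rendering of AppsInfo
          }
        } finally {
          conn.disconnect();
        }
      }
    }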

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c15bca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
new file mode 100644
index 0000000..6dd9c41
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java
@@ -0,0 +1,635 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.webapp;
+
+import java.io.IOException;
+import java.util.Set;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.ws.rs.core.Response;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Evolving;
+import org.apache.hadoop.security.authorize.AuthorizationException;
+import org.apache.hadoop.yarn.api.ApplicationBaseProtocol;
+import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
+import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ActivitiesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppActivitiesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppPriority;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppQueue;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppState;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppTimeoutsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.DelegationToken;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsEntryList;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationDeleteRequestInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationSubmissionRequestInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ReservationUpdateRequestInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
+
+/**
+ * <p>
+ * The protocol between clients and the <code>ResourceManager</code> to
+ * submit/abort jobs and to get information on applications, cluster metrics,
+ * nodes, queues, ACLs and reservations via REST calls.
+ * </p>
+ *
+ * The WebService is reachable by using {@link RMWSConsts#RM_WEB_SERVICE_PATH}
+ */
+@Private
+@Evolving
+public interface RMWebServiceProtocol {
+
+  /**
+   * This method retrieves the cluster information, and it is reachable by using
+   * {@link RMWSConsts#INFO}.
+   *
+   * @return the cluster information
+   */
+  ClusterInfo get();
+
+  /**
+   * This method retrieves the cluster information, and it is reachable by using
+   * {@link RMWSConsts#INFO}.
+   *
+   * @return the cluster information
+   */
+  ClusterInfo getClusterInfo();
+
+  /**
+   * This method retrieves the cluster metrics information, and it is reachable
+   * by using {@link RMWSConsts#METRICS}.
+   *
+   * @see ApplicationClientProtocol#getClusterMetrics
+   * @return the cluster metrics information
+   */
+  ClusterMetricsInfo getClusterMetricsInfo();
+
+  /**
+   * This method retrieves the current scheduler status, and it is reachable by
+   * using {@link RMWSConsts#SCHEDULER}.
+   *
+   * @return the current scheduler status
+   */
+  SchedulerTypeInfo getSchedulerInfo();
+
+  /**
+   * This method dumps the scheduler logs for the requested period of time, and
+   * it is reachable by using {@link RMWSConsts#SCHEDULER_LOGS}.
+   *
+   * @param time the period of time
+   * @param hsr the servlet request
+   * @return the result of the operation
+   * @throws IOException when it cannot create dump log file
+   */
+  String dumpSchedulerLogs(String time, HttpServletRequest hsr)
+      throws IOException;
+
+  /**
+   * This method retrieves all the nodes information in the cluster, and it is
+   * reachable by using {@link RMWSConsts#NODES}.
+   *
+   * @see ApplicationClientProtocol#getClusterNodes
+   * @param states the states we want to filter
+   * @return all nodes in the cluster. If the states param is given, returns all
+   *         nodes that are in the comma-separated list of states
+   */
+  NodesInfo getNodes(String states);
+
+  /**
+   * This method retrieves a specific node information, and it is reachable by
+   * using {@link RMWSConsts#NODES_NODEID}.
+   *
+   * @param nodeId the node whose information we want to retrieve
+   * @return the information about the node in input
+   */
+  NodeInfo getNode(String nodeId);
+
+  /**
+   * This method retrieves all the app reports in the cluster, and it is
+   * reachable by using {@link RMWSConsts#APPS}.
+   *
+   * @see ApplicationClientProtocol#getApplications
+   * @param hsr the servlet request
+   * @param stateQuery filter the result by state (deprecated; use statesQuery)
+   * @param statesQuery filter the result by states
+   * @param finalStatusQuery filter the result by final states
+   * @param userQuery filter the result by user
+   * @param queueQuery filter the result by queue
+   * @param count set a limit of the result
+   * @param startedBegin filter the result by started begin time
+   * @param startedEnd filter the result by started end time
+   * @param finishBegin filter the result by finish begin time
+   * @param finishEnd filter the result by finish end time
+   * @param applicationTypes filter the result by types
+   * @param applicationTags filter the result by tags
+   * @return all apps in the cluster
+   */
+  @SuppressWarnings("checkstyle:parameternumber")
+  AppsInfo getApps(HttpServletRequest hsr, String stateQuery,
+      Set<String> statesQuery, String finalStatusQuery, String userQuery,
+      String queueQuery, String count, String startedBegin, String startedEnd,
+      String finishBegin, String finishEnd, Set<String> applicationTypes,
+      Set<String> applicationTags);
+
+  /**
+   * This method retrieves all the activities on a specific node, and it is
+   * reachable by using {@link RMWSConsts#SCHEDULER_ACTIVITIES}.
+   *
+   * @param hsr the servlet request
+   * @param nodeId the node whose activities we want to retrieve
+   * @return all the activities on the specific node
+   */
+  ActivitiesInfo getActivities(HttpServletRequest hsr, String nodeId);
+
+  /**
+   * This method retrieves all the activities for a specific app for a specific
+   * period of time, and it is reachable by using
+   * {@link RMWSConsts#SCHEDULER_APP_ACTIVITIES}.
+   *
+   * @param hsr the servlet request
+   * @param appId the applicationId whose activities we want to retrieve
+   * @param time the period of time for which we want the activities
+   * @return all the activities about a specific app for a specific time
+   */
+  AppActivitiesInfo getAppActivities(HttpServletRequest hsr, String appId,
+      String time);
+
+  /**
+   * This method retrieves the statistics of apps in the cluster, filtered by
+   * state and type, and it is reachable by using
+   * {@link RMWSConsts#APP_STATISTICS}.
+   *
+   * @param hsr the servlet request
+   * @param stateQueries filter the result by states
+   * @param typeQueries filter the result by type names
+   * @return the application's statistics for specific states and types
+   */
+  ApplicationStatisticsInfo getAppStatistics(HttpServletRequest hsr,
+      Set<String> stateQueries, Set<String> typeQueries);
+
+  /**
+   * This method retrieves the report for a specific app, and it is reachable by
+   * using {@link RMWSConsts#APPS_APPID}.
+   *
+   * @see ApplicationClientProtocol#getApplicationReport
+   * @param hsr the servlet request
+   * @param appId the Id of the application whose report we want
+   * @return the app report for a specific application
+   */
+  AppInfo getApp(HttpServletRequest hsr, String appId);
+
+  /**
+   * This method retrieves the state for a specific app, and it is reachable by
+   * using {@link RMWSConsts#APPS_APPID_STATE}.
+   *
+   * @param hsr the servlet request
+   * @param appId the Id of the application whose state we want
+   * @return the state for a specific application
+   * @throws AuthorizationException if the user is not authorized
+   */
+  AppState getAppState(HttpServletRequest hsr, String appId)
+      throws AuthorizationException;
+
+  /**
+   * This method updates the state of the given app, and it is reachable by
+   * using {@link RMWSConsts#APPS_APPID_STATE}.
+   *
+   * @param targetState the target state for the app
+   * @param hsr the servlet request
+   * @param appId the Id of the application whose state we want to update
+   * @return Response containing the status code
+   * @throws AuthorizationException if the user is not authorized to invoke this
+   *           method
+   * @throws YarnException if app does not exist
+   * @throws InterruptedException if interrupted
+   * @throws IOException if doAs action throws an IOException
+   */
+  Response updateAppState(AppState targetState, HttpServletRequest hsr,
+      String appId) throws AuthorizationException, YarnException,
+      InterruptedException, IOException;
+
+  /**
+   * This method retrieves all the node labels with the respective nodes in the
+   * cluster, and it is reachable by using
+   * {@link RMWSConsts#GET_NODE_TO_LABELS}.
+   *
+   * @see ApplicationClientProtocol#getNodeToLabels
+   * @param hsr the servlet request
+   * @return the mapping from each node to its node labels
+   * @throws IOException if an IOException happened
+   */
+  NodeToLabelsInfo getNodeToLabels(HttpServletRequest hsr) throws IOException;
+
+  /**
+   * This method retrieves all the nodes within multiple node labels in the
+   * cluster, and it is reachable by using {@link RMWSConsts#LABEL_MAPPINGS}.
+   *
+   * @see ApplicationClientProtocol#getLabelsToNodes
+   * @param labels filter the result by node labels
+   * @return all the nodes within multiple node labels
+   * @throws IOException if an IOException happened
+   */
+  LabelsToNodesInfo getLabelsToNodes(Set<String> labels) throws IOException;
+
+  /**
+   * This method replaces all the node labels for specific nodes, and it is
+   * reachable by using {@link RMWSConsts#REPLACE_NODE_TO_LABELS}.
+   *
+   * @see ResourceManagerAdministrationProtocol#replaceLabelsOnNode
+   * @param newNodeToLabels the list of new labels
+   * @param hsr the servlet request
+   * @return Response containing the status code
+   * @throws Exception if an exception happened
+   */
+  Response replaceLabelsOnNodes(NodeToLabelsEntryList newNodeToLabels,
+      HttpServletRequest hsr) throws Exception;
+
+  /**
+   * This method replaces all the node labels for a specific node, and it is
+   * reachable by using {@link RMWSConsts#NODES_NODEID_REPLACE_LABELS}.
+   *
+   * @see ResourceManagerAdministrationProtocol#replaceLabelsOnNode
+   * @param newNodeLabelsName the list of new labels
+   * @param hsr the servlet request
+   * @param nodeId the node whose labels we want to replace
+   * @return Response containing the status code
+   * @throws Exception if an exception happened
+   */
+  Response replaceLabelsOnNode(Set<String> newNodeLabelsName,
+      HttpServletRequest hsr, String nodeId) throws Exception;
+
+  /**
+   * This method retrieves all the node labels in the cluster, and it is
+   * reachable by using {@link RMWSConsts#GET_NODE_LABELS}.
+   *
+   * @see ApplicationClientProtocol#getClusterNodeLabels
+   * @param hsr the servlet request
+   * @return all the node labels in the cluster
+   * @throws IOException if an IOException happened
+   */
+  NodeLabelsInfo getClusterNodeLabels(HttpServletRequest hsr)
+      throws IOException;
+
+  /**
+   * This method adds the specified node labels to the cluster, and it is
+   * reachable by using {@link RMWSConsts#ADD_NODE_LABELS}.
+   *
+   * @see ResourceManagerAdministrationProtocol#addToClusterNodeLabels
+   * @param newNodeLabels the node labels to add
+   * @param hsr the servlet request
+   * @return Response containing the status code
+   * @throws Exception in case of bad request
+   */
+  Response addToClusterNodeLabels(NodeLabelsInfo newNodeLabels,
+      HttpServletRequest hsr) throws Exception;
+
+  /**
+   * This method removes the specified node labels from the cluster, and it is
+   * reachable by using {@link RMWSConsts#REMOVE_NODE_LABELS}.
+   *
+   * @see ResourceManagerAdministrationProtocol#removeFromClusterNodeLabels
+   * @param oldNodeLabels the node labels to remove
+   * @param hsr the servlet request
+   * @return Response containing the status code
+   * @throws Exception in case of bad request
+   */
+  Response removeFromCluserNodeLabels(Set<String> oldNodeLabels,
+      HttpServletRequest hsr) throws Exception;
+
+  /**
+   * This method retrieves all the node labels for a specific node, and it is
+   * reachable by using {@link RMWSConsts#NODES_NODEID_GETLABELS}.
+   *
+   * @param hsr the servlet request
+   * @param nodeId the node whose labels we want to get
+   * @return all the labels for a specific node.
+   * @throws IOException if an IOException happened
+   */
+  NodeLabelsInfo getLabelsOnNode(HttpServletRequest hsr, String nodeId)
+      throws IOException;
+
+  /**
+   * This method retrieves the priority for a specific app, and it is reachable
+   * by using {@link RMWSConsts#APPS_APPID_PRIORITY}.
+   *
+   * @param hsr the servlet request
+   * @param appId the app whose priority we want to get
+   * @return the priority for a specific application
+   * @throws AuthorizationException in case the user is not authorized
+   */
+  AppPriority getAppPriority(HttpServletRequest hsr, String appId)
+      throws AuthorizationException;
+
+  /**
+   * This method updates the priority for a specific application, and it is
+   * reachable by using {@link RMWSConsts#APPS_APPID_PRIORITY}.
+   *
+   * @param targetPriority the priority we want to set for the app
+   * @param hsr the servlet request
+   * @param appId the application whose priority we want to update
+   * @return Response containing the status code
+   * @throws AuthorizationException if the user is not authenticated
+   * @throws YarnException if the target is null
+   * @throws IOException if the update fails.
+   * @throws InterruptedException if interrupted.
+   */
+  Response updateApplicationPriority(AppPriority targetPriority,
+      HttpServletRequest hsr, String appId) throws AuthorizationException,
+      YarnException, InterruptedException, IOException;
+
+  /**
+   * This method retrieves the queue for a specific app, and it is reachable by
+   * using {@link RMWSConsts#APPS_APPID_QUEUE}.
+   *
+   * @param hsr the servlet request
+   * @param appId the application whose queue we want to retrieve
+   * @return the Queue for a specific application.
+   * @throws AuthorizationException if the user is not authenticated
+   */
+  AppQueue getAppQueue(HttpServletRequest hsr, String appId)
+      throws AuthorizationException;
+
+  /**
+   * This method updates the queue for a specific application, and it is
+   * reachable by using {@link RMWSConsts#APPS_APPID_QUEUE}.
+   *
+   * @param targetQueue the queue we want to set
+   * @param hsr the servlet request
+   * @param appId the application whose queue we want to change
+   * @return Response containing the status code
+   * @throws AuthorizationException if the user is not authenticated
+   * @throws YarnException if the app is not found
+   * @throws IOException if the update fails.
+   * @throws InterruptedException if interrupted.
+   */
+  Response updateAppQueue(AppQueue targetQueue, HttpServletRequest hsr,
+      String appId) throws AuthorizationException, YarnException,
+      InterruptedException, IOException;
+
+  /**
+   * Generates a new ApplicationId which is then sent to the client. This method
+   * is reachable by using {@link RMWSConsts#APPS_NEW_APPLICATION}.
+   *
+   * @see ApplicationClientProtocol#getNewApplication
+   *
+   * @param hsr the servlet request
+   * @return Response containing the app id and the maximum resource
+   *         capabilities
+   * @throws AuthorizationException if the user is not authorized to invoke this
+   *           method
+   * @throws IOException if the creation fails
+   * @throws InterruptedException if interrupted
+   */
+  Response createNewApplication(HttpServletRequest hsr)
+      throws AuthorizationException, IOException, InterruptedException;
+
+  /**
+   * Function to submit an app to the RM. This method is reachable by using
+   * {@link RMWSConsts#APPS}.
+   *
+   * @see ApplicationClientProtocol#submitApplication
+   *
+   * @param newApp structure containing information to construct the
+   *          ApplicationSubmissionContext
+   * @param hsr the servlet request
+   * @return Response containing the status code
+   * @throws AuthorizationException if the user is not authorized to invoke this
+   *           method
+   * @throws IOException if the submission failed
+   * @throws InterruptedException if interrupted
+   */
+  Response submitApplication(ApplicationSubmissionContextInfo newApp,
+      HttpServletRequest hsr)
+      throws AuthorizationException, IOException, InterruptedException;
+
+  /**
+   * This method posts a delegation token from the client, and it is reachable
+   * by using {@link RMWSConsts#DELEGATION_TOKEN}.
+   *
+   * @see ApplicationBaseProtocol#getDelegationToken
+   * @param tokenData the token to delegate
+   * @param hsr the servlet request
+   * @return Response containing the status code
+   * @throws AuthorizationException if Kerberos auth failed
+   * @throws IOException if the delegation failed
+   * @throws InterruptedException if interrupted
+   * @throws Exception in case of bad request
+   */
+  Response postDelegationToken(DelegationToken tokenData,
+      HttpServletRequest hsr) throws AuthorizationException, IOException,
+      InterruptedException, Exception;
+
+  /**
+   * This method updates the expiration for a delegation token from the client,
+   * and it is reachable by using
+   * {@link RMWSConsts#DELEGATION_TOKEN_EXPIRATION}.
+   *
+   * @see ApplicationBaseProtocol#renewDelegationToken
+   * @param hsr the servlet request
+   * @return Response containing the status code
+   * @throws AuthorizationException if Kerberos auth failed
+   * @throws IOException if the delegation failed
+   * @throws Exception in case of bad request
+   */
+  Response postDelegationTokenExpiration(HttpServletRequest hsr)
+      throws AuthorizationException, IOException, Exception;
+
+  /**
+   * This method cancels the delegation token from the client, and it is
+   * reachable by using {@link RMWSConsts#DELEGATION_TOKEN}.
+   *
+   * @see ApplicationBaseProtocol#cancelDelegationToken
+   * @param hsr the servlet request
+   * @return Response containing the status code
+   * @throws AuthorizationException if Kerberos auth failed
+   * @throws IOException if the delegation failed
+   * @throws InterruptedException if interrupted
+   * @throws Exception in case of bad request
+   */
+  Response cancelDelegationToken(HttpServletRequest hsr)
+      throws AuthorizationException, IOException, InterruptedException,
+      Exception;
+
+  /**
+   * Generates a new ReservationId which is then sent to the client. This method
+   * is reachable by using {@link RMWSConsts#RESERVATION_NEW}.
+   *
+   * @see ApplicationClientProtocol#getNewReservation
+   *
+   * @param hsr the servlet request
+   * @return Response containing the new reservation id
+   * @throws AuthorizationException if the user is not authorized to invoke this
+   *           method.
+   * @throws IOException if creation failed
+   * @throws InterruptedException if interrupted
+   */
+  Response createNewReservation(HttpServletRequest hsr)
+      throws AuthorizationException, IOException, InterruptedException;
+
+  /**
+   * Function to submit a Reservation to the RM. This method is reachable by
+   * using {@link RMWSConsts#RESERVATION_SUBMIT}.
+   *
+   * @see ApplicationClientProtocol#submitReservation
+   *
+   * @param resContext provides information to construct the
+   *          ReservationSubmissionRequest
+   * @param hsr the servlet request
+   * @return Response containing the status code
+   * @throws AuthorizationException if the user is not authorized to invoke this
+   *           method
+   * @throws IOException if creation failed
+   * @throws InterruptedException if interrupted
+   */
+  Response submitReservation(ReservationSubmissionRequestInfo resContext,
+      HttpServletRequest hsr)
+      throws AuthorizationException, IOException, InterruptedException;
+
+  /**
+   * Function to update a Reservation in the RM. This method is reachable by
+   * using {@link RMWSConsts#RESERVATION_UPDATE}.
+   *
+   * @see ApplicationClientProtocol#updateReservation
+   *
+   * @param resContext provides information to construct the
+   *          ReservationUpdateRequest
+   * @param hsr the servlet request
+   * @return Response containing the status code
+   * @throws AuthorizationException if the user is not authorized to invoke this
+   *           method
+   * @throws IOException if the operation failed
+   * @throws InterruptedException if interrupted
+   */
+  Response updateReservation(ReservationUpdateRequestInfo resContext,
+      HttpServletRequest hsr)
+      throws AuthorizationException, IOException, InterruptedException;
+
+  /**
+   * Function to delete a Reservation in the RM. This method is reachable by
+   * using {@link RMWSConsts#RESERVATION_DELETE}.
+   *
+   * @see ApplicationClientProtocol#deleteReservation
+   *
+   * @param resContext provides information to construct the
+   *          ReservationDeleteRequest
+   * @param hsr the servlet request
+   * @return Response containing the status code
+   * @throws AuthorizationException when the user group information cannot be
+   *           retrieved.
+   * @throws IOException when a {@link ReservationDeleteRequest} cannot be
+   *           created from the {@link ReservationDeleteRequestInfo}. This
+   *           exception is also thrown on
+   *           {@code ClientRMService.deleteReservation} invocation failure.
+   * @throws InterruptedException if doAs action throws an InterruptedException.
+   */
+  Response deleteReservation(ReservationDeleteRequestInfo resContext,
+      HttpServletRequest hsr)
+      throws AuthorizationException, IOException, InterruptedException;
+
+  /**
+   * Function to retrieve a list of all the reservations. This method is
+   * reachable by using {@link RMWSConsts#RESERVATION_LIST}.
+   *
+   * @see ApplicationClientProtocol#listReservations
+   * @param queue filter the result by queue
+   * @param reservationId filter the result by reservationId
+   * @param startTime filter the result by start time
+   * @param endTime filter the result by end time
+   * @param includeResourceAllocations true if the resource allocation should be
+   *          in the result, false otherwise
+   * @param hsr the servlet request
+   * @return Response containing the status code
+   * @throws Exception in case of bad request
+   */
+  Response listReservation(String queue, String reservationId, long startTime,
+      long endTime, boolean includeResourceAllocations, HttpServletRequest hsr)
+      throws Exception;
+
+  /**
+   * This method retrieves the timeout information for a specific app with a
+   * specific type, and it is reachable by using
+   * {@link RMWSConsts#APPS_TIMEOUTS_TYPE}.
+   *
+   * @param hsr the servlet request
+   * @param appId the application whose timeout we want to get
+   * @param type the type of the timeouts
+   * @return the timeout for a specific application with a specific type.
+   * @throws AuthorizationException if the user is not authorized
+   */
+  AppTimeoutInfo getAppTimeout(HttpServletRequest hsr, String appId,
+      String type) throws AuthorizationException;
+
+  /**
+   * This method retrieves the timeout information for a specific app, and it is
+   * reachable by using {@link RMWSConsts#APPS_TIMEOUTS}.
+   *
+   * @param hsr the servlet request
+   * @param appId the application whose timeouts we want to get
+   * @return the timeouts for a specific application
+   * @throws AuthorizationException if the user is not authorized
+   */
+  AppTimeoutsInfo getAppTimeouts(HttpServletRequest hsr, String appId)
+      throws AuthorizationException;
+
+  /**
+   * This method updates the timeout information for a specific app, and it is
+   * reachable by using {@link RMWSConsts#APPS_TIMEOUT}.
+   *
+   * @see ApplicationClientProtocol#updateApplicationTimeouts
+   * @param appTimeout the appTimeoutInfo
+   * @param hsr the servlet request
+   * @param appId the application we want to update
+   * @return Response containing the status code
+   * @throws AuthorizationException if the user is not authorized to invoke this
+   *           method
+   * @throws YarnException in case of bad request
+   * @throws IOException if the operation failed
+   * @throws InterruptedException if interrupted
+   */
+  Response updateApplicationTimeout(AppTimeoutInfo appTimeout,
+      HttpServletRequest hsr, String appId) throws AuthorizationException,
+      YarnException, InterruptedException, IOException;
+
+  /**
+   * This method retrieves all the attempts information for a specific app, and
+   * it is reachable by using {@link RMWSConsts#APPS_APPID_APPATTEMPTS}.
+   *
+   * @see ApplicationBaseProtocol#getApplicationAttempts
+   * @param hsr the servlet request
+   * @param appId the application whose attempts we want to get
+   * @return all the attempts info for a specific application
+   */
+  AppAttemptsInfo getAppAttempts(HttpServletRequest hsr, String appId);
+}


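Because the REST surface is now captured by RMWebServiceProtocol, the concrete RMWebServices endpoint and any proxying implementation (for example, a federation router) can share one contract. A minimal sketch of that idea follows; DelegatingRMWebServices and its backend field are invented for illustration, and the class is left abstract so only a few of the protocol's many methods need to be shown:

    // Hedged sketch: forwarding part of the protocol to another implementation.
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServiceProtocol;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
    import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;

    public abstract class DelegatingRMWebServices implements RMWebServiceProtocol {
      private final RMWebServiceProtocol backend;

      protected DelegatingRMWebServices(RMWebServiceProtocol backend) {
        this.backend = backend;
      }

      @Override
      public ClusterInfo get() {
        // The root path and /info return the same payload.
        return getClusterInfo();
      }

      @Override
      public ClusterInfo getClusterInfo() {
        // A router could choose a subcluster here before forwarding.
        return backend.getClusterInfo();
      }

      @Override
      public ClusterMetricsInfo getClusterMetricsInfo() {
        return backend.getClusterMetricsInfo();
      }

      // The remaining methods would forward in the same way; they are omitted
      // here, which is why this sketch is declared abstract.
    }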


[34/50] [abbrv] hadoop git commit: HADOOP-14478. Optimize NativeAzureFsInputStream for positional reads. Contributed by Rajesh Balamohan

Posted by xy...@apache.org.
HADOOP-14478. Optimize NativeAzureFsInputStream for positional reads. Contributed by Rajesh Balamohan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2777b1d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2777b1d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2777b1d4

Branch: refs/heads/HDFS-7240
Commit: 2777b1d4565efea85ea25fee3327c1ff53ab72f2
Parents: b55a346
Author: Mingliang Liu <li...@apache.org>
Authored: Mon Jun 5 15:56:43 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:51 2017 -0700

----------------------------------------------------------------------
 .../fs/azure/AzureNativeFileSystemStore.java    |  5 +---
 .../hadoop/fs/azure/NativeAzureFileSystem.java  | 31 ++++++++++++++++++--
 2 files changed, 29 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2777b1d4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index a5bb370..534919e 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.fs.azure;
 import static org.apache.hadoop.fs.azure.NativeAzureFileSystem.PATH_DELIMITER;
 
-import java.io.BufferedInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
@@ -2043,11 +2042,9 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 
         // Get blob reference and open the input buffer stream.
         CloudBlobWrapper blob = getBlobReference(key);
-      BufferedInputStream inBufStream = new BufferedInputStream(
-          openInputStream(blob));
 
         // Return a data input stream.
-        DataInputStream inDataStream = new DataInputStream(inBufStream);
+        DataInputStream inDataStream = new DataInputStream(openInputStream(blob));
         return inDataStream;
     } catch (Exception e) {
       // Re-throw as an Azure storage exception.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2777b1d4/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index b61baab..b2cc4ea 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -816,6 +816,27 @@ public class NativeAzureFileSystem extends FileSystem {
       }
     }
 
+    @Override
+    public synchronized void readFully(long position, byte[] buffer, int offset, int length)
+        throws IOException {
+      validatePositionedReadArgs(position, buffer, offset, length);
+
+      int nread = 0;
+      while (nread < length) {
+        // In case BlobInputStream is used, mark() can act as a hint to read ahead only this
+        // length instead of 4 MB boundary.
+        in.mark(length - nread);
+        int nbytes = read(position + nread,
+            buffer,
+            offset + nread,
+            length - nread);
+        if (nbytes < 0) {
+          throw new EOFException(FSExceptionMessages.EOF_IN_READ_FULLY);
+        }
+        nread += nbytes;
+      }
+    }
+
     /*
      * Reads up to len bytes of data from the input stream into an array of
      * bytes. An attempt is made to read as many as len bytes, but a smaller
@@ -886,9 +907,13 @@ public class NativeAzureFileSystem extends FileSystem {
         if (pos < 0) {
           throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
         }
-        IOUtils.closeStream(in);
-        in = store.retrieve(key);
-        this.pos = in.skip(pos);
+        if (this.pos > pos) {
+          IOUtils.closeStream(in);
+          in = store.retrieve(key);
+          this.pos = in.skip(pos);
+        } else {
+          this.pos += in.skip(pos - this.pos);
+        }
         LOG.debug("Seek to position {}. Bytes skipped {}", pos,
           this.pos);
       } catch(IOException e) {


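For context, readFully(position, ...) is part of the PositionedReadable contract: it must fill the requested range exactly (or throw EOFException) without disturbing the stream's own offset, and the mark() call above hints BlobInputStream to read ahead only the needed length instead of a 4 MB boundary. A rough caller-side sketch, where the wasb:// URI and the offsets are purely illustrative:

    // Illustrative only: positional reads against a wasb:// file.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PositionalReadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumed container/account; any wasb:// URI with valid credentials works.
        Path path = new Path(
            "wasb://container@account.blob.core.windows.net/data.bin");
        try (FileSystem fs = path.getFileSystem(conf);
             FSDataInputStream in = fs.open(path)) {
          byte[] header = new byte[128];
          // Positional read: fills exactly 128 bytes from offset 4096 or throws
          // EOFException; with this patch it no longer drags in a 4 MB buffer.
          in.readFully(4096, header, 0, header.length);
          // The stream's own position is unchanged by the positional read.
          System.out.println("pos after readFully = " + in.getPos()); // 0
        }
      }
    }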


[26/50] [abbrv] hadoop git commit: HDFS-11904. Reuse iip in unprotectedRemoveXAttrs calls.

Posted by xy...@apache.org.
HDFS-11904. Reuse iip in unprotectedRemoveXAttrs calls.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/36914a72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/36914a72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/36914a72

Branch: refs/heads/HDFS-7240
Commit: 36914a729122e39629a0c7c6671b44080498cadb
Parents: cb622bc
Author: Xiao Chen <xi...@apache.org>
Authored: Thu Jun 1 14:13:57 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:50 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java       | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java    | 5 ++---
 .../org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java | 3 ++-
 3 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/36914a72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index a875e4b..bedbe7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -242,7 +242,7 @@ final class FSDirErasureCodingOp {
 
     final List<XAttr> xattrs = Lists.newArrayListWithCapacity(1);
     xattrs.add(ecXAttr);
-    FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP.getPath(), xattrs);
+    FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP, xattrs);
     return xattrs;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36914a72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index e5243ee..ddc088c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -170,7 +170,7 @@ class FSDirXAttrOp {
       src = iip.getPath();
       checkXAttrChangeAccess(fsd, iip, xAttr, pc);
 
-      List<XAttr> removedXAttrs = unprotectedRemoveXAttrs(fsd, src, xAttrs);
+      List<XAttr> removedXAttrs = unprotectedRemoveXAttrs(fsd, iip, xAttrs);
       if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
         fsd.getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
       } else {
@@ -184,10 +184,9 @@ class FSDirXAttrOp {
   }
 
   static List<XAttr> unprotectedRemoveXAttrs(
-      FSDirectory fsd, final String src, final List<XAttr> toRemove)
+      FSDirectory fsd, final INodesInPath iip, final List<XAttr> toRemove)
       throws IOException {
     assert fsd.hasWriteLock();
-    INodesInPath iip = fsd.getINodesInPath(src, DirOp.WRITE);
     INode inode = FSDirectory.resolveLastINode(iip);
     int snapshotId = iip.getLatestSnapshotId();
     List<XAttr> existingXAttrs = XAttrStorage.readINodeXAttrs(inode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/36914a72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index ae0b304..060bd59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -931,7 +931,8 @@ public class FSEditLogLoader {
     }
     case OP_REMOVE_XATTR: {
       RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
-      FSDirXAttrOp.unprotectedRemoveXAttrs(fsDir, removeXAttrOp.src,
+      INodesInPath iip = fsDir.getINodesInPath(removeXAttrOp.src, DirOp.WRITE);
+      FSDirXAttrOp.unprotectedRemoveXAttrs(fsDir, iip,
                                            removeXAttrOp.xAttrs);
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId,


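The theme of the change is to resolve a path to its INodesInPath once, at the point where the lock is taken, and to pass that handle into unprotectedRemoveXAttrs rather than re-resolving the string path inside it. The same refactoring in miniature, with every name invented for illustration:

    // Hedged sketch of the resolve-once pattern; names are invented and the
    // "resolution" stands in for FSDirectory#getINodesInPath.
    public final class ResolveOnceSketch {
      /** Stands in for an already-resolved INodesInPath. */
      static final class Resolved {
        final String path;
        Resolved(String path) { this.path = path; }
      }

      static Resolved resolve(String path) {
        System.out.println("resolving " + path); // imagine lock-holding work
        return new Resolved(path);
      }

      // Before: the helper re-resolves a path the caller already resolved.
      static void removeAttrs(String path) {
        Resolved iip = resolve(path); // duplicate resolution, wasted work
        System.out.println("removing xattrs under " + iip.path);
      }

      // After: the caller resolves once and threads the handle through.
      static void removeAttrs(Resolved iip) {
        System.out.println("removing xattrs under " + iip.path);
      }

      public static void main(String[] args) {
        Resolved iip = resolve("/user/test/file"); // resolved exactly once
        removeAttrs(iip);                          // no second resolution
      }
    }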


[43/50] [abbrv] hadoop git commit: YARN-6547. Enhance SLS-based tests leveraging invariant checker.

Posted by xy...@apache.org.
YARN-6547. Enhance SLS-based tests leveraging invariant checker.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c58bd157
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c58bd157
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c58bd157

Branch: refs/heads/HDFS-7240
Commit: c58bd15776814a53ffc550285f1528781b031787
Parents: bbf158d
Author: Carlo Curino <cu...@apache.org>
Authored: Tue Jun 6 17:19:15 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:52 2017 -0700

----------------------------------------------------------------------
 .../hadoop/metrics2/source/JvmMetrics.java      | 10 ++++
 hadoop-tools/hadoop-sls/pom.xml                 |  2 +
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |  8 +++
 .../hadoop/yarn/sls/BaseSLSRunnerTest.java      | 41 +++++++++++++--
 .../sls/TestReservationSystemInvariants.java    |  4 ++
 .../apache/hadoop/yarn/sls/TestSLSRunner.java   | 17 +++---
 .../src/test/resources/exit-invariants.txt      |  8 +++
 .../src/test/resources/log4j.properties         | 19 +++++++
 .../src/test/resources/ongoing-invariants.txt   | 54 ++++++++++++++++++++
 9 files changed, 152 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58bd157/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
index caba170..c6369cd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java
@@ -27,6 +27,7 @@ import java.lang.management.GarbageCollectorMXBean;
 import java.util.List;
 import java.util.concurrent.ConcurrentHashMap;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.log.metrics.EventCounter;
 import org.apache.hadoop.metrics2.MetricsCollector;
@@ -59,6 +60,15 @@ public class JvmMetrics implements MetricsSource {
     }
   }
 
+  @VisibleForTesting
+  public synchronized void registerIfNeeded() {
+    // during tests impl might exist, but is not registered
+    MetricsSystem ms = DefaultMetricsSystem.instance();
+    if (ms.getSource("JvmMetrics") == null) {
+      ms.register(JvmMetrics.name(), JvmMetrics.description(), this);
+    }
+  }
+
   static final float M = 1024*1024;
   static public final float MEMORY_MAX_UNLIMITED_MB = -1;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58bd157/hadoop-tools/hadoop-sls/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml
index d70021c..4089473 100644
--- a/hadoop-tools/hadoop-sls/pom.xml
+++ b/hadoop-tools/hadoop-sls/pom.xml
@@ -135,6 +135,8 @@
             <exclude>src/test/resources/syn.json</exclude>
             <exclude>src/test/resources/inputsls.json</exclude>
             <exclude>src/test/resources/nodes.json</exclude>
+            <exclude>src/test/resources/exit-invariants.txt</exclude>
+            <exclude>src/test/resources/ongoing-invariants.txt</exclude>
           </excludes>
         </configuration>
       </plugin>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58bd157/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index e679c9d..a77d401 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.tools.rumen.JobTraceReader;
 import org.apache.hadoop.tools.rumen.LoggedJob;
 import org.apache.hadoop.tools.rumen.LoggedTask;
@@ -243,6 +244,13 @@ public class SLSRunner extends Configured implements Tool {
         return new MockAMLauncher(se, this.rmContext, amMap);
       }
     };
+
+    // Across runs of parametrized tests, the JvmMetrics object is retained,
+    // but is not registered correctly
+    JvmMetrics jvmMetrics = JvmMetrics.initSingleton("ResourceManager", null);
+    jvmMetrics.registerIfNeeded();
+
+    // Init and start the actual ResourceManager
     rm.init(rmConf);
     rm.start();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58bd157/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
index 8ef72ab..6b369f2 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/BaseSLSRunnerTest.java
@@ -21,8 +21,10 @@ import net.jcip.annotations.NotThreadSafe;
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.resourcemanager.monitor.invariants.MetricsInvariantChecker;
 import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameter;
@@ -39,7 +41,7 @@ import java.util.UUID;
 @RunWith(value = Parameterized.class)
 @NotThreadSafe
 @SuppressWarnings("VisibilityModifier")
-public class BaseSLSRunnerTest {
+public abstract class BaseSLSRunnerTest {
 
   @Parameter(value = 0)
   public String schedulerType;
@@ -54,6 +56,11 @@ public class BaseSLSRunnerTest {
   public String nodeFile;
 
   protected SLSRunner sls;
+  protected String ongoingInvariantFile;
+  protected String exitInvariantFile;
+
+  @Before
+  public abstract void setup();
 
   @After
   public void tearDown() throws InterruptedException {
@@ -82,22 +89,30 @@ public class BaseSLSRunnerTest {
     switch (traceType) {
     case "OLD_SLS":
       args = new String[] {"-inputsls", traceLocation, "-output",
-          slsOutputDir.getAbsolutePath()};
+          slsOutputDir.getAbsolutePath() };
       break;
     case "OLD_RUMEN":
       args = new String[] {"-inputrumen", traceLocation, "-output",
-          slsOutputDir.getAbsolutePath()};
+          slsOutputDir.getAbsolutePath() };
       break;
     default:
       args = new String[] {"-tracetype", traceType, "-tracelocation",
-          traceLocation, "-output", slsOutputDir.getAbsolutePath()};
+          traceLocation, "-output", slsOutputDir.getAbsolutePath() };
     }
 
     if (nodeFile != null) {
-      args = ArrayUtils.addAll(args, new String[] {"-nodes", nodeFile});
+      args = ArrayUtils.addAll(args, new String[] {"-nodes", nodeFile });
     }
 
+    // enable continuous invariant checks
     conf.set(YarnConfiguration.RM_SCHEDULER, schedulerType);
+    if (ongoingInvariantFile != null) {
+      conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
+          MetricsInvariantChecker.class.getCanonicalName());
+      conf.set(MetricsInvariantChecker.INVARIANTS_FILE, ongoingInvariantFile);
+      conf.setBoolean(MetricsInvariantChecker.THROW_ON_VIOLATION, true);
+    }
+
     sls = new SLSRunner(conf);
     sls.run(args);
 
@@ -115,6 +130,22 @@ public class BaseSLSRunnerTest {
       }
       timeout--;
     }
+    shutdownHookInvariantCheck();
+  }
+
+  /**
+   * Checks exit invariants (e.g., number of apps submitted, completed, etc.).
+   */
+  private void shutdownHookInvariantCheck() {
+
+    if (exitInvariantFile != null) {
+      MetricsInvariantChecker ic = new MetricsInvariantChecker();
+      Configuration conf = new Configuration();
+      conf.set(MetricsInvariantChecker.INVARIANTS_FILE, exitInvariantFile);
+      conf.setBoolean(MetricsInvariantChecker.THROW_ON_VIOLATION, true);
+      ic.init(conf, null, null);
+      ic.editSchedule();
+    }
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58bd157/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestReservationSystemInvariants.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestReservationSystemInvariants.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestReservationSystemInvariants.java
index b3a79cb..22e1e2e 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestReservationSystemInvariants.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestReservationSystemInvariants.java
@@ -70,4 +70,8 @@ public class TestReservationSystemInvariants extends BaseSLSRunnerTest {
 
   }
 
+  @Override
+  public void setup() {
+
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58bd157/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java
index b2bc8d5..567f0d9 100644
--- a/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/TestSLSRunner.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.yarn.sls;
 
 import net.jcip.annotations.NotThreadSafe;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
+import org.junit.Before;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -38,12 +41,8 @@ public class TestSLSRunner extends BaseSLSRunnerTest {
   @Parameters(name = "Testing with: {1}, {0}, (nodeFile {3})")
   public static Collection<Object[]> data() {
 
-    String capScheduler =
-        "org.apache.hadoop.yarn.server.resourcemanager.scheduler."
-            + "capacity.CapacityScheduler";
-    String fairScheduler =
-        "org.apache.hadoop.yarn.server.resourcemanager.scheduler."
-            + "fair.FairScheduler";
+    String capScheduler = CapacityScheduler.class.getCanonicalName();
+    String fairScheduler = FairScheduler.class.getCanonicalName();
     String slsTraceFile = "src/test/resources/inputsls.json";
     String rumenTraceFile = "src/main/data/2jobs2min-rumen-jh.json";
     String synthTraceFile = "src/test/resources/syn.json";
@@ -73,6 +72,12 @@ public class TestSLSRunner extends BaseSLSRunnerTest {
     });
   }
 
+  @Before
+  public void setup() {
+    ongoingInvariantFile = "src/test/resources/ongoing-invariants.txt";
+    exitInvariantFile = "src/test/resources/exit-invariants.txt";
+  }
+
   @Test(timeout = 60000)
   @SuppressWarnings("all")
   public void testSimulatorRunning() throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58bd157/hadoop-tools/hadoop-sls/src/test/resources/exit-invariants.txt
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/exit-invariants.txt b/hadoop-tools/hadoop-sls/src/test/resources/exit-invariants.txt
new file mode 100644
index 0000000..b4a3228
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/resources/exit-invariants.txt
@@ -0,0 +1,8 @@
+ActiveApplications >= 0
+AppsCompleted >= 0
+AppsFailed >= 0
+AppsKilled >= 0
+AppsPending >= 0
+AppsRunning >= 0
+AppsSubmitted >= 0
+PendingContainers >= 0

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58bd157/hadoop-tools/hadoop-sls/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/log4j.properties b/hadoop-tools/hadoop-sls/src/test/resources/log4j.properties
new file mode 100644
index 0000000..81a3f6a
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/resources/log4j.properties
@@ -0,0 +1,19 @@
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=info,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58bd157/hadoop-tools/hadoop-sls/src/test/resources/ongoing-invariants.txt
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/test/resources/ongoing-invariants.txt b/hadoop-tools/hadoop-sls/src/test/resources/ongoing-invariants.txt
new file mode 100644
index 0000000..363ed0d
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/test/resources/ongoing-invariants.txt
@@ -0,0 +1,54 @@
+running_0 >= 0
+running_60 >= 0
+running_300 >= 0
+running_1440 >= 0
+AppsSubmitted >= 0
+AppsRunning >= 0
+AppsPending >= 0
+AppsCompleted >= 0
+AppsKilled >= 0
+AppsFailed >= 0
+AllocatedMB >= 0
+AllocatedVCores >= 0
+AllocatedContainers >= 0
+AggregateContainersAllocated >= 0
+AggregateNodeLocalContainersAllocated >= 0
+AggregateRackLocalContainersAllocated >= 0
+AggregateOffSwitchContainersAllocated >= 0
+AggregateContainersReleased >= 0
+AggregateContainersPreempted >= 0
+AvailableMB >= 0
+AvailableVCores >= 0
+PendingMB >= 0
+PendingVCores >= 0
+PendingContainers >= 0
+ReservedMB >= 0
+ReservedVCores >= 0
+ReservedContainers >= 0
+ActiveUsers >= 0
+ActiveApplications >= 0
+AppAttemptFirstContainerAllocationDelayNumOps >= 0
+AppAttemptFirstContainerAllocationDelayAvgTime >= 0
+MemNonHeapUsedM >= 0
+MemNonHeapCommittedM >= 0
+MemNonHeapMaxM >= 0 || MemNonHeapMaxM == -1
+MemHeapUsedM >= 0
+MemHeapCommittedM >= 0
+MemHeapMaxM >= 0
+MemMaxM >= 0
+GcCountPS_Scavenge >= 0
+GcTimeMillisPS_Scavenge >= 0
+GcCountPS_MarkSweep >= 0
+GcTimeMillisPS_MarkSweep >= 0
+GcCount >= 0
+GcTimeMillis >= 0
+ThreadsNew >= 0
+ThreadsRunnable >= 0
+ThreadsBlocked >= 0
+ThreadsWaiting >= 0
+ThreadsTimedWaiting >= 0
+ThreadsTerminated >= 0
+LogFatal >= 0
+LogError >= 0
+LogWarn >= 0
+LogInfo >= 0


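Each line in these files is a boolean expression over metric names that must hold whenever the checker fires: continuously for ongoing-invariants.txt, once at teardown for exit-invariants.txt. Purely as an illustration of how such lines can be evaluated, and not as a description of MetricsInvariantChecker's internals, a script engine can bind a metrics snapshot and test each expression:

    // Illustrative invariant evaluation; the real checker's mechanics may
    // differ. Requires a JavaScript engine (e.g., Nashorn on Java 8).
    import java.util.HashMap;
    import java.util.Map;
    import javax.script.Bindings;
    import javax.script.ScriptEngine;
    import javax.script.ScriptEngineManager;
    import javax.script.SimpleBindings;

    public class InvariantSketch {
      public static void main(String[] args) throws Exception {
        ScriptEngine js = new ScriptEngineManager().getEngineByName("JavaScript");

        // A metrics snapshot; real values would come from the metrics system.
        Map<String, Object> snapshot = new HashMap<>();
        snapshot.put("AppsFailed", 0);
        snapshot.put("AppsSubmitted", 12);
        Bindings bindings = new SimpleBindings(snapshot);

        String[] invariants = {"AppsFailed >= 0", "AppsSubmitted >= 0"};
        for (String invariant : invariants) {
          if (!Boolean.TRUE.equals(js.eval(invariant, bindings))) {
            // Mirrors THROW_ON_VIOLATION=true behavior in spirit.
            throw new IllegalStateException("Invariant violated: " + invariant);
          }
        }
        System.out.println("all invariants hold");
      }
    }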


[27/50] [abbrv] hadoop git commit: HADOOP-14485. Redundant 'final' modifier in try-with-resources statement. Contributed by wenxin he.

Posted by xy...@apache.org.
HADOOP-14485. Redundant 'final' modifier in try-with-resources statement. Contributed by wenxin he.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc8bcf1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc8bcf1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc8bcf1e

Branch: refs/heads/HDFS-7240
Commit: cc8bcf1efd692d4a5d2c119c222be5f95d3d52e2
Parents: fd77c7f
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Jun 6 23:11:47 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:51 2017 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/example/ITUseMiniCluster.java     | 8 ++++----
 .../src/test/java/org/apache/hadoop/ipc/TestIPC.java         | 2 +-
 .../org/apache/hadoop/metrics2/lib/TestRollingAverages.java  | 2 +-
 .../test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java | 2 +-
 .../org/apache/hadoop/hdfs/server/balancer/TestBalancer.java | 2 +-
 .../hdfs/server/datanode/TestDataNodeFaultInjector.java      | 2 +-
 .../apache/hadoop/yarn/client/api/impl/TestYarnClient.java   | 2 +-
 7 files changed, 10 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
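
The cleanup is purely syntactic: a try-with-resources variable is implicitly final (Java 7 and later) and may not be reassigned, so spelling the modifier out adds nothing. A minimal before/after sketch:

    // Minimal sketch: the resource variable is final either way.
    import java.io.IOException;
    import java.io.StringReader;

    public class TryWithResourcesSketch {
      public static void main(String[] args) throws IOException {
        // Before this patch: try (final StringReader r = new StringReader("x"))
        try (StringReader r = new StringReader("x")) {
          System.out.println(r.read()); // prints 120, the code point of 'x'
          // r = new StringReader("y"); // would not compile in either form
        }
      }
    }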


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc8bcf1e/hadoop-client-modules/hadoop-client-integration-tests/src/test/java/org/apache/hadoop/example/ITUseMiniCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-integration-tests/src/test/java/org/apache/hadoop/example/ITUseMiniCluster.java b/hadoop-client-modules/hadoop-client-integration-tests/src/test/java/org/apache/hadoop/example/ITUseMiniCluster.java
index 3f560d0..5fcbe13 100644
--- a/hadoop-client-modules/hadoop-client-integration-tests/src/test/java/org/apache/hadoop/example/ITUseMiniCluster.java
+++ b/hadoop-client-modules/hadoop-client-integration-tests/src/test/java/org/apache/hadoop/example/ITUseMiniCluster.java
@@ -82,7 +82,7 @@ public class ITUseMiniCluster {
 
   @Test
   public void useHdfsFileSystem() throws IOException {
-    try (final FileSystem fs = cluster.getFileSystem()) {
+    try (FileSystem fs = cluster.getFileSystem()) {
       simpleReadAfterWrite(fs);
     }
   }
@@ -94,10 +94,10 @@ public class ITUseMiniCluster {
       throw new IOException("Mkdirs failed to create " +
           TEST_PATH);
     }
-    try (final FSDataOutputStream out = fs.create(path)) {
+    try (FSDataOutputStream out = fs.create(path)) {
       out.writeUTF(TEXT);
     }
-    try (final FSDataInputStream in = fs.open(path)) {
+    try (FSDataInputStream in = fs.open(path)) {
       final String result = in.readUTF();
       Assert.assertEquals("Didn't read back text we wrote.", TEXT, result);
     }
@@ -105,7 +105,7 @@ public class ITUseMiniCluster {
 
   @Test
   public void useWebHDFS() throws IOException, URISyntaxException {
-    try (final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(
+    try (FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(
         cluster.getConfiguration(0), WebHdfsConstants.WEBHDFS_SCHEME)) {
       simpleReadAfterWrite(fs);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc8bcf1e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 611000d..1d47473 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -1425,7 +1425,7 @@ public class TestIPC {
   }
 
   private void checkVersionMismatch() throws IOException {
-    try (final ServerSocket listenSocket = new ServerSocket()) {
+    try (ServerSocket listenSocket = new ServerSocket()) {
       listenSocket.bind(null);
       InetSocketAddress addr =
           (InetSocketAddress) listenSocket.getLocalSocketAddress();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc8bcf1e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestRollingAverages.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestRollingAverages.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestRollingAverages.java
index 44202e7..6ea3d26 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestRollingAverages.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/lib/TestRollingAverages.java
@@ -42,7 +42,7 @@ public class TestRollingAverages {
   public void testRollingAveragesEmptyRollover() throws Exception {
     final MetricsRecordBuilder rb = mockMetricsRecordBuilder();
     /* 5s interval and 2 windows */
-    try (final RollingAverages rollingAverages =
+    try (RollingAverages rollingAverages =
              new RollingAverages(5000, 2)) {
       /* Check it initially */
       rollingAverages.snapshot(rb, true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc8bcf1e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index 3d4cc72..d631b68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -254,7 +254,7 @@ public class TestMiniDFSCluster {
   @Test
   public void testClusterNoStorageTypeSetForDatanodes() throws IOException {
     final Configuration conf = new HdfsConfiguration();
-    try (final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(3).build()) {
       cluster.waitActive();
       ArrayList<DataNode> dataNodes = cluster.getDataNodes();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc8bcf1e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index dd28914..016a233 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -1847,7 +1847,7 @@ public class TestBalancer {
     for(int i = 0; i < lengths.length; i++) {
       final long size = lengths[i];
       final Path p = new Path("/file" + i + "_size" + size);
-      try(final OutputStream out = dfs.create(p)) {
+      try(OutputStream out = dfs.create(p)) {
         for(int j = 0; j < size; j++) {
           out.write(j);
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc8bcf1e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
index fe65429..1507844 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java
@@ -153,7 +153,7 @@ public class TestDataNodeFaultInjector {
       cluster.waitActive();
 
       final FileSystem fs = cluster.getFileSystem();
-      try (final FSDataOutputStream out = fs
+      try (FSDataOutputStream out = fs
           .create(new Path(baseDir, "test.data"), (short) 2)) {
         out.write(0x31);
         out.hflush();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc8bcf1e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
index 9adc187..7d7272a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
@@ -303,7 +303,7 @@ public class TestYarnClient {
     int pollIntervalMs = 1000;
     conf.setLong(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS,
         pollIntervalMs);
-    try (final YarnClient client = new MockYarnClient()) {
+    try (YarnClient client = new MockYarnClient()) {
       client.init(conf);
       client.start();
       // Submit the application and then interrupt it while its waiting




[33/50] [abbrv] hadoop git commit: HDFS-10816. TestComputeInvalidateWork#testDatanodeReRegistration fails due to race between test and replication monitor. Contributed by Eric Badger.

Posted by xy...@apache.org.
HDFS-10816. TestComputeInvalidateWork#testDatanodeReRegistration fails due to race between test and replication monitor. Contributed by Eric Badger.
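
The race: the block manager's redundancy (replication) monitor runs in
the background and can consume the invalidate work a test schedules
before the test gets to assert on it. The one-line fix quiesces that
thread during setup. Sketched below as the JUnit 4 fixture this test
already uses, abbreviated; only the stopRedundancyThread() call is new in
the patch:

    @Before
    public void setUp() throws Exception {
      cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(NUM_OF_DATANODES).build();
      cluster.waitActive();
      namesystem = cluster.getNamesystem();
      bm = namesystem.getBlockManager();
      // Stop the background redundancy monitor so it cannot drain
      // invalidate work between scheduling and assertion.
      BlockManagerTestUtil.stopRedundancyThread(bm);
    }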


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14f782b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14f782b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14f782b6

Branch: refs/heads/HDFS-7240
Commit: 14f782b6b960a818e0927edc7e32eb1fa51a2d08
Parents: 2ad147e
Author: Kihwal Lee <ki...@apache.org>
Authored: Mon Jun 5 15:17:43 2017 -0500
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:51 2017 -0700

----------------------------------------------------------------------
 .../hdfs/server/blockmanagement/TestComputeInvalidateWork.java      | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/14f782b6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
index 033f4d1..61e69f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
@@ -63,6 +63,7 @@ public class TestComputeInvalidateWork {
     namesystem = cluster.getNamesystem();
     bm = namesystem.getBlockManager();
     nodes = bm.getDatanodeManager().getHeartbeatManager().getDatanodes();
+    BlockManagerTestUtil.stopRedundancyThread(bm);
     assertEquals(nodes.length, NUM_OF_DATANODES);
   }
 




[46/50] [abbrv] hadoop git commit: HDFS-11708. Positional read will fail if replicas moved to different DNs after stream is opened. Contributed by Vinayakumar B.

Posted by xy...@apache.org.
HDFS-11708. Positional read will fail if replicas moved to different DNs after stream is opened. Contributed by Vinayakumar B.
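
Root cause: the positional read path kept retrying with the LocatedBlock
cached when the stream was opened, so once every original replica had
moved, every retry targeted stale locations. The patch makes
chooseDataNode() carry the refreshed block back inside DNAddrPair, and
the read loops adopt it. A condensed sketch of the resulting pattern,
where ChosenReplica and readFrom() are illustrative stand-ins for
DNAddrPair and the real reader plumbing:

    // Bundle the (possibly refreshed) locations with the chosen node.
    final class ChosenReplica {
      final DatanodeInfo node;
      final LocatedBlock freshBlock; // refreshed inside chooseDataNode()
      ChosenReplica(DatanodeInfo node, LocatedBlock freshBlock) {
        this.node = node;
        this.freshBlock = freshBlock;
      }
    }

    void fetchBlockByteRange(LocatedBlock block, long start, long end,
        ByteBuffer buf) throws IOException {
      while (true) {
        ChosenReplica replica = chooseDataNode(block, null);
        block = replica.freshBlock;     // adopt refreshed locations
        try {
          readFrom(replica.node, block, start, end, buf);
          return;
        } catch (IOException e) {
          addToDeadNodes(replica.node); // exclude this node and retry
        }
      }
    }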


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2f1ada6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2f1ada6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2f1ada6

Branch: refs/heads/HDFS-7240
Commit: f2f1ada640f3997a33a4fc70d6d8157269563e1b
Parents: 7d3bbf2
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Jun 7 10:11:23 2017 +0530
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:52 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  38 ++---
 .../hadoop/hdfs/DFSStripedInputStream.java      |   2 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |  46 ++++++
 .../java/org/apache/hadoop/hdfs/TestPread.java  | 146 +++++++++++++++++++
 .../server/datanode/TestBlockReplacement.java   |  40 +----
 5 files changed, 221 insertions(+), 51 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2f1ada6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index d388d00..77f5a92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -559,6 +559,8 @@ public class DFSInputStream extends FSInputStream
       chosenNode = retval.info;
       InetSocketAddress targetAddr = retval.addr;
       StorageType storageType = retval.storageType;
+      // Latest block, if refreshed by chooseDataNode()
+      targetBlock = retval.block;
 
       try {
         blockReader = getBlockReader(targetBlock, offsetIntoBlock,
@@ -915,7 +917,7 @@ public class DFSInputStream extends FSInputStream
         chosenNode.getXferAddr(dfsClient.getConf().isConnectToDnViaHostname());
     DFSClient.LOG.debug("Connecting to datanode {}", dnAddr);
     InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
-    return new DNAddrPair(chosenNode, targetAddr, storageType);
+    return new DNAddrPair(chosenNode, targetAddr, storageType, block);
   }
 
   /**
@@ -957,12 +959,13 @@ public class DFSInputStream extends FSInputStream
   protected void fetchBlockByteRange(LocatedBlock block, long start, long end,
       ByteBuffer buf, CorruptedBlocks corruptedBlocks)
       throws IOException {
-    block = refreshLocatedBlock(block);
     while (true) {
       DNAddrPair addressPair = chooseDataNode(block, null);
+      // Latest block, if refreshed internally
+      block = addressPair.block;
       try {
-        actualGetFromOneDataNode(addressPair, block, start, end,
-            buf, corruptedBlocks);
+        actualGetFromOneDataNode(addressPair, start, end, buf,
+            corruptedBlocks);
         return;
       } catch (IOException e) {
         checkInterrupted(e); // check if the read has been interrupted
@@ -983,8 +986,7 @@ public class DFSInputStream extends FSInputStream
       public ByteBuffer call() throws Exception {
         try (TraceScope ignored = dfsClient.getTracer().
             newScope("hedgedRead" + hedgedReadId, parentSpanId)) {
-          actualGetFromOneDataNode(datanode, block, start, end, bb,
-              corruptedBlocks);
+          actualGetFromOneDataNode(datanode, start, end, bb, corruptedBlocks);
           return bb;
         }
       }
@@ -995,27 +997,21 @@ public class DFSInputStream extends FSInputStream
    * Read data from one DataNode.
    *
    * @param datanode          the datanode from which to read data
-   * @param block             the located block containing the requested data
    * @param startInBlk        the startInBlk offset of the block
    * @param endInBlk          the endInBlk offset of the block
    * @param buf               the given byte buffer into which the data is read
    * @param corruptedBlocks   map recording list of datanodes with corrupted
    *                          block replica
    */
-  void actualGetFromOneDataNode(final DNAddrPair datanode, LocatedBlock block,
-      final long startInBlk, final long endInBlk, ByteBuffer buf,
-                                CorruptedBlocks corruptedBlocks)
+  void actualGetFromOneDataNode(final DNAddrPair datanode, final long startInBlk,
+      final long endInBlk, ByteBuffer buf, CorruptedBlocks corruptedBlocks)
       throws IOException {
     DFSClientFaultInjector.get().startFetchFromDatanode();
     int refetchToken = 1; // only need to get a new access token once
     int refetchEncryptionKey = 1; // only need to get a new encryption key once
     final int len = (int) (endInBlk - startInBlk + 1);
-
+    LocatedBlock block = datanode.block;
     while (true) {
-      // cached block locations may have been updated by chooseDataNode()
-      // or fetchBlockAt(). Always get the latest list of locations at the
-      // start of the loop.
-      block = refreshLocatedBlock(block);
       BlockReader reader = null;
       try {
         DFSClientFaultInjector.get().fetchFromDatanodeException();
@@ -1078,6 +1074,9 @@ public class DFSInputStream extends FSInputStream
           addToDeadNodes(datanode.info);
           throw new IOException(msg);
         }
+        // Refresh the block for updated tokens in case of token failures or
+        // encryption key failures.
+        block = refreshLocatedBlock(block);
       } finally {
         if (reader != null) {
           reader.close();
@@ -1113,7 +1112,6 @@ public class DFSInputStream extends FSInputStream
     ByteBuffer bb;
     int len = (int) (end - start + 1);
     int hedgedReadId = 0;
-    block = refreshLocatedBlock(block);
     while (true) {
       // see HDFS-6591, this metric is used to verify/catch unnecessary loops
       hedgedReadOpsLoopNumForTesting++;
@@ -1123,6 +1121,8 @@ public class DFSInputStream extends FSInputStream
         // chooseDataNode is a commitment. If no node, we go to
         // the NN to reget block locations. Only go here on first read.
         chosenNode = chooseDataNode(block, ignored);
+        // Latest block, if refreshed internally
+        block = chosenNode.block;
         bb = ByteBuffer.allocate(len);
         Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(
             chosenNode, block, start, end, bb,
@@ -1160,6 +1160,8 @@ public class DFSInputStream extends FSInputStream
           if (chosenNode == null) {
             chosenNode = chooseDataNode(block, ignored);
           }
+          // Latest block, if refreshed internally
+          block = chosenNode.block;
           bb = ByteBuffer.allocate(len);
           Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(
               chosenNode, block, start, end, bb,
@@ -1530,12 +1532,14 @@ public class DFSInputStream extends FSInputStream
     final DatanodeInfo info;
     final InetSocketAddress addr;
     final StorageType storageType;
+    final LocatedBlock block;
 
     DNAddrPair(DatanodeInfo info, InetSocketAddress addr,
-        StorageType storageType) {
+        StorageType storageType, LocatedBlock block) {
       this.info = info;
       this.addr = addr;
       this.storageType = storageType;
+      this.block = block;
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2f1ada6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 75ad022..d4d0646 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -236,7 +236,7 @@ public class DFSStripedInputStream extends DFSInputStream {
     BlockReader reader = null;
     final ReaderRetryPolicy retry = new ReaderRetryPolicy();
     DFSInputStream.DNAddrPair dnInfo =
-        new DFSInputStream.DNAddrPair(null, null, null);
+        new DFSInputStream.DNAddrPair(null, null, null, null);
 
     while (true) {
       try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2f1ada6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 2cfcc2b..038b6ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -50,6 +50,7 @@ import java.lang.reflect.Modifier;
 import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.Socket;
+import java.net.SocketException;
 import java.net.URI;
 import java.net.URL;
 import java.net.URLConnection;
@@ -122,7 +123,9 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -2055,6 +2058,49 @@ public class DFSTestUtil {
     return lastBlock;
   }
 
+  /*
+   * Copy a block from sourceProxy to destination. If the block becomes
+   * over-replicated, preferably remove it from source.
+   * Return true if a block is successfully copied; otherwise false.
+   */
+  public static boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
+      DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
+    return replaceBlock(block, source, sourceProxy, destination,
+        StorageType.DEFAULT, Status.SUCCESS);
+  }
+
+  /*
+   * Replace block
+   */
+  public static boolean replaceBlock(ExtendedBlock block, DatanodeInfo source,
+      DatanodeInfo sourceProxy, DatanodeInfo destination,
+      StorageType targetStorageType, Status opStatus) throws IOException,
+      SocketException {
+    Socket sock = new Socket();
+    try {
+      sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
+          HdfsConstants.READ_TIMEOUT);
+      sock.setKeepAlive(true);
+      // sendRequest
+      DataOutputStream out = new DataOutputStream(sock.getOutputStream());
+      new Sender(out).replaceBlock(block, targetStorageType,
+          BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
+          sourceProxy, null);
+      out.flush();
+      // receiveResponse
+      DataInputStream reply = new DataInputStream(sock.getInputStream());
+
+      BlockOpResponseProto proto = BlockOpResponseProto.parseDelimitedFrom(
+          reply);
+      while (proto.getStatus() == Status.IN_PROGRESS) {
+        proto = BlockOpResponseProto.parseDelimitedFrom(reply);
+      }
+      return proto.getStatus() == opStatus;
+    } finally {
+      sock.close();
+    }
+  }
+
   /**
    * Because currently DFSStripedOutputStream does not support hflush/hsync,
    * tests can use this method to flush all the buffered data to DataNodes.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2f1ada6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index 637f2df..85fc97b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -23,6 +23,8 @@ import static org.junit.Assert.assertTrue;
 import java.io.EOFException;
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Random;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
@@ -30,6 +32,8 @@ import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
@@ -38,6 +42,9 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
@@ -51,6 +58,8 @@ import org.mockito.Mockito;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
+import com.google.common.base.Supplier;
+
 /**
  * This class tests the DFS positional read functionality in a single node
  * mini-cluster.
@@ -542,6 +551,143 @@ public class TestPread {
     }
   }
 
+  /**
+   * Scenario: 1. Write a file with RF=2, DN1 and DN2<br>
+   * 2. Open the stream, Consider Locations are [DN1, DN2] in LocatedBlock.<br>
+   * 3. Move block from DN2 to DN3.<br>
+   * 4. Let block gets replicated to another DN3<br>
+   * 5. Stop DN1 also.<br>
+   * 6. Current valid Block locations in NameNode [DN1, DN3]<br>
+   * 7. Consider next calls to getBlockLocations() always returns DN3 as last
+   * location.<br>
+   */
+  @Test
+  public void testPreadFailureWithChangedBlockLocations() throws Exception {
+    doPreadTestWithChangedLocations();
+  }
+
+  /**
+   * Scenario: 1. Write a file with RF=2, DN1 and DN2<br>
+   * 2. Open the stream, Consider Locations are [DN1, DN2] in LocatedBlock.<br>
+   * 3. Move block from DN2 to DN3.<br>
+   * 4. Let block gets replicated to another DN3<br>
+   * 5. Stop DN1 also.<br>
+   * 6. Current valid Block locations in NameNode [DN1, DN3]<br>
+   * 7. Consider next calls to getBlockLocations() always returns DN3 as last
+   * location.<br>
+   */
+  @Test
+  public void testPreadHedgedFailureWithChangedBlockLocations()
+      throws Exception {
+    isHedgedRead = true;
+    doPreadTestWithChangedLocations();
+  }
+
+  private void doPreadTestWithChangedLocations()
+      throws IOException, TimeoutException, InterruptedException {
+    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.DEBUG);
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    if (isHedgedRead) {
+      conf.setInt(HdfsClientConfigKeys.HedgedRead.THREADPOOL_SIZE_KEY, 2);
+    }
+    try (MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(3).build()) {
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      Path p = new Path("/test");
+      String data = "testingmissingblock";
+      DFSTestUtil.writeFile(dfs, p, data);
+
+      FSDataInputStream in = dfs.open(p);
+      List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(in);
+      LocatedBlock lb = blocks.get(0);
+      DFSTestUtil.waitForReplication(cluster, lb.getBlock(), 1, 2, 0);
+      blocks = DFSTestUtil.getAllBlocks(in);
+      DatanodeInfo[] locations = null;
+      for (LocatedBlock locatedBlock : blocks) {
+        locations = locatedBlock.getLocations();
+        DFSClient.LOG
+            .info(locatedBlock.getBlock() + " " + Arrays.toString(locations));
+      }
+      final DatanodeInfo validDownLocation = locations[0];
+      final DFSClient client = dfs.getClient();
+      DFSClient dfsClient = Mockito.spy(client);
+      // Keep the valid location as the last one in the locations list
+      // for the second request onwards (the first mocked call returns
+      // the real locations unchanged).
+      final AtomicInteger count = new AtomicInteger(0);
+      Mockito.doAnswer(new Answer<LocatedBlocks>() {
+        @Override
+        public LocatedBlocks answer(InvocationOnMock invocation)
+            throws Throwable {
+          if (count.compareAndSet(0, 1)) {
+            return (LocatedBlocks) invocation.callRealMethod();
+          }
+          Object obj = invocation.callRealMethod();
+          LocatedBlocks locatedBlocks = (LocatedBlocks) obj;
+          LocatedBlock lb = locatedBlocks.get(0);
+          DatanodeInfo[] locations = lb.getLocations();
+          if (!(locations[0].getName().equals(validDownLocation.getName()))) {
+            // Latest location which is currently down, should be first
+            DatanodeInfo l = locations[0];
+            locations[0] = locations[locations.length - 1];
+            locations[locations.length - 1] = l;
+          }
+          return locatedBlocks;
+        }
+      }).when(dfsClient).getLocatedBlocks(p.toString(), 0);
+
+      // Find out the target node to move the block to.
+      DatanodeInfo[] nodes =
+          cluster.getNameNodeRpc().getDatanodeReport(DatanodeReportType.LIVE);
+      DatanodeInfo toMove = null;
+      List<DatanodeInfo> locationsList = Arrays.asList(locations);
+      for (DatanodeInfo node : nodes) {
+        if (locationsList.contains(node)) {
+          continue;
+        }
+        toMove = node;
+        break;
+      }
+      // STEP 2: Open stream
+      DFSInputStream din = dfsClient.open(p.toString());
+      // STEP 3: Move replica
+      final DatanodeInfo source = locations[1];
+      final DatanodeInfo destination = toMove;
+      DFSTestUtil.replaceBlock(lb.getBlock(), source, locations[1], toMove);
+      // Wait for replica to get deleted
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+
+        @Override
+        public Boolean get() {
+          try {
+            LocatedBlocks lbs = dfsClient.getLocatedBlocks(p.toString(), 0);
+            LocatedBlock lb = lbs.get(0);
+            List<DatanodeInfo> locations = Arrays.asList(lb.getLocations());
+            DFSClient.LOG
+                .info("Source :" + source + ", destination: " + destination);
+            DFSClient.LOG.info("Got updated locations :" + locations);
+            return locations.contains(destination)
+                && !locations.contains(source);
+          } catch (IOException e) {
+            DFSClient.LOG.error("Problem in getting block locations", e);
+          }
+          return null;
+        }
+      }, 1000, 10000);
+      DFSTestUtil.waitForReplication(cluster, lb.getBlock(), 1, 2, 0);
+      // STEP 4: Stop first node in new locations
+      cluster.stopDataNode(validDownLocation.getName());
+      DFSClient.LOG.info("Starting read");
+      byte[] buf = new byte[1024];
+      int n = din.read(0, buf, 0, data.length());
+      assertEquals(data.length(), n);
+      assertEquals("Data should be read", data, new String(buf, 0, n));
+      DFSClient.LOG.info("Read completed");
+    }
+  }
+
   public static void main(String[] args) throws Exception {
     new TestPread().testPreadDFS();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2f1ada6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
index 8992d47..97255ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.net.Socket;
 import java.net.SocketException;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -48,19 +48,14 @@ import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
@@ -371,7 +366,7 @@ public class TestBlockReplacement {
    */
   private boolean replaceBlock( ExtendedBlock block, DatanodeInfo source,
       DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
-    return replaceBlock(block, source, sourceProxy, destination,
+    return DFSTestUtil.replaceBlock(block, source, sourceProxy, destination,
         StorageType.DEFAULT, Status.SUCCESS);
   }
 
@@ -385,29 +380,8 @@ public class TestBlockReplacement {
       DatanodeInfo destination,
       StorageType targetStorageType,
       Status opStatus) throws IOException, SocketException {
-    Socket sock = new Socket();
-    try {
-      sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
-          HdfsConstants.READ_TIMEOUT);
-      sock.setKeepAlive(true);
-      // sendRequest
-      DataOutputStream out = new DataOutputStream(sock.getOutputStream());
-      new Sender(out).replaceBlock(block, targetStorageType,
-          BlockTokenSecretManager.DUMMY_TOKEN, source.getDatanodeUuid(),
-          sourceProxy, null);
-      out.flush();
-      // receiveResponse
-      DataInputStream reply = new DataInputStream(sock.getInputStream());
-
-      BlockOpResponseProto proto =
-          BlockOpResponseProto.parseDelimitedFrom(reply);
-      while (proto.getStatus() == Status.IN_PROGRESS) {
-        proto = BlockOpResponseProto.parseDelimitedFrom(reply);
-      }
-      return proto.getStatus() == opStatus;
-    } finally {
-      sock.close();
-    }
+    return DFSTestUtil.replaceBlock(block, source, sourceProxy, destination,
+        targetStorageType, opStatus);
   }
 
   /**




[40/50] [abbrv] hadoop git commit: HDFS-11929. Document missing processor of hdfs oiv_legacy command. Contributed by LiXin Ge.

Posted by xy...@apache.org.
HDFS-11929. Document missing processor of hdfs oiv_legacy command. Contributed by LiXin Ge.
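
With this change the documentation matches the tool: NameDistribution was
already a valid processor, only the usage text and docs omitted it. An
example invocation (INPUT_FILE and OUTPUT_FILE are placeholders, as in
the usage string below):

    hdfs oiv_legacy -p NameDistribution -i INPUT_FILE -o OUTPUT_FILE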


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d1d4960
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d1d4960
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d1d4960

Branch: refs/heads/HDFS-7240
Commit: 7d1d49603d349db1374765fa876d3fddee3244db
Parents: f2f1ada
Author: Yiqun Lin <yq...@apache.org>
Authored: Wed Jun 7 13:42:13 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:52 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java   | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md  | 2 +-
 .../hadoop-hdfs/src/site/markdown/HdfsImageViewer.md               | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d1d4960/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
index c542d90..46a9c75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewer.java
@@ -94,7 +94,7 @@ public class OfflineImageViewer {
           + "Optional command line arguments:\n"
           + "-p,--processor <arg>   Select which type of processor to apply\n"
           + "                       against image file."
-          + " (Ls|XML|Delimited|Indented|FileDistribution).\n"
+          + " (Ls|XML|Delimited|Indented|FileDistribution|NameDistribution).\n"
           + "-h,--help              Display usage information and exit\n"
           + "-printToScreen         For processors that write to a file, also\n"
           + "                       output to screen. On large image files this\n"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d1d4960/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 6765a8f..ffffee9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -233,7 +233,7 @@ Usage: `hdfs oiv_legacy [OPTIONS] -i INPUT_FILE -o OUTPUT_FILE`
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
-| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Valid options are Ls (default), XML, Delimited, Indented, and FileDistribution. |
+| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Valid options are Ls (default), XML, Delimited, Indented, FileDistribution and NameDistribution. |
 | `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
 | `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
 | `-format` | Format the output result in a human-readable fashion rather than a number of bytes. (false by default). This option is used with FileDistribution processor. |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d1d4960/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
index 10d98b2..9baadc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsImageViewer.md
@@ -238,7 +238,7 @@ Due to the internal layout changes introduced by the ProtocolBuffer-based fsimag
 |:---- |:---- |
 | `-i`\|`--inputFile` *input file* | Specify the input fsimage file to process. Required. |
 | `-o`\|`--outputFile` *output file* | Specify the output filename, if the specified output processor generates one. If the specified file already exists, it is silently overwritten. Required. |
-| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Valid options are Ls (default), XML, Delimited, Indented, and FileDistribution. |
+| `-p`\|`--processor` *processor* | Specify the image processor to apply against the image file. Valid options are Ls (default), XML, Delimited, Indented, FileDistribution and NameDistribution. |
 | `-maxSize` *size* | Specify the range [0, maxSize] of file sizes to be analyzed in bytes (128GB by default). This option is used with FileDistribution processor. |
 | `-step` *size* | Specify the granularity of the distribution in bytes (2MB by default). This option is used with FileDistribution processor. |
 | `-format` | Format the output result in a human-readable fashion rather than a number of bytes. (false by default). This option is used with FileDistribution processor. |




[36/50] [abbrv] hadoop git commit: HADOOP-14035. Reduce fair call queue backoff's impact on clients. Contributed by Daryn Sharp.

Posted by xy...@apache.org.
HADOOP-14035. Reduce fair call queue backoff's impact on clients. Contributed by Daryn Sharp.
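
The mechanism, condensed: instead of the server probing shouldBackOff()
and offer() separately, the call queue itself throws a
CallQueueOverflowException -- an IllegalStateException, which
BlockingQueue.add() is permitted to throw -- carrying both a
RetriableException for the client and an RPC status telling the server
whether to keep the connection. Only overflow of the lowest-priority
sub-queue maps to the DISCONNECT status, so clients that merely hit a
momentarily full higher-priority queue are asked to retry without being
disconnected. The server side reduces to the shape below (taken from the
patch, lightly abbreviated):

    private void internalQueueCall(Call call)
        throws IOException, InterruptedException {
      try {
        callQueue.put(call); // may throw CallQueueOverflowException
      } catch (CallQueueOverflowException cqe) {
        rpcMetrics.incrClientBackoff();
        throw cqe.getCause(); // RetriableException("Server too busy")
      }
    }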


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd77c7f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd77c7f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd77c7f7

Branch: refs/heads/HDFS-7240
Commit: fd77c7f76bcadfb10f789a95700fd50972a2f292
Parents: 4c06897
Author: Kihwal Lee <ki...@apache.org>
Authored: Tue Jun 6 08:34:33 2017 -0500
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:51 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/ipc/CallQueueManager.java | 127 ++++++++++++++-
 .../org/apache/hadoop/ipc/FairCallQueue.java    |  90 ++++++++---
 .../main/java/org/apache/hadoop/ipc/Server.java |  22 ++-
 .../apache/hadoop/ipc/TestCallQueueManager.java |  78 +++++++++-
 .../apache/hadoop/ipc/TestFairCallQueue.java    | 156 +++++++++++++++++++
 .../java/org/apache/hadoop/ipc/TestRPC.java     |   4 +-
 6 files changed, 441 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd77c7f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
index 50ed353..2764788 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/CallQueueManager.java
@@ -18,8 +18,12 @@
 
 package org.apache.hadoop.ipc;
 
+import java.io.IOException;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
+import java.util.AbstractQueue;
+import java.util.Collection;
+import java.util.Iterator;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
@@ -28,11 +32,15 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
+
+import com.google.common.annotations.VisibleForTesting;
 
 /**
  * Abstracts queue operations for different blocking queues.
  */
-public class CallQueueManager<E> {
+public class CallQueueManager<E extends Schedulable>
+    extends AbstractQueue<E> implements BlockingQueue<E> {
   public static final Log LOG = LogFactory.getLog(CallQueueManager.class);
   // Number of checkpoints for empty queue.
   private static final int CHECKPOINT_NUM = 20;
@@ -76,6 +84,15 @@ public class CallQueueManager<E> {
         maxQueueSize + " scheduler: " + schedulerClass);
   }
 
+  @VisibleForTesting // only!
+  CallQueueManager(BlockingQueue<E> queue, RpcScheduler scheduler,
+      boolean clientBackOffEnabled) {
+    this.putRef = new AtomicReference<BlockingQueue<E>>(queue);
+    this.takeRef = new AtomicReference<BlockingQueue<E>>(queue);
+    this.scheduler = scheduler;
+    this.clientBackOffEnabled = clientBackOffEnabled;
+  }
+
   private static <T extends RpcScheduler> T createScheduler(
       Class<T> theClass, int priorityLevels, String ns, Configuration conf) {
     // Used for custom, configurable scheduler
@@ -190,12 +207,40 @@ public class CallQueueManager<E> {
   }
 
   /**
-   * Insert e into the backing queue or block until we can.
+   * Insert e into the backing queue or block until we can.  If client
+   * backoff is enabled this method behaves like add which throws if
+   * the queue overflows.
    * If we block and the queue changes on us, we will insert while the
    * queue is drained.
    */
+  @Override
   public void put(E e) throws InterruptedException {
-    putRef.get().put(e);
+    if (!isClientBackoffEnabled()) {
+      putRef.get().put(e);
+    } else if (shouldBackOff(e)) {
+      throwBackoff();
+    } else {
+      add(e);
+    }
+  }
+
+  @Override
+  public boolean add(E e) {
+    try {
+      return putRef.get().add(e);
+    } catch (CallQueueOverflowException ex) {
+      // queue provided a custom exception that may control if the client
+      // should be disconnected.
+      throw ex;
+    } catch (IllegalStateException ise) {
+      throwBackoff();
+    }
+    return true;
+  }
+
+  // ideally this behavior should be controllable too.
+  private void throwBackoff() throws IllegalStateException {
+    throw CallQueueOverflowException.DISCONNECT;
   }
 
   /**
@@ -203,14 +248,37 @@ public class CallQueueManager<E> {
    * Return true if e is queued.
    * Return false if the queue is full.
    */
-  public boolean offer(E e) throws InterruptedException {
+  @Override
+  public boolean offer(E e) {
     return putRef.get().offer(e);
   }
 
+  @Override
+  public boolean offer(E e, long timeout, TimeUnit unit)
+      throws InterruptedException {
+    return putRef.get().offer(e, timeout, unit);
+  }
+
+  @Override
+  public E peek() {
+    return takeRef.get().peek();
+  }
+
+  @Override
+  public E poll() {
+    return takeRef.get().poll();
+  }
+
+  @Override
+  public E poll(long timeout, TimeUnit unit) throws InterruptedException {
+    return takeRef.get().poll(timeout, unit);
+  }
+
   /**
    * Retrieve an E from the backing queue or block until we can.
    * Guaranteed to return an element from the current queue.
    */
+  @Override
   public E take() throws InterruptedException {
     E e = null;
 
@@ -221,10 +289,16 @@ public class CallQueueManager<E> {
     return e;
   }
 
+  @Override
   public int size() {
     return takeRef.get().size();
   }
 
+  @Override
+  public int remainingCapacity() {
+    return takeRef.get().remainingCapacity();
+  }
+
   /**
    * Read the number of levels from the configuration.
    * This will affect the FairCallQueue's overall capacity.
@@ -308,4 +382,49 @@ public class CallQueueManager<E> {
   private String stringRepr(Object o) {
     return o.getClass().getName() + '@' + Integer.toHexString(o.hashCode());
   }
+
+  @Override
+  public int drainTo(Collection<? super E> c) {
+    return takeRef.get().drainTo(c);
+  }
+
+  @Override
+  public int drainTo(Collection<? super E> c, int maxElements) {
+    return takeRef.get().drainTo(c, maxElements);
+  }
+
+  @Override
+  public Iterator<E> iterator() {
+    return takeRef.get().iterator();
+  }
+
+  // exception that mimics the standard ISE thrown by blocking queues but
+  // embeds a rpc server exception for the client to retry and indicate
+  // if the client should be disconnected.
+  @SuppressWarnings("serial")
+  static class CallQueueOverflowException extends IllegalStateException {
+    private static String TOO_BUSY = "Server too busy";
+    static final CallQueueOverflowException KEEPALIVE =
+        new CallQueueOverflowException(
+            new RetriableException(TOO_BUSY),
+            RpcStatusProto.ERROR);
+    static final CallQueueOverflowException DISCONNECT =
+        new CallQueueOverflowException(
+            new RetriableException(TOO_BUSY + " - disconnecting"),
+            RpcStatusProto.FATAL);
+
+    CallQueueOverflowException(final IOException ioe,
+        final RpcStatusProto status) {
+      super("Queue full", new RpcServerException(ioe.getMessage(), ioe){
+        @Override
+        public RpcStatusProto getRpcStatusProto() {
+          return status;
+        }
+      });
+    }
+    @Override
+    public IOException getCause() {
+      return (IOException)super.getCause();
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd77c7f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
index 820f24c..8bcaf05 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueue.java
@@ -36,6 +36,7 @@ import org.apache.commons.lang.NotImplementedException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
 import org.apache.hadoop.metrics2.util.MBeans;
 
 /**
@@ -134,45 +135,84 @@ public class FairCallQueue<E extends Schedulable> extends AbstractQueue<E>
   /* AbstractQueue and BlockingQueue methods */
 
   /**
-   * Put and offer follow the same pattern:
+   * Add, put, and offer follow the same pattern:
    * 1. Get the assigned priorityLevel from the call by scheduler
    * 2. Get the nth sub-queue matching this priorityLevel
    * 3. delegate the call to this sub-queue.
    *
    * But differ in how they handle overflow:
-   * - Put will move on to the next queue until it lands on the last queue
+   * - Add will move on to the next queue, throw on last queue overflow
+   * - Put will move on to the next queue, block on last queue overflow
    * - Offer does not attempt other queues on overflow
    */
-  @Override
-  public void put(E e) throws InterruptedException {
-    int priorityLevel = e.getPriorityLevel();
 
-    final int numLevels = this.queues.size();
-    while (true) {
-      BlockingQueue<E> q = this.queues.get(priorityLevel);
-      boolean res = q.offer(e);
-      if (!res) {
-        // Update stats
-        this.overflowedCalls.get(priorityLevel).getAndIncrement();
-
-        // If we failed to insert, try again on the next level
-        priorityLevel++;
-
-        if (priorityLevel == numLevels) {
-          // That was the last one, we will block on put in the last queue
-          // Delete this line to drop the call
-          this.queues.get(priorityLevel-1).put(e);
-          break;
-        }
-      } else {
-        break;
-      }
+  @Override
+  public boolean add(E e) {
+    final int priorityLevel = e.getPriorityLevel();
+    // try offering to all queues.
+    if (!offerQueues(priorityLevel, e, true)) {
+      // only disconnect the lowest priority users that overflow the queue.
+      throw (priorityLevel == queues.size() - 1)
+          ? CallQueueOverflowException.DISCONNECT
+          : CallQueueOverflowException.KEEPALIVE;
     }
+    return true;
+  }
 
+  @Override
+  public void put(E e) throws InterruptedException {
+    final int priorityLevel = e.getPriorityLevel();
+    // try offering to all but last queue, put on last.
+    if (!offerQueues(priorityLevel, e, false)) {
+      putQueue(queues.size() - 1, e);
+    }
+  }
 
+  /**
+   * Put the element in a queue of a specific priority.
+   * @param priority - queue priority
+   * @param e - element to add
+   */
+  @VisibleForTesting
+  void putQueue(int priority, E e) throws InterruptedException {
+    queues.get(priority).put(e);
     signalNotEmpty();
   }
 
+  /**
+   * Offer the element to queue of a specific priority.
+   * @param priority - queue priority
+   * @param e - element to add
+   * @return boolean if added to the given queue
+   */
+  @VisibleForTesting
+  boolean offerQueue(int priority, E e) {
+    boolean ret = queues.get(priority).offer(e);
+    if (ret) {
+      signalNotEmpty();
+    }
+    return ret;
+  }
+
+  /**
+   * Offer the element to queue of the given or lower priority.
+   * @param priority - starting queue priority
+   * @param e - element to add
+   * @param includeLast - whether to attempt last queue
+   * @return boolean if added to a queue
+   */
+  private boolean offerQueues(int priority, E e, boolean includeLast) {
+    int lastPriority = queues.size() - (includeLast ? 1 : 2);
+    for (int i=priority; i <= lastPriority; i++) {
+      if (offerQueue(i, e)) {
+        return true;
+      }
+      // Update stats
+      overflowedCalls.get(i).getAndIncrement();
+    }
+    return false;
+  }
+
   @Override
   public boolean offer(E e, long timeout, TimeUnit unit)
       throws InterruptedException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd77c7f7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index f3b9a82..df108b8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -83,6 +83,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
 import org.apache.hadoop.ipc.RPC.RpcInvoker;
 import org.apache.hadoop.ipc.RPC.VersionMismatch;
 import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
@@ -2479,7 +2480,9 @@ public abstract class Server {
       call.setPriorityLevel(callQueue.getPriorityLevel(call));
 
       try {
-        queueCall(call);
+        internalQueueCall(call);
+      } catch (RpcServerException rse) {
+        throw rse;
       } catch (IOException ioe) {
         throw new FatalRpcServerException(
             RpcErrorCodeProto.ERROR_RPC_SERVER, ioe);
@@ -2616,9 +2619,19 @@ public abstract class Server {
   }
 
   public void queueCall(Call call) throws IOException, InterruptedException {
-    if (!callQueue.isClientBackoffEnabled()) {
+    // external non-rpc calls don't need server exception wrapper.
+    try {
+      internalQueueCall(call);
+    } catch (RpcServerException rse) {
+      throw (IOException)rse.getCause();
+    }
+  }
+
+  private void internalQueueCall(Call call)
+      throws IOException, InterruptedException {
+    try {
       callQueue.put(call); // queue the call; maybe blocked here
-    } else if (callQueue.shouldBackOff(call) || !callQueue.offer(call)) {
+    } catch (CallQueueOverflowException cqe) {
       // If rpc scheduler indicates back off based on performance degradation
       // such as response time or rpc queue is full, we will ask the client
       // to back off by throwing RetriableException. Whether the client will
@@ -2626,7 +2639,8 @@ public abstract class Server {
       // For example, IPC clients using FailoverOnNetworkExceptionRetry handle
       // RetriableException.
       rpcMetrics.incrClientBackoff();
-      throw new RetriableException("Server is too busy.");
+      // unwrap retriable exception.
+      throw cqe.getCause();
     }
   }
 

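The Server.java change above relies on CallQueueOverflowException carrying a
ready-made cause: the queue can raise it from BlockingQueue.add(), whose
contract only allows unchecked exceptions, and the server unwraps the embedded
IOException (in Hadoop, a RetriableException wrapped in an RpcServerException)
to ask the client to back off. A hedged sketch of that wrap/unwrap pattern;
OverflowSignal, BusyQueue, and Frontend are invented names, not Hadoop
classes:

import java.io.IOException;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;

// Unchecked overflow signal that carries a pre-built IOException cause.
class OverflowSignal extends IllegalStateException {
  OverflowSignal(IOException cause) {
    super(cause);
  }
  @Override
  public IOException getCause() {
    return (IOException) super.getCause();
  }
}

class BusyQueue<E> {
  private final BlockingQueue<E> backing = new ArrayBlockingQueue<>(1);

  // Non-blocking insert that signals overflow instead of returning false.
  void addOrSignal(E e) {
    if (!backing.offer(e)) {
      throw new OverflowSignal(new IOException("Server is too busy."));
    }
  }
}

class Frontend {
  static <E> void queueCall(BusyQueue<E> q, E call) throws IOException {
    try {
      q.addOrSignal(call);   // may throw OverflowSignal (unchecked)
    } catch (OverflowSignal os) {
      throw os.getCause();   // unwrap so the caller sees a plain IOException
    }
  }
}

Building the exception and its cause ahead of time is what lets the hot path
rethrow without allocating, and it is why the test below can assertSame
against a shared constant such as CallQueueOverflowException.KEEPALIVE.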
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd77c7f7/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java
index 1211657..a5a0b00 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestCallQueueManager.java
@@ -19,8 +19,14 @@
 package org.apache.hadoop.ipc;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.reset;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
 
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -29,8 +35,10 @@ import java.util.concurrent.LinkedBlockingQueue;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestCallQueueManager {
   private CallQueueManager<FakeCall> manager;
@@ -311,11 +319,21 @@ public class TestCallQueueManager {
     assertEquals(totalCallsConsumed, totalCallsCreated);
   }
 
-  public static class ExceptionFakeCall {
+  public static class ExceptionFakeCall implements Schedulable {
     public ExceptionFakeCall() {
       throw new IllegalArgumentException("Exception caused by call queue " +
           "constructor.!!");
     }
+
+    @Override
+    public UserGroupInformation getUserGroupInformation() {
+      return null;
+    }
+
+    @Override
+    public int getPriorityLevel() {
+      return 0;
+    }
   }
 
   public static class ExceptionFakeScheduler {
@@ -359,4 +377,62 @@ public class TestCallQueueManager {
           .getMessage());
     }
   }
+
+  @SuppressWarnings("unchecked")
+  @Test
+  public void testCallQueueOverflowExceptions() throws Exception {
+    RpcScheduler scheduler = Mockito.mock(RpcScheduler.class);
+    BlockingQueue<Schedulable> queue = Mockito.mock(BlockingQueue.class);
+    CallQueueManager<Schedulable> cqm =
+        Mockito.spy(new CallQueueManager<>(queue, scheduler, false));
+    Schedulable call = new FakeCall(0);
+
+    // call queue exceptions are passed through as-is
+    doThrow(CallQueueOverflowException.KEEPALIVE).when(queue).add(call);
+    try {
+      cqm.add(call);
+      fail("didn't throw");
+    } catch (CallQueueOverflowException cqe) {
+      assertSame(CallQueueOverflowException.KEEPALIVE, cqe);
+    }
+
+    // standard exception for blocking queue full converted to overflow
+    // exception.
+    doThrow(new IllegalStateException()).when(queue).add(call);
+    try {
+      cqm.add(call);
+      fail("didn't throw");
+    } catch (Exception ex) {
+      assertTrue(ex.toString(), ex instanceof CallQueueOverflowException);
+    }
+
+    // backoff disabled: put() delegates to the queue's blocking put().
+    reset(queue);
+    cqm.setClientBackoffEnabled(false);
+    cqm.put(call);
+    verify(queue, times(1)).put(call);
+    verify(queue, times(0)).add(call);
+
+    // backoff enabled: put() uses the queue's non-blocking add().
+    reset(queue);
+    cqm.setClientBackoffEnabled(true);
+    doReturn(Boolean.FALSE).when(cqm).shouldBackOff(call);
+    cqm.put(call);
+    verify(queue, times(0)).put(call);
+    verify(queue, times(1)).add(call);
+    reset(queue);
+
+    // backoff is enabled, put + scheduler backoff = overflow exception.
+    reset(queue);
+    cqm.setClientBackoffEnabled(true);
+    doReturn(Boolean.TRUE).when(cqm).shouldBackOff(call);
+    try {
+      cqm.put(call);
+      fail("didn't fail");
+    } catch (Exception ex) {
+      assertTrue(ex.toString(), ex instanceof CallQueueOverflowException);
+    }
+    verify(queue, times(0)).put(call);
+    verify(queue, times(0)).add(call);
+  }
 }
\ No newline at end of file

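The test above drives the overflow paths without a real queue: Mockito.mock
builds a stand-in BlockingQueue, Mockito.spy wraps the real CallQueueManager,
doThrow/doReturn stub behavior without invoking it, and verify(times) asserts
how calls were routed. A self-contained refresher on those idioms (requires
Mockito on the classpath; the class name and queue contents are arbitrary):

import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import java.util.concurrent.BlockingQueue;

public class MockRoutingExample {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) throws Exception {
    BlockingQueue<String> queue = mock(BlockingQueue.class);

    // Stub: make the non-blocking path report "full" without touching a queue.
    doThrow(new IllegalStateException("full")).when(queue).add("x");

    try {
      queue.add("x");
    } catch (IllegalStateException expected) {
      // real code would translate this into a back-off signal
    }

    // Verify routing: add() was attempted exactly once, put() never reached.
    verify(queue, times(1)).add("x");
    verify(queue, times(0)).put("x");
  }
}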
http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd77c7f7/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
index 901a771..6b1cd29 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestFairCallQueue.java
@@ -18,13 +18,19 @@
 
 package org.apache.hadoop.ipc;
 
+import static org.mockito.Mockito.anyInt;
+import static org.mockito.Mockito.anyObject;
+import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.times;
 
 import junit.framework.TestCase;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
+
+import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
@@ -34,7 +40,10 @@ import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Test;
+import org.mockito.Mockito;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ipc.CallQueueManager.CallQueueOverflowException;
+import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.RpcResponseHeaderProto.RpcStatusProto;
 
 public class TestFairCallQueue extends TestCase {
   private FairCallQueue<Schedulable> fcq;
@@ -133,6 +142,153 @@ public class TestFairCallQueue extends TestCase {
     assertNull(fcq.poll());
   }
 
+  @SuppressWarnings("unchecked") // for mock reset.
+  @Test
+  public void testInsertion() throws Exception {
+    Configuration conf = new Configuration();
+    // 3 queues, 2 slots each.
+    fcq = Mockito.spy(new FairCallQueue<Schedulable>(3, 6, "ns", conf));
+
+    Schedulable p0 = mockCall("a", 0);
+    Schedulable p1 = mockCall("b", 1);
+    Schedulable p2 = mockCall("c", 2);
+
+    // add to first queue.
+    Mockito.reset(fcq);
+    fcq.add(p0);
+    Mockito.verify(fcq, times(1)).offerQueue(0, p0);
+    Mockito.verify(fcq, times(0)).offerQueue(1, p0);
+    Mockito.verify(fcq, times(0)).offerQueue(2, p0);
+    Mockito.reset(fcq);
+    // 0:x- 1:-- 2:--
+
+    // add to second queue.
+    Mockito.reset(fcq);
+    fcq.add(p1);
+    Mockito.verify(fcq, times(0)).offerQueue(0, p1);
+    Mockito.verify(fcq, times(1)).offerQueue(1, p1);
+    Mockito.verify(fcq, times(0)).offerQueue(2, p1);
+    // 0:x- 1:x- 2:--
+
+    // add to first queue.
+    Mockito.reset(fcq);
+    fcq.add(p0);
+    Mockito.verify(fcq, times(1)).offerQueue(0, p0);
+    Mockito.verify(fcq, times(0)).offerQueue(1, p0);
+    Mockito.verify(fcq, times(0)).offerQueue(2, p0);
+    // 0:xx 1:x- 2:--
+
+    // add to first full queue spills over to second.
+    Mockito.reset(fcq);
+    fcq.add(p0);
+    Mockito.verify(fcq, times(1)).offerQueue(0, p0);
+    Mockito.verify(fcq, times(1)).offerQueue(1, p0);
+    Mockito.verify(fcq, times(0)).offerQueue(2, p0);
+    // 0:xx 1:xx 2:--
+
+    // add to second full queue spills over to third.
+    Mockito.reset(fcq);
+    fcq.add(p1);
+    Mockito.verify(fcq, times(0)).offerQueue(0, p1);
+    Mockito.verify(fcq, times(1)).offerQueue(1, p1);
+    Mockito.verify(fcq, times(1)).offerQueue(2, p1);
+    // 0:xx 1:xx 2:x-
+
+    // add to first and second full queue spills over to third.
+    Mockito.reset(fcq);
+    fcq.add(p0);
+    Mockito.verify(fcq, times(1)).offerQueue(0, p0);
+    Mockito.verify(fcq, times(1)).offerQueue(1, p0);
+    Mockito.verify(fcq, times(1)).offerQueue(2, p0);
+    // 0:xx 1:xx 2:xx
+
+    // adding non-lowest priority with all queues full throws a
+    // non-disconnecting rpc server exception.
+    Mockito.reset(fcq);
+    try {
+      fcq.add(p0);
+      fail("didn't fail");
+    } catch (IllegalStateException ise) {
+      checkOverflowException(ise, RpcStatusProto.ERROR);
+    }
+    Mockito.verify(fcq, times(1)).offerQueue(0, p0);
+    Mockito.verify(fcq, times(1)).offerQueue(1, p0);
+    Mockito.verify(fcq, times(1)).offerQueue(2, p0);
+
+    // adding non-lowest priority with all queues full throws a
+    // non-disconnecting rpc server exception.
+    Mockito.reset(fcq);
+    try {
+      fcq.add(p1);
+      fail("didn't fail");
+    } catch (IllegalStateException ise) {
+      checkOverflowException(ise, RpcStatusProto.ERROR);
+    }
+    Mockito.verify(fcq, times(0)).offerQueue(0, p1);
+    Mockito.verify(fcq, times(1)).offerQueue(1, p1);
+    Mockito.verify(fcq, times(1)).offerQueue(2, p1);
+
+    // adding lowest priority with all queues full throws a
+    // fatal disconnecting rpc server exception.
+    Mockito.reset(fcq);
+    try {
+      fcq.add(p2);
+      fail("didn't fail");
+    } catch (IllegalStateException ise) {
+      checkOverflowException(ise, RpcStatusProto.FATAL);
+    }
+    Mockito.verify(fcq, times(0)).offerQueue(0, p2);
+    Mockito.verify(fcq, times(0)).offerQueue(1, p2);
+    Mockito.verify(fcq, times(1)).offerQueue(2, p2);
+    Mockito.reset(fcq);
+
+    // used to abort what would be a blocking operation.
+    Exception stopPuts = new RuntimeException();
+
+    // put should offer to all but last subqueue, only put to last subqueue.
+    Mockito.reset(fcq);
+    try {
+      doThrow(stopPuts).when(fcq).putQueue(anyInt(), anyObject());
+      fcq.put(p0);
+      fail("didn't fail");
+    } catch (Exception e) {
+      assertSame(stopPuts, e);
+    }
+    Mockito.verify(fcq, times(1)).offerQueue(0, p0);
+    Mockito.verify(fcq, times(1)).offerQueue(1, p0);
+    Mockito.verify(fcq, times(0)).offerQueue(2, p0); // expect put, not offer.
+    Mockito.verify(fcq, times(1)).putQueue(2, p0);
+
+    // put with lowest priority should not offer, just put.
+    Mockito.reset(fcq);
+    try {
+      doThrow(stopPuts).when(fcq).putQueue(anyInt(), anyObject());
+      fcq.put(p2);
+      fail("didn't fail");
+    } catch (Exception e) {
+      assertSame(stopPuts, e);
+    }
+    Mockito.verify(fcq, times(0)).offerQueue(0, p2);
+    Mockito.verify(fcq, times(0)).offerQueue(1, p2);
+    Mockito.verify(fcq, times(0)).offerQueue(2, p2);
+    Mockito.verify(fcq, times(1)).putQueue(2, p2);
+  }
+
+  private void checkOverflowException(Exception ex, RpcStatusProto status) {
+    // should be an overflow exception
+    assertTrue(ex.getClass().getName() + " != CallQueueOverflowException",
+        ex instanceof CallQueueOverflowException);
+    IOException ioe = ((CallQueueOverflowException)ex).getCause();
+    assertNotNull(ioe);
+    assertTrue(ioe.getClass().getName() + " != RpcServerException",
+        ioe instanceof RpcServerException);
+    RpcServerException rse = (RpcServerException)ioe;
+    // check error/fatal status and if it embeds a retriable ex.
+    assertEquals(status, rse.getRpcStatusProto());
+    assertTrue(rse.getClass().getName() + " != RetriableException",
+        rse.getCause() instanceof RetriableException);
+  }
+
   //
   // Ensure that FairCallQueue properly implements BlockingQueue
   //

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd77c7f7/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 3cc9916..166b205 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -1123,7 +1123,7 @@ public class TestRPC extends TestRpcBase {
                 return null;
               }
             }));
-        verify(spy, timeout(500).times(i + 1)).offer(Mockito.<Call>anyObject());
+        verify(spy, timeout(500).times(i + 1)).add(Mockito.<Call>anyObject());
       }
       try {
         proxy.sleep(null, newSleepRequest(100));
@@ -1194,7 +1194,7 @@ public class TestRPC extends TestRpcBase {
                 return null;
               }
             }));
-        verify(spy, timeout(500).times(i + 1)).offer(Mockito.<Call>anyObject());
+        verify(spy, timeout(500).times(i + 1)).add(Mockito.<Call>anyObject());
       }
       // Start another sleep RPC call and verify the call is backed off due to
       // avg response time(3s) exceeds threshold (2s).

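The verify(spy, timeout(500).times(i + 1)) assertions above use Mockito's
asynchronous verification: timeout() keeps polling the mock's invocation
record until the expected interaction arrives from another thread, failing
only after the given number of milliseconds. A minimal sketch (the class name
is invented; requires Mockito on the classpath):

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.timeout;
import static org.mockito.Mockito.verify;

import java.util.concurrent.BlockingQueue;

public class AsyncVerifyExample {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    BlockingQueue<String> queue = mock(BlockingQueue.class);

    new Thread(() -> queue.add("call")).start();  // asynchronous producer

    // Passes as soon as add("call") is observed, fails after 500 ms otherwise.
    verify(queue, timeout(500).times(1)).add("call");
  }
}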



[44/50] [abbrv] hadoop git commit: HDFS-11840. Log HDFS Mover exception message of exit to its own log. Contributed by LiXin Ge.

Posted by xy...@apache.org.
HDFS-11840. Log HDFS Mover exception message of exit to its own log. Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bbf158de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bbf158de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bbf158de

Branch: refs/heads/HDFS-7240
Commit: bbf158de74234fa6d70ba8a868a445f74f9ffb15
Parents: df7d952
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Jun 6 14:57:48 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:52 2017 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java    | 1 +
 1 file changed, 1 insertion(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bbf158de/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index 7eac87d..1a2c889 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -180,6 +180,7 @@ public class Mover {
       return ExitStatus.ILLEGAL_ARGUMENTS;
     } catch (IOException e) {
       System.out.println(e + ".  Exiting ...");
+      LOG.error(e + ".  Exiting ...");
       return ExitStatus.IO_EXCEPTION;
     } finally {
       dispatcher.shutdownNow();




[21/50] [abbrv] hadoop git commit: YARN-6208. Improve the log when a FinishAppEvent is sent to the NodeManager which didn't run the application (Contributed by Akira Ajisaka via Daniel Templeton)

Posted by xy...@apache.org.
YARN-6208. Improve the log when a FinishAppEvent is sent to the NodeManager which didn't run the application
(Contributed by Akira Ajisaka via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/882891a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/882891a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/882891a6

Branch: refs/heads/HDFS-7240
Commit: 882891a643f9d43bb06801045309154d39f1d30e
Parents: 6618442
Author: Daniel Templeton <te...@apache.org>
Authored: Fri Jun 2 08:50:19 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:50 2017 -0700

----------------------------------------------------------------------
 .../nodemanager/containermanager/ContainerManagerImpl.java     | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/882891a6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
index 50268b9..cbf617b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
@@ -1475,8 +1475,10 @@ public class ContainerManagerImpl extends CompositeService implements
       for (ApplicationId appID : appsFinishedEvent.getAppsToCleanup()) {
         Application app = this.context.getApplications().get(appID);
         if (app == null) {
-          LOG.warn("couldn't find application " + appID + " while processing"
-              + " FINISH_APPS event");
+          LOG.info("couldn't find application " + appID + " while processing"
+              + " FINISH_APPS event. The ResourceManager allocated resources"
+              + " for this application to the NodeManager but no active"
+              + " containers were found to process.");
           continue;
         }
 




[22/50] [abbrv] hadoop git commit: YARN-6458. Use yarn package manager to lock down dependency versions for new web UI. Contributed by Sreenath Somarajapuram.

Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4fba3d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock
new file mode 100644
index 0000000..948feb9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock
@@ -0,0 +1,4983 @@
+# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
+# yarn lockfile v1
+
+
+abbrev@1:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.0.tgz#d0554c2256636e2f56e7c2e5ad183f859428d81f"
+
+abbrev@~1.0.7:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.0.9.tgz#91b4792588a7738c25f35dd6f63752a2f8776135"
+
+accepts@~1.3.3:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.3.tgz#c3ca7434938648c3e0d9c1e328dd68b622c284ca"
+  dependencies:
+    mime-types "~2.1.11"
+    negotiator "0.6.1"
+
+acorn@^4.0.3:
+  version "4.0.11"
+  resolved "https://registry.yarnpkg.com/acorn/-/acorn-4.0.11.tgz#edcda3bd937e7556410d42ed5860f67399c794c0"
+
+after@0.8.1:
+  version "0.8.1"
+  resolved "https://registry.yarnpkg.com/after/-/after-0.8.1.tgz#ab5d4fb883f596816d3515f8f791c0af486dd627"
+
+align-text@^0.1.1, align-text@^0.1.3:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117"
+  dependencies:
+    kind-of "^3.0.2"
+    longest "^1.0.1"
+    repeat-string "^1.5.2"
+
+alter@~0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/alter/-/alter-0.2.0.tgz#c7588808617572034aae62480af26b1d4d1cb3cd"
+  dependencies:
+    stable "~0.1.3"
+
+amd-name-resolver@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/amd-name-resolver/-/amd-name-resolver-0.0.2.tgz#7bee4e112aabeecc2e14429c4ca750c55d8e5ecd"
+
+amdefine@>=0.0.4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/amdefine/-/amdefine-1.0.1.tgz#4a5282ac164729e93619bcfd3ad151f817ce91f5"
+
+ansi-regex@^0.2.0, ansi-regex@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-0.2.1.tgz#0d8e946967a3d8143f93e24e298525fc1b2235f9"
+
+ansi-regex@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-1.1.1.tgz#41c847194646375e6a1a5d10c3ca054ef9fc980d"
+
+ansi-regex@^2.0.0:
+  version "2.1.1"
+  resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df"
+
+ansi-styles@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.1.0.tgz#eaecbf66cd706882760b2f4691582b8f55d7a7de"
+
+ansi-styles@^2.0.1, ansi-styles@^2.1.0, ansi-styles@^2.2.1:
+  version "2.2.1"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe"
+
+ansi-styles@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-1.0.0.tgz#cb102df1c56f5123eab8b67cd7b98027a0279178"
+
+ansi@^0.3.0, ansi@~0.3.0, ansi@~0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/ansi/-/ansi-0.3.1.tgz#0c42d4fb17160d5a9af1e484bace1c66922c1b21"
+
+ansicolors@~0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.2.1.tgz#be089599097b74a5c9c4a84a0cdbcdb62bd87aef"
+
+ansicolors@~0.3.2:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/ansicolors/-/ansicolors-0.3.2.tgz#665597de86a9ffe3aa9bfbe6cae5c6ea426b4979"
+
+ansistyles@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/ansistyles/-/ansistyles-0.1.3.tgz#5de60415bda071bb37127854c864f41b23254539"
+
+anymatch@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-1.3.0.tgz#a3e52fa39168c825ff57b0248126ce5a8ff95507"
+  dependencies:
+    arrify "^1.0.0"
+    micromatch "^2.1.5"
+
+archy@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40"
+
+are-we-there-yet@~1.0.0:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.0.6.tgz#a2d28c93102aa6cc96245a26cb954de06ec53f0c"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.0 || ^1.1.13"
+
+are-we-there-yet@~1.1.2:
+  version "1.1.4"
+  resolved "https://registry.yarnpkg.com/are-we-there-yet/-/are-we-there-yet-1.1.4.tgz#bb5dca382bb94f05e15194373d16fd3ba1ca110d"
+  dependencies:
+    delegates "^1.0.0"
+    readable-stream "^2.0.6"
+
+argparse@^1.0.7, argparse@~1.0.2:
+  version "1.0.9"
+  resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.9.tgz#73d83bc263f86e97f8cc4f6bae1b0e90a7d22c86"
+  dependencies:
+    sprintf-js "~1.0.2"
+
+arr-diff@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/arr-diff/-/arr-diff-2.0.0.tgz#8f3b827f955a8bd669697e4a4256ac3ceae356cf"
+  dependencies:
+    arr-flatten "^1.0.1"
+
+arr-flatten@^1.0.1:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/arr-flatten/-/arr-flatten-1.0.3.tgz#a274ed85ac08849b6bd7847c4580745dc51adfb1"
+
+array-equal@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93"
+
+array-flatten@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2"
+
+array-index@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/array-index/-/array-index-1.0.0.tgz#ec56a749ee103e4e08c790b9c353df16055b97f9"
+  dependencies:
+    debug "^2.2.0"
+    es6-symbol "^3.0.2"
+
+array-unique@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/array-unique/-/array-unique-0.2.1.tgz#a1d97ccafcbc2625cc70fadceb36a50c58b01a53"
+
+arraybuffer.slice@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/arraybuffer.slice/-/arraybuffer.slice-0.0.6.tgz#f33b2159f0532a3f3107a272c0ccfbd1ad2979ca"
+
+arrify@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
+
+asap@^2.0.0, asap@~2.0.3:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.5.tgz#522765b50c3510490e52d7dcfe085ef9ba96958f"
+
+asn1@0.1.11:
+  version "0.1.11"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.1.11.tgz#559be18376d08a4ec4dbe80877d27818639b2df7"
+
+asn1@~0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86"
+
+assert-plus@1.0.0, assert-plus@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-1.0.0.tgz#f12e0f3c5d77b0b1cdd9146942e4e96c1e4dd525"
+
+assert-plus@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.1.5.tgz#ee74009413002d84cec7219c6ac811812e723160"
+
+assert-plus@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/assert-plus/-/assert-plus-0.2.0.tgz#d74e1b87e7affc0db8aadb7021f3fe48101ab234"
+
+ast-traverse@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ast-traverse/-/ast-traverse-0.1.1.tgz#69cf2b8386f19dcda1bb1e05d68fe359d8897de6"
+
+ast-types@0.8.12:
+  version "0.8.12"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.8.12.tgz#a0d90e4351bb887716c83fd637ebf818af4adfcc"
+
+ast-types@0.9.6:
+  version "0.9.6"
+  resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.9.6.tgz#102c9e9e9005d3e7e3829bf0c4fa24ee862ee9b9"
+
+async-disk-cache@^1.0.0:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/async-disk-cache/-/async-disk-cache-1.3.1.tgz#3394010d9448b16205b01e0e2e704180805413d3"
+  dependencies:
+    debug "^2.1.3"
+    heimdalljs "^0.2.3"
+    istextorbinary "2.1.0"
+    mkdirp "^0.5.0"
+    rimraf "^2.5.3"
+    rsvp "^3.0.18"
+
+async-some@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/async-some/-/async-some-1.0.2.tgz#4d8a81620d5958791b5b98f802d3207776e95509"
+  dependencies:
+    dezalgo "^1.0.2"
+
+async@0.9.0:
+  version "0.9.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.9.0.tgz#ac3613b1da9bed1b47510bb4651b8931e47146c7"
+
+async@^1.0.0:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a"
+
+async@^2.0.1:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-2.3.0.tgz#1013d1051047dd320fe24e494d5c66ecaf6147d9"
+  dependencies:
+    lodash "^4.14.0"
+
+async@~0.2.6, async@~0.2.9:
+  version "0.2.10"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.2.10.tgz#b6bbe0b0674b9d719708ca38de8c237cb526c3d1"
+
+async@~0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/async/-/async-0.8.0.tgz#ee65ec77298c2ff1456bc4418a052d0f06435112"
+
+aws-sign2@~0.6.0:
+  version "0.6.0"
+  resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f"
+
+aws4@^1.2.1:
+  version "1.6.0"
+  resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e"
+
+babel-core@^5.0.0:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-5.8.38.tgz#1fcaee79d7e61b750b00b8e54f6dfc9d0af86558"
+  dependencies:
+    babel-plugin-constant-folding "^1.0.1"
+    babel-plugin-dead-code-elimination "^1.0.2"
+    babel-plugin-eval "^1.0.1"
+    babel-plugin-inline-environment-variables "^1.0.1"
+    babel-plugin-jscript "^1.0.4"
+    babel-plugin-member-expression-literals "^1.0.1"
+    babel-plugin-property-literals "^1.0.1"
+    babel-plugin-proto-to-assign "^1.0.3"
+    babel-plugin-react-constant-elements "^1.0.3"
+    babel-plugin-react-display-name "^1.0.3"
+    babel-plugin-remove-console "^1.0.1"
+    babel-plugin-remove-debugger "^1.0.1"
+    babel-plugin-runtime "^1.0.7"
+    babel-plugin-undeclared-variables-check "^1.0.2"
+    babel-plugin-undefined-to-void "^1.1.6"
+    babylon "^5.8.38"
+    bluebird "^2.9.33"
+    chalk "^1.0.0"
+    convert-source-map "^1.1.0"
+    core-js "^1.0.0"
+    debug "^2.1.1"
+    detect-indent "^3.0.0"
+    esutils "^2.0.0"
+    fs-readdir-recursive "^0.1.0"
+    globals "^6.4.0"
+    home-or-tmp "^1.0.0"
+    is-integer "^1.0.4"
+    js-tokens "1.0.1"
+    json5 "^0.4.0"
+    lodash "^3.10.0"
+    minimatch "^2.0.3"
+    output-file-sync "^1.1.0"
+    path-exists "^1.0.0"
+    path-is-absolute "^1.0.0"
+    private "^0.1.6"
+    regenerator "0.8.40"
+    regexpu "^1.3.0"
+    repeating "^1.1.2"
+    resolve "^1.1.6"
+    shebang-regex "^1.0.0"
+    slash "^1.0.0"
+    source-map "^0.5.0"
+    source-map-support "^0.2.10"
+    to-fast-properties "^1.0.0"
+    trim-right "^1.0.0"
+    try-resolve "^1.0.0"
+
+babel-plugin-constant-folding@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-constant-folding/-/babel-plugin-constant-folding-1.0.1.tgz#8361d364c98e449c3692bdba51eff0844290aa8e"
+
+babel-plugin-dead-code-elimination@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-dead-code-elimination/-/babel-plugin-dead-code-elimination-1.0.2.tgz#5f7c451274dcd7cccdbfbb3e0b85dd28121f0f65"
+
+babel-plugin-eval@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-eval/-/babel-plugin-eval-1.0.1.tgz#a2faed25ce6be69ade4bfec263f70169195950da"
+
+babel-plugin-htmlbars-inline-precompile@0.0.5:
+  version "0.0.5"
+  resolved "https://registry.yarnpkg.com/babel-plugin-htmlbars-inline-precompile/-/babel-plugin-htmlbars-inline-precompile-0.0.5.tgz#60fc2a3a453664cb524b21866892c212ee63ff70"
+
+babel-plugin-inline-environment-variables@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-inline-environment-variables/-/babel-plugin-inline-environment-variables-1.0.1.tgz#1f58ce91207ad6a826a8bf645fafe68ff5fe3ffe"
+
+babel-plugin-jscript@^1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-jscript/-/babel-plugin-jscript-1.0.4.tgz#8f342c38276e87a47d5fa0a8bd3d5eb6ccad8fcc"
+
+babel-plugin-member-expression-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-member-expression-literals/-/babel-plugin-member-expression-literals-1.0.1.tgz#cc5edb0faa8dc927170e74d6d1c02440021624d3"
+
+babel-plugin-property-literals@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-property-literals/-/babel-plugin-property-literals-1.0.1.tgz#0252301900192980b1c118efea48ce93aab83336"
+
+babel-plugin-proto-to-assign@^1.0.3:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/babel-plugin-proto-to-assign/-/babel-plugin-proto-to-assign-1.0.4.tgz#c49e7afd02f577bc4da05ea2df002250cf7cd123"
+  dependencies:
+    lodash "^3.9.3"
+
+babel-plugin-react-constant-elements@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-constant-elements/-/babel-plugin-react-constant-elements-1.0.3.tgz#946736e8378429cbc349dcff62f51c143b34e35a"
+
+babel-plugin-react-display-name@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/babel-plugin-react-display-name/-/babel-plugin-react-display-name-1.0.3.tgz#754fe38926e8424a4e7b15ab6ea6139dee0514fc"
+
+babel-plugin-remove-console@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-console/-/babel-plugin-remove-console-1.0.1.tgz#d8f24556c3a05005d42aaaafd27787f53ff013a7"
+
+babel-plugin-remove-debugger@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/babel-plugin-remove-debugger/-/babel-plugin-remove-debugger-1.0.1.tgz#fd2ea3cd61a428ad1f3b9c89882ff4293e8c14c7"
+
+babel-plugin-runtime@^1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/babel-plugin-runtime/-/babel-plugin-runtime-1.0.7.tgz#bf7c7d966dd56ecd5c17fa1cb253c9acb7e54aaf"
+
+babel-plugin-undeclared-variables-check@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undeclared-variables-check/-/babel-plugin-undeclared-variables-check-1.0.2.tgz#5cf1aa539d813ff64e99641290af620965f65dee"
+  dependencies:
+    leven "^1.0.2"
+
+babel-plugin-undefined-to-void@^1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/babel-plugin-undefined-to-void/-/babel-plugin-undefined-to-void-1.1.6.tgz#7f578ef8b78dfae6003385d8417a61eda06e2f81"
+
+babylon@^5.8.38:
+  version "5.8.38"
+  resolved "https://registry.yarnpkg.com/babylon/-/babylon-5.8.38.tgz#ec9b120b11bf6ccd4173a18bf217e60b79859ffd"
+
+backbone@^1.1.2:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/backbone/-/backbone-1.3.3.tgz#4cc80ea7cb1631ac474889ce40f2f8bc683b2999"
+  dependencies:
+    underscore ">=1.8.3"
+
+backo2@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/backo2/-/backo2-1.0.2.tgz#31ab1ac8b129363463e35b3ebb69f4dfcfba7947"
+
+balanced-match@^0.4.1:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838"
+
+base64-arraybuffer@0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.2.tgz#474df4a9f2da24e05df3158c3b1db3c3cd46a154"
+
+base64id@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/base64id/-/base64id-0.1.0.tgz#02ce0fdeee0cef4f40080e1e73e834f0b1bfce3f"
+
+basic-auth@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/basic-auth/-/basic-auth-1.1.0.tgz#45221ee429f7ee1e5035be3f51533f1cdfd29884"
+
+bcrypt-pbkdf@^1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.1.tgz#63bc5dcb61331b92bc05fd528953c33462a06f8d"
+  dependencies:
+    tweetnacl "^0.14.3"
+
+benchmark@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/benchmark/-/benchmark-1.0.0.tgz#2f1e2fa4c359f11122aa183082218e957e390c73"
+
+better-assert@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/better-assert/-/better-assert-1.0.2.tgz#40866b9e1b9e0b55b481894311e68faffaebc522"
+  dependencies:
+    callsite "1.0.0"
+
+"binaryextensions@1 || 2":
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/binaryextensions/-/binaryextensions-2.0.0.tgz#e597d1a7a6a3558a2d1c7241a16c99965e6aa40f"
+
+bl@~1.0.0:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.0.3.tgz#fc5421a28fd4226036c3b3891a66a25bc64d226e"
+  dependencies:
+    readable-stream "~2.0.5"
+
+bl@~1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/bl/-/bl-1.1.2.tgz#fdca871a99713aa00d19e3bbba41c44787a65398"
+  dependencies:
+    readable-stream "~2.0.5"
+
+blank-object@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/blank-object/-/blank-object-1.0.2.tgz#f990793fbe9a8c8dd013fb3219420bec81d5f4b9"
+
+blob@0.0.4:
+  version "0.0.4"
+  resolved "https://registry.yarnpkg.com/blob/-/blob-0.0.4.tgz#bcf13052ca54463f30f9fc7e95b9a47630a94921"
+
+block-stream@*, block-stream@0.0.8:
+  version "0.0.8"
+  resolved "https://registry.yarnpkg.com/block-stream/-/block-stream-0.0.8.tgz#0688f46da2bbf9cff0c4f68225a0cb95cbe8a46b"
+  dependencies:
+    inherits "~2.0.0"
+
+bluebird@^2.9.26, bluebird@^2.9.33:
+  version "2.11.0"
+  resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-2.11.0.tgz#534b9033c022c9579c56ba3b3e5a5caafbb650e1"
+
+body-parser@^1.2.0:
+  version "1.17.1"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.17.1.tgz#75b3bc98ddd6e7e0d8ffe750dfaca5c66993fa47"
+  dependencies:
+    bytes "2.4.0"
+    content-type "~1.0.2"
+    debug "2.6.1"
+    depd "~1.1.0"
+    http-errors "~1.6.1"
+    iconv-lite "0.4.15"
+    on-finished "~2.3.0"
+    qs "6.4.0"
+    raw-body "~2.2.0"
+    type-is "~1.6.14"
+
+body-parser@~1.14.0:
+  version "1.14.2"
+  resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.14.2.tgz#1015cb1fe2c443858259581db53332f8d0cf50f9"
+  dependencies:
+    bytes "2.2.0"
+    content-type "~1.0.1"
+    debug "~2.2.0"
+    depd "~1.1.0"
+    http-errors "~1.3.1"
+    iconv-lite "0.4.13"
+    on-finished "~2.3.0"
+    qs "5.2.0"
+    raw-body "~2.1.5"
+    type-is "~1.6.10"
+
+boom@2.x.x:
+  version "2.10.1"
+  resolved "https://registry.yarnpkg.com/boom/-/boom-2.10.1.tgz#39c8918ceff5799f83f9492a848f625add0c766f"
+  dependencies:
+    hoek "2.x.x"
+
+bower-config@0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/bower-config/-/bower-config-0.6.1.tgz#7093155688bef44079bf4cb32d189312c87ded60"
+  dependencies:
+    graceful-fs "~2.0.0"
+    mout "~0.9.0"
+    optimist "~0.6.0"
+    osenv "0.0.3"
+
+bower-endpoint-parser@0.2.2:
+  version "0.2.2"
+  resolved "https://registry.yarnpkg.com/bower-endpoint-parser/-/bower-endpoint-parser-0.2.2.tgz#00b565adbfab6f2d35addde977e97962acbcb3f6"
+
+bower-shrinkwrap-resolver-ext@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/bower-shrinkwrap-resolver-ext/-/bower-shrinkwrap-resolver-ext-0.1.0.tgz#963c1a87107501b0cb7823d8cbc84d5167c7fa23"
+  dependencies:
+    debuglog "^1.0.1"
+    json-stable-stringify "^1.0.1"
+    object-assign "^4.0.1"
+    semver "^5.3.0"
+    string.prototype.endswith "^0.2.0"
+
+bower@1.7.7, bower@^1.3.12:
+  version "1.7.7"
+  resolved "https://registry.yarnpkg.com/bower/-/bower-1.7.7.tgz#2fd7ff3ebdcba5a8ffcd84c397c8fdfe9f825f92"
+
+brace-expansion@^1.0.0:
+  version "1.1.7"
+  resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.7.tgz#3effc3c50e000531fb720eaff80f0ae8ef23cf59"
+  dependencies:
+    balanced-match "^0.4.1"
+    concat-map "0.0.1"
+
+braces@^1.8.2:
+  version "1.8.5"
+  resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7"
+  dependencies:
+    expand-range "^1.8.1"
+    preserve "^0.2.0"
+    repeat-element "^1.1.2"
+
+breakable@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/breakable/-/breakable-1.0.0.tgz#784a797915a38ead27bad456b5572cb4bbaa78c1"
+
+broccoli-asset-rev@2.4.2:
+  version "2.4.2"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rev/-/broccoli-asset-rev-2.4.2.tgz#b84953affbda78d17dffb41349398f50fc26125c"
+  dependencies:
+    broccoli-asset-rewrite "^1.0.9"
+    broccoli-filter "^1.2.2"
+    json-stable-stringify "^1.0.0"
+    matcher-collection "^1.0.1"
+    rsvp "~3.0.6"
+
+broccoli-asset-rewrite@^1.0.9:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-asset-rewrite/-/broccoli-asset-rewrite-1.1.0.tgz#77a5da56157aa318c59113245e8bafb4617f8830"
+  dependencies:
+    broccoli-filter "^1.2.3"
+
+broccoli-babel-transpiler@^5.4.5, broccoli-babel-transpiler@^5.5.0:
+  version "5.6.2"
+  resolved "https://registry.yarnpkg.com/broccoli-babel-transpiler/-/broccoli-babel-transpiler-5.6.2.tgz#958c72e43575b2f0a862a5096dba1ce1ebc7d74d"
+  dependencies:
+    babel-core "^5.0.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.0.1"
+    clone "^0.2.0"
+    hash-for-dep "^1.0.2"
+    json-stable-stringify "^1.0.0"
+
+broccoli-caching-writer@^2.0.0, broccoli-caching-writer@^2.0.4, broccoli-caching-writer@^2.2.0, broccoli-caching-writer@^2.3.1:
+  version "2.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-caching-writer/-/broccoli-caching-writer-2.3.1.tgz#b93cf58f9264f003075868db05774f4e7f25bd07"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-plugin "1.1.0"
+    debug "^2.1.1"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+    walk-sync "^0.2.5"
+
+broccoli-clean-css@0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-clean-css/-/broccoli-clean-css-0.2.0.tgz#15f1c265a6986585a972bfb070bf52e9c054c861"
+  dependencies:
+    broccoli-filter "^0.1.6"
+    clean-css "^2.2.1"
+
+broccoli-config-loader@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/broccoli-config-loader/-/broccoli-config-loader-1.0.0.tgz#c3cf5ecfaffc04338c6f1d5d38dc36baeaa131ba"
+  dependencies:
+    broccoli-caching-writer "^2.0.4"
+
+broccoli-config-replace@^1.1.0:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/broccoli-config-replace/-/broccoli-config-replace-1.1.2.tgz#6ea879d92a5bad634d11329b51fc5f4aafda9c00"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.2.0"
+    debug "^2.2.0"
+    fs-extra "^0.24.0"
+
+broccoli-filter@^0.1.6:
+  version "0.1.14"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-0.1.14.tgz#23cae3891ff9ebb7b4d7db00c6dcf03535daf7ad"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.6"
+    broccoli-writer "^0.1.1"
+    mkdirp "^0.3.5"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rsvp "^3.0.16"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.1.3"
+
+broccoli-filter@^1.2.2, broccoli-filter@^1.2.3:
+  version "1.2.4"
+  resolved "https://registry.yarnpkg.com/broccoli-filter/-/broccoli-filter-1.2.4.tgz#409afb94b9a3a6da9fac8134e91e205f40cc7330"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    broccoli-plugin "^1.0.0"
+    copy-dereference "^1.0.0"
+    debug "^2.2.0"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-funnel@1.0.1, broccoli-funnel@^1.0.0, broccoli-funnel@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/broccoli-funnel/-/broccoli-funnel-1.0.1.tgz#12cb76e342343592a3b18ae7840c0db3bd16d8af"
+  dependencies:
+    array-equal "^1.0.0"
+    blank-object "^1.0.1"
+    broccoli-plugin "^1.0.0"
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.0"
+    fs-tree-diff "^0.3.0"
+    minimatch "^2.0.1"
+    mkdirp "^0.5.0"
+    path-posix "^1.0.0"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+    walk-sync "^0.2.6"
+
+broccoli-jshint@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/broccoli-jshint/-/broccoli-jshint-1.2.0.tgz#8cd565d11a04bfd32cb8f85a0f7ede1e5be7a6a2"
+  dependencies:
+    broccoli-persistent-filter "^1.2.0"
+    chalk "~0.4.0"
+    findup-sync "^0.3.0"
+    jshint "^2.7.0"
+    json-stable-stringify "^1.0.0"
+    mkdirp "~0.4.0"
+
+broccoli-kitchen-sink-helpers@^0.2.5, broccoli-kitchen-sink-helpers@^0.2.6, broccoli-kitchen-sink-helpers@^0.2.7:
+  version "0.2.9"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.2.9.tgz#a5e0986ed8d76fb5984b68c3f0450d3a96e36ecc"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-kitchen-sink-helpers@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/broccoli-kitchen-sink-helpers/-/broccoli-kitchen-sink-helpers-0.3.1.tgz#77c7c18194b9664163ec4fcee2793444926e0c06"
+  dependencies:
+    glob "^5.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-less-single@^0.6.4:
+  version "0.6.4"
+  resolved "https://registry.yarnpkg.com/broccoli-less-single/-/broccoli-less-single-0.6.4.tgz#200316f4146b8cf7e6ab97fc661b8085cc89bdb9"
+  dependencies:
+    broccoli-caching-writer "^2.3.1"
+    include-path-searcher "^0.1.0"
+    less "^2.5.0"
+    lodash.merge "^3.3.2"
+    mkdirp "^0.5.0"
+
+broccoli-merge-trees@1.1.1, broccoli-merge-trees@^1.0.0, broccoli-merge-trees@^1.1.0, broccoli-merge-trees@^1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-merge-trees/-/broccoli-merge-trees-1.1.1.tgz#1e283d18c686da922bb91a80d7aac0d161388e21"
+  dependencies:
+    broccoli-plugin "^1.0.0"
+    can-symlink "^1.0.0"
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.2"
+    fs-tree-diff "^0.4.3"
+    rimraf "^2.4.3"
+    symlink-or-copy "^1.0.0"
+
+broccoli-persistent-filter@^1.0.1, broccoli-persistent-filter@^1.0.3, broccoli-persistent-filter@^1.1.6, broccoli-persistent-filter@^1.2.0:
+  version "1.2.13"
+  resolved "https://registry.yarnpkg.com/broccoli-persistent-filter/-/broccoli-persistent-filter-1.2.13.tgz#61368669e2b8f35238fdd38a2a896597e4a1c821"
+  dependencies:
+    async-disk-cache "^1.0.0"
+    blank-object "^1.0.1"
+    broccoli-plugin "^1.0.0"
+    fs-tree-diff "^0.5.2"
+    hash-for-dep "^1.0.2"
+    heimdalljs "^0.2.1"
+    heimdalljs-logger "^0.1.7"
+    md5-hex "^1.0.2"
+    mkdirp "^0.5.1"
+    promise-map-series "^0.2.1"
+    rsvp "^3.0.18"
+    symlink-or-copy "^1.0.1"
+    walk-sync "^0.3.1"
+
+broccoli-plugin@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.1.0.tgz#73e2cfa05f8ea1e3fc1420c40c3d9e7dc724bf02"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.0.1"
+
+broccoli-plugin@^1.0.0, broccoli-plugin@^1.2.0, broccoli-plugin@^1.2.1, broccoli-plugin@^1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/broccoli-plugin/-/broccoli-plugin-1.3.0.tgz#bee704a8e42da08cb58e513aaa436efb7f0ef1ee"
+  dependencies:
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.3"
+    rimraf "^2.3.4"
+    symlink-or-copy "^1.1.8"
+
+broccoli-sane-watcher@^1.1.1:
+  version "1.1.5"
+  resolved "https://registry.yarnpkg.com/broccoli-sane-watcher/-/broccoli-sane-watcher-1.1.5.tgz#f2b0af9cf0afb74c7a49cd88eb11c6869ee8c0c0"
+  dependencies:
+    broccoli-slow-trees "^1.1.0"
+    debug "^2.1.0"
+    rsvp "^3.0.18"
+    sane "^1.1.1"
+
+broccoli-slow-trees@^1.0.0, broccoli-slow-trees@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-slow-trees/-/broccoli-slow-trees-1.1.0.tgz#426c5724e008107e4573f73e8a9ca702916b78f7"
+
+broccoli-source@^1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/broccoli-source/-/broccoli-source-1.1.0.tgz#54f0e82c8b73f46580cbbc4f578f0b32fca8f809"
+
+broccoli-sourcemap-concat@^1.1.6:
+  version "1.1.6"
+  resolved "https://registry.yarnpkg.com/broccoli-sourcemap-concat/-/broccoli-sourcemap-concat-1.1.6.tgz#7caa0e28e2553c58897c369a673da05600541872"
+  dependencies:
+    broccoli-caching-writer "^2.0.0"
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    fast-sourcemap-concat " ^0.2.4"
+    lodash-node "^2.4.1"
+    lodash.uniq "^3.2.2"
+    mkdirp "^0.5.1"
+
+broccoli-sourcemap-concat@^2.0.2:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/broccoli-sourcemap-concat/-/broccoli-sourcemap-concat-2.0.2.tgz#64dbea4f9da4737c3fc5502efa20bb6322cd06a2"
+  dependencies:
+    broccoli-caching-writer "^2.0.0"
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    fast-sourcemap-concat " ^0.2.4"
+    lodash-node "^2.4.1"
+    lodash.uniq "^3.2.2"
+    minimatch "^2.0.10"
+    mkdirp "^0.5.1"
+
+broccoli-sri-hash@^1.2.2:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/broccoli-sri-hash/-/broccoli-sri-hash-1.2.2.tgz#64e54401ac02ea49ebf2701169ae214c07588493"
+  dependencies:
+    broccoli-caching-writer "^2.2.0"
+    mkdirp "^0.5.1"
+    rsvp "^3.1.0"
+    sri-toolbox "^0.2.0"
+    symlink-or-copy "^1.0.1"
+
+broccoli-stew@^1.0.0:
+  version "1.4.2"
+  resolved "https://registry.yarnpkg.com/broccoli-stew/-/broccoli-stew-1.4.2.tgz#9ec4062fd7162c6026561a2fbf64558363aff8d6"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-persistent-filter "^1.1.6"
+    broccoli-plugin "^1.3.0"
+    chalk "^1.1.3"
+    debug "^2.4.0"
+    ensure-posix-path "^1.0.1"
+    fs-extra "^2.0.0"
+    minimatch "^3.0.2"
+    resolve "^1.1.6"
+    rsvp "^3.0.16"
+    sanitize-filename "^1.5.3"
+    symlink-or-copy "^1.1.8"
+    walk-sync "^0.3.0"
+
+broccoli-uglify-sourcemap@^1.0.0:
+  version "1.5.2"
+  resolved "https://registry.yarnpkg.com/broccoli-uglify-sourcemap/-/broccoli-uglify-sourcemap-1.5.2.tgz#04f84ab0db539031fa868ccfa563c9932d50cedb"
+  dependencies:
+    broccoli-plugin "^1.2.1"
+    debug "^2.2.0"
+    lodash.merge "^4.5.1"
+    matcher-collection "^1.0.0"
+    mkdirp "^0.5.0"
+    source-map-url "^0.3.0"
+    symlink-or-copy "^1.0.1"
+    uglify-js "^2.7.0"
+    walk-sync "^0.1.3"
+
+broccoli-unwatched-tree@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-unwatched-tree/-/broccoli-unwatched-tree-0.1.1.tgz#4312fde04bdafe67a05a967d72cc50b184a9f514"
+
+broccoli-viz@^2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/broccoli-viz/-/broccoli-viz-2.0.1.tgz#3f3ed2fb83e368aa5306fae460801dea552e40db"
+
+broccoli-writer@^0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/broccoli-writer/-/broccoli-writer-0.1.1.tgz#d4d71aa8f2afbc67a3866b91a2da79084b96ab2d"
+  dependencies:
+    quick-temp "^0.1.0"
+    rsvp "^3.0.6"
+
+broccoli@0.16.8:
+  version "0.16.8"
+  resolved "https://registry.yarnpkg.com/broccoli/-/broccoli-0.16.8.tgz#2a00f6b82a8106ec9cfb380a8ada4ad490b836d5"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.2.5"
+    broccoli-slow-trees "^1.0.0"
+    commander "^2.5.0"
+    connect "^3.3.3"
+    copy-dereference "^1.0.0"
+    findup-sync "^0.2.1"
+    handlebars "^3.0.1"
+    mime "^1.2.11"
+    promise-map-series "^0.2.1"
+    quick-temp "^0.1.2"
+    rimraf "^2.2.8"
+    rsvp "^3.0.17"
+
+bser@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/bser/-/bser-1.0.2.tgz#381116970b2a6deea5646dd15dd7278444b56169"
+  dependencies:
+    node-int64 "^0.4.0"
+
+buffer-shims@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/buffer-shims/-/buffer-shims-1.0.0.tgz#9978ce317388c649ad8793028c3477ef044a8b51"
+
+builtin-modules@^1.0.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/builtin-modules/-/builtin-modules-1.1.1.tgz#270f076c5a72c02f5b65a47df94c5fe3a278892f"
+
+builtins@0.0.7:
+  version "0.0.7"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-0.0.7.tgz#355219cd6cf18dbe7c01cc7fd2dce765cfdc549a"
+
+builtins@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/builtins/-/builtins-1.0.3.tgz#cb94faeb61c8696451db36534e1422f94f0aee88"
+
+bytes@2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.2.0.tgz#fd35464a403f6f9117c2de3609ecff9cae000588"
+
+bytes@2.3.0:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.3.0.tgz#d5b680a165b6201739acb611542aabc2d8ceb070"
+
+bytes@2.4.0:
+  version "2.4.0"
+  resolved "https://registry.yarnpkg.com/bytes/-/bytes-2.4.0.tgz#7d97196f9d5baf7f6935e25985549edd2a6c2339"
+
+callsite@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/callsite/-/callsite-1.0.0.tgz#280398e5d664bd74038b6f0905153e6e8af1bc20"
+
+camelcase@^1.0.2, camelcase@^1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-1.2.1.tgz#9bb5304d2e0b56698b2c758b08a3eaa9daa58a39"
+
+can-symlink@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/can-symlink/-/can-symlink-1.0.0.tgz#97b607d8a84bb6c6e228b902d864ecb594b9d219"
+  dependencies:
+    tmp "0.0.28"
+
+cardinal@^0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/cardinal/-/cardinal-0.5.0.tgz#00d5f661dbd4aabfdf7d41ce48a5a59bca35a291"
+  dependencies:
+    ansicolors "~0.2.1"
+    redeyed "~0.5.0"
+
+caseless@~0.11.0:
+  version "0.11.0"
+  resolved "https://registry.yarnpkg.com/caseless/-/caseless-0.11.0.tgz#715b96ea9841593cc33067923f5ec60ebda4f7d7"
+
+center-align@^0.1.1:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/center-align/-/center-align-0.1.3.tgz#aa0d32629b6ee972200411cbd4461c907bc2b7ad"
+  dependencies:
+    align-text "^0.1.3"
+    lazy-cache "^1.0.3"
+
+chalk@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.0.tgz#09b453cec497a75520e4a60ae48214a8700e0921"
+  dependencies:
+    ansi-styles "^2.1.0"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chalk@^0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.5.1.tgz#663b3a648b68b55d04690d49167aa837858f2174"
+  dependencies:
+    ansi-styles "^1.1.0"
+    escape-string-regexp "^1.0.0"
+    has-ansi "^0.1.0"
+    strip-ansi "^0.3.0"
+    supports-color "^0.2.0"
+
+chalk@^1.0.0, chalk@^1.1.1, chalk@^1.1.3:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98"
+  dependencies:
+    ansi-styles "^2.2.1"
+    escape-string-regexp "^1.0.2"
+    has-ansi "^2.0.0"
+    strip-ansi "^3.0.0"
+    supports-color "^2.0.0"
+
+chalk@~0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/chalk/-/chalk-0.4.0.tgz#5199a3ddcd0c1efe23bc08c1b027b06176e0c64f"
+  dependencies:
+    ansi-styles "~1.0.0"
+    has-color "~0.1.0"
+    strip-ansi "~0.1.0"
+
+char-spinner@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/char-spinner/-/char-spinner-1.0.1.tgz#e6ea67bd247e107112983b7ab0479ed362800081"
+
+charm@^1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/charm/-/charm-1.0.2.tgz#8add367153a6d9a581331052c4090991da995e35"
+  dependencies:
+    inherits "^2.0.1"
+
+chmodr@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/chmodr/-/chmodr-1.0.2.tgz#04662b932d0f02ec66deaa2b0ea42811968e3eb9"
+
+chownr@^1.0.1, chownr@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.0.1.tgz#e2a75042a9551908bebd25b8523d5f9769d79181"
+
+clean-base-url@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/clean-base-url/-/clean-base-url-1.0.0.tgz#c901cf0a20b972435b0eccd52d056824a4351b7b"
+
+clean-css@^2.2.1:
+  version "2.2.23"
+  resolved "https://registry.yarnpkg.com/clean-css/-/clean-css-2.2.23.tgz#0590b5478b516c4903edc2d89bd3fdbdd286328c"
+  dependencies:
+    commander "2.2.x"
+
+cli-color@~0.3.2:
+  version "0.3.3"
+  resolved "https://registry.yarnpkg.com/cli-color/-/cli-color-0.3.3.tgz#12d5bdd158ff8a0b0db401198913c03df069f6f5"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    memoizee "~0.3.8"
+    timers-ext "0.1"
+
+cli-table@^0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cli-table/-/cli-table-0.3.1.tgz#f53b05266a8b1a0b934b3d0821e6e2dc5914ae23"
+  dependencies:
+    colors "1.0.3"
+
+cli@~1.0.0:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/cli/-/cli-1.0.1.tgz#22817534f24bfa4950c34d532d48ecbc621b8c14"
+  dependencies:
+    exit "0.1.2"
+    glob "^7.1.1"
+
+cliui@^2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/cliui/-/cliui-2.1.0.tgz#4b475760ff80264c762c3a1719032e91c7fea0d1"
+  dependencies:
+    center-align "^0.1.1"
+    right-align "^0.1.1"
+    wordwrap "0.0.2"
+
+clone@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-0.2.0.tgz#c6126a90ad4f72dbf5acdb243cc37724fe93fc1f"
+
+clone@^1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.2.tgz#260b7a99ebb1edfe247538175f783243cb19d149"
+
+cmd-shim@~2.0.1:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/cmd-shim/-/cmd-shim-2.0.2.tgz#6fcbda99483a8fd15d7d30a196ca69d688a2efdb"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "~0.5.0"
+
+colors@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-1.0.3.tgz#0433f44d809680fdeb60ed260f1b0c262e82a40b"
+
+colors@~0.6.0-1:
+  version "0.6.2"
+  resolved "https://registry.yarnpkg.com/colors/-/colors-0.6.2.tgz#2423fe6678ac0c5dae8852e5d0e5be08c997abcc"
+
+columnify@~1.5.2:
+  version "1.5.4"
+  resolved "https://registry.yarnpkg.com/columnify/-/columnify-1.5.4.tgz#4737ddf1c7b69a8a7c340570782e947eec8e78bb"
+  dependencies:
+    strip-ansi "^3.0.0"
+    wcwidth "^1.0.0"
+
+combined-stream@^1.0.5, combined-stream@~1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.5.tgz#938370a57b4a51dea2c77c15d5c5fdf895164009"
+  dependencies:
+    delayed-stream "~1.0.0"
+
+commander@2.2.x:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.2.0.tgz#175ad4b9317f3ff615f201c1e57224f55a3e91df"
+
+commander@^2.5.0, commander@^2.6.0, commander@^2.9.0:
+  version "2.9.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.9.0.tgz#9c99094176e12240cb22d6c5146098400fe0f7d4"
+  dependencies:
+    graceful-readlink ">= 1.0.0"
+
+commander@~2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/commander/-/commander-2.1.0.tgz#d121bbae860d9992a3d517ba96f56588e47c6781"
+
+commoner@~0.10.3:
+  version "0.10.8"
+  resolved "https://registry.yarnpkg.com/commoner/-/commoner-0.10.8.tgz#34fc3672cd24393e8bb47e70caa0293811f4f2c5"
+  dependencies:
+    commander "^2.5.0"
+    detective "^4.3.1"
+    glob "^5.0.15"
+    graceful-fs "^4.1.2"
+    iconv-lite "^0.4.5"
+    mkdirp "^0.5.0"
+    private "^0.1.6"
+    q "^1.1.2"
+    recast "^0.11.17"
+
+component-bind@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/component-bind/-/component-bind-1.0.0.tgz#00c608ab7dcd93897c0009651b1d3a8e1e73bbd1"
+
+component-emitter@1.1.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/component-emitter/-/component-emitter-1.1.2.tgz#296594f2753daa63996d2af08d15a95116c9aec3"
+
+component-inherit@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/component-inherit/-/component-inherit-0.0.3.tgz#645fc4adf58b72b649d5cae65135619db26ff143"
+
+compressible@~2.0.8:
+  version "2.0.10"
+  resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.10.tgz#feda1c7f7617912732b29bf8cf26252a20b9eecd"
+  dependencies:
+    mime-db ">= 1.27.0 < 2"
+
+compression@^1.4.4:
+  version "1.6.2"
+  resolved "https://registry.yarnpkg.com/compression/-/compression-1.6.2.tgz#cceb121ecc9d09c52d7ad0c3350ea93ddd402bc3"
+  dependencies:
+    accepts "~1.3.3"
+    bytes "2.3.0"
+    compressible "~2.0.8"
+    debug "~2.2.0"
+    on-headers "~1.0.1"
+    vary "~1.1.0"
+
+concat-map@0.0.1:
+  version "0.0.1"
+  resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b"
+
+concat-stream@1.5.0, concat-stream@^1.4.6:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/concat-stream/-/concat-stream-1.5.0.tgz#53f7d43c51c5e43f81c8fdd03321c631be68d611"
+  dependencies:
+    inherits "~2.0.1"
+    readable-stream "~2.0.0"
+    typedarray "~0.0.5"
+
+config-chain@~1.1.9:
+  version "1.1.11"
+  resolved "https://registry.yarnpkg.com/config-chain/-/config-chain-1.1.11.tgz#aba09747dfbe4c3e70e766a6e41586e1859fc6f2"
+  dependencies:
+    ini "^1.3.4"
+    proto-list "~1.2.1"
+
+configstore@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/configstore/-/configstore-1.2.1.tgz#00ad402c0dba027bd8b4b7228dc7d42cefe3c81a"
+  dependencies:
+    graceful-fs "^4.1.2"
+    mkdirp "^0.5.0"
+    object-assign "^3.0.0"
+    os-tmpdir "^1.0.0"
+    osenv "^0.1.0"
+    uuid "^2.0.1"
+    write-file-atomic "^1.1.2"
+    xdg-basedir "^2.0.0"
+
+connect@^3.3.3:
+  version "3.6.1"
+  resolved "https://registry.yarnpkg.com/connect/-/connect-3.6.1.tgz#b7760693a74f0454face1d9378edb3f885b43227"
+  dependencies:
+    debug "2.6.3"
+    finalhandler "1.0.1"
+    parseurl "~1.3.1"
+    utils-merge "1.0.0"
+
+console-browserify@1.1.x:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/console-browserify/-/console-browserify-1.1.0.tgz#f0241c45730a9fc6323b206dbf38edc741d0bb10"
+  dependencies:
+    date-now "^0.1.4"
+
+consolidate@^0.13.1:
+  version "0.13.1"
+  resolved "https://registry.yarnpkg.com/consolidate/-/consolidate-0.13.1.tgz#9e9503568eb4850889da6ed87a852c8dd2d13f64"
+  dependencies:
+    bluebird "^2.9.26"
+
+content-disposition@0.5.2:
+  version "0.5.2"
+  resolved "https://registry.yarnpkg.com/content-disposition/-/content-disposition-0.5.2.tgz#0cf68bb9ddf5f2be7961c3a85178cb85dba78cb4"
+
+content-type@~1.0.1, content-type@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.2.tgz#b7d113aee7a8dd27bd21133c4dc2529df1721eed"
+
+convert-source-map@^1.1.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-1.5.0.tgz#9acd70851c6d5dfdd93d9282e5edf94a03ff46b5"
+
+cookie-signature@1.0.6:
+  version "1.0.6"
+  resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
+
+cookie@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.3.1.tgz#e7e0a1f9ef43b4c8ba925c5c5a96e806d16873bb"
+
+copy-dereference@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/copy-dereference/-/copy-dereference-1.0.0.tgz#6b131865420fd81b413ba994b44d3655311152b6"
+
+core-js@^1.0.0:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/core-js/-/core-js-1.2.7.tgz#652294c14651db28fa93bd2d5ff2983a4f08c636"
+
+core-object@0.0.2:
+  version "0.0.2"
+  resolved "https://registry.yarnpkg.com/core-object/-/core-object-0.0.2.tgz#c9a6fee8f712e281fa9f6fba10243409ea2debc3"
+  dependencies:
+    lodash-node "^2.4.1"
+
+core-util-is@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.2.tgz#b5fd54220aa2bc5ab57aab7140c940754503c1a7"
+
+cpr@0.4.2:
+  version "0.4.2"
+  resolved "https://registry.yarnpkg.com/cpr/-/cpr-0.4.2.tgz#cc5083e6d2fa31f52bbfeefae508a445fe6180f2"
+  dependencies:
+    graceful-fs "~4.1.2"
+    mkdirp "~0.5.0"
+    rimraf "~2.4.3"
+
+cross-spawn-async@^2.0.0:
+  version "2.2.5"
+  resolved "https://registry.yarnpkg.com/cross-spawn-async/-/cross-spawn-async-2.2.5.tgz#845ff0c0834a3ded9d160daca6d390906bb288cc"
+  dependencies:
+    lru-cache "^4.0.0"
+    which "^1.2.8"
+
+cryptiles@2.x.x:
+  version "2.0.5"
+  resolved "https://registry.yarnpkg.com/cryptiles/-/cryptiles-2.0.5.tgz#3bdfecdc608147c1c67202fa291e7dca59eaa3b8"
+  dependencies:
+    boom "2.x.x"
+
+ctype@0.5.3:
+  version "0.5.3"
+  resolved "https://registry.yarnpkg.com/ctype/-/ctype-0.5.3.tgz#82c18c2461f74114ef16c135224ad0b9144ca12f"
+
+d@1:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/d/-/d-1.0.0.tgz#754bb5bfe55451da69a58b94d45f4c5b0462d58f"
+  dependencies:
+    es5-ext "^0.10.9"
+
+d@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/d/-/d-0.1.1.tgz#da184c535d18d8ee7ba2aa229b914009fae11309"
+  dependencies:
+    es5-ext "~0.10.2"
+
+dashdash@^1.12.0:
+  version "1.14.1"
+  resolved "https://registry.yarnpkg.com/dashdash/-/dashdash-1.14.1.tgz#853cfa0f7cbe2fed5de20326b8dd581035f6e2f0"
+  dependencies:
+    assert-plus "^1.0.0"
+
+date-now@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/date-now/-/date-now-0.1.4.tgz#eaf439fd4d4848ad74e5cc7dbef200672b9e345b"
+
+debug@0.7.4:
+  version "0.7.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-0.7.4.tgz#06e1ea8082c2cb14e39806e22e2f6f757f92af39"
+
+debug@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-1.0.2.tgz#3849591c10cce648476c3c7c2e2e3416db5963c4"
+  dependencies:
+    ms "0.6.2"
+
+debug@1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-1.0.3.tgz#fc8c6b2d6002804b4081c0208e0f6460ba1fa3e4"
+  dependencies:
+    ms "0.6.2"
+
+debug@1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-1.0.4.tgz#5b9c256bd54b6ec02283176fa8a0ede6d154cbf8"
+  dependencies:
+    ms "0.6.2"
+
+debug@2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.1.0.tgz#33ab915659d8c2cc8a41443d94d6ebd37697ed21"
+  dependencies:
+    ms "0.6.2"
+
+debug@2.6.1:
+  version "2.6.1"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.1.tgz#79855090ba2c4e3115cc7d8769491d58f0491351"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.3:
+  version "2.6.3"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.3.tgz#0f7eb8c30965ec08c72accfa0130c8b79984141d"
+  dependencies:
+    ms "0.7.2"
+
+debug@2.6.4, debug@^2.1.0, debug@^2.1.1, debug@^2.1.3, debug@^2.2.0, debug@^2.4.0:
+  version "2.6.4"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.4.tgz#7586a9b3c39741c0282ae33445c4e8ac74734fe0"
+  dependencies:
+    ms "0.7.3"
+
+debug@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/debug/-/debug-2.2.0.tgz#f87057e995b1a1f6ae6a4960664137bc56f039da"
+  dependencies:
+    ms "0.7.1"
+
+debuglog@^1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/debuglog/-/debuglog-1.0.1.tgz#aa24ffb9ac3df9a2351837cfb2d279360cd78492"
+
+decamelize@^1.0.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
+
+defaults@^1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d"
+  dependencies:
+    clone "^1.0.2"
+
+defined@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/defined/-/defined-1.0.0.tgz#c98d9bcef75674188e110969151199e39b1fa693"
+
+defs@~1.1.0:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/defs/-/defs-1.1.1.tgz#b22609f2c7a11ba7a3db116805c139b1caffa9d2"
+  dependencies:
+    alter "~0.2.0"
+    ast-traverse "~0.1.1"
+    breakable "~1.0.0"
+    esprima-fb "~15001.1001.0-dev-harmony-fb"
+    simple-fmt "~0.1.0"
+    simple-is "~0.2.0"
+    stringmap "~0.2.2"
+    stringset "~0.2.1"
+    tryor "~0.1.2"
+    yargs "~3.27.0"
+
+delayed-stream@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
+
+delegates@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/delegates/-/delegates-1.0.0.tgz#84c6e159b81904fdca59a0ef44cd870d31250f9a"
+
+depd@1.1.0, depd@~1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/depd/-/depd-1.1.0.tgz#e1bd82c6aab6ced965b97b88b17ed3e528ca18c3"
+
+destroy@~1.0.4:
+  version "1.0.4"
+  resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.0.4.tgz#978857442c44749e4206613e37946205826abd80"
+
+detect-indent@^3.0.0:
+  version "3.0.1"
+  resolved "https://registry.yarnpkg.com/detect-indent/-/detect-indent-3.0.1.tgz#9dc5e5ddbceef8325764b9451b02bc6d54084f75"
+  dependencies:
+    get-stdin "^4.0.1"
+    minimist "^1.1.0"
+    repeating "^1.1.0"
+
+detective@^4.3.1:
+  version "4.5.0"
+  resolved "https://registry.yarnpkg.com/detective/-/detective-4.5.0.tgz#6e5a8c6b26e6c7a254b1c6b6d7490d98ec91edd1"
+  dependencies:
+    acorn "^4.0.3"
+    defined "^1.0.0"
+
+dezalgo@^1.0.0, dezalgo@^1.0.1, dezalgo@^1.0.2, dezalgo@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/dezalgo/-/dezalgo-1.0.3.tgz#7f742de066fc748bc8db820569dddce49bf0d456"
+  dependencies:
+    asap "^2.0.0"
+    wrappy "1"
+
+did_it_work@0.0.6:
+  version "0.0.6"
+  resolved "https://registry.yarnpkg.com/did_it_work/-/did_it_work-0.0.6.tgz#5180cb9e16ebf9a8753a0cc6b4af9ccdff71ec05"
+
+diff@^1.3.1:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/diff/-/diff-1.4.0.tgz#7f28d2eb9ee7b15a97efd89ce63dcfdaa3ccbabf"
+
+dom-serializer@0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-0.1.0.tgz#073c697546ce0780ce23be4a28e293e40bc30c82"
+  dependencies:
+    domelementtype "~1.1.1"
+    entities "~1.1.1"
+
+domelementtype@1:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.3.0.tgz#b17aed82e8ab59e52dd9c19b1756e0fc187204c2"
+
+domelementtype@~1.1.1:
+  version "1.1.3"
+  resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-1.1.3.tgz#bd28773e2642881aec51544924299c5cd822185b"
+
+domhandler@2.3:
+  version "2.3.0"
+  resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-2.3.0.tgz#2de59a0822d5027fabff6f032c2b25a2a8abe738"
+  dependencies:
+    domelementtype "1"
+
+domutils@1.5:
+  version "1.5.1"
+  resolved "https://registry.yarnpkg.com/domutils/-/domutils-1.5.1.tgz#dcd8488a26f563d61079e48c9f7b7e32373682cf"
+  dependencies:
+    dom-serializer "0"
+    domelementtype "1"
+
+ecc-jsbn@~0.1.1:
+  version "0.1.1"
+  resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505"
+  dependencies:
+    jsbn "~0.1.0"
+
+editions@^1.1.1:
+  version "1.3.3"
+  resolved "https://registry.yarnpkg.com/editions/-/editions-1.3.3.tgz#0907101bdda20fac3cbe334c27cbd0688dc99a5b"
+
+editor@~1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/editor/-/editor-1.0.0.tgz#60c7f87bd62bcc6a894fa8ccd6afb7823a24f742"
+
+ee-first@1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d"
+
+em-helpers@^0.8.0:
+  version "0.8.0"
+  resolved "https://registry.yarnpkg.com/em-helpers/-/em-helpers-0.8.0.tgz#01678f3692a61d563cce68e49459e206d14db095"
+  dependencies:
+    ember-cli-htmlbars "^1.0.1"
+    ember-cli-less "^1.4.0"
+    source-map "^0.5.6"
+  optionalDependencies:
+    phantomjs-prebuilt "2.1.13"
+
+em-table@^0.7.0:
+  version "0.7.2"
+  resolved "https://registry.yarnpkg.com/em-table/-/em-table-0.7.2.tgz#867ff734701df9765f2505e02acd74768edb0f71"
+  dependencies:
+    ember-cli-htmlbars "^1.0.1"
+    ember-cli-less "^1.4.0"
+    source-map "^0.5.6"
+  optionalDependencies:
+    phantomjs-prebuilt "2.1.13"
+
+ember-array-contains-helper@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ember-array-contains-helper/-/ember-array-contains-helper-1.0.2.tgz#53427e6e9dfcfceb443bfeb6965928b5f624f6a0"
+  dependencies:
+    ember-cli-babel "^5.1.5"
+
+ember-bootstrap@0.5.1:
+  version "0.5.1"
+  resolved "https://registry.yarnpkg.com/ember-bootstrap/-/ember-bootstrap-0.5.1.tgz#bbad60b2818c47b3fb31562967ae02ee7e92d38c"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+    ember-wormhole "^0.3.4"
+
+ember-cli-app-version@1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-app-version/-/ember-cli-app-version-1.0.0.tgz#6963591abb3a176f68ab1507f41324e8154d0e66"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+    ember-cli-htmlbars "^1.0.0"
+    git-repo-version "0.3.0"
+
+ember-cli-babel@5.1.6, ember-cli-babel@^5.0.0, ember-cli-babel@^5.1.3, ember-cli-babel@^5.1.5, ember-cli-babel@^5.1.6:
+  version "5.1.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-babel/-/ember-cli-babel-5.1.6.tgz#d3e4fe59d96589adf7db1d99ff4f6b9dfa9dc132"
+  dependencies:
+    broccoli-babel-transpiler "^5.4.5"
+    broccoli-funnel "^1.0.0"
+    clone "^1.0.2"
+    ember-cli-version-checker "^1.0.2"
+    resolve "^1.1.2"
+
+ember-cli-content-security-policy@0.4.0:
+  version "0.4.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-content-security-policy/-/ember-cli-content-security-policy-0.4.0.tgz#71e4f228e68bcefc313f0ffae26f3600a0093276"
+  dependencies:
+    body-parser "^1.2.0"
+
+ember-cli-copy-dereference@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-copy-dereference/-/ember-cli-copy-dereference-1.0.0.tgz#a1795bf6c70650317df4ab8674dd02e0bea5d4fd"
+
+ember-cli-dependency-checker@1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-dependency-checker/-/ember-cli-dependency-checker-1.2.0.tgz#0d1d4fc93a48d9a105fbb120d262d05485dd7425"
+  dependencies:
+    chalk "^0.5.1"
+    is-git-url "0.2.0"
+    semver "^4.1.0"
+
+ember-cli-get-dependency-depth@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-get-dependency-depth/-/ember-cli-get-dependency-depth-1.0.0.tgz#e0afecf82a2d52f00f28ab468295281aec368d11"
+
+ember-cli-htmlbars-inline-precompile@0.3.1:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars-inline-precompile/-/ember-cli-htmlbars-inline-precompile-0.3.1.tgz#5e37101d7017c61ae11b721ee709ae0c1802ce59"
+  dependencies:
+    babel-plugin-htmlbars-inline-precompile "0.0.5"
+    ember-cli-babel "^5.1.3"
+    ember-cli-htmlbars "^1.0.0"
+
+ember-cli-htmlbars@0.7.6:
+  version "0.7.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-0.7.6.tgz#07e068eea68133d7e2fa6b505d87673f4bce145f"
+  dependencies:
+    broccoli-filter "^0.1.6"
+    ember-cli-version-checker "^1.0.2"
+
+ember-cli-htmlbars@1.0.2, ember-cli-htmlbars@^1.0.0, ember-cli-htmlbars@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ember-cli-htmlbars/-/ember-cli-htmlbars-1.0.2.tgz#53b3e503ed3aaccb8c23592f292bc2e10ae467a1"
+  dependencies:
+    broccoli-persistent-filter "^1.0.3"
+    ember-cli-version-checker "^1.0.2"
+    json-stable-stringify "^1.0.0"
+    strip-bom "^2.0.0"
+
+ember-cli-ic-ajax@0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-ic-ajax/-/ember-cli-ic-ajax-0.2.1.tgz#0dd9a2c9f9d16f4da98ade15fef427ee63cf8710"
+  dependencies:
+    ic-ajax "~2.0.1"
+
+ember-cli-inject-live-reload@1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-inject-live-reload/-/ember-cli-inject-live-reload-1.4.0.tgz#1dac5b4a2fecc51cea3c17bce9089596115a7fbd"
+
+ember-cli-is-package-missing@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-is-package-missing/-/ember-cli-is-package-missing-1.0.0.tgz#6e6184cafb92635dd93ca6c946b104292d4e3390"
+
+ember-cli-jquery-ui@0.0.20:
+  version "0.0.20"
+  resolved "https://registry.yarnpkg.com/ember-cli-jquery-ui/-/ember-cli-jquery-ui-0.0.20.tgz#c9949a18c5dc3c650ad9ab6bd8ac107cddf15a40"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+    ember-cli-htmlbars "0.7.6"
+
+ember-cli-less@^1.4.0:
+  version "1.5.4"
+  resolved "https://registry.yarnpkg.com/ember-cli-less/-/ember-cli-less-1.5.4.tgz#4cfbc05c6f23712fe9665f93be9bc8f2cccb0f71"
+  dependencies:
+    broccoli-less-single "^0.6.4"
+    broccoli-merge-trees "^1.0.0"
+    ember-cli-version-checker "^1.1.4"
+    lodash.merge "^3.3.2"
+
+ember-cli-moment-shim@0.7.3:
+  version "0.7.3"
+  resolved "https://registry.yarnpkg.com/ember-cli-moment-shim/-/ember-cli-moment-shim-0.7.3.tgz#bb4f6a36ad726acb9e432b0c73270cf1cd193973"
+  dependencies:
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-stew "^1.0.0"
+    chalk "^1.1.1"
+    ember-cli-babel "^5.0.0"
+    exists-sync "0.0.3"
+    lodash.defaults "^3.1.2"
+
+ember-cli-node-assets@^0.1.4:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/ember-cli-node-assets/-/ember-cli-node-assets-0.1.6.tgz#6488a2949048c801ad6d9e33753c7bce32fc1146"
+  dependencies:
+    broccoli-funnel "^1.0.1"
+    broccoli-merge-trees "^1.1.1"
+    broccoli-unwatched-tree "^0.1.1"
+    debug "^2.2.0"
+    lodash "^4.5.1"
+    resolve "^1.1.7"
+
+ember-cli-normalize-entity-name@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-normalize-entity-name/-/ember-cli-normalize-entity-name-1.0.0.tgz#0b14f7bcbc599aa117b5fddc81e4fd03c4bad5b7"
+  dependencies:
+    silent-error "^1.0.0"
+
+ember-cli-numeral@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-numeral/-/ember-cli-numeral-0.2.0.tgz#60c984aa9e8b97bf79ded777104848935796528a"
+  dependencies:
+    ember-cli-node-assets "^0.1.4"
+    numeral "^1.5.3"
+
+ember-cli-path-utils@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-path-utils/-/ember-cli-path-utils-1.0.0.tgz#4e39af8b55301cddc5017739b77a804fba2071ed"
+
+ember-cli-preprocess-registry@^1.0.3:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-preprocess-registry/-/ember-cli-preprocess-registry-1.1.0.tgz#1a8f848876de2851507842e4c0c9051f62b4aac6"
+  dependencies:
+    broccoli-clean-css "0.2.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-merge-trees "^1.0.0"
+    debug "^2.2.0"
+    exists-sync "0.0.3"
+    lodash "^3.10.0"
+    process-relative-require "^1.0.0"
+    silent-error "^1.0.0"
+
+ember-cli-qunit@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-qunit/-/ember-cli-qunit-1.2.1.tgz#b728bc72ced1b4991d1044eb906b88dbfb019abe"
+  dependencies:
+    broccoli-babel-transpiler "^5.5.0"
+    broccoli-jshint "^1.0.0"
+    broccoli-merge-trees "^1.1.0"
+    broccoli-sourcemap-concat "^1.1.6"
+    ember-cli-version-checker "^1.1.4"
+    ember-qunit "^0.4.18"
+    qunitjs "^1.20.0"
+    resolve "^1.1.6"
+
+ember-cli-release@0.2.8:
+  version "0.2.8"
+  resolved "https://registry.yarnpkg.com/ember-cli-release/-/ember-cli-release-0.2.8.tgz#e9fddd06058c0f3bc2ea57ab2667e9611f8fb205"
+  dependencies:
+    chalk "^1.0.0"
+    git-tools "^0.1.4"
+    make-array "^0.1.2"
+    merge "^1.2.0"
+    moment-timezone "^0.3.0"
+    nopt "^3.0.3"
+    rsvp "^3.0.17"
+    semver "^4.3.1"
+    silent-error "^1.0.0"
+
+ember-cli-sri@1.2.1:
+  version "1.2.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-sri/-/ember-cli-sri-1.2.1.tgz#105b1f8bfb88fff8817caa14d0776ecb06f857ee"
+  dependencies:
+    broccoli-sri-hash "^1.2.2"
+
+ember-cli-string-utils@^1.0.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-string-utils/-/ember-cli-string-utils-1.1.0.tgz#39b677fc2805f55173735376fcef278eaa4452a1"
+
+ember-cli-test-info@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-test-info/-/ember-cli-test-info-1.0.0.tgz#ed4e960f249e97523cf891e4aed2072ce84577b4"
+  dependencies:
+    ember-cli-string-utils "^1.0.0"
+
+ember-cli-uglify@1.2.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/ember-cli-uglify/-/ember-cli-uglify-1.2.0.tgz#3208c32b54bc2783056e8bb0d5cfe9bbaf17ffb2"
+  dependencies:
+    broccoli-uglify-sourcemap "^1.0.0"
+
+ember-cli-version-checker@^1.0.2, ember-cli-version-checker@^1.1.4:
+  version "1.3.1"
+  resolved "https://registry.yarnpkg.com/ember-cli-version-checker/-/ember-cli-version-checker-1.3.1.tgz#0bc2d134c830142da64bf9627a0eded10b61ae72"
+  dependencies:
+    semver "^5.3.0"
+
+ember-cli@1.13.14:
+  version "1.13.14"
+  resolved "https://registry.yarnpkg.com/ember-cli/-/ember-cli-1.13.14.tgz#1ff35577a0b4fbb8efad24710f52bb7b874a7765"
+  dependencies:
+    amd-name-resolver "0.0.2"
+    bower "^1.3.12"
+    bower-config "0.6.1"
+    bower-endpoint-parser "0.2.2"
+    broccoli "0.16.8"
+    broccoli-babel-transpiler "^5.4.5"
+    broccoli-config-loader "^1.0.0"
+    broccoli-config-replace "^1.1.0"
+    broccoli-funnel "^1.0.0"
+    broccoli-kitchen-sink-helpers "^0.2.7"
+    broccoli-merge-trees "^1.0.0"
+    broccoli-plugin "^1.2.0"
+    broccoli-sane-watcher "^1.1.1"
+    broccoli-source "^1.1.0"
+    broccoli-sourcemap-concat "^2.0.2"
+    broccoli-viz "^2.0.1"
+    chalk "1.1.0"
+    clean-base-url "^1.0.0"
+    compression "^1.4.4"
+    configstore "1.2.1"
+    core-object "0.0.2"
+    cpr "0.4.2"
+    debug "^2.1.3"
+    diff "^1.3.1"
+    ember-cli-copy-dereference "^1.0.0"
+    ember-cli-get-dependency-depth "^1.0.0"
+    ember-cli-is-package-missing "^1.0.0"
+    ember-cli-normalize-entity-name "^1.0.0"
+    ember-cli-path-utils "^1.0.0"
+    ember-cli-preprocess-registry "^1.0.3"
+    ember-cli-string-utils "^1.0.0"
+    ember-cli-test-info "^1.0.0"
+    ember-router-generator "^1.0.0"
+    escape-string-regexp "^1.0.3"
+    exists-sync "0.0.3"
+    exit "^0.1.2"
+    express "^4.12.3"
+    findup "0.1.5"
+    findup-sync "^0.2.1"
+    fs-extra "0.22.1"
+    fs-monitor-stack "^1.0.2"
+    git-repo-info "^1.0.4"
+    glob "5.0.13"
+    http-proxy "^1.9.0"
+    inflection "^1.7.0"
+    inquirer "0.5.1"
+    is-git-url "^0.2.0"
+    isbinaryfile "^2.0.3"
+    leek "0.0.18"
+    lodash "^3.6.0"
+    markdown-it "4.3.0"
+    markdown-it-terminal "0.0.2"
+    merge-defaults "^0.2.1"
+    minimatch "^2.0.4"
+    morgan "^1.5.2"
+    node-modules-path "^1.0.0"
+    node-uuid "^1.4.3"
+    nopt "^3.0.1"
+    npm "2.14.10"
+    pleasant-progress "^1.0.2"
+    portfinder "^0.4.0"
+    promise-map-series "^0.2.1"
+    quick-temp "0.1.3"
+    readline2 "0.1.1"
+    resolve "^1.1.6"
+    rsvp "^3.0.17"
+    sane "^1.1.1"
+    semver "^4.3.3"
+    silent-error "^1.0.0"
+    symlink-or-copy "^1.0.1"
+    temp "0.8.1"
+    testem "0.9.11"
+    through "^2.3.6"
+    tiny-lr "0.2.0"
+    walk-sync "0.1.3"
+    yam "0.0.18"
+
+ember-d3@0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/ember-d3/-/ember-d3-0.1.0.tgz#f670809632298b3f7124c398ea48f1d7503ef1e1"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+ember-data@2.1.0:
+  version "2.1.0"
+  resolved "https://registry.yarnpkg.com/ember-data/-/ember-data-2.1.0.tgz#9aa0f95042010513250818119d63ace3888c7dd9"
+  dependencies:
+    rsvp "^3.0.18"
+
+ember-disable-proxy-controllers@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/ember-disable-proxy-controllers/-/ember-disable-proxy-controllers-1.0.1.tgz#1254eeec0ba025c24eb9e8da611afa7b38754281"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+ember-export-application-global@1.0.5:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/ember-export-application-global/-/ember-export-application-global-1.0.5.tgz#73bd641b19e3474190f717c9b504617511506bea"
+
+ember-qunit@^0.4.18:
+  version "0.4.24"
+  resolved "https://registry.yarnpkg.com/ember-qunit/-/ember-qunit-0.4.24.tgz#b54cf6688c442d07eacea47c3285879cdd7c2163"
+  dependencies:
+    ember-test-helpers "^0.5.32"
+
+ember-resolver@2.0.3:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/ember-resolver/-/ember-resolver-2.0.3.tgz#7ed5cc60049906def4edc80c25eedb301cace3b2"
+  dependencies:
+    ember-cli-babel "^5.1.3"
+    ember-cli-version-checker "^1.1.4"
+
+ember-router-generator@^1.0.0:
+  version "1.2.3"
+  resolved "https://registry.yarnpkg.com/ember-router-generator/-/ember-router-generator-1.2.3.tgz#8ed2ca86ff323363120fc14278191e9e8f1315ee"
+  dependencies:
+    recast "^0.11.3"
+
+ember-spin-spinner@0.2.3:
+  version "0.2.3"
+  resolved "https://registry.yarnpkg.com/ember-spin-spinner/-/ember-spin-spinner-0.2.3.tgz#932c7823686f33274fc4daf08557fcb18037e876"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+ember-test-helpers@^0.5.32:
+  version "0.5.34"
+  resolved "https://registry.yarnpkg.com/ember-test-helpers/-/ember-test-helpers-0.5.34.tgz#c8439108d1cba1d7d838c212208a5c4061471b83"
+  dependencies:
+    klassy "^0.1.3"
+
+ember-truth-helpers@1.3.0:
+  version "1.3.0"
+  resolved "https://registry.yarnpkg.com/ember-truth-helpers/-/ember-truth-helpers-1.3.0.tgz#6ed9f83ce9a49f52bb416d55e227426339a64c60"
+  dependencies:
+    ember-cli-babel "^5.1.6"
+
+ember-wormhole@^0.3.4:
+  version "0.3.6"
+  resolved "https://registry.yarnpkg.com/ember-wormhole/-/ember-wormhole-0.3.6.tgz#bbe21bb5478ad254efe4fff4019ac6710f4ad85c"
+  dependencies:
+    ember-cli-babel "^5.0.0"
+
+encodeurl@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.1.tgz#79e3d58655346909fe6f0f45a5de68103b294d20"
+
+engine.io-client-pure@1.5.9:
+  version "1.5.9"
+  resolved "https://registry.yarnpkg.com/engine.io-client-pure/-/engine.io-client-pure-1.5.9.tgz#fc3c4977b00ffc5b059dfa73c06216ad838496e2"
+  dependencies:
+    component-emitter "1.1.2"
+    component-inherit "0.0.3"
+    debug "1.0.4"
+    engine.io-parser "1.2.2"
+    has-cors "1.1.0"
+    indexof "0.0.1"
+    parsejson "0.0.1"
+    parseqs "0.0.2"
+    parseuri "0.0.4"
+    ws-pure "0.8.0"
+    xmlhttprequest-ssl "1.5.1"
+
+engine.io-parser@1.2.2:
+  version "1.2.2"
+  resolved "https://registry.yarnpkg.com/engine.io-parser/-/engine.io-parser-1.2.2.tgz#cd081041feea39c64323ff79b82a90a72afcccdd"
+  dependencies:
+    after "0.8.1"
+    arraybuffer.slice "0.0.6"
+    base64-arraybuffer "0.1.2"
+    blob "0.0.4"
+    has-binary "0.1.6"
+    utf8 "2.1.0"
+
+engine.io-pure@1.5.9:
+  version "1.5.9"
+  resolved "https://registry.yarnpkg.com/engine.io-pure/-/engine.io-pure-1.5.9.tgz#d46f763e0945e5f818d6a59061bf93f1e05c89b6"
+  dependencies:
+    base64id "0.1.0"
+    debug "1.0.3"
+    engine.io-parser "1.2.2"
+    ws-pure "0.8.0"
+
+ensure-posix-path@^1.0.0, ensure-posix-path@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/ensure-posix-path/-/ensure-posix-path-1.0.2.tgz#a65b3e42d0b71cfc585eb774f9943c8d9b91b0c2"
+
+entities@1.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.0.0.tgz#b2987aa3821347fcde642b24fdfc9e4fb712bf26"
+
+entities@~1.1.1:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/entities/-/entities-1.1.1.tgz#6e5c2d0a5621b5dadaecef80b90edfb5cd7772f0"
+
+errno@^0.1.1:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/errno/-/errno-0.1.4.tgz#b896e23a9e5e8ba33871fc996abd3635fc9a1c7d"
+  dependencies:
+    prr "~0.0.0"
+
+es5-ext@^0.10.14, es5-ext@^0.10.9, es5-ext@~0.10.11, es5-ext@~0.10.14, es5-ext@~0.10.2, es5-ext@~0.10.5, es5-ext@~0.10.6:
+  version "0.10.15"
+  resolved "https://registry.yarnpkg.com/es5-ext/-/es5-ext-0.10.15.tgz#c330a5934c1ee21284a7c081a86e5fd937c91ea6"
+  dependencies:
+    es6-iterator "2"
+    es6-symbol "~3.1"
+
+es6-iterator@2:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-2.0.1.tgz#8e319c9f0453bf575d374940a655920e59ca5512"
+  dependencies:
+    d "1"
+    es5-ext "^0.10.14"
+    es6-symbol "^3.1"
+
+es6-iterator@~0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/es6-iterator/-/es6-iterator-0.1.3.tgz#d6f58b8c4fc413c249b4baa19768f8e4d7c8944e"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+    es6-symbol "~2.0.1"
+
+es6-promise@~4.0.3:
+  version "4.0.5"
+  resolved "https://registry.yarnpkg.com/es6-promise/-/es6-promise-4.0.5.tgz#7882f30adde5b240ccfa7f7d78c548330951ae42"
+
+es6-symbol@^3.0.2, es6-symbol@^3.1, es6-symbol@~3.1:
+  version "3.1.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-3.1.1.tgz#bf00ef4fdab6ba1b46ecb7b629b4c7ed5715cc77"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+es6-symbol@~2.0.1:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/es6-symbol/-/es6-symbol-2.0.1.tgz#761b5c67cfd4f1d18afb234f691d678682cb3bf3"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.5"
+
+es6-weak-map@~0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/es6-weak-map/-/es6-weak-map-0.1.4.tgz#706cef9e99aa236ba7766c239c8b9e286ea7d228"
+  dependencies:
+    d "~0.1.1"
+    es5-ext "~0.10.6"
+    es6-iterator "~0.1.3"
+    es6-symbol "~2.0.1"
+
+escape-html@~1.0.3:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988"
+
+escape-string-regexp@^1.0.0, escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.3:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
+
+esprima-fb@~12001.1.0-dev-harmony-fb:
+  version "12001.1.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-12001.1.0-dev-harmony-fb.tgz#d84400384ba95ce2678c617ad24a7f40808da915"
+
+esprima-fb@~15001.1001.0-dev-harmony-fb:
+  version "15001.1001.0-dev-harmony-fb"
+  resolved "https://registry.yarnpkg.com/esprima-fb/-/esprima-fb-15001.1001.0-dev-harmony-fb.tgz#43beb57ec26e8cf237d3dd8b33e42533577f2659"
+
+esprima@^2.6.0:
+  version "2.7.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-2.7.3.tgz#96e3b70d5779f6ad49cd032673d1c312767ba581"
+
+esprima@^3.1.1, esprima@~3.1.0:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/esprima/-/esprima-3.1.3.tgz#fdca51cee6133895e3c88d535ce49dbff62a4633"
+
+esutils@^2.0.0:
+  version "2.0.2"
+  resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.2.tgz#0abf4f1caa5bcb1f7a9d8acc6dea4faaa04bac9b"
+
+etag@~1.8.0:
+  version "1.8.0"
+  resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.0.tgz#6f631aef336d6c46362b51764044ce216be3c051"
+
+event-emitter@~0.3.4:
+  version "0.3.5"
+  resolved "https://registry.yarnpkg.com/event-emitter/-/event-emitter-0.3.5.tgz#df8c69eef1647923c7157b9ce83840610b02cc39"
+  dependencies:
+    d "1"
+    es5-ext "~0.10.14"
+
+eventemitter3@1.x.x:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-1.2.0.tgz#1c86991d816ad1e504750e73874224ecf3bec508"
+
+events-to-array@^1.0.1:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/events-to-array/-/events-to-array-1.1.2.tgz#2d41f563e1fe400ed4962fe1a4d5c6a7539df7f6"
+
+exec-sh@^0.2.0:
+  version "0.2.0"
+  resolved "https://registry.yarnpkg.com/exec-sh/-/exec-sh-0.2.0.tgz#14f75de3f20d286ef933099b2ce50a90359cef10"
+  dependencies:
+    merge "^1.1.3"
+
+exists-sync@0.0.3:
+  version "0.0.3"
+  resolved "https://registry.yarnpkg.com/exists-sync/-/exists-sync-0.0.3.tgz#b910000bedbb113b378b82f5f5a7638107622dcf"
+
+exit@0.1.2, exit@0.1.x, exit@^0.1.2:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c"
+
+expand-brackets@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/expand-brackets/-/expand-brackets-0.1.5.tgz#df07284e342a807cd733ac5af72411e581d1177b"
+  dependencies:
+    is-posix-bracket "^0.1.0"
+
+expand-range@^1.8.1:
+  version "1.8.2"
+  resolved "https://registry.yarnpkg.com/expand-range/-/expand-range-1.8.2.tgz#a299effd335fe2721ebae8e257ec79644fc85337"
+  dependencies:
+    fill-range "^2.1.0"
+
+express@^4.10.7, express@^4.12.3:
+  version "4.15.2"
+  resolved "https://registry.yarnpkg.com/express/-/express-4.15.2.tgz#af107fc148504457f2dca9a6f2571d7129b97b35"
+  dependencies:
+    accepts "~1.3.3"
+    array-flatten "1.1.1"
+    content-disposition "0.5.2"
+    content-type "~1.0.2"
+    cookie "0.3.1"
+    cookie-signature "1.0.6"
+    debug "2.6.1"
+    depd "~1.1.0"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    etag "~1.8.0"
+    finalhandler "~1.0.0"
+    fresh "0.5.0"
+    merge-descriptors "1.0.1"
+    methods "~1.1.2"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    path-to-regexp "0.1.7"
+    proxy-addr "~1.1.3"
+    qs "6.4.0"
+    range-parser "~1.2.0"
+    send "0.15.1"
+    serve-static "1.12.1"
+    setprototypeof "1.0.3"
+    statuses "~1.3.1"
+    type-is "~1.6.14"
+    utils-merge "1.0.0"
+    vary "~1.1.0"
+
+extend@~3.0.0:
+  version "3.0.0"
+  resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.0.tgz#5a474353b9f3353ddd8176dfd37b91c83a46f1d4"
+
+extglob@^0.3.1:
+  version "0.3.2"
+  resolved "https://registry.yarnpkg.com/extglob/-/extglob-0.3.2.tgz#2e18ff3d2f49ab2765cec9023f011daa8d8349a1"
+  dependencies:
+    is-extglob "^1.0.0"
+
+extract-zip@~1.5.0:
+  version "1.5.0"
+  resolved "https://registry.yarnpkg.com/extract-zip/-/extract-zip-1.5.0.tgz#92ccf6d81ef70a9fa4c1747114ccef6d8688a6c4"
+  dependencies:
+    concat-stream "1.5.0"
+    debug "0.7.4"
+    mkdirp "0.5.0"
+    yauzl "2.4.1"
+
+extsprintf@1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/extsprintf/-/extsprintf-1.0.2.tgz#e1080e0658e300b06294990cc70e1502235fd550"
+
+fast-ordered-set@^1.0.0, fast-ordered-set@^1.0.2:
+  version "1.0.3"
+  resolved "https://registry.yarnpkg.com/fast-ordered-set/-/fast-ordered-set-1.0.3.tgz#3fbb36634f7be79e4f7edbdb4a357dee25d184eb"
+  dependencies:
+    blank-object "^1.0.1"
+
+"fast-sourcemap-concat@ ^0.2.4":
+  version "0.2.7"
+  resolved "https://registry.yarnpkg.com/fast-sourcemap-concat/-/fast-sourcemap-concat-0.2.7.tgz#b5d68a6d33e52f9d326fec38b836fa44d9b0d8fc"
+  dependencies:
+    chalk "^0.5.1"
+    debug "^2.2.0"
+    mkdirp "^0.5.0"
+    rsvp "^3.0.14"
+    source-map "^0.4.2"
+    source-map-url "^0.3.0"
+
+faye-websocket@~0.10.0:
+  version "0.10.0"
+  resolved "https://registry.yarnpkg.com/faye-websocket/-/faye-websocket-0.10.0.tgz#4e492f8d04dfb6f89003507f6edbf2d501e7c6f4"
+  dependencies:
+    websocket-driver ">=0.5.1"
+
+fb-watchman@^1.8.0:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-1.9.2.tgz#a24cf47827f82d38fb59a69ad70b76e3b6ae7383"
+  dependencies:
+    bser "1.0.2"
+
+fd-slicer@~1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/fd-slicer/-/fd-slicer-1.0.1.tgz#8b5bcbd9ec327c5041bf9ab023fd6750f1177e65"
+  dependencies:
+    pend "~1.2.0"
+
+filename-regex@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/filename-regex/-/filename-regex-2.0.0.tgz#996e3e80479b98b9897f15a8a58b3d084e926775"
+
+fileset@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/fileset/-/fileset-0.2.1.tgz#588ef8973c6623b2a76df465105696b96aac8067"
+  dependencies:
+    glob "5.x"
+    minimatch "2.x"
+
+fill-range@^2.1.0:
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-2.2.3.tgz#50b77dfd7e469bc7492470963699fe7a8485a723"
+  dependencies:
+    is-number "^2.1.0"
+    isobject "^2.0.0"
+    randomatic "^1.1.3"
+    repeat-element "^1.1.2"
+    repeat-string "^1.5.2"
+
+finalhandler@1.0.1:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.1.tgz#bcd15d1689c0e5ed729b6f7f541a6df984117db8"
+  dependencies:
+    debug "2.6.3"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+finalhandler@~1.0.0:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.0.2.tgz#d0e36f9dbc557f2de14423df6261889e9d60c93a"
+  dependencies:
+    debug "2.6.4"
+    encodeurl "~1.0.1"
+    escape-html "~1.0.3"
+    on-finished "~2.3.0"
+    parseurl "~1.3.1"
+    statuses "~1.3.1"
+    unpipe "~1.0.0"
+
+findup-sync@^0.2.1:
+  version "0.2.1"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.2.1.tgz#e0a90a450075c49466ee513732057514b81e878c"
+  dependencies:
+    glob "~4.3.0"
+
+findup-sync@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/findup-sync/-/findup-sync-0.3.0.tgz#37930aa5d816b777c03445e1966cc6790a4c0b16"
+  dependencies:
+    glob "~5.0.0"
+
+findup@0.1.5, findup@^0.1.5:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/findup/-/findup-0.1.5.tgz#8ad929a3393bac627957a7e5de4623b06b0e2ceb"
+  dependencies:
+    colors "~0.6.0-1"
+    commander "~2.1.0"
+
+fireworm@^0.6.6:
+  version "0.6.6"
+  resolved "https://registry.yarnpkg.com/fireworm/-/fireworm-0.6.6.tgz#6023218e215c8ae628ac5105a60e470a50983f6f"
+  dependencies:
+    async "~0.2.9"
+    is-type "0.0.1"
+    lodash "~2.3.0"
+    minimatch "~0.2.9"
+
+for-in@^1.0.1:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/for-in/-/for-in-1.0.2.tgz#81068d295a8142ec0ac726c6e2200c30fb6d5e80"
+
+for-own@^0.1.4:
+  version "0.1.5"
+  resolved "https://registry.yarnpkg.com/for-own/-/for-own-0.1.5.tgz#5265c681a4f294dabbf17c9509b6763aa84510ce"
+  dependencies:
+    for-in "^1.0.1"
+
+forever-agent@~0.6.1:
+  version "0.6.1"
+  resolved "https://registry.yarnpkg.com/forever-agent/-/forever-agent-0.6.1.tgz#fbc71f0c41adeb37f96c577ad1ed42d8fdacca91"
+
+form-data@~1.0.0-rc3, form-data@~1.0.0-rc4:
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/form-data/-/form-data-1.0.1.tgz#ae315db9a4907fa065502304a66d7733475ee37c"
+  dependencies:
+    async "^2.0.1"
+    combined-stream "^1.0.5"
+    mime-types "^2.1.11"
+
+forwarded@~0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/forwarded/-/forwarded-0.1.0.tgz#19ef9874c4ae1c297bcf078fde63a09b66a84363"
+
+fresh@0.5.0:
+  version "0.5.0"
+  resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.0.tgz#f474ca5e6a9246d6fd8e0953cfa9b9c805afa78e"
+
+fs-extra@0.22.1:
+  version "0.22.1"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.22.1.tgz#5fd6f8049dc976ca19eb2355d658173cabcce056"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.16.3:
+  version "0.16.5"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.16.5.tgz#1ad661fa6c86c9608cd1b49efc6fce834939a750"
+  dependencies:
+    graceful-fs "^3.0.5"
+    jsonfile "^2.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^0.24.0:
+  version "0.24.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.24.0.tgz#d4e4342a96675cb7846633a6099249332b539952"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-extra@^2.0.0:
+  version "2.1.2"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-2.1.2.tgz#046c70163cef9aad46b0e4a7fa467fb22d71de35"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+
+fs-extra@~0.30.0:
+  version "0.30.0"
+  resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-0.30.0.tgz#f233ffcc08d4da7d432daa449776989db1df93f0"
+  dependencies:
+    graceful-fs "^4.1.2"
+    jsonfile "^2.1.0"
+    klaw "^1.0.0"
+    path-is-absolute "^1.0.0"
+    rimraf "^2.2.8"
+
+fs-monitor-stack@^1.0.2:
+  version "1.1.1"
+  resolved "https://registry.yarnpkg.com/fs-monitor-stack/-/fs-monitor-stack-1.1.1.tgz#c4038d5977939b6b4e38396d7e7cd0895a7ac6b3"
+
+fs-readdir-recursive@^0.1.0:
+  version "0.1.2"
+  resolved "https://registry.yarnpkg.com/fs-readdir-recursive/-/fs-readdir-recursive-0.1.2.tgz#315b4fb8c1ca5b8c47defef319d073dad3568059"
+
+fs-tree-diff@^0.3.0:
+  version "0.3.1"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.3.1.tgz#41a84ee34994bd564c63d9852f1109c5de7f9290"
+  dependencies:
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.2"
+
+fs-tree-diff@^0.4.3:
+  version "0.4.4"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.4.4.tgz#f6b75d70db22c1f3b05d592270f4ed6c9c2f82dd"
+  dependencies:
+    debug "^2.2.0"
+    fast-ordered-set "^1.0.2"
+
+fs-tree-diff@^0.5.2:
+  version "0.5.6"
+  resolved "https://registry.yarnpkg.com/fs-tree-diff/-/fs-tree-diff-0.5.6.tgz#342665749e8dca406800b672268c8f5073f3e623"
+  dependencies:
+    heimdalljs-logger "^0.1.7"
+    object-assign "^4.1.0"
+    path-posix "^1.0.0"
+    symlink-or-copy "^1.1.8"
+
+fs-vacuum@~1.2.7:
+  version "1.2.10"
+  resolved "https://registry.yarnpkg.com/fs-vacuum/-/fs-vacuum-1.2.10.tgz#b7629bec07a4031a2548fdf99f5ecf1cc8b31e36"
+  dependencies:
+    graceful-fs "^4.1.2"
+    path-is-inside "^1.0.1"
+    rimraf "^2.5.2"
+
+fs-write-stream-atomic@~1.0.4:
+  version "1.0.10"
+  resolved "https://registry.yarnpkg.com/fs-write-stream-atomic/-/fs-write-stream-atomic-1.0.10.tgz#b47df53493ef911df75731e70a9ded0189db40c9"
+  dependencies:
+    graceful-fs "^4.1.2"
+    iferr "^0.1.5"
+    imurmurhash "^0.1.4"
+    readable-stream "1 || 2"
+
+fs.realpath@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f"
+
+fstream-ignore@^1.0.0:
+  version "1.0.5"
+  resolved "https://registry.yarnpkg.com/fstream-ignore/-/fstream-ignore-1.0.5.tgz#9c31dae34767018fe1d249b24dada67d092da105"
+  dependencies:
+    fstream "^1.0.0"
+    inherits "2"
+    minimatch "^3.0.0"
+
+fstream-npm@~1.0.7:
+  version "1.0.7"
+  resolved "https://registry.yarnpkg.com/fstream-npm/-/fstream-npm-1.0.7.tgz#7ed0d1ac13d7686dd9e1bf6ceb8be273bf6d2f86"
+  dependencies:
+    fstream-ignore "^1.0.0"
+    inherits "2"
+
+fstream@^1.0.0, fstream@^1.0.2, fstream@~1.0.8:
+  version "1.0.11"
+  resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
+  dependencies:
+    graceful-fs "^4.1.2"
+    inherits "~2.0.0"
+    mkdirp ">=0.5 0"
+    rimraf "2"
+
+gauge@~1.2.0, gauge@~1.2.5:
+  version "1.2.7"
+  resolved "https://registry.yarnpkg.com/gauge/-/gauge-1.2.7.tgz#e9cec5483d3d4ee0ef44b60a7d99e4935e136d93"
+  dependencies:
+    ansi "^0.3.0"
+    has-unicode "^2.0.0"
+    lodash.pad "^4.1.0"
+    lodash.padend "^4.1.0"
+    lodash.padstart "^4.1.0"
+
+generate-function@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.0.0.tgz#6858fe7c0969b7d4e9093337647ac79f60dfbe74"
+
+generate-object-property@^1.1.0:
+  version "1.2.0"
+  resolved "https://registry.yarnpkg.com/generate-object-property/-/generate-object-property-1.2.0.tgz#9c0e1c40308ce804f4783618b937fa88f99d50d0"
+  dependencies:
+    is-property "^1.0.0"
+
+get-stdin@^4.0.1:
+  version "4.0.1"
+  resolved "https://registry.yarnpkg.com/get-stdin/-/get-stdin-4.0.1.tgz#b968c6b0a04384324902e8bf1a5df32579a450fe"
+
+getpass@^0.1.1:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/getpass/-/getpass-0.1.6.tgz#283ffd9fc1256840875311c1b60e8c40187110e6"
+  dependencies:
+    assert-plus "^1.0.0"
+
+git-repo-info@^1.0.4:
+  version "1.4.1"
+  resolved "https://registry.yarnpkg.com/git-repo-info/-/git-repo-info-1.4.1.tgz#2a072823254aaf62fcf0766007d7b6651bd41943"
+
+git-repo-version@0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/git-repo-version/-/git-repo-version-0.3.0.tgz#c9b97d0d21c4357d669dc1269c2b6a75da6cc0e9"
+  dependencies:
+    git-repo-info "^1.0.4"
+
+git-tools@^0.1.4:
+  version "0.1.4"
+  resolved "https://registry.yarnpkg.com/git-tools/-/git-tools-0.1.4.tgz#5e43e59443b8a5dedb39dba663da49e79f943978"
+  dependencies:
+    spawnback "~1.0.0"
+
+github-url-from-git@~1.4.0:
+  version "1.4.0"
+  resolved "https://registry.yarnpkg.com/github-url-from-git/-/github-url-from-git-1.4.0.tgz#285e6b520819001bde128674704379e4ff03e0de"
+
+github-url-from-username-repo@~1.0.2:
+  version "1.0.2"
+  resolved "https://registry.yarnpkg.com/github-url-from-username-repo/-/github-url-from-username-repo-1.0.2.tgz#7dd79330d2abe69c10c2cef79714c97215791dfa"
+
+glob-base@^0.3.0:
+  version "0.3.0"
+  resolved "https://registry.yarnpkg.com/glob-base/-/glob-base-0.3.0.tgz#dbb164f6221b1c0b1ccf82aea328b497df0ea3c4"
+  dependencies:
+    glob-parent "^2.0.0"
+    is-glob "^2.0.0"
+
+glob-parent@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-2.0.0.tgz#81383d72db054fcccf5336daa902f182f6edbb28"
+  dependencies:
+    is-glob "^2.0.0"
+
+"glob@3 || 4", glob@~4.3.0:
+  version "4.3.5"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-4.3.5.tgz#80fbb08ca540f238acce5d11d1e9bc41e75173d3"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+
+glob@5.0.13, glob@^5.0.10:
+  version "5.0.13"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.13.tgz#0b6ffc3ac64eb90669f723a00a0ebb7281b33f8f"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^2.0.1"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@5.x, glob@^5.0.15, glob@~5.0.0, glob@~5.0.15:
+  version "5.0.15"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-5.0.15.tgz#1bc936b9e02f4a603fcc222ecf7633d30b8b93b1"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^6.0.1:
+  version "6.0.4"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-6.0.4.tgz#0f08860f6a155127b2fadd4f9ce24b1aab6e4d22"
+  dependencies:
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "2 || 3"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+glob@^7.0.5, glob@^7.1.1:
+  version "7.1.1"
+  resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.1.tgz#805211df04faaf1c63a3600306cdf5ade50b2ec8"
+  dependencies:
+    fs.realpath "^1.0.0"
+    inflight "^1.0.4"
+    inherits "2"
+    minimatch "^3.0.2"
+    once "^1.3.0"
+    path-is-absolute "^1.0.0"
+
+globals@^6.4.0:
+  version "6.4.1"
+  resolved "https://registry.yarnpkg.com/globals/-/globals-6.4.1.tgz#8498032b3b6d1cc81eebc5f79690d8fe29fabf4f"
+
+graceful-fs@^3.0.5:
+  version "3.0.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-3.0.11.tgz#7613c778a1afea62f25c630a086d7f3acbbdd818"
+  dependencies:
+    natives "^1.1.0"
+
+graceful-fs@^4.1.11, graceful-fs@^4.1.2, graceful-fs@^4.1.4, graceful-fs@^4.1.6, graceful-fs@^4.1.9, graceful-fs@~4.1.2:
+  version "4.1.11"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
+
+graceful-fs@~2.0.0:
+  version "2.0.3"
+  resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-2.0.3.tgz#7cd2cdb228a4a3f36e95efa6cc142de7d1a136d0"
+
+"graceful-readlink@>= 1.0.0":
+  version "1.0.1"
+  resolved "https://registry.yarnpkg.com/graceful-readlink/-/graceful-readlink-1.0.1.tgz#4cafad76bc62f02fa039b2f94e9a3dd3a391a725"
+
+growl@^1.8.1:
+  version "1.9.2"
+  resolved "https://registry.yarnpkg.com/growl/-/growl-1.9.2.tgz#0ea7743715db8d8de2c5ede1775e1b45ac85c02f"
+
+handlebars@^3.0.1:
+  version "3.0.3"
+  resolved "https://registry.yarnpkg.com/handlebars/-/handlebars-3.0.3.tgz#0e09651a2f0fb3c949160583710d551f92e6d2ad"
+  dependencies:
+    optimist "^0.6.1"
+    source-map "^0.1.40"
+  optionalDependencies:
+    uglify-js "~2.3"
+
+har-validator@~2.0.2, har-validator@~2.0.6:
+  version "2.0.6"
+  resolved "https://registry.yarnpkg.com/har-validator/-/har-validator-2.0.6.tgz#cdcbc08188265ad119b6a5a7c8ab70eecfb5d27d"
+  dependencies:
+    chalk "^1.1.1"
+    commander "^2.9.0"
+    is-my-json-valid "^2.12.4"
+    pinkie-promise "^2.0.0"
+
+has-ansi@^0.1.0:
+  version "0.1.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-0.1.0.tgz#84f265aae8c0e6a88a12d7022894b7568894c62e"
+  dependencies:
+    ansi-regex "^0.2.0"
+
+has-ansi@^2.0.0:
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91"
+  dependencies:
+    ansi-regex "^2.0.0"
+
+has-binary-data@0.1.3:
+  version "0.1.3"
+  resolved "https://registry.yarnpkg.com/has-binary-data/-/has-binary-data-0.1.3.tgz#8ebb18388b57f19a5231275a16fc18d51f379aae"
+  dependencies:
+    isarray "0.0.1"
+
+has-binary@0.1.6:
+  version "0.1.6"
+  resolved "https://registry.yarnpkg.com/has-binary/-/has-binary-0.1.6.tgz#25326f39cfa4f616ad8787894e3af2cfbc7b6e10"
+  dependencies:
+    isarray "0.0.1"
+
+has-color@~0.1.0:
+  version "0.1.7"
+  resolved "https://registry.yarnpkg.com/has-color/-/has-color-0.1.7.tgz#67144a5260c34fc3cca677d041daf52fe7b78b2f"
+
+has-cors@1.1.0:
+  version "1.1.0"
+  resolved "https://registry.yarnpkg.com/has-cors/-/has-cors-1.1.0.tgz#5e474793f7ea9843d1bb99c23eef49ff126fff39"
+
+has-unicode@^2.0.0:
+  version "2.0.1"
+  resolved "https://registry.yarnpkg.com/has-unicode/-/has-unicode-2.0.1.tgz#e0e6fe6a28cf51138855e086d1691e771de2a8b9"
+
+hash-for-dep@^1.0.2:
+  version "1.1.2"
+  resolved "https://registry.yarnpkg.com/hash-for-dep/-/hash-for-dep-1.1.2.tgz#e3347ed92960eb0bb53a2c6c2b70e36d75b7cd0c"
+  dependencies:
+    broccoli-kitchen-sink-helpers "^0.3.1"
+    heimdalljs "^0.2.3"
+    heimdalljs-logger "^0.1.7"
+    resolve "^1.1.6"
+
+hasha@~2.2.0:
+  version "2.2.0"
+  resolved "https://registry.yarnpkg.com/hasha/-/hasha-2.2.0.tgz#78d7cbfc1e6d66303fe79837365984517b2f6ee1"
+  dependencies:
+    is-stream "^1.0.1"
+    pinkie-promise "^2.0.0"
+
+hawk@~3.1.0, hawk@~3.1.3:
+  version "3.1.3"
+  resolved "https://registry.yarnpkg.com/hawk/-/hawk-3.1.3.tgz#078444bd7c1640b0fe540d2c9b73d59678e8e1c4"
+  dependencies:
+    boom "2.x.x"
+    cryptiles "2.x.x"
+    hoek "2.x.x"
+    sntp "1.x.x"
+
+heimdalljs-logger@^0.1.7:
+  version "0.1.9"
+  resolved "https://registry.yarnpkg.com/heimdalljs-logger/-/heimdalljs-logger-0.1.9.tgz#d76ada4e45b7bb6f786fc9c010a68eb2e2faf176"
+  dependencies:
+    debug "^2.2.0"
+    heimdalljs "^0.2.0"
+
+heimdalljs@^0.2.0, heimdalljs@^0.2.1, heimdalljs@^0.2.3:
+  version "0.2.4"
+  resolved "https://registry.yarnpkg.com/heimdalljs/-/heimdalljs-0.2.4.tgz#34ead16eab422c94803065d33abeba1f7b24a910"
+  dependencies:
+    rsvp "~3.2.1"
+
+hoek@2.x.x:
+  version "2.16.3"
+  resolved "https://registry.yarnpkg.com/hoek/-/hoek-2.16.3.tgz#20bb7403d3cea398e91dc4710a8ff1b8274a25ed"
+
+home-or-tmp@^1.0.0:
+  version "1.0.0"
+  resolved "https://registry.yarnpkg.com/home-or-tmp/-/home-or-tmp-1.0.

<TRUNCATED>


[07/50] [abbrv] hadoop git commit: HDFS-11741. Long running balancer may fail due to expired DataEncryptionKey. Contributed by Wei-Chiu Chuang and Xiao Chen.

Posted by xy...@apache.org.
HDFS-11741. Long running balancer may fail due to expired DataEncryptionKey. Contributed by Wei-Chiu Chuang and Xiao Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb622bc6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb622bc6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb622bc6

Branch: refs/heads/HDFS-7240
Commit: cb622bc619a8897e1f433c388586d83791b1cb23
Parents: 5c6f22d
Author: Xiao Chen <xi...@apache.org>
Authored: Wed May 31 16:50:33 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:49 2017 -0700

----------------------------------------------------------------------
 .../token/block/BlockTokenSecretManager.java    | 23 ++++--
 .../hadoop/hdfs/server/balancer/KeyManager.java | 33 +++++++-
 .../hdfs/server/balancer/TestKeyManager.java    | 87 ++++++++++++++++++++
 3 files changed, 131 insertions(+), 12 deletions(-)
----------------------------------------------------------------------
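
For context: the balancer's KeyManager caches a DataEncryptionKey, and a run that outlives the key's lifetime keeps presenting the expired key, which DataNodes then reject. Judging from the Timer plumbing in the hunks below, the fix presumably re-checks the expiry on each use and fetches a fresh key when the cached one has gone stale. A self-contained toy sketch of that guard, under that assumption (every name here is illustrative, not the actual Hadoop API):

public class ExpiringKeyCacheSketch {
  /** Toy stand-in for DataEncryptionKey: an id plus an expiry time. */
  static class ToyKey {
    final int keyId;
    final long expiryDateMillis;
    ToyKey(int keyId, long expiryDateMillis) {
      this.keyId = keyId;
      this.expiryDateMillis = expiryDateMillis;
    }
  }

  static final long LIFETIME_MS = 50;
  private static ToyKey cached;
  private static int fetches;

  /** Stands in for the NameNode RPC that mints a fresh key. */
  static ToyKey fetchKey() {
    return new ToyKey(++fetches, System.currentTimeMillis() + LIFETIME_MS);
  }

  /** The presumed fix: reuse the cached key only while it is still valid. */
  static synchronized ToyKey getKey() {
    if (cached == null
        || cached.expiryDateMillis <= System.currentTimeMillis()) {
      cached = fetchKey();   // stale or missing: refetch instead of failing
    }
    return cached;
  }

  public static void main(String[] args) throws Exception {
    ToyKey first = getKey();
    Thread.sleep(LIFETIME_MS + 10);  // outlive the key, like a long balancer run
    ToyKey second = getKey();        // refetched rather than reused and rejected
    System.out.println(first.keyId + " -> " + second.keyId);  // prints "1 -> 2"
  }
}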


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb622bc6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
index 6b54490..8be22d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.util.Time;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.Timer;
 
 /**
  * BlockTokenSecretManager can be instantiated in 2 modes, master mode
@@ -84,6 +85,11 @@ public class BlockTokenSecretManager extends
   private final SecureRandom nonceGenerator = new SecureRandom();
 
   /**
+   * Timer object for querying the current time. Separated out for
+   * unit testing.
+   */
+  private Timer timer;
+  /**
    * Constructor for workers.
    *
    * @param keyUpdateInterval how often a new key will be generated
@@ -130,6 +136,7 @@ public class BlockTokenSecretManager extends
     this.blockPoolId = blockPoolId;
     this.encryptionAlgorithm = encryptionAlgorithm;
     this.useProto = useProto;
+    this.timer = new Timer();
     generateKeys();
   }
 
@@ -160,10 +167,10 @@ public class BlockTokenSecretManager extends
      * more.
      */
     setSerialNo(serialNo + 1);
-    currentKey = new BlockKey(serialNo, Time.now() + 2
+    currentKey = new BlockKey(serialNo, timer.now() + 2
         * keyUpdateInterval + tokenLifetime, generateSecret());
     setSerialNo(serialNo + 1);
-    nextKey = new BlockKey(serialNo, Time.now() + 3
+    nextKey = new BlockKey(serialNo, timer.now() + 3
         * keyUpdateInterval + tokenLifetime, generateSecret());
     allKeys.put(currentKey.getKeyId(), currentKey);
     allKeys.put(nextKey.getKeyId(), nextKey);
@@ -180,7 +187,7 @@ public class BlockTokenSecretManager extends
   }
 
   private synchronized void removeExpiredKeys() {
-    long now = Time.now();
+    long now = timer.now();
     for (Iterator<Map.Entry<Integer, BlockKey>> it = allKeys.entrySet()
         .iterator(); it.hasNext();) {
       Map.Entry<Integer, BlockKey> e = it.next();
@@ -230,15 +237,15 @@ public class BlockTokenSecretManager extends
     removeExpiredKeys();
     // set final expiry date of retiring currentKey
     allKeys.put(currentKey.getKeyId(), new BlockKey(currentKey.getKeyId(),
-        Time.now() + keyUpdateInterval + tokenLifetime,
+        timer.now() + keyUpdateInterval + tokenLifetime,
         currentKey.getKey()));
     // update the estimated expiry date of new currentKey
-    currentKey = new BlockKey(nextKey.getKeyId(), Time.now()
+    currentKey = new BlockKey(nextKey.getKeyId(), timer.now()
         + 2 * keyUpdateInterval + tokenLifetime, nextKey.getKey());
     allKeys.put(currentKey.getKeyId(), currentKey);
     // generate a new nextKey
     setSerialNo(serialNo + 1);
-    nextKey = new BlockKey(serialNo, Time.now() + 3
+    nextKey = new BlockKey(serialNo, timer.now() + 3
         * keyUpdateInterval + tokenLifetime, generateSecret());
     allKeys.put(nextKey.getKeyId(), nextKey);
     return true;
@@ -410,7 +417,7 @@ public class BlockTokenSecretManager extends
     }
     if (key == null)
       throw new IllegalStateException("currentKey hasn't been initialized.");
-    identifier.setExpiryDate(Time.now() + tokenLifetime);
+    identifier.setExpiryDate(timer.now() + tokenLifetime);
     identifier.setKeyId(key.getKeyId());
     if (LOG.isDebugEnabled()) {
       LOG.debug("Generating block token for " + identifier.toString());
@@ -461,7 +468,7 @@ public class BlockTokenSecretManager extends
     }
     byte[] encryptionKey = createPassword(nonce, key.getKey());
     return new DataEncryptionKey(key.getKeyId(), blockPoolId, nonce,
-        encryptionKey, Time.now() + tokenLifetime,
+        encryptionKey, timer.now() + tokenLifetime,
         encryptionAlgorithm);
   }
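
The core of this change is making time injectable: each static Time.now() call becomes timer.now() on an instance field, so tests can substitute a controllable clock. A minimal sketch of that pattern, using hypothetical names (Clock, SessionKey, SessionKeyManager) as stand-ins rather than the actual Hadoop classes:

    // Sketch of the clock-injection pattern; names are illustrative.
    interface Clock {
      long now();
    }

    class SessionKey {
      final long expiryMs;
      SessionKey(long expiryMs) { this.expiryMs = expiryMs; }
    }

    class SessionKeyManager {
      private final long lifetimeMs;
      // Real time by default; tests swap in a fake clock instead of sleeping.
      private Clock clock = System::currentTimeMillis;

      SessionKeyManager(long lifetimeMs) { this.lifetimeMs = lifetimeMs; }

      void setClock(Clock clock) { this.clock = clock; }

      SessionKey newKey() {
        // Expiry is computed against the injected clock, never a static call.
        return new SessionKey(clock.now() + lifetimeMs);
      }

      boolean isExpired(SessionKey key) {
        return key.expiryMs < clock.now();
      }
    }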
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb622bc6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
index faf95b7..5644ef7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/KeyManager.java
@@ -21,8 +21,6 @@ import java.io.Closeable;
 import java.io.IOException;
 import java.util.EnumSet;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
@@ -37,13 +35,16 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The class provides utilities for key and token management.
  */
 @InterfaceAudience.Private
 public class KeyManager implements Closeable, DataEncryptionKeyFactory {
-  private static final Log LOG = LogFactory.getLog(KeyManager.class);
+  private static final Logger LOG = LoggerFactory.getLogger(KeyManager.class);
 
   private final NamenodeProtocol namenode;
 
@@ -54,11 +55,17 @@ public class KeyManager implements Closeable, DataEncryptionKeyFactory {
   private final BlockTokenSecretManager blockTokenSecretManager;
   private final BlockKeyUpdater blockKeyUpdater;
   private DataEncryptionKey encryptionKey;
+  /**
+   * Timer object for querying the current time. Separated out for
+   * unit testing.
+   */
+  private Timer timer;
 
   public KeyManager(String blockpoolID, NamenodeProtocol namenode,
       boolean encryptDataTransfer, Configuration conf) throws IOException {
     this.namenode = namenode;
     this.encryptDataTransfer = encryptDataTransfer;
+    this.timer = new Timer();
 
     final ExportedBlockKeys keys = namenode.getBlockKeys();
     this.isBlockTokenEnabled = keys.isBlockTokenEnabled();
@@ -113,7 +120,25 @@ public class KeyManager implements Closeable, DataEncryptionKeyFactory {
   public DataEncryptionKey newDataEncryptionKey() {
     if (encryptDataTransfer) {
       synchronized (this) {
-        if (encryptionKey == null) {
+        if (encryptionKey == null ||
+            encryptionKey.expiryDate < timer.now()) {
+          // Encryption Key (EK) is generated from Block Key (BK).
+          // Check if EK is expired, and generate a new one using the current BK
+          // if so, otherwise continue to use the previously generated EK.
+          //
+          // It's important to make sure that when EK is not expired, the BK
+          // used to generate the EK is not expired and removed, because
+          // the same BK will be used to re-generate the EK
+          // by BlockTokenSecretManager.
+          //
+          // The current implementation ensures that when an EK is not expired
+          // (within tokenLifetime), the BK that's used to generate it
+          // still has at least "keyUpdateInterval" of life time before
+          // the BK gets expired and removed.
+          // See BlockTokenSecretManager for details.
+          LOG.debug("Generating new data encryption key because current key "
+              + (encryptionKey == null ?
+              "is null." : "expired on " + encryptionKey.expiryDate));
           encryptionKey = blockTokenSecretManager.generateDataEncryptionKey();
         }
         return encryptionKey;
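
The new comment describes a lazy, expiry-checked cache: the encryption key (EK) is regenerated from the current block key (BK) only when the cached EK has expired, and the key lifetimes are arranged so the backing BK always outlives the EK it produced. A condensed sketch of that logic, where Clock, KeyFactory, and CachedKey are illustrative stand-ins rather than the real KeyManager API:

    // Sketch of the expiry-checked cache in newDataEncryptionKey().
    interface Clock { long now(); }
    interface KeyFactory { CachedKey newKey(); }

    class CachedKey {
      final long expiryMs;
      CachedKey(long expiryMs) { this.expiryMs = expiryMs; }
    }

    class EncryptionKeyCache {
      private final KeyFactory factory;  // stands in for the secret manager
      private final Clock clock;
      private CachedKey key;             // guarded by "this"

      EncryptionKeyCache(KeyFactory factory, Clock clock) {
        this.factory = factory;
        this.clock = clock;
      }

      synchronized CachedKey get() {
        // Regenerate only when missing or expired; otherwise keep returning
        // the cached key, whose backing block key is arranged to outlive it.
        if (key == null || key.expiryMs < clock.now()) {
          key = factory.newKey();
        }
        return key;
      }
    }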

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb622bc6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestKeyManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestKeyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestKeyManager.java
new file mode 100644
index 0000000..58cffb4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestKeyManager.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.balancer;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.util.FakeTimer;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.mockito.internal.util.reflection.Whitebox;
+
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test KeyManager class.
+ */
+public class TestKeyManager {
+  @Rule
+  public Timeout globalTimeout = new Timeout(120000);
+
+  @Test
+  public void testNewDataEncryptionKey() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    // Enable data transport encryption and access token
+    conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
+
+    final long keyUpdateInterval = 2 * 1000;
+    final long tokenLifeTime = keyUpdateInterval;
+    final String blockPoolId = "bp-foo";
+    FakeTimer fakeTimer = new FakeTimer();
+    BlockTokenSecretManager btsm = new BlockTokenSecretManager(
+        keyUpdateInterval, tokenLifeTime, 0, 1, blockPoolId, null, false);
+    Whitebox.setInternalState(btsm, "timer", fakeTimer);
+
+    // When KeyManager asks for block keys, return them from btsm directly
+    NamenodeProtocol namenode = mock(NamenodeProtocol.class);
+    when(namenode.getBlockKeys()).thenReturn(btsm.exportKeys());
+
+    // Instantiate a KeyManager instance and get data encryption key.
+    KeyManager keyManager = new KeyManager(blockPoolId, namenode,
+        true, conf);
+    Whitebox.setInternalState(keyManager, "timer", fakeTimer);
+    Whitebox.setInternalState(
+        Whitebox.getInternalState(keyManager, "blockTokenSecretManager"),
+        "timer", fakeTimer);
+    final DataEncryptionKey dek = keyManager.newDataEncryptionKey();
+    final long remainingTime = dek.expiryDate - fakeTimer.now();
+    assertEquals("KeyManager dataEncryptionKey should expire in 2 seconds",
+        keyUpdateInterval, remainingTime);
+    // advance the timer to expire the block key and data encryption key
+    fakeTimer.advance(keyUpdateInterval + 1);
+
+    // After the initial data encryption key expires, KeyManager should
+    // regenerate a valid data encryption key using the current block key.
+    final DataEncryptionKey dekAfterExpiration =
+        keyManager.newDataEncryptionKey();
+    assertNotEquals("KeyManager should generate a new data encryption key",
+        dek, dekAfterExpiration);
+    assertTrue("KeyManager has an expired DataEncryptionKey!",
+        dekAfterExpiration.expiryDate > fakeTimer.now());
+  }
+}
\ No newline at end of file
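
The test drives expiry deterministically with FakeTimer, injected through Mockito's Whitebox because the timer fields are private. The underlying technique — advance a fake clock instead of sleeping — can be shown without reflection; this is a self-contained JUnit 4 sketch with illustrative names, not the actual TestKeyManager code:

    import static org.junit.Assert.assertTrue;

    import org.junit.Test;

    public class FakeClockExpiryTest {

      // Manually advanced clock, the same idea as Hadoop's FakeTimer.
      static class FakeClock {
        private long nowMs = 0;
        long now() { return nowMs; }
        void advance(long ms) { nowMs += ms; }
      }

      @Test
      public void keyExpiresWhenClockAdvances() {
        FakeClock clock = new FakeClock();
        long lifetimeMs = 2000;
        long expiryMs = clock.now() + lifetimeMs;

        assertTrue("key should start valid", expiryMs > clock.now());
        clock.advance(lifetimeMs + 1);   // deterministic: no Thread.sleep()
        assertTrue("key should now be expired", expiryMs < clock.now());
      }
    }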




[13/50] [abbrv] hadoop git commit: HDFS-11893. Fix TestDFSShell.testMoveWithTargetPortEmpty failure. Contributed by Brahma Reddy Battula.

Posted by xy...@apache.org.
HDFS-11893. Fix TestDFSShell.testMoveWithTargetPortEmpty failure. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/173e391c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/173e391c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/173e391c

Branch: refs/heads/HDFS-7240
Commit: 173e391c4e23f5ca5b4badd2ef4fa0aebfa062a0
Parents: 9d9e56c
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Thu Jun 1 22:29:29 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:49 2017 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java         | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/173e391c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 3f6b268..c82c045 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -789,7 +789,7 @@ public class TestDFSShell {
       argv = new String[3];
       argv[0] = "-mv";
       argv[1] = srcFs.getUri() + "/testfile";
-      argv[2] = "hdfs://localhost/testfile2";
+      argv[2] = "hdfs://" + srcFs.getUri().getHost() + "/testfile2";
       int ret = ToolRunner.run(shell, argv);
       assertEquals("mv should have succeeded", 0, ret);
     } finally {
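
The one-line fix derives the destination host from the source filesystem's own URI instead of hardcoding localhost, so the test passes wherever the MiniDFSCluster happens to bind. A small sketch of that derivation (node-a.example.com is an arbitrary example host):

    import java.net.URI;

    public class UriHostExample {
      public static void main(String[] args) {
        URI srcFs = URI.create("hdfs://node-a.example.com:8020/testfile");
        // Reuse the source filesystem's real host; the port is deliberately
        // omitted, mirroring the patched test, so the default port applies.
        String dest = "hdfs://" + srcFs.getHost() + "/testfile2";
        System.out.println(dest);  // hdfs://node-a.example.com/testfile2
      }
    }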




[18/50] [abbrv] hadoop git commit: YARN-6316 Provide help information and documentation for TimelineSchemaCreator (Contributed by Haibo Chen via Vrushali C)

Posted by xy...@apache.org.
YARN-6316 Provide help information and documentation for TimelineSchemaCreator (Contributed by Haibo Chen via Vrushali C)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d48f2f68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d48f2f68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d48f2f68

Branch: refs/heads/HDFS-7240
Commit: d48f2f68398609b84f08389bea8e44746f9f0d65
Parents: 0a09e1f
Author: vrushali <vr...@apache.org>
Authored: Thu Jun 1 18:30:23 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:50 2017 -0700

----------------------------------------------------------------------
 .../storage/TimelineSchemaCreator.java          | 144 +++++++++++++------
 .../src/site/markdown/TimelineServiceV2.md      |   5 +-
 2 files changed, 101 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d48f2f68/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
index 9369d6a..b436eec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/TimelineSchemaCreator.java
@@ -64,6 +64,8 @@ public final class TimelineSchemaCreator {
   private static final String APP_TO_FLOW_TABLE_NAME_SHORT = "a2f";
   private static final String TTL_OPTION_SHORT = "m";
   private static final String ENTITY_TABLE_NAME_SHORT = "e";
+  private static final String HELP_SHORT = "h";
+  private static final String CREATE_TABLES_SHORT = "c";
 
   public static void main(String[] args) throws Exception {
 
@@ -75,54 +77,44 @@ public final class TimelineSchemaCreator {
     // Grab the arguments we're looking for.
     CommandLine commandLine = parseArgs(otherArgs);
 
-    // Grab the entityTableName argument
-    String entityTableName
-        = commandLine.getOptionValue(ENTITY_TABLE_NAME_SHORT);
-    if (StringUtils.isNotBlank(entityTableName)) {
-      hbaseConf.set(EntityTable.TABLE_NAME_CONF_NAME, entityTableName);
-    }
-    String entityTableTTLMetrics = commandLine.getOptionValue(TTL_OPTION_SHORT);
-    if (StringUtils.isNotBlank(entityTableTTLMetrics)) {
-      int metricsTTL = Integer.parseInt(entityTableTTLMetrics);
-      new EntityTable().setMetricsTTL(metricsTTL, hbaseConf);
-    }
-    // Grab the appToflowTableName argument
-    String appToflowTableName = commandLine.getOptionValue(
-        APP_TO_FLOW_TABLE_NAME_SHORT);
-    if (StringUtils.isNotBlank(appToflowTableName)) {
-      hbaseConf.set(AppToFlowTable.TABLE_NAME_CONF_NAME, appToflowTableName);
-    }
-    // Grab the applicationTableName argument
-    String applicationTableName = commandLine.getOptionValue(
-        APP_TABLE_NAME_SHORT);
-    if (StringUtils.isNotBlank(applicationTableName)) {
-      hbaseConf.set(ApplicationTable.TABLE_NAME_CONF_NAME,
-          applicationTableName);
-    }
-
-    List<Exception> exceptions = new ArrayList<>();
-    try {
-      boolean skipExisting
-          = commandLine.hasOption(SKIP_EXISTING_TABLE_OPTION_SHORT);
-      if (skipExisting) {
-        LOG.info("Will skip existing tables and continue on htable creation "
-            + "exceptions!");
+    if (commandLine.hasOption(HELP_SHORT)) {
+      // -help option has the highest precedence
+      printUsage();
+    } else if (commandLine.hasOption(CREATE_TABLES_SHORT)) {
+      // Grab the entityTableName argument
+      String entityTableName = commandLine.getOptionValue(
+          ENTITY_TABLE_NAME_SHORT);
+      if (StringUtils.isNotBlank(entityTableName)) {
+        hbaseConf.set(EntityTable.TABLE_NAME_CONF_NAME, entityTableName);
       }
-      createAllTables(hbaseConf, skipExisting);
-      LOG.info("Successfully created HBase schema. ");
-    } catch (IOException e) {
-      LOG.error("Error in creating hbase tables: " + e.getMessage());
-      exceptions.add(e);
-    }
-
-    if (exceptions.size() > 0) {
-      LOG.warn("Schema creation finished with the following exceptions");
-      for (Exception e : exceptions) {
-        LOG.warn(e.getMessage());
+      // Grab the TTL argument
+      String entityTableTTLMetrics = commandLine.getOptionValue(
+          TTL_OPTION_SHORT);
+      if (StringUtils.isNotBlank(entityTableTTLMetrics)) {
+        int metricsTTL = Integer.parseInt(entityTableTTLMetrics);
+        new EntityTable().setMetricsTTL(metricsTTL, hbaseConf);
       }
-      System.exit(-1);
+      // Grab the appToflowTableName argument
+      String appToflowTableName = commandLine.getOptionValue(
+          APP_TO_FLOW_TABLE_NAME_SHORT);
+      if (StringUtils.isNotBlank(appToflowTableName)) {
+        hbaseConf.set(AppToFlowTable.TABLE_NAME_CONF_NAME, appToflowTableName);
+      }
+      // Grab the applicationTableName argument
+      String applicationTableName = commandLine.getOptionValue(
+          APP_TABLE_NAME_SHORT);
+      if (StringUtils.isNotBlank(applicationTableName)) {
+        hbaseConf.set(ApplicationTable.TABLE_NAME_CONF_NAME,
+            applicationTableName);
+      }
+
+      // create all table schemas in hbase
+      final boolean skipExisting = commandLine.hasOption(
+          SKIP_EXISTING_TABLE_OPTION_SHORT);
+      createAllSchemas(hbaseConf, skipExisting);
     } else {
-      LOG.info("Schema creation finished successfully");
+      // print usage information if -create is not specified
+      printUsage();
     }
   }
 
@@ -138,7 +130,16 @@ public final class TimelineSchemaCreator {
     Options options = new Options();
 
     // Input
-    Option o = new Option(ENTITY_TABLE_NAME_SHORT, "entityTableName", true,
+    Option o = new Option(HELP_SHORT, "help", false, "print help information");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(CREATE_TABLES_SHORT, "create", false,
+        "a mandatory option to create hbase tables");
+    o.setRequired(false);
+    options.addOption(o);
+
+    o = new Option(ENTITY_TABLE_NAME_SHORT, "entityTableName", true,
         "entity table name");
     o.setArgName("entityTableName");
     o.setRequired(false);
@@ -183,6 +184,57 @@ public final class TimelineSchemaCreator {
     return commandLine;
   }
 
+  private static void printUsage() {
+    StringBuilder usage = new StringBuilder("Command Usage: \n");
+    usage.append("TimelineSchemaCreator [-help] Display help info" +
+        " for all commands. Or\n");
+    usage.append("TimelineSchemaCreator -create [OPTIONAL_OPTIONS]" +
+        " Create hbase tables.\n\n");
+    usage.append("The Optional options for creating tables include: \n");
+    usage.append("[-entityTableName <Entity Table Name>] " +
+        "The name of the Entity table\n");
+    usage.append("[-metricsTTL <Entity Table Metrics TTL>]" +
+        " TTL for metrics in the Entity table\n");
+    usage.append("[-appToflowTableName <AppToflow Table Name>]" +
+        " The name of the AppToFlow table\n");
+    usage.append("[-applicationTableName <Application Table Name>]" +
+        " The name of the Application table\n");
+    usage.append("[-skipExistingTable] Whether to skip existing" +
+        " hbase tables\n");
+    System.out.println(usage.toString());
+  }
+
+  /**
+   * Create all table schemas and log success or exception if failed.
+   * @param hbaseConf the hbase configuration to create tables with
+   * @param skipExisting whether to skip existing hbase tables
+   */
+  private static void createAllSchemas(Configuration hbaseConf,
+      boolean skipExisting) {
+    List<Exception> exceptions = new ArrayList<>();
+    try {
+      if (skipExisting) {
+        LOG.info("Will skip existing tables and continue on htable creation "
+            + "exceptions!");
+      }
+      createAllTables(hbaseConf, skipExisting);
+      LOG.info("Successfully created HBase schema. ");
+    } catch (IOException e) {
+      LOG.error("Error in creating hbase tables: " + e.getMessage());
+      exceptions.add(e);
+    }
+
+    if (exceptions.size() > 0) {
+      LOG.warn("Schema creation finished with the following exceptions");
+      for (Exception e : exceptions) {
+        LOG.warn(e.getMessage());
+      }
+      System.exit(-1);
+    } else {
+      LOG.info("Schema creation finished successfully");
+    }
+  }
+
   @VisibleForTesting
   public static void createAllTables(Configuration hbaseConf,
       boolean skipExisting) throws IOException {
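
The refactored main() is a straightforward Apache Commons CLI dispatch: -help takes precedence, -create is required before any tables are touched, and anything else falls through to the usage text. A minimal, self-contained sketch of the same dispatch — the class name and placeholder bodies are illustrative, not the actual TimelineSchemaCreator:

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.DefaultParser;
    import org.apache.commons.cli.Options;

    public final class SchemaCreatorCli {
      public static void main(String[] args) throws Exception {
        Options options = new Options();
        options.addOption("h", "help", false, "print help information");
        options.addOption("c", "create", false,
            "a mandatory option to create tables");

        CommandLine cmd = new DefaultParser().parse(options, args);
        if (cmd.hasOption("h")) {
          printUsage();                    // -help takes precedence
        } else if (cmd.hasOption("c")) {
          System.out.println("creating schemas...");  // placeholder body
        } else {
          printUsage();                    // nothing actionable was asked for
        }
      }

      private static void printUsage() {
        System.out.println("Usage: SchemaCreatorCli [-help] | -create [OPTIONS]");
      }
    }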

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d48f2f68/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
index bcbe0b7..04822c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServiceV2.md
@@ -183,11 +183,12 @@ to a dynamically (table coprocessor).
 
 Finally, run the schema creator tool to create the necessary tables:
 
-    bin/hbase org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator
+    bin/hadoop org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator -create
 
 The `TimelineSchemaCreator` tool supports a few options that may come handy especially when you
 are testing. For example, you can use `-skipExistingTable` (`-s` for short) to skip existing tables
-and continue to create other tables rather than failing the schema creation.
+and continue to create other tables rather than failing the schema creation. When no option or '-help'
+('-h' for short) is provided, the command usage is printed.
 
 #### Enabling Timeline Service v.2
 Following are the basic configurations to start Timeline service v.2:




[35/50] [abbrv] hadoop git commit: HDFS-11914. Add more diagnosis info for fsimage transfer failure. Contributed by Yongjun Zhang.

Posted by xy...@apache.org.
HDFS-11914. Add more diagnosis info for fsimage transfer failure. Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23b15b22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23b15b22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23b15b22

Branch: refs/heads/HDFS-7240
Commit: 23b15b223a00578f71b44280afb3cbaa3bce9cbe
Parents: 2777b1d
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Mon Jun 5 16:31:03 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:51 2017 -0700

----------------------------------------------------------------------
 .../apache/hadoop/hdfs/server/common/Util.java  |  6 ++---
 .../hdfs/server/namenode/TransferFsImage.java   | 23 +++++++++++++++++---
 2 files changed, 23 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b15b22/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
index e9ceeb0..5dee16a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
@@ -225,6 +225,7 @@ public final class Util {
       stream = new DigestInputStream(stream, digester);
     }
     boolean finishedReceiving = false;
+    int num = 1;
 
     List<FileOutputStream> outputStreams = Lists.newArrayList();
 
@@ -256,7 +257,6 @@ public final class Util {
         }
       }
 
-      int num = 1;
       byte[] buf = new byte[IO_FILE_BUFFER_SIZE];
       while (num > 0) {
         num = stream.read(buf);
@@ -305,8 +305,8 @@ public final class Util {
         // exception that makes it look like a server-side problem!
         deleteTmpFiles(localPaths);
         throw new IOException("File " + url + " received length " + received +
-            " is not of the advertised size " +
-            advertisedSize);
+            " is not of the advertised size " + advertisedSize +
+            ". Fsimage name: " + fsImageName + " lastReceived: " + num);
       }
     }
     xferStats.insert(0, String.format("Combined time for file download and" +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b15b22/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index 7316414..771a43e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -339,6 +339,11 @@ public class TransferFsImage {
       FileInputStream infile, DataTransferThrottler throttler,
       Canceler canceler) throws IOException {
     byte buf[] = new byte[IO_FILE_BUFFER_SIZE];
+    long total = 0;
+    int num = 1;
+    IOException ioe = null;
+    String reportStr = "Sending fileName: " + localfile.getAbsolutePath()
+      + ", fileSize: " + localfile.length() + ".";
     try {
       CheckpointFaultInjector.getInstance()
           .aboutToSendFile(localfile);
@@ -352,7 +357,6 @@ public class TransferFsImage {
           // and the rest of the image will be sent over the wire
           infile.read(buf);
       }
-      int num = 1;
       while (num > 0) {
         if (canceler != null && canceler.isCancelled()) {
           throw new SaveNamespaceCancelledException(
@@ -368,16 +372,29 @@ public class TransferFsImage {
           LOG.warn("SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!");
           buf[0]++;
         }
-        
+
         out.write(buf, 0, num);
+        total += num;
         if (throttler != null) {
           throttler.throttle(num, canceler);
         }
       }
     } catch (EofException e) {
-      LOG.info("Connection closed by client");
+      reportStr += " Connection closed by client.";
+      ioe = e;
       out = null; // so we don't close in the finally
+    } catch (IOException ie) {
+      ioe = ie;
+      throw ie;
     } finally {
+      reportStr += " Sent total: " + total +
+          " bytes. Size of last segment intended to send: " + num
+          + " bytes.";
+      if (ioe != null) {
+        LOG.info(reportStr, ioe);
+      } else {
+        LOG.info(reportStr);
+      }
       if (out != null) {
         out.close();
       }
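
The diagnostic pattern added here is worth noting: accumulate a human-readable report string as the transfer progresses, remember any IOException rather than only rethrowing it, and log the report (with the exception attached, if there was one) in the finally block. A compact sketch of that pattern with illustrative names:

    import java.io.IOException;
    import java.io.OutputStream;

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class TransferReportExample {
      private static final Logger LOG =
          LoggerFactory.getLogger(TransferReportExample.class);

      static void send(OutputStream out, byte[] data) throws IOException {
        long total = 0;
        IOException ioe = null;
        String report = "Sending fileSize: " + data.length + ".";
        try {
          out.write(data);                 // stand-in for the chunked loop
          total += data.length;
        } catch (IOException e) {
          ioe = e;                         // remember the failure for the report
          throw e;
        } finally {
          report += " Sent total: " + total + " bytes.";
          if (ioe != null) {
            LOG.info(report, ioe);         // report plus the exception
          } else {
            LOG.info(report);
          }
        }
      }
    }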




[31/50] [abbrv] hadoop git commit: YARN-6683. Invalid event: COLLECTOR_UPDATE at KILLED. Contributed by Rohith Sharma K S

Posted by xy...@apache.org.
YARN-6683. Invalid event: COLLECTOR_UPDATE at KILLED. Contributed by Rohith Sharma K S


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ad147ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ad147ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ad147ef

Branch: refs/heads/HDFS-7240
Commit: 2ad147ef2974dde5b08954f3bdf7218020ca23cf
Parents: ce63488
Author: Jian He <ji...@apache.org>
Authored: Mon Jun 5 13:15:08 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:51 2017 -0700

----------------------------------------------------------------------
 .../resourcemanager/ResourceTrackerService.java |  6 +--
 .../rmapp/RMAppCollectorUpdateEvent.java        | 40 --------------------
 .../server/resourcemanager/rmapp/RMAppImpl.java | 36 +-----------------
 3 files changed, 2 insertions(+), 80 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ad147ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
index 40bd610..aa7f524 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java
@@ -69,7 +69,6 @@ import org.apache.hadoop.yarn.server.api.records.NodeStatus;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NodeLabelsUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.DynamicResourceConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppCollectorUpdateEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
@@ -651,10 +650,7 @@ public class ResourceTrackerService extends AbstractService implements
             String previousCollectorAddr = rmApp.getCollectorAddr();
             if (previousCollectorAddr == null
                 || !previousCollectorAddr.equals(collectorAddr)) {
-              // sending collector update event.
-              RMAppCollectorUpdateEvent event =
-                  new RMAppCollectorUpdateEvent(appId, collectorAddr);
-              rmContext.getDispatcher().getEventHandler().handle(event);
+              rmApp.setCollectorAddr(collectorAddr);
             }
           }
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ad147ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
deleted file mode 100644
index 9642911..0000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppCollectorUpdateEvent.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.server.resourcemanager.rmapp;
-
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-
-/**
- * Event used for updating collector address in RMApp on node heartbeat.
- */
-public class RMAppCollectorUpdateEvent extends RMAppEvent {
-
-  private final String appCollectorAddr;
-
-  public RMAppCollectorUpdateEvent(ApplicationId appId,
-      String appCollectorAddr) {
-    super(appId, RMAppEventType.COLLECTOR_UPDATE);
-    this.appCollectorAddr = appCollectorAddr;
-  }
-
-  public String getAppCollectorAddr(){
-    return this.appCollectorAddr;
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ad147ef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
index 78df913..dda9474 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java
@@ -165,7 +165,7 @@ public class RMAppImpl implements RMApp, Recoverable {
   private long storedFinishTime = 0;
   private int firstAttemptIdInStateStore = 1;
   private int nextAttemptId = 1;
-  private String collectorAddr;
+  private volatile String collectorAddr;
   // This field isn't protected by readlock now.
   private volatile RMAppAttempt currentAttempt;
   private String queue;
@@ -217,8 +217,6 @@ public class RMAppImpl implements RMApp, Recoverable {
      // Transitions from NEW state
     .addTransition(RMAppState.NEW, RMAppState.NEW,
         RMAppEventType.NODE_UPDATE, new RMAppNodeUpdateTransition())
-    .addTransition(RMAppState.NEW, RMAppState.NEW,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     .addTransition(RMAppState.NEW, RMAppState.NEW_SAVING,
         RMAppEventType.START, new RMAppNewlySavingTransition())
     .addTransition(RMAppState.NEW, EnumSet.of(RMAppState.SUBMITTED,
@@ -235,8 +233,6 @@ public class RMAppImpl implements RMApp, Recoverable {
     // Transitions from NEW_SAVING state
     .addTransition(RMAppState.NEW_SAVING, RMAppState.NEW_SAVING,
         RMAppEventType.NODE_UPDATE, new RMAppNodeUpdateTransition())
-    .addTransition(RMAppState.NEW_SAVING, RMAppState.NEW_SAVING,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     .addTransition(RMAppState.NEW_SAVING, RMAppState.SUBMITTED,
         RMAppEventType.APP_NEW_SAVED, new AddApplicationToSchedulerTransition())
     .addTransition(RMAppState.NEW_SAVING, RMAppState.FINAL_SAVING,
@@ -251,8 +247,6 @@ public class RMAppImpl implements RMApp, Recoverable {
      // Transitions from SUBMITTED state
     .addTransition(RMAppState.SUBMITTED, RMAppState.SUBMITTED,
         RMAppEventType.NODE_UPDATE, new RMAppNodeUpdateTransition())
-    .addTransition(RMAppState.SUBMITTED, RMAppState.SUBMITTED,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     .addTransition(RMAppState.SUBMITTED, RMAppState.FINAL_SAVING,
         RMAppEventType.APP_REJECTED,
         new FinalSavingTransition(
@@ -267,8 +261,6 @@ public class RMAppImpl implements RMApp, Recoverable {
      // Transitions from ACCEPTED state
     .addTransition(RMAppState.ACCEPTED, RMAppState.ACCEPTED,
         RMAppEventType.NODE_UPDATE, new RMAppNodeUpdateTransition())
-    .addTransition(RMAppState.ACCEPTED, RMAppState.ACCEPTED,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     .addTransition(RMAppState.ACCEPTED, RMAppState.RUNNING,
         RMAppEventType.ATTEMPT_REGISTERED, new RMAppStateUpdateTransition(
             YarnApplicationState.RUNNING))
@@ -294,8 +286,6 @@ public class RMAppImpl implements RMApp, Recoverable {
      // Transitions from RUNNING state
     .addTransition(RMAppState.RUNNING, RMAppState.RUNNING,
         RMAppEventType.NODE_UPDATE, new RMAppNodeUpdateTransition())
-    .addTransition(RMAppState.RUNNING, RMAppState.RUNNING,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     .addTransition(RMAppState.RUNNING, RMAppState.FINAL_SAVING,
         RMAppEventType.ATTEMPT_UNREGISTERED,
         new FinalSavingTransition(
@@ -325,8 +315,6 @@ public class RMAppImpl implements RMApp, Recoverable {
     .addTransition(RMAppState.FINAL_SAVING, RMAppState.FINAL_SAVING, 
         RMAppEventType.APP_RUNNING_ON_NODE,
         new AppRunningOnNodeTransition())
-    .addTransition(RMAppState.FINAL_SAVING, RMAppState.FINAL_SAVING,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     // ignorable transitions
     .addTransition(RMAppState.FINAL_SAVING, RMAppState.FINAL_SAVING,
         EnumSet.of(RMAppEventType.NODE_UPDATE, RMAppEventType.KILL,
@@ -338,8 +326,6 @@ public class RMAppImpl implements RMApp, Recoverable {
     .addTransition(RMAppState.FINISHING, RMAppState.FINISHING, 
         RMAppEventType.APP_RUNNING_ON_NODE,
         new AppRunningOnNodeTransition())
-    .addTransition(RMAppState.FINISHING, RMAppState.FINISHING,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     // ignorable transitions
     .addTransition(RMAppState.FINISHING, RMAppState.FINISHING,
       EnumSet.of(RMAppEventType.NODE_UPDATE,
@@ -351,8 +337,6 @@ public class RMAppImpl implements RMApp, Recoverable {
     .addTransition(RMAppState.KILLING, RMAppState.KILLING, 
         RMAppEventType.APP_RUNNING_ON_NODE,
         new AppRunningOnNodeTransition())
-    .addTransition(RMAppState.KILLING, RMAppState.KILLING,
-        RMAppEventType.COLLECTOR_UPDATE, new RMAppCollectorUpdateTransition())
     .addTransition(RMAppState.KILLING, RMAppState.FINAL_SAVING,
         RMAppEventType.ATTEMPT_KILLED,
         new FinalSavingTransition(
@@ -1020,24 +1004,6 @@ public class RMAppImpl implements RMApp, Recoverable {
     };
   }
 
-  private static final class RMAppCollectorUpdateTransition
-      extends RMAppTransition {
-
-    public void transition(RMAppImpl app, RMAppEvent event) {
-      if (YarnConfiguration.timelineServiceV2Enabled(app.conf)) {
-        LOG.info("Updating collector info for app: " + app.getApplicationId());
-
-        RMAppCollectorUpdateEvent appCollectorUpdateEvent =
-            (RMAppCollectorUpdateEvent) event;
-        // Update collector address
-        app.setCollectorAddr(appCollectorUpdateEvent.getAppCollectorAddr());
-
-        // TODO persistent to RMStateStore for recover
-        // Save to RMStateStore
-      }
-    };
-  }
-
   private static final class RMAppNodeUpdateTransition extends RMAppTransition {
     public void transition(RMAppImpl app, RMAppEvent event) {
       RMAppNodeUpdateEvent nodeUpdateEvent = (RMAppNodeUpdateEvent) event;
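
The fix sidesteps the state machine entirely: rather than dispatching a COLLECTOR_UPDATE event that every state (including KILLED) would need a declared transition for, the collector address becomes a volatile field written directly from the heartbeat path. A sketch of that trade-off, with App and HeartbeatHandler as hypothetical stand-ins for RMAppImpl and ResourceTrackerService:

    class App {
      // volatile: written from the heartbeat thread, read elsewhere, and
      // the write is legal in any lifecycle state (including KILLED).
      private volatile String collectorAddr;

      String getCollectorAddr() { return collectorAddr; }

      void setCollectorAddr(String addr) { this.collectorAddr = addr; }
    }

    class HeartbeatHandler {
      void onNodeHeartbeat(App app, String collectorAddr) {
        String previous = app.getCollectorAddr();
        if (previous == null || !previous.equals(collectorAddr)) {
          // Direct set: no event dispatch, so no missing-transition failure.
          app.setCollectorAddr(collectorAddr);
        }
      }
    }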




[48/50] [abbrv] hadoop git commit: HDFS-11861. ipc.Client.Connection#sendRpcRequest should log request name. Contributed by John Zhuge.

Posted by xy...@apache.org.
HDFS-11861. ipc.Client.Connection#sendRpcRequest should log request name. Contributed by John Zhuge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/25e6378f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/25e6378f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/25e6378f

Branch: refs/heads/HDFS-7240
Commit: 25e6378f14a9f8f04b8a65afa4ebb902d8405464
Parents: d2f0ddc
Author: John Zhuge <jz...@apache.org>
Authored: Sun May 21 00:18:35 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:53 2017 -0700

----------------------------------------------------------------------
 .../hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/25e6378f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index c0a5be9..6b21c75 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -1109,7 +1109,8 @@ public class Client implements AutoCloseable {
                   return;
                 }
                 if (LOG.isDebugEnabled()) {
-                  LOG.debug(getName() + " sending #" + call.id);
+                  LOG.debug(getName() + " sending #" + call.id
+                      + " " + call.rpcRequest);
                 }
                 // RpcRequestHeader + RpcRequest
                 ipcStreams.sendRequest(buf.toByteArray());
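
The changed line follows the guarded-debug-logging idiom: the isDebugEnabled() check keeps the string concatenation, including rpcRequest's potentially expensive toString(), off the hot path when debug logging is disabled. A minimal sketch with illustrative names:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class GuardedDebugExample {
      private static final Log LOG = LogFactory.getLog(GuardedDebugExample.class);

      void send(int callId, Object rpcRequest) {
        // The guard avoids building the message (and calling toString() on
        // the request) unless debug logging is actually enabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("sending #" + callId + " " + rpcRequest);
        }
        // ... actual request transmission elided ...
      }
    }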




[05/50] [abbrv] hadoop git commit: YARN-6477. Dispatcher no longer needs the raw types suppression. (Maya Wexler via Haibo Chen)

Posted by xy...@apache.org.
YARN-6477. Dispatcher no longer needs the raw types suppression. (Maya Wexler via Haibo Chen)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5511c4e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5511c4e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5511c4e5

Branch: refs/heads/HDFS-7240
Commit: 5511c4e575738e42bdd07b3fe14da6520ddbee06
Parents: 611d452
Author: Haibo Chen <ha...@apache.org>
Authored: Tue May 30 16:58:15 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:48 2017 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java      | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5511c4e5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
index 0858a0b..ce5a513 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/event/Dispatcher.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.classification.InterfaceStability.Evolving;
  * event handlers based on event types.
  * 
  */
-@SuppressWarnings("rawtypes")
 @Public
 @Evolving
 public interface Dispatcher {




[12/50] [abbrv] hadoop git commit: HDFS-11905. Fix license header inconsistency in hdfs. Contributed by Yeliang Cang.

Posted by xy...@apache.org.
HDFS-11905. Fix license header inconsistency in hdfs. Contributed by Yeliang Cang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/93f2aaf8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/93f2aaf8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/93f2aaf8

Branch: refs/heads/HDFS-7240
Commit: 93f2aaf82687e99f8fb191f42a1d7562519ec55c
Parents: 173e391
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Fri Jun 2 00:28:33 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:49 2017 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java   | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/93f2aaf8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index b0504f0..9f4df70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -1,6 +1,4 @@
-/*
- * UpgradeUtilities.java
- *
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information




[24/50] [abbrv] hadoop git commit: HDFS-11899. ASF License warnings generated intermittently in trunk. Contributed by Yiqun Lin.

Posted by xy...@apache.org.
HDFS-11899. ASF License warnings generated intermittently in trunk. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe76c599
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe76c599
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe76c599

Branch: refs/heads/HDFS-7240
Commit: fe76c59954c1dee8d8764fd761c8c2d399bc9b6e
Parents: 882891a
Author: Yiqun Lin <yq...@apache.org>
Authored: Sat Jun 3 22:07:24 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:50 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/server/balancer/TestBalancer.java | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe76c599/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 167997e..dd28914 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -1022,14 +1022,14 @@ public class TestBalancer {
     if (!p.getExcludedNodes().isEmpty()) {
       args.add("-exclude");
       if (useFile) {
-        excludeHostsFile = new File ("exclude-hosts-file");
+        excludeHostsFile = GenericTestUtils.getTestDir("exclude-hosts-file");
         PrintWriter pw = new PrintWriter(excludeHostsFile);
         for (String host : p.getExcludedNodes()) {
           pw.write( host + "\n");
         }
         pw.close();
         args.add("-f");
-        args.add("exclude-hosts-file");
+        args.add(excludeHostsFile.getAbsolutePath());
       } else {
         args.add(StringUtils.join(p.getExcludedNodes(), ','));
       }
@@ -1039,14 +1039,14 @@ public class TestBalancer {
     if (!p.getIncludedNodes().isEmpty()) {
       args.add("-include");
       if (useFile) {
-        includeHostsFile = new File ("include-hosts-file");
+        includeHostsFile = GenericTestUtils.getTestDir("include-hosts-file");
         PrintWriter pw = new PrintWriter(includeHostsFile);
         for (String host : p.getIncludedNodes()) {
           pw.write( host + "\n");
         }
         pw.close();
         args.add("-f");
-        args.add("include-hosts-file");
+        args.add(includeHostsFile.getAbsolutePath());
       } else {
         args.add(StringUtils.join(p.getIncludedNodes(), ','));
       }
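
The warnings arose because the test wrote scratch files into the working directory, where the RAT license check picks them up; GenericTestUtils.getTestDir() relocates them under the build's managed test-data directory instead. The same idea with stock JUnit 4, as an illustrative sketch rather than the Hadoop utility:

    import java.io.File;
    import java.io.PrintWriter;

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.TemporaryFolder;

    public class ScratchFileTest {
      @Rule
      public TemporaryFolder tmp = new TemporaryFolder();

      @Test
      public void writesHostsFileOutsideSourceTree() throws Exception {
        // Created under a managed temp dir, cleaned up automatically.
        File hostsFile = tmp.newFile("exclude-hosts-file");
        try (PrintWriter pw = new PrintWriter(hostsFile)) {
          pw.write("host1\nhost2\n");
        }
        // Hand the absolute path to the code under test, as the patch does.
        System.out.println(hostsFile.getAbsolutePath());
      }
    }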




[29/50] [abbrv] hadoop git commit: HADOOP-14472. Azure: TestReadAndSeekPageBlobAfterWrite fails intermittently. Contributed by Mingliang Liu

Posted by xy...@apache.org.
HADOOP-14472. Azure: TestReadAndSeekPageBlobAfterWrite fails intermittently. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/756ff412
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/756ff412
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/756ff412

Branch: refs/heads/HDFS-7240
Commit: 756ff412afe48ce811c2e967e044d592ae43ef9c
Parents: cc8bcf1
Author: Mingliang Liu <li...@apache.org>
Authored: Tue Jun 6 11:06:49 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:51 2017 -0700

----------------------------------------------------------------------
 .../fs/azure/TestReadAndSeekPageBlobAfterWrite.java       | 10 ----------
 1 file changed, 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/756ff412/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
index e6219df..41b8386 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestReadAndSeekPageBlobAfterWrite.java
@@ -275,18 +275,8 @@ public class TestReadAndSeekPageBlobAfterWrite {
         writesSinceHFlush++;
         output.flush();
         if ((i % SYNC_INTERVAL) == 0) {
-          long start = Time.monotonicNow();
           output.hflush();
           writesSinceHFlush = 0;
-          long end = Time.monotonicNow();
-
-          // A true, round-trip synchronous flush to Azure must take
-          // a significant amount of time or we are not syncing to storage correctly.
-          LOG.debug("hflush duration = " + (end - start) + " msec.");
-          assertTrue(String.format(
-            "hflush duration of %d, less than minimum expected of %d",
-            end - start, MINIMUM_EXPECTED_TIME),
-            end - start >= MINIMUM_EXPECTED_TIME);
         }
       }
     } finally {




[03/50] [abbrv] hadoop git commit: YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.

Posted by xy...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
index 5f9b883..c1df562 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.yarn.server.nodemanager;
 
 import static org.mockito.Matchers.argThat;
-import static org.mockito.Matchers.isNull;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
@@ -28,6 +27,7 @@ import java.io.File;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -58,19 +58,17 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.NMTokenIdentifier;
-import org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.ArgumentMatcher;
 
 public class TestNodeManagerReboot {
 
@@ -195,19 +193,18 @@ public class TestNodeManagerReboot {
     // restart the NodeManager
     restartNM(MAX_TRIES);
     checkNumOfLocalDirs();
-    
-    verify(delService, times(1)).delete(
-      (String) isNull(),
-      argThat(new PathInclude(ResourceLocalizationService.NM_PRIVATE_DIR
-          + "_DEL_")));
-    verify(delService, times(1)).delete((String) isNull(),
-      argThat(new PathInclude(ContainerLocalizer.FILECACHE + "_DEL_")));
-    verify(delService, times(1)).scheduleFileDeletionTask(
-      argThat(new FileDeletionInclude(user, null,
-        new String[] { destinationFile })));
-    verify(delService, times(1)).scheduleFileDeletionTask(
-      argThat(new FileDeletionInclude(null, ContainerLocalizer.USERCACHE
-          + "_DEL_", new String[] {})));
+
+    verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+        delService, null,
+        new Path(ResourceLocalizationService.NM_PRIVATE_DIR + "_DEL_"), null)));
+    verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+        delService, null, new Path(ContainerLocalizer.FILECACHE + "_DEL_"),
+        null)));
+    verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+        delService, user, null, Arrays.asList(new Path(destinationFile)))));
+    verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+        delService, null, new Path(ContainerLocalizer.USERCACHE + "_DEL_"),
+        new ArrayList<Path>())));
     
     // restart the NodeManager again
     // this time usercache directory should be empty
@@ -329,72 +326,4 @@ public class TestNodeManagerReboot {
       return conf;
     }
   }
-
-  class PathInclude extends ArgumentMatcher<Path> {
-
-    final String part;
-
-    PathInclude(String part) {
-      this.part = part;
-    }
-
-    @Override
-    public boolean matches(Object o) {
-      return ((Path) o).getName().indexOf(part) != -1;
-    }
-  }
-  
-  class FileDeletionInclude extends ArgumentMatcher<FileDeletionTask> {
-    final String user;
-    final String subDirIncludes;
-    final String[] baseDirIncludes;
-    
-    public FileDeletionInclude(String user, String subDirIncludes,
-        String [] baseDirIncludes) {
-      this.user = user;
-      this.subDirIncludes = subDirIncludes;
-      this.baseDirIncludes = baseDirIncludes;
-    }
-    
-    @Override
-    public boolean matches(Object o) {
-      FileDeletionTask fd = (FileDeletionTask)o;
-      if (fd.getUser() == null && user != null) {
-        return false;
-      } else if (fd.getUser() != null && user == null) {
-        return false;
-      } else if (fd.getUser() != null && user != null) {
-        return fd.getUser().equals(user);
-      }
-      if (!comparePaths(fd.getSubDir(), subDirIncludes)) {
-        return false;
-      }
-      if (baseDirIncludes == null && fd.getBaseDirs() != null) {
-        return false;
-      } else if (baseDirIncludes != null && fd.getBaseDirs() == null ) {
-        return false;
-      } else if (baseDirIncludes != null && fd.getBaseDirs() != null) {
-        if (baseDirIncludes.length != fd.getBaseDirs().size()) {
-          return false;
-        }
-        for (int i =0 ; i < baseDirIncludes.length; i++) {
-          if (!comparePaths(fd.getBaseDirs().get(i), baseDirIncludes[i])) {
-            return false;
-          }
-        }
-      }
-      return true;
-    }
-    
-    public boolean comparePaths(Path p1, String p2) {
-      if (p1 == null && p2 != null){
-        return false;
-      } else if (p1 != null && p2 == null) {
-        return false;
-      } else if (p1 != null && p2 != null ){
-        return p1.toUri().getPath().contains(p2.toString());
-      }
-      return true;
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/TestNMProtoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/TestNMProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/TestNMProtoUtils.java
new file mode 100644
index 0000000..69e01bc
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/TestNMProtoUtils.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery.DeletionTaskRecoveryInfo;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTaskType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.List;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+
+/**
+ * Test conversion to {@link DeletionTask}.
+ */
+public class TestNMProtoUtils {
+
+  @Test
+  public void testConvertProtoToDeletionTask() throws Exception {
+    DeletionService deletionService = mock(DeletionService.class);
+    DeletionServiceDeleteTaskProto.Builder protoBuilder =
+        DeletionServiceDeleteTaskProto.newBuilder();
+    int id = 0;
+    protoBuilder.setId(id);
+    DeletionServiceDeleteTaskProto proto = protoBuilder.build();
+    DeletionTask deletionTask =
+        NMProtoUtils.convertProtoToDeletionTask(proto, deletionService);
+    assertEquals(DeletionTaskType.FILE, deletionTask.getDeletionTaskType());
+    assertEquals(id, deletionTask.getTaskId());
+  }
+
+  @Test
+  public void testConvertProtoToFileDeletionTask() throws Exception {
+    DeletionService deletionService = mock(DeletionService.class);
+    int id = 0;
+    String user = "user";
+    Path subdir = new Path("subdir");
+    Path basedir = new Path("basedir");
+    DeletionServiceDeleteTaskProto.Builder protoBuilder =
+        DeletionServiceDeleteTaskProto.newBuilder();
+    protoBuilder
+        .setId(id)
+        .setUser("user")
+        .setSubdir(subdir.getName())
+        .addBasedirs(basedir.getName());
+    DeletionServiceDeleteTaskProto proto = protoBuilder.build();
+    DeletionTask deletionTask =
+        NMProtoUtils.convertProtoToFileDeletionTask(proto, deletionService, id);
+    assertEquals(DeletionTaskType.FILE.name(),
+        deletionTask.getDeletionTaskType().name());
+    assertEquals(id, deletionTask.getTaskId());
+    assertEquals(subdir, ((FileDeletionTask) deletionTask).getSubDir());
+    assertEquals(basedir,
+        ((FileDeletionTask) deletionTask).getBaseDirs().get(0));
+  }
+
+  @Test
+  public void testConvertProtoToDeletionTaskRecoveryInfo() throws Exception {
+    long delTime = System.currentTimeMillis();
+    List<Integer> successorTaskIds = Arrays.asList(1);
+    DeletionTask deletionTask = mock(DeletionTask.class);
+    DeletionTaskRecoveryInfo info =
+        new DeletionTaskRecoveryInfo(deletionTask, successorTaskIds, delTime);
+    assertEquals(deletionTask, info.getTask());
+    assertEquals(successorTaskIds, info.getSuccessorTaskIds());
+    assertEquals(delTime, info.getDeletionTimestamp());
+  }
+
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
index 2991c0c..7980a80 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
@@ -260,10 +261,10 @@ public abstract class BaseContainerManagerTest {
   protected DeletionService createDeletionService() {
     return new DeletionService(exec) {
       @Override
-      public void delete(String user, Path subDir, Path... baseDirs) {
+      public void delete(DeletionTask deletionTask) {
         // Don't do any deletions.
-        LOG.info("Psuedo delete: user - " + user + ", subDir - " + subDir
-            + ", baseDirs - " + Arrays.asList(baseDirs));
+        LOG.info("Psuedo delete: user - " + user
+            + ", type - " + deletionTask.getDeletionTaskType());
       };
     };
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionMatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionMatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionMatcher.java
new file mode 100644
index 0000000..faad456
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionMatcher.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.mockito.ArgumentMatcher;
+
+import java.util.List;
+
+/**
+ * ArgumentMatcher to check the arguments of the {@link FileDeletionTask}.
+ */
+public class FileDeletionMatcher extends ArgumentMatcher<FileDeletionTask> {
+
+  private final DeletionService delService;
+  private final String user;
+  private final Path subDirIncludes;
+  private final List<Path> baseDirIncludes;
+
+  public FileDeletionMatcher(DeletionService delService, String user,
+      Path subDirIncludes, List<Path> baseDirIncludes) {
+    this.delService = delService;
+    this.user = user;
+    this.subDirIncludes = subDirIncludes;
+    this.baseDirIncludes = baseDirIncludes;
+  }
+
+  @Override
+  public boolean matches(Object o) {
+    FileDeletionTask fd = (FileDeletionTask) o;
+    if (fd.getUser() == null && user != null) {
+      return false;
+    } else if (fd.getUser() != null && user == null) {
+      return false;
+    } else if (fd.getUser() != null && user != null) {
+      return fd.getUser().equals(user);
+    }
+    if (!comparePaths(fd.getSubDir(), subDirIncludes.getName())) {
+      return false;
+    }
+    if (baseDirIncludes == null && fd.getBaseDirs() != null) {
+      return false;
+    } else if (baseDirIncludes != null && fd.getBaseDirs() == null) {
+      return false;
+    } else if (baseDirIncludes != null && fd.getBaseDirs() != null) {
+      if (baseDirIncludes.size() != fd.getBaseDirs().size()) {
+        return false;
+      }
+      for (int i = 0; i < baseDirIncludes.size(); i++) {
+        if (!comparePaths(fd.getBaseDirs().get(i),
+            baseDirIncludes.get(i).getName())) {
+          return false;
+        }
+      }
+    }
+    return true;
+  }
+
+  public boolean comparePaths(Path p1, String p2) {
+    if (p1 == null && p2 != null) {
+      return false;
+    } else if (p1 != null && p2 == null) {
+      return false;
+    } else if (p1 != null && p2 != null) {
+      return p1.toUri().getPath().contains(p2.toString());
+    }
+    return true;
+  }
+}

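One caveat on the matcher above: matches() calls subDirIncludes.getName() without a null guard. The call sites in this patch that pass a null sub-directory all supply a non-null user, so the early return in the user comparison is what prevents a NullPointerException. A null-safe variant of the sub-directory check is sketched below as an illustration; it is not the committed code:

    import org.apache.hadoop.fs.Path;

    // Illustrative null-safe variant of the matcher's sub-directory check.
    final class NullSafePathCheck {
      static boolean subDirMatches(Path actual, Path expected) {
        if (expected == null) {
          return actual == null;  // both absent counts as a match
        }
        return actual != null
            && actual.toUri().getPath().contains(expected.getName());
      }

      public static void main(String[] args) {
        System.out.println(subDirMatches(
            new Path("usercache_DEL_"), new Path("usercache_DEL_")));  // true
        System.out.println(subDirMatches(new Path("filecache"), null)); // false
      }
    }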
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/TestFileDeletionTask.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/TestFileDeletionTask.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/TestFileDeletionTask.java
new file mode 100644
index 0000000..fd2e4fb
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/TestFileDeletionTask.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.mock;
+
+/**
+ * Test the attributes of the {@link FileDeletionTask} class.
+ */
+public class TestFileDeletionTask {
+
+  private static final int ID = 0;
+  private static final String USER = "user";
+  private static final Path SUBDIR = new Path("subdir");
+  private static final Path BASEDIR = new Path("basedir");
+
+  private List<Path> baseDirs = new ArrayList<>();
+  private DeletionService deletionService;
+  private FileDeletionTask deletionTask;
+
+  @Before
+  public void setUp() throws Exception {
+    deletionService = mock(DeletionService.class);
+    baseDirs.add(BASEDIR);
+    deletionTask = new FileDeletionTask(ID, deletionService, USER, SUBDIR,
+        baseDirs);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    baseDirs.clear();
+  }
+
+  @Test
+  public void testGetUser() throws Exception {
+    assertEquals(USER, deletionTask.getUser());
+  }
+
+  @Test
+  public void testGetSubDir() throws Exception {
+    assertEquals(SUBDIR, deletionTask.getSubDir());
+  }
+
+  @Test
+  public void testGetBaseDirs() throws Exception {
+    assertEquals(1, deletionTask.getBaseDirs().size());
+    assertEquals(baseDirs, deletionTask.getBaseDirs());
+  }
+
+  @Test
+  public void testConvertDeletionTaskToProto() throws Exception {
+    DeletionServiceDeleteTaskProto proto =
+        deletionTask.convertDeletionTaskToProto();
+    assertEquals(ID, proto.getId());
+    assertEquals(USER, proto.getUser());
+    assertEquals(SUBDIR, new Path(proto.getSubdir()));
+    assertEquals(BASEDIR, new Path(proto.getBasedirs(0)));
+    assertEquals(1, proto.getBasedirsCount());
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
index 2874acb..6cab593 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
 
 import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.argThat;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Matchers.isA;
 import static org.mockito.Mockito.mock;
@@ -54,6 +55,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceFailedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceLocalizedEvent;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizerResourceRequestEvent;
@@ -823,7 +825,8 @@ public class TestLocalResourcesTrackerImpl {
       Path rPath = tracker.getPathForLocalization(req1, base_path,
           delService);
       Assert.assertFalse(lfs.util().exists(rPath));
-      verify(delService, times(1)).delete(eq(user), eq(conflictPath));
+      verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+          delService, user, conflictPath, null)));
     } finally {
       lfs.delete(base_path, true);
       if (dispatcher != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
index 89cbeb4..d863c6a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
@@ -31,7 +31,6 @@ import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.argThat;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Matchers.isA;
-import static org.mockito.Matchers.isNull;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -68,6 +67,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
 import org.apache.hadoop.yarn.server.nodemanager.executor.LocalizerStartContext;
 import org.junit.Assert;
 import org.apache.commons.io.FileUtils;
@@ -1066,7 +1066,8 @@ public class TestResourceLocalizationService {
       verify(containerBus, times(3)).handle(argThat(matchesContainerLoc));
         
       // Verify deletion of localization token.
-      verify(delService).delete((String)isNull(), eq(localizationTokenPath));
+      verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+          delService, null, localizationTokenPath, null)));
     } finally {
       spyService.stop();
       dispatcher.stop();
@@ -1340,8 +1341,8 @@ public class TestResourceLocalizationService {
       Thread.sleep(50);
     }
     // Verify if downloading resources were submitted for deletion.
-    verify(delService).delete(eq(user), (Path) eq(null),
-        argThat(new DownloadingPathsMatcher(paths)));
+    verify(delService, times(2)).delete(argThat(new FileDeletionMatcher(
+        delService, user, null, new ArrayList<>(paths))));
 
     LocalResourcesTracker tracker = spyService.getLocalResourcesTracker(
         LocalResourceVisibility.PRIVATE, "user0", appId);
@@ -2753,15 +2754,19 @@ public class TestResourceLocalizationService {
       for (int i = 0; i < containerLocalDirs.size(); ++i) {
         if (i == 2) {
           try {
-            verify(delService).delete(user, containerLocalDirs.get(i));
-            verify(delService).delete(null, nmLocalContainerDirs.get(i));
+            verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+                delService, user, containerLocalDirs.get(i), null)));
+            verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+                delService, null, nmLocalContainerDirs.get(i), null)));
             Assert.fail("deletion attempts for invalid dirs");
           } catch (Throwable e) {
             continue;
           }
         } else {
-          verify(delService).delete(user, containerLocalDirs.get(i));
-          verify(delService).delete(null, nmLocalContainerDirs.get(i));
+          verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+              delService, user, containerLocalDirs.get(i), null)));
+          verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+              delService, null, nmLocalContainerDirs.get(i), null)));
         }
       }
 
@@ -2802,15 +2807,19 @@ public class TestResourceLocalizationService {
       for (int i = 0; i < containerLocalDirs.size(); ++i) {
         if (i == 3) {
           try {
-            verify(delService).delete(user, containerLocalDirs.get(i));
-            verify(delService).delete(null, nmLocalContainerDirs.get(i));
+            verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+                delService, user, containerLocalDirs.get(i), null)));
+            verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+                delService, null, nmLocalContainerDirs.get(i), null)));
             Assert.fail("deletion attempts for invalid dirs");
           } catch (Throwable e) {
             continue;
           }
         } else {
-          verify(delService).delete(user, appLocalDirs.get(i));
-          verify(delService).delete(null, nmLocalAppDirs.get(i));
+          verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+              delService, user, containerLocalDirs.get(i), null)));
+          verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+              delService, null, nmLocalContainerDirs.get(i), null)));
         }
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
index 097146b..b4bd9d7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestAppLogAggregatorImpl.java
@@ -42,17 +42,16 @@ import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager;
 import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
-import org.mockito.Matchers;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -311,16 +310,18 @@ public class TestAppLogAggregatorImpl {
         @Override
         public Void answer(InvocationOnMock invocationOnMock) throws Throwable {
           Set<String> paths = new HashSet<>();
-          Object[] args = invocationOnMock.getArguments();
-          for(int i = 2; i < args.length; i++) {
-            Path path = (Path) args[i];
-            paths.add(path.toUri().getRawPath());
+          Object[] tasks = invocationOnMock.getArguments();
+          for(int i = 0; i < tasks.length; i++) {
+            FileDeletionTask task = (FileDeletionTask) tasks[i];
+            for (Path path: task.getBaseDirs()) {
+              paths.add(path.toUri().getRawPath());
+            }
           }
           verifyFilesToDelete(expectedPathsForDeletion, paths);
           return null;
         }
       }).doNothing().when(deletionServiceWithExpectedFiles).delete(
-          any(String.class), any(Path.class), Matchers.<Path>anyVararg());
+          any(FileDeletionTask.class));
 
     return deletionServiceWithExpectedFiles;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index bc1b4b0..37fe77a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -120,6 +120,8 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Ap
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.TestNonAggregatingLogHandler;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
@@ -218,8 +220,10 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     // ensure filesystems were closed
     verify(logAggregationService).closeFileSystems(
         any(UserGroupInformation.class));
-    verify(delSrvc).delete(eq(user), eq((Path) null),
-      eq(new Path(app1LogDir.getAbsolutePath())));
+    List<Path> dirList = new ArrayList<>();
+    dirList.add(new Path(app1LogDir.toURI()));
+    verify(delSrvc, times(2)).delete(argThat(new FileDeletionMatcher(
+        delSrvc, user, null, dirList)));
     
     String containerIdStr = container11.toString();
     File containerLogDir = new File(app1LogDir, containerIdStr);
@@ -333,7 +337,9 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     logAggregationService.stop();
     delSrvc.stop();
     // Aggregated logs should not be deleted if not uploaded.
-    verify(delSrvc, times(0)).delete(user, null);
+    FileDeletionTask deletionTask = new FileDeletionTask(delSrvc, user, null,
+        null);
+    verify(delSrvc, times(0)).delete(deletionTask);
   }
 
   @Test
@@ -815,8 +821,9 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
     assertEquals(0, logAggregationService.getNumAggregators());
     // local log dir shouldn't be deleted given log aggregation cannot
     // continue due to aggregated log dir creation failure on remoteFS.
-    verify(spyDelSrvc, never()).delete(eq(user), any(Path.class),
-        Mockito.<Path>anyVararg());
+    FileDeletionTask deletionTask = new FileDeletionTask(spyDelSrvc, user,
+        null, null);
+    verify(spyDelSrvc, never()).delete(deletionTask);
     verify(logAggregationService).closeFileSystems(
         any(UserGroupInformation.class));
     // make sure local log dir is not deleted in case log aggregation

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
index ec3757e..7a4ea88 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/TestNonAggregatingLogHandler.java
@@ -22,12 +22,14 @@ import static org.junit.Assert.assertFalse;
 
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.argThat;
 import static org.mockito.Matchers.eq;
 import static org.mockito.Matchers.isA;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
 import java.io.File;
@@ -36,6 +38,7 @@ import java.io.IOException;
 import java.io.NotSerializableException;
 import java.io.ObjectInputStream;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 import java.util.HashMap;
@@ -66,6 +69,7 @@ import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionMatcher;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerContainerFinishedEvent;
@@ -531,8 +535,8 @@ public class TestNonAggregatingLogHandler {
     boolean matched = false;
     while (!matched && System.currentTimeMillis() < verifyStartTime + timeout) {
       try {
-        verify(delService).delete(eq(user), (Path) eq(null),
-          Mockito.argThat(new DeletePathsMatcher(matchPaths)));
+        verify(delService, times(1)).delete(argThat(new FileDeletionMatcher(
+            delService, user, null, Arrays.asList(matchPaths))));
         matched = true;
       } catch (WantedButNotInvoked e) {
         notInvokedException = e;

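Taken together, the call-site changes in this patch follow one shape: the removed multi-argument delete(user, subDir, baseDirs...) becomes a single delete(DeletionTask) taking a constructed task object. A hedged before/after sketch (the user name and paths are made up):

    import java.util.Arrays;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;

    // Illustrative call-site migration for the DeletionService refactor.
    public class DeletionCallSiteDemo {
      public static void submit(DeletionService delService) {
        // Before (removed by this patch):
        //   delService.delete("alice", new Path("subdir"), new Path("base1"));
        // After: wrap the same arguments in a FileDeletionTask.
        FileDeletionTask task = new FileDeletionTask(delService, "alice",
            new Path("subdir"), Arrays.asList(new Path("base1")));
        delService.delete(task);
      }
    }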

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[32/50] [abbrv] hadoop git commit: HDFS-11928. Segment overflow in FileDistributionCalculator. Contributed by LiXin Ge.

Posted by xy...@apache.org.
HDFS-11928. Segment overflow in FileDistributionCalculator. Contributed by LiXin Ge.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b55a3463
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b55a3463
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b55a3463

Branch: refs/heads/HDFS-7240
Commit: b55a34639d73eabc2978bdcb12231f9ed19e8106
Parents: 14f782b
Author: Arpit Agarwal <ar...@apache.org>
Authored: Mon Jun 5 13:21:22 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:51 2017 -0700

----------------------------------------------------------------------
 .../tools/offlineImageViewer/FileDistributionCalculator.java     | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b55a3463/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
index 71fb822..25a7bbe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
@@ -160,8 +160,8 @@ final class FileDistributionCalculator {
               + StringUtils.byteDesc(((long) (i == 0 ? 0 : i - 1) * steps))
               + ", "
               + StringUtils.byteDesc((long)
-                  (i == distribution.length - 1 ? maxFileSize : i * steps))
-                  + "]\t" + distribution[i]);
+                  (i == distribution.length - 1 ? maxFileSize :
+                      (long) i * steps)) + "]\t" + distribution[i]);
         } else {
           out.print(((long) i * steps) + "\t" + distribution[i]);
         }

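The fix works because Java evaluates i * steps in 32-bit int arithmetic; casting the result to long happens after the overflow, while casting i first widens the multiplication to 64 bits. A self-contained illustration:

    // Illustrative: (long)(int * int) overflows before widening, while
    // (long) int * int widens first and multiplies in 64-bit arithmetic.
    public class OverflowDemo {
      public static void main(String[] args) {
        int i = 5000;
        int steps = 500_000;               // plausible bucket width in bytes
        long broken = (long) (i * steps);  // overflows, prints -1794967296
        long fixed = (long) i * steps;     // correct, prints 2500000000
        System.out.println(broken);
        System.out.println(fixed);
      }
    }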

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[17/50] [abbrv] hadoop git commit: HADOOP-14436. Remove the redundant colon in ViewFs.md. Contributed by maobaolong.

Posted by xy...@apache.org.
HADOOP-14436. Remove the redundant colon in ViewFs.md. Contributed by maobaolong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66184428
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66184428
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66184428

Branch: refs/heads/HDFS-7240
Commit: 6618442809094aef2370b859fe80b577297725d8
Parents: 60a7f57
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Fri Jun 2 22:39:10 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:50 2017 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66184428/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
index 6e8ce67..3810e28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ViewFs.md
@@ -118,7 +118,7 @@ Hence on Cluster X, where the `core-site.xml` is set to make the default fs to u
 
     * It is a URI for referring to a pathname on another cluster such as Cluster Y. In particular, the command for copying files from cluster Y to Cluster Z looks like:
 
-            distcp viewfs://clusterY:/pathSrc viewfs://clusterZ/pathDest
+            distcp viewfs://clusterY/pathSrc viewfs://clusterZ/pathDest
 
 4.  `viewfs://clusterX-webhdfs/foo/bar`
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[49/50] [abbrv] hadoop git commit: HDFS-11953. Remove Guava v21 usage from HDFS-7240. Contributed by Xiaoyu Yao.

Posted by xy...@apache.org.
HDFS-11953. Remove Guava v21 usage from HDFS-7240. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ec6464a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ec6464a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ec6464a

Branch: refs/heads/HDFS-7240
Commit: 2ec6464a1d8c6c9fdb80b0fb2caa30473dd37608
Parents: 25e6378
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Thu Jun 8 10:43:55 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:53 2017 -0700

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/fs/FileUtil.java            | 5 ++---
 .../src/main/java/org/apache/hadoop/ozone/OzoneClientUtils.java | 2 +-
 2 files changed, 3 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ec6464a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
index 4e0be30..3ab5483 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.fs;
 
 import com.google.common.base.Preconditions;
-import com.google.common.base.Verify;
 import org.apache.commons.collections.map.CaseInsensitiveMap;
 import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
 import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
@@ -637,8 +636,8 @@ public class FileUtil {
       }
     }
     // Exit condition -- ZipFile must exist.
-    Verify.verify(archiveName.exists(), "Expected archive file missing: {}",
-        archiveName.toPath());
+    Preconditions.checkState(archiveName.exists(),
+        "Expected archive file missing: {}", archiveName.toPath());
     long crc32 = checksum.getChecksum().getValue();
     checksum.close();
     return crc32;

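One caveat with the replacement above: Guava's Preconditions substitutes %s placeholders, not SLF4J-style {}, so the template carried over here would print the braces literally instead of the path. A small sketch of the %s form (the path is made up):

    import com.google.common.base.Preconditions;

    // Illustrative: Preconditions.checkState uses %s templates; "{}" is not
    // interpreted and would appear verbatim in the exception message.
    public class PreconditionsDemo {
      public static void main(String[] args) {
        try {
          Preconditions.checkState(false,
              "Expected archive file missing: %s", "/tmp/archive.zip");
        } catch (IllegalStateException e) {
          System.out.println(e.getMessage());
          // prints: Expected archive file missing: /tmp/archive.zip
        }
      }
    }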
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ec6464a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneClientUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneClientUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneClientUtils.java
index f38dd7e..e337d12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneClientUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneClientUtils.java
@@ -382,7 +382,7 @@ public final class OzoneClientUtils {
     if ((value == null) || value.isEmpty()) {
       return Optional.absent();
     }
-    return Optional.of(HostAndPort.fromString(value).getHost());
+    return Optional.of(HostAndPort.fromString(value).getHostText());
   }
 
   /**

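getHostText() is the accessor available in older Guava releases; getHost() was only introduced in later versions (around Guava 20), which is why reverting keeps this branch buildable against the Guava version Hadoop ships. A minimal sketch (the address is made up):

    import com.google.common.net.HostAndPort;

    // Illustrative: parse "host:port" and read the host back with the
    // older-Guava accessor name.
    public class HostParseDemo {
      public static void main(String[] args) {
        HostAndPort addr = HostAndPort.fromString("ozone.example.com:9862");
        System.out.println(addr.getHostText());  // ozone.example.com
        System.out.println(addr.getPort());      // 9862
      }
    }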

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[38/50] [abbrv] hadoop git commit: YARN-6634. [API] Refactor ResourceManager WebServices to make API explicit. (Giovanni Matteo Fumarola via curino)

Posted by xy...@apache.org.
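The bulk of this patch is mechanical: every string literal in a JAX-RS @Path or @QueryParam annotation moves into a shared constants class (RMWSConsts) so the new RMWebServiceProtocol interface and its implementation stay in sync. A minimal sketch of the pattern; the class and constant names below are illustrative, not the actual RMWSConsts contents:

    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.QueryParam;

    // Annotation values must be compile-time constants, so shared
    // 'static final String' fields can appear in @Path/@QueryParam.
    final class WSConsts {
      static final String ROOT = "/ws/v1/demo";
      static final String NODES = "/nodes";
      static final String STATES = "states";
      private WSConsts() {
      }
    }

    @Path(WSConsts.ROOT)
    public class DemoWebService {
      @GET
      @Path(WSConsts.NODES)
      public String getNodes(@QueryParam(WSConsts.STATES) String states) {
        return states == null ? "all" : states;
      }
    }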
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5c15bca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index bd0602b..acfb2b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -204,18 +204,26 @@ import com.google.inject.Inject;
 import com.google.inject.Singleton;
 
 @Singleton
-@Path("/ws/v1/cluster")
-public class RMWebServices extends WebServices {
+@Path(RMWSConsts.RM_WEB_SERVICE_PATH)
+public class RMWebServices extends WebServices implements RMWebServiceProtocol {
+
   private static final Log LOG =
       LogFactory.getLog(RMWebServices.class.getName());
-  private static final String EMPTY = "";
-  private static final String ANY = "*";
+
   private final ResourceManager rm;
-  private static RecordFactory recordFactory = RecordFactoryProvider
-      .getRecordFactory(null);
+  private static RecordFactory recordFactory =
+      RecordFactoryProvider.getRecordFactory(null);
   private final Configuration conf;
   private @Context HttpServletResponse response;
 
+  // -------Default values of QueryParams for RMWebServiceProtocol--------
+
+  public static final String DEFAULT_QUEUE = "default";
+  public static final String DEFAULT_RESERVATION_ID = "";
+  public static final String DEFAULT_START_TIME = "0";
+  public static final String DEFAULT_END_TIME = "-1";
+  public static final String DEFAULT_INCLUDE_RESOURCE = "false";
+
   @VisibleForTesting
   boolean isCentralizedNodeLabelConfiguration = true;
 
@@ -241,15 +249,15 @@ public class RMWebServices extends WebServices {
     // Check for the authorization.
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     List<String> forwardedAddresses = null;
-    String forwardedFor = hsr.getHeader("X-Forwarded-For");
+    String forwardedFor = hsr.getHeader(RMWSConsts.FORWARDED_FOR);
     if (forwardedFor != null) {
       forwardedAddresses = Arrays.asList(forwardedFor.split(","));
     }
     if (callerUGI != null
         && !(this.rm.getApplicationACLsManager().checkAccess(callerUGI,
-              ApplicationAccessType.VIEW_APP, app.getUser(),
-              app.getApplicationId()) ||
-            this.rm.getQueueACLsManager().checkAccess(callerUGI,
+            ApplicationAccessType.VIEW_APP, app.getUser(),
+            app.getApplicationId())
+            || this.rm.getQueueACLsManager().checkAccess(callerUGI,
                 QueueACL.ADMINISTER_QUEUE, app, hsr.getRemoteAddr(),
                 forwardedAddresses))) {
       return false;
@@ -258,39 +266,43 @@ public class RMWebServices extends WebServices {
   }
 
   private void init() {
-    //clear content type
+    // clear content type
     response.setContentType(null);
   }
 
   @GET
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public ClusterInfo get() {
     return getClusterInfo();
   }
 
   @GET
-  @Path("/info")
+  @Path(RMWSConsts.INFO)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public ClusterInfo getClusterInfo() {
     init();
     return new ClusterInfo(this.rm);
   }
 
   @GET
-  @Path("/metrics")
+  @Path(RMWSConsts.METRICS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public ClusterMetricsInfo getClusterMetricsInfo() {
     init();
     return new ClusterMetricsInfo(this.rm);
   }
 
   @GET
-  @Path("/scheduler")
+  @Path(RMWSConsts.SCHEDULER)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public SchedulerTypeInfo getSchedulerInfo() {
     init();
     ResourceScheduler rs = rm.getResourceScheduler();
@@ -298,8 +310,7 @@ public class RMWebServices extends WebServices {
     if (rs instanceof CapacityScheduler) {
       CapacityScheduler cs = (CapacityScheduler) rs;
       CSQueue root = cs.getRootQueue();
-      sinfo =
-          new CapacitySchedulerInfo(root, cs);
+      sinfo = new CapacitySchedulerInfo(root, cs);
     } else if (rs instanceof FairScheduler) {
       FairScheduler fs = (FairScheduler) rs;
       sinfo = new FairSchedulerInfo(fs);
@@ -312,10 +323,11 @@ public class RMWebServices extends WebServices {
   }
 
   @POST
-  @Path("/scheduler/logs")
+  @Path(RMWSConsts.SCHEDULER_LOGS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
-  public String dumpSchedulerLogs(@FormParam("time") String time,
+  @Override
+  public String dumpSchedulerLogs(@FormParam(RMWSConsts.TIME) String time,
       @Context HttpServletRequest hsr) throws IOException {
     init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
@@ -345,52 +357,51 @@ public class RMWebServices extends WebServices {
     return "Capacity scheduler logs are being created.";
   }
 
-  /**
-   * Returns all nodes in the cluster. If the states param is given, returns
-   * all nodes that are in the comma-separated list of states.
-   */
   @GET
-  @Path("/nodes")
+  @Path(RMWSConsts.NODES)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
-  public NodesInfo getNodes(@QueryParam("states") String states) {
+  @Override
+  public NodesInfo getNodes(@QueryParam(RMWSConsts.STATES) String states) {
     init();
     ResourceScheduler sched = this.rm.getResourceScheduler();
     if (sched == null) {
       throw new NotFoundException("Null ResourceScheduler instance");
     }
-    
+
     EnumSet<NodeState> acceptedStates;
     if (states == null) {
       acceptedStates = EnumSet.allOf(NodeState.class);
     } else {
       acceptedStates = EnumSet.noneOf(NodeState.class);
       for (String stateStr : states.split(",")) {
-        acceptedStates.add(
-            NodeState.valueOf(StringUtils.toUpperCase(stateStr)));
+        acceptedStates
+            .add(NodeState.valueOf(StringUtils.toUpperCase(stateStr)));
       }
     }
-    
-    Collection<RMNode> rmNodes = RMServerUtils.queryRMNodes(
-        this.rm.getRMContext(), acceptedStates);
+
+    Collection<RMNode> rmNodes =
+        RMServerUtils.queryRMNodes(this.rm.getRMContext(), acceptedStates);
     NodesInfo nodesInfo = new NodesInfo();
     for (RMNode rmNode : rmNodes) {
       NodeInfo nodeInfo = new NodeInfo(rmNode, sched);
-      if (EnumSet.of(NodeState.LOST, NodeState.DECOMMISSIONED, NodeState.REBOOTED)
+      if (EnumSet
+          .of(NodeState.LOST, NodeState.DECOMMISSIONED, NodeState.REBOOTED)
           .contains(rmNode.getState())) {
-        nodeInfo.setNodeHTTPAddress(EMPTY);
+        nodeInfo.setNodeHTTPAddress(RMWSConsts.EMPTY);
       }
       nodesInfo.add(nodeInfo);
     }
-    
+
     return nodesInfo;
   }
 
   @GET
-  @Path("/nodes/{nodeId}")
+  @Path(RMWSConsts.NODES_NODEID)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
-  public NodeInfo getNode(@PathParam("nodeId") String nodeId) {
+  @Override
+  public NodeInfo getNode(@PathParam(RMWSConsts.NODEID) String nodeId) {
     init();
     if (nodeId == null || nodeId.isEmpty()) {
       throw new NotFoundException("nodeId, " + nodeId + ", is empty or null");
@@ -411,28 +422,29 @@ public class RMWebServices extends WebServices {
     }
     NodeInfo nodeInfo = new NodeInfo(ni, sched);
     if (isInactive) {
-      nodeInfo.setNodeHTTPAddress(EMPTY);
+      nodeInfo.setNodeHTTPAddress(RMWSConsts.EMPTY);
     }
     return nodeInfo;
   }
 
   @GET
-  @Path("/apps")
+  @Path(RMWSConsts.APPS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public AppsInfo getApps(@Context HttpServletRequest hsr,
-      @QueryParam("state") String stateQuery,
-      @QueryParam("states") Set<String> statesQuery,
-      @QueryParam("finalStatus") String finalStatusQuery,
-      @QueryParam("user") String userQuery,
-      @QueryParam("queue") String queueQuery,
-      @QueryParam("limit") String count,
-      @QueryParam("startedTimeBegin") String startedBegin,
-      @QueryParam("startedTimeEnd") String startedEnd,
-      @QueryParam("finishedTimeBegin") String finishBegin,
-      @QueryParam("finishedTimeEnd") String finishEnd,
-      @QueryParam("applicationTypes") Set<String> applicationTypes,
-      @QueryParam("applicationTags") Set<String> applicationTags) {
+      @QueryParam(RMWSConsts.STATE) String stateQuery,
+      @QueryParam(RMWSConsts.STATES) Set<String> statesQuery,
+      @QueryParam(RMWSConsts.FINAL_STATUS) String finalStatusQuery,
+      @QueryParam(RMWSConsts.USER) String userQuery,
+      @QueryParam(RMWSConsts.QUEUE) String queueQuery,
+      @QueryParam(RMWSConsts.LIMIT) String count,
+      @QueryParam(RMWSConsts.STARTED_TIME_BEGIN) String startedBegin,
+      @QueryParam(RMWSConsts.STARTED_TIME_END) String startedEnd,
+      @QueryParam(RMWSConsts.FINISHED_TIME_BEGIN) String finishBegin,
+      @QueryParam(RMWSConsts.FINISHED_TIME_END) String finishEnd,
+      @QueryParam(RMWSConsts.APPLICATION_TYPES) Set<String> applicationTypes,
+      @QueryParam(RMWSConsts.APPLICATION_TAGS) Set<String> applicationTags) {
     boolean checkCount = false;
     boolean checkStart = false;
     boolean checkEnd = false;
@@ -460,7 +472,8 @@ public class RMWebServices extends WebServices {
       checkStart = true;
       sBegin = Long.parseLong(startedBegin);
       if (sBegin < 0) {
-        throw new BadRequestException("startedTimeBegin must be greater than 0");
+        throw new BadRequestException(
+            "startedTimeBegin must be greater than 0");
       }
     }
     if (startedEnd != null && !startedEnd.isEmpty()) {
@@ -563,8 +576,8 @@ public class RMWebServices extends WebServices {
 
     List<ApplicationReport> appReports = null;
     try {
-      appReports = rm.getClientRMService()
-          .getApplications(request, false).getApplicationList();
+      appReports = rm.getClientRMService().getApplications(request, false)
+          .getApplicationList();
     } catch (YarnException e) {
       LOG.error("Unable to retrieve apps from ClientRMService", e);
       throw new YarnRuntimeException(
@@ -588,19 +601,20 @@ public class RMWebServices extends WebServices {
         }
       }
 
-      AppInfo app = new AppInfo(rm, rmapp,
-          hasAccess(rmapp, hsr), WebAppUtils.getHttpSchemePrefix(conf));
+      AppInfo app = new AppInfo(rm, rmapp, hasAccess(rmapp, hsr),
+          WebAppUtils.getHttpSchemePrefix(conf));
       allApps.add(app);
     }
     return allApps;
   }
 
   @GET
-  @Path("/scheduler/activities")
+  @Path(RMWSConsts.SCHEDULER_ACTIVITIES)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public ActivitiesInfo getActivities(@Context HttpServletRequest hsr,
-      @QueryParam("nodeId") String nodeId) {
+      @QueryParam(RMWSConsts.NODEID) String nodeId) {
     YarnScheduler scheduler = rm.getRMContext().getScheduler();
 
     if (scheduler instanceof AbstractYarnScheduler) {
@@ -636,10 +650,12 @@ public class RMWebServices extends WebServices {
 
           boolean correctNodeId = false;
           for (FiCaSchedulerNode node : nodeList) {
-            if ((portName.equals("") && node.getRMNode().getHostName().equals(
-                hostName)) || (!portName.equals("") && node.getRMNode()
-                .getHostName().equals(hostName) && String.valueOf(
-                node.getRMNode().getCommandPort()).equals(portName))) {
+            if ((portName.equals("")
+                && node.getRMNode().getHostName().equals(hostName))
+                || (!portName.equals("")
+                    && node.getRMNode().getHostName().equals(hostName)
+                    && String.valueOf(node.getRMNode().getCommandPort())
+                        .equals(portName))) {
               correctNodeId = true;
               nodeId = node.getNodeID().toString();
               break;
@@ -665,11 +681,13 @@ public class RMWebServices extends WebServices {
   }
 
   @GET
-  @Path("/scheduler/app-activities")
+  @Path(RMWSConsts.SCHEDULER_APP_ACTIVITIES)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public AppActivitiesInfo getAppActivities(@Context HttpServletRequest hsr,
-      @QueryParam("appId") String appId, @QueryParam("maxTime") String time) {
+      @QueryParam(RMWSConsts.APP_ID) String appId,
+      @QueryParam(RMWSConsts.MAX_TIME) String time) {
     YarnScheduler scheduler = rm.getRMContext().getScheduler();
 
     if (scheduler instanceof AbstractYarnScheduler) {
@@ -683,7 +701,7 @@ public class RMWebServices extends WebServices {
         return new AppActivitiesInfo(errMessage, appId);
       }
 
-      if(appId == null) {
+      if (appId == null) {
         String errMessage = "Must provide an application Id";
         return new AppActivitiesInfo(errMessage, null);
       }
@@ -716,13 +734,14 @@ public class RMWebServices extends WebServices {
   }
 
   @GET
-  @Path("/appstatistics")
+  @Path(RMWSConsts.APP_STATISTICS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public ApplicationStatisticsInfo getAppStatistics(
       @Context HttpServletRequest hsr,
-      @QueryParam("states") Set<String> stateQueries,
-      @QueryParam("applicationTypes") Set<String> typeQueries) {
+      @QueryParam(RMWSConsts.STATES) Set<String> stateQueries,
+      @QueryParam(RMWSConsts.APPLICATION_TYPES) Set<String> typeQueries) {
     init();
 
     // parse the params and build the scoreboard
@@ -731,7 +750,7 @@ public class RMWebServices extends WebServices {
     Set<String> types = parseQueries(typeQueries, false);
     // if no types, counts the applications of any types
     if (types.size() == 0) {
-      types.add(ANY);
+      types.add(RMWSConsts.ANY);
     } else if (types.size() != 1) {
       throw new BadRequestException("# of applicationTypes = " + types.size()
           + ", we temporarily support at most one applicationType");
@@ -752,10 +771,9 @@ public class RMWebServices extends WebServices {
     for (RMApp rmapp : apps.values()) {
       YarnApplicationState state = rmapp.createApplicationState();
       String type = StringUtils.toLowerCase(rmapp.getApplicationType().trim());
-      if (states.contains(
-          StringUtils.toLowerCase(state.toString()))) {
-        if (types.contains(ANY)) {
-          countApp(scoreboard, state, ANY);
+      if (states.contains(StringUtils.toLowerCase(state.toString()))) {
+        if (types.contains(RMWSConsts.ANY)) {
+          countApp(scoreboard, state, RMWSConsts.ANY);
         } else if (types.contains(type)) {
           countApp(scoreboard, state, type);
         }
@@ -764,10 +782,10 @@ public class RMWebServices extends WebServices {
 
     // fill the response object
     ApplicationStatisticsInfo appStatInfo = new ApplicationStatisticsInfo();
-    for (Map.Entry<YarnApplicationState, Map<String, Long>> partScoreboard
-        : scoreboard.entrySet()) {
-      for (Map.Entry<String, Long> statEntry
-          : partScoreboard.getValue().entrySet()) {
+    for (Map.Entry<YarnApplicationState, Map<String, Long>> partScoreboard : scoreboard
+        .entrySet()) {
+      for (Map.Entry<String, Long> statEntry : partScoreboard.getValue()
+          .entrySet()) {
         StatisticsItemInfo statItem = new StatisticsItemInfo(
             partScoreboard.getKey(), statEntry.getKey(), statEntry.getValue());
         appStatInfo.add(statItem);
@@ -777,9 +795,9 @@ public class RMWebServices extends WebServices {
   }
 
   private static Map<YarnApplicationState, Map<String, Long>> buildScoreboard(
-     Set<String> states, Set<String> types) {
-    Map<YarnApplicationState, Map<String, Long>> scoreboard
-        = new HashMap<YarnApplicationState, Map<String, Long>>();
+      Set<String> states, Set<String> types) {
+    Map<YarnApplicationState, Map<String, Long>> scoreboard =
+        new HashMap<YarnApplicationState, Map<String, Long>>();
     // default states will result in enumerating all YarnApplicationStates
     assert !states.isEmpty();
     for (String state : states) {
@@ -804,11 +822,12 @@ public class RMWebServices extends WebServices {
   }
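
buildScoreboard and countApp above maintain a two-level map, state -> (type -> count). A minimal sketch of that counting structure, using plain Strings as hypothetical stand-ins for YarnApplicationState and the application type (the real code pre-populates the slots; this variant creates them on demand):

    import java.util.HashMap;
    import java.util.Map;

    final class ScoreboardSketch {
      // Increment the counter for (state, type), creating slots as needed.
      static void count(Map<String, Map<String, Long>> board,
          String state, String type) {
        board.computeIfAbsent(state, s -> new HashMap<>())
            .merge(type, 1L, Long::sum);
      }
    }
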
 
   @GET
-  @Path("/apps/{appid}")
+  @Path(RMWSConsts.APPS_APPID)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public AppInfo getApp(@Context HttpServletRequest hsr,
-      @PathParam("appid") String appId) {
+      @PathParam(RMWSConsts.APPID) String appId) {
     init();
     ApplicationId id = WebAppUtils.parseApplicationId(recordFactory, appId);
     RMApp app = rm.getRMContext().getRMApps().get(id);
@@ -819,11 +838,12 @@ public class RMWebServices extends WebServices {
   }
 
   @GET
-  @Path("/apps/{appid}/appattempts")
+  @Path(RMWSConsts.APPS_APPID_APPATTEMPTS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public AppAttemptsInfo getAppAttempts(@Context HttpServletRequest hsr,
-      @PathParam("appid") String appId) {
+      @PathParam(RMWSConsts.APPID) String appId) {
 
     init();
     ApplicationId id = WebAppUtils.parseApplicationId(recordFactory, appId);
@@ -843,25 +863,27 @@ public class RMWebServices extends WebServices {
   }
 
   @GET
-  @Path("/apps/{appid}/appattempts/{appattemptid}")
+  @Path(RMWSConsts.APPS_APPID_APPATTEMPTS_APPATTEMPTID)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Override
-  public org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo getAppAttempt(@Context HttpServletRequest req,
-      @Context HttpServletResponse res, @PathParam("appid") String appId,
-      @PathParam("appattemptid") String appAttemptId) {
+  public org.apache.hadoop.yarn.server.webapp.dao.AppAttemptInfo getAppAttempt(
+      @Context HttpServletRequest req, @Context HttpServletResponse res,
+      @PathParam(RMWSConsts.APPID) String appId,
+      @PathParam(RMWSConsts.APPATTEMPTID) String appAttemptId) {
     init(res);
     return super.getAppAttempt(req, res, appId, appAttemptId);
   }
 
   @GET
-  @Path("/apps/{appid}/appattempts/{appattemptid}/containers")
+  @Path(RMWSConsts.APPS_APPID_APPATTEMPTS_APPATTEMPTID_CONTAINERS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Override
   public ContainersInfo getContainers(@Context HttpServletRequest req,
-      @Context HttpServletResponse res, @PathParam("appid") String appId,
-      @PathParam("appattemptid") String appAttemptId) {
+      @Context HttpServletResponse res,
+      @PathParam(RMWSConsts.APPID) String appId,
+      @PathParam(RMWSConsts.APPATTEMPTID) String appAttemptId) {
     init(res);
     return super.getContainers(req, res, appId, appAttemptId);
   }
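
The recurring change in this patch replaces string literals in @Path, @QueryParam and @PathParam with RMWSConsts constants, so each route and parameter name is defined exactly once. A minimal sketch of the pattern, with hypothetical constant values:

    import javax.ws.rs.GET;
    import javax.ws.rs.Path;
    import javax.ws.rs.PathParam;

    final class WSConsts {               // stand-in for RMWSConsts
      static final String NODES_NODEID = "/nodes/{nodeId}";
      static final String NODEID = "nodeId";
      private WSConsts() {}
    }

    @Path("/")
    class NodeResource {
      @GET
      @Path(WSConsts.NODES_NODEID)       // one definition, shared everywhere
      public String getNode(@PathParam(WSConsts.NODEID) String nodeId) {
        return "node: " + nodeId;
      }
    }
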
@@ -872,8 +894,9 @@ public class RMWebServices extends WebServices {
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Override
   public ContainerInfo getContainer(@Context HttpServletRequest req,
-      @Context HttpServletResponse res, @PathParam("appid") String appId,
-      @PathParam("appattemptid") String appAttemptId,
+      @Context HttpServletResponse res,
+      @PathParam(RMWSConsts.APPID) String appId,
+      @PathParam(RMWSConsts.APPATTEMPTID) String appAttemptId,
       @PathParam("containerid") String containerId) {
     init(res);
     return super.getContainer(req, res, appId, appAttemptId, containerId);
@@ -883,8 +906,9 @@ public class RMWebServices extends WebServices {
   @Path("/apps/{appid}/state")
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public AppState getAppState(@Context HttpServletRequest hsr,
-      @PathParam("appid") String appId) throws AuthorizationException {
+      @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException {
     init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     String userName = "";
@@ -896,8 +920,8 @@ public class RMWebServices extends WebServices {
       app = getRMAppForAppId(appId);
     } catch (NotFoundException e) {
       RMAuditLogger.logFailure(userName, AuditConstants.GET_APP_STATE,
-        "UNKNOWN", "RMWebService",
-        "Trying to get state of an absent application " + appId);
+          "UNKNOWN", "RMWebService",
+          "Trying to get state of an absent application " + appId);
       throw e;
     }
 
@@ -912,14 +936,15 @@ public class RMWebServices extends WebServices {
   // to 202
 
   @PUT
-  @Path("/apps/{appid}/state")
+  @Path(RMWSConsts.APPS_APPID_STATE)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
   public Response updateAppState(AppState targetState,
-      @Context HttpServletRequest hsr, @PathParam("appid") String appId)
-      throws AuthorizationException, YarnException, InterruptedException,
-      IOException {
+      @Context HttpServletRequest hsr,
+      @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException,
+      YarnException, InterruptedException, IOException {
 
     init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
@@ -939,8 +964,8 @@ public class RMWebServices extends WebServices {
       app = getRMAppForAppId(appId);
     } catch (NotFoundException e) {
       RMAuditLogger.logFailure(userName, AuditConstants.KILL_APP_REQUEST,
-        "UNKNOWN", "RMWebService", "Trying to kill an absent application "
-            + appId);
+          "UNKNOWN", "RMWebService",
+          "Trying to kill an absent application " + appId);
       throw e;
     }
 
@@ -948,12 +973,13 @@ public class RMWebServices extends WebServices {
      // user is attempting to change state. Right now we only
      // allow users to kill the app.
 
-      if (targetState.getState().equals(YarnApplicationState.KILLED.toString())) {
+      if (targetState.getState()
+          .equals(YarnApplicationState.KILLED.toString())) {
         return killApp(app, callerUGI, hsr, targetState.getDiagnostics());
       }
-      throw new BadRequestException("Only '"
-          + YarnApplicationState.KILLED.toString()
-          + "' is allowed as a target state.");
+      throw new BadRequestException(
+          "Only '" + YarnApplicationState.KILLED.toString()
+              + "' is allowed as a target state.");
     }
 
     AppState ret = new AppState();
@@ -961,19 +987,20 @@ public class RMWebServices extends WebServices {
 
     return Response.status(Status.OK).entity(ret).build();
   }
-  
+
   @GET
-  @Path("/get-node-to-labels")
+  @Path(RMWSConsts.GET_NODE_TO_LABELS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public NodeToLabelsInfo getNodeToLabels(@Context HttpServletRequest hsr)
       throws IOException {
     init();
 
     NodeToLabelsInfo ntl = new NodeToLabelsInfo();
     HashMap<String, NodeLabelsInfo> ntlMap = ntl.getNodeToLabels();
-    Map<NodeId, Set<NodeLabel>> nodeIdToLabels = rm.getRMContext()
-        .getNodeLabelManager().getNodeLabelsInfo();
+    Map<NodeId, Set<NodeLabel>> nodeIdToLabels =
+        rm.getRMContext().getNodeLabelManager().getNodeLabelsInfo();
 
     for (Map.Entry<NodeId, Set<NodeLabel>> nitle : nodeIdToLabels.entrySet()) {
       List<NodeLabel> labels = new ArrayList<NodeLabel>(nitle.getValue());
@@ -984,11 +1011,12 @@ public class RMWebServices extends WebServices {
   }
 
   @GET
-  @Path("/label-mappings")
+  @Path(RMWSConsts.LABEL_MAPPINGS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public LabelsToNodesInfo getLabelsToNodes(
-      @QueryParam("labels") Set<String> labels) throws IOException {
+      @QueryParam(RMWSConsts.LABELS) Set<String> labels) throws IOException {
     init();
 
     LabelsToNodesInfo lts = new LabelsToNodesInfo();
@@ -1007,17 +1035,19 @@ public class RMWebServices extends WebServices {
       for (NodeId nodeId : entry.getValue()) {
         nodeIdStrList.add(nodeId.toString());
       }
-      ltsMap.put(new NodeLabelInfo(entry.getKey()), new NodeIDsInfo(
-          nodeIdStrList));
+      ltsMap.put(new NodeLabelInfo(entry.getKey()),
+          new NodeIDsInfo(nodeIdStrList));
     }
     return lts;
   }
 
   @POST
-  @Path("/replace-node-to-labels")
+  @Path(RMWSConsts.REPLACE_NODE_TO_LABELS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
-  public Response replaceLabelsOnNodes(final NodeToLabelsEntryList newNodeToLabels,
+  @Override
+  public Response replaceLabelsOnNodes(
+      final NodeToLabelsEntryList newNodeToLabels,
       @Context HttpServletRequest hsr) throws IOException {
     Map<NodeId, Set<String>> nodeIdToLabels =
         new HashMap<NodeId, Set<String>>();
@@ -1032,9 +1062,10 @@ public class RMWebServices extends WebServices {
   }
 
   @POST
-  @Path("/nodes/{nodeId}/replace-labels")
+  @Path(RMWSConsts.NODES_NODEID_REPLACE_LABELS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public Response replaceLabelsOnNode(
       @QueryParam("labels") Set<String> newNodeLabelsName,
       @Context HttpServletRequest hsr, @PathParam("nodeId") String nodeId)
@@ -1053,21 +1084,19 @@ public class RMWebServices extends WebServices {
       String operation) throws IOException {
     init();
 
-    NodeLabelsUtils.verifyCentralizedNodeLabelConfEnabled(
-        "replaceLabelsOnNode", isCentralizedNodeLabelConfiguration);
+    NodeLabelsUtils.verifyCentralizedNodeLabelConfEnabled("replaceLabelsOnNode",
+        isCentralizedNodeLabelConfiguration);
 
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     if (callerUGI == null) {
-      String msg =
-          "Unable to obtain user name, user not authenticated for"
-              + " post to ..." + operation;
+      String msg = "Unable to obtain user name, user not authenticated for"
+          + " post to ..." + operation;
       throw new AuthorizationException(msg);
     }
 
     if (!rm.getRMContext().getNodeLabelManager().checkAccess(callerUGI)) {
-      String msg =
-          "User " + callerUGI.getShortUserName() + " not authorized"
-              + " for post to ..." + operation;
+      String msg = "User " + callerUGI.getShortUserName() + " not authorized"
+          + " for post to ..." + operation;
       throw new AuthorizationException(msg);
     }
     try {
@@ -1081,58 +1110,60 @@ public class RMWebServices extends WebServices {
   }
 
   @GET
-  @Path("/get-node-labels")
+  @Path(RMWSConsts.GET_NODE_LABELS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
-  public NodeLabelsInfo getClusterNodeLabels(@Context HttpServletRequest hsr) 
-    throws IOException {
+  @Override
+  public NodeLabelsInfo getClusterNodeLabels(@Context HttpServletRequest hsr)
+      throws IOException {
     init();
 
-    List<NodeLabel> nodeLabels = rm.getRMContext().getNodeLabelManager()
-        .getClusterNodeLabels();
+    List<NodeLabel> nodeLabels =
+        rm.getRMContext().getNodeLabelManager().getClusterNodeLabels();
     NodeLabelsInfo ret = new NodeLabelsInfo(nodeLabels);
 
     return ret;
   }
-  
+
   @POST
-  @Path("/add-node-labels")
+  @Path(RMWSConsts.ADD_NODE_LABELS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public Response addToClusterNodeLabels(final NodeLabelsInfo newNodeLabels,
-      @Context HttpServletRequest hsr)
-      throws Exception {
+      @Context HttpServletRequest hsr) throws Exception {
     init();
-    
+
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     if (callerUGI == null) {
       String msg = "Unable to obtain user name, user not authenticated for"
-        + " post to .../add-node-labels";
+          + " post to .../add-node-labels";
       throw new AuthorizationException(msg);
     }
     if (!rm.getRMContext().getNodeLabelManager().checkAccess(callerUGI)) {
       String msg = "User " + callerUGI.getShortUserName() + " not authorized"
-        + " for post to .../add-node-labels ";
+          + " for post to .../add-node-labels ";
       throw new AuthorizationException(msg);
     }
-    
+
     try {
       rm.getRMContext().getNodeLabelManager()
           .addToCluserNodeLabels(newNodeLabels.getNodeLabels());
     } catch (IOException e) {
       throw new BadRequestException(e);
     }
-            
+
     return Response.status(Status.OK).build();
 
   }
-  
+
   @POST
-  @Path("/remove-node-labels")
+  @Path(RMWSConsts.REMOVE_NODE_LABELS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public Response removeFromCluserNodeLabels(
-      @QueryParam("labels") Set<String> oldNodeLabels,
+      @QueryParam(RMWSConsts.LABELS) Set<String> oldNodeLabels,
       @Context HttpServletRequest hsr) throws Exception {
     init();
 
@@ -1157,18 +1188,19 @@ public class RMWebServices extends WebServices {
 
     return Response.status(Status.OK).build();
   }
-  
+
   @GET
-  @Path("/nodes/{nodeId}/get-labels")
+  @Path(RMWSConsts.NODES_NODEID_GETLABELS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public NodeLabelsInfo getLabelsOnNode(@Context HttpServletRequest hsr,
-      @PathParam("nodeId") String nodeId) throws IOException {
+      @PathParam(RMWSConsts.NODEID) String nodeId) throws IOException {
     init();
 
     NodeId nid = ConverterUtils.toNodeIdWithDefaultPort(nodeId);
-    List<NodeLabel> labels = new ArrayList<NodeLabel>(rm.getRMContext()
-        .getNodeLabelManager().getLabelsInfoByNode(nid));
+    List<NodeLabel> labels = new ArrayList<NodeLabel>(
+        rm.getRMContext().getNodeLabelManager().getLabelsInfoByNode(nid));
     return new NodeLabelsInfo(labels);
   }
 
@@ -1183,20 +1215,19 @@ public class RMWebServices extends WebServices {
     final ApplicationId appid = app.getApplicationId();
     KillApplicationResponse resp = null;
     try {
-      resp =
-          callerUGI
-            .doAs(new PrivilegedExceptionAction<KillApplicationResponse>() {
-              @Override
-              public KillApplicationResponse run() throws IOException,
-                  YarnException {
-                KillApplicationRequest req =
-                    KillApplicationRequest.newInstance(appid);
-                  if (diagnostic != null) {
-                    req.setDiagnostics(diagnostic);
-                  }
-                return rm.getClientRMService().forceKillApplication(req);
+      resp = callerUGI
+          .doAs(new PrivilegedExceptionAction<KillApplicationResponse>() {
+            @Override
+            public KillApplicationResponse run()
+                throws IOException, YarnException {
+              KillApplicationRequest req =
+                  KillApplicationRequest.newInstance(appid);
+              if (diagnostic != null) {
+                req.setDiagnostics(diagnostic);
               }
-            });
+              return rm.getClientRMService().forceKillApplication(req);
+            }
+          });
     } catch (UndeclaredThrowableException ue) {
       // if the root cause is a permissions issue
       // bubble that up to the user
@@ -1204,9 +1235,8 @@ public class RMWebServices extends WebServices {
         YarnException ye = (YarnException) ue.getCause();
         if (ye.getCause() instanceof AccessControlException) {
           String appId = app.getApplicationId().toString();
-          String msg =
-              "Unauthorized attempt to kill appid " + appId
-                  + " by remote user " + userName;
+          String msg = "Unauthorized attempt to kill appid " + appId
+              + " by remote user " + userName;
           return Response.status(Status.FORBIDDEN).entity(msg).build();
         } else {
           throw ue;
@@ -1221,20 +1251,21 @@ public class RMWebServices extends WebServices {
 
     if (resp.getIsKillCompleted()) {
       RMAuditLogger.logSuccess(userName, AuditConstants.KILL_APP_REQUEST,
-        "RMWebService", app.getApplicationId());
+          "RMWebService", app.getApplicationId());
     } else {
       return Response.status(Status.ACCEPTED).entity(ret)
-        .header(HttpHeaders.LOCATION, hsr.getRequestURL()).build();
+          .header(HttpHeaders.LOCATION, hsr.getRequestURL()).build();
     }
     return Response.status(Status.OK).entity(ret).build();
   }
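
killApp above uses the standard UGI pattern: wrap the RPC in a PrivilegedExceptionAction and run it via callerUGI.doAs, so server-side ACL checks see the authenticated web user rather than the RM daemon's principal. A minimal sketch, with a hypothetical payload in place of the real KillApplicationRequest:

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.security.UserGroupInformation;

    final class DoAsSketch {
      static String callAs(UserGroupInformation callerUGI, String appId)
          throws IOException, InterruptedException {
        return callerUGI.doAs(new PrivilegedExceptionAction<String>() {
          @Override
          public String run() throws IOException {
            // the real code issues an RPC here; this stand-in just echoes
            return "killed " + appId;
          }
        });
      }
    }
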
 
   @GET
-  @Path("/apps/{appid}/priority")
+  @Path(RMWSConsts.APPS_APPID_PRIORITY)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public AppPriority getAppPriority(@Context HttpServletRequest hsr,
-      @PathParam("appid") String appId) throws AuthorizationException {
+      @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException {
     init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     String userName = "UNKNOWN-USER";
@@ -1252,21 +1283,21 @@ public class RMWebServices extends WebServices {
     }
 
     AppPriority ret = new AppPriority();
-    ret.setPriority(
-        app.getApplicationPriority().getPriority());
+    ret.setPriority(app.getApplicationPriority().getPriority());
 
     return ret;
   }
 
   @PUT
-  @Path("/apps/{appid}/priority")
+  @Path(RMWSConsts.APPS_APPID_PRIORITY)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
   public Response updateApplicationPriority(AppPriority targetPriority,
-      @Context HttpServletRequest hsr, @PathParam("appid") String appId)
-      throws AuthorizationException, YarnException, InterruptedException,
-          IOException {
+      @Context HttpServletRequest hsr,
+      @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException,
+      YarnException, InterruptedException, IOException {
     init();
     if (targetPriority == null) {
       throw new YarnException("Target Priority cannot be null");
@@ -1305,7 +1336,7 @@ public class RMWebServices extends WebServices {
 
   private Response modifyApplicationPriority(final RMApp app,
       UserGroupInformation callerUGI, final int appPriority)
-          throws IOException, InterruptedException {
+      throws IOException, InterruptedException {
     String userName = callerUGI.getUserName();
     try {
       callerUGI.doAs(new PrivilegedExceptionAction<Void>() {
@@ -1340,17 +1371,18 @@ public class RMWebServices extends WebServices {
         throw ue;
       }
     }
-    AppPriority ret = new AppPriority(
-        app.getApplicationPriority().getPriority());
+    AppPriority ret =
+        new AppPriority(app.getApplicationPriority().getPriority());
     return Response.status(Status.OK).entity(ret).build();
   }
 
   @GET
-  @Path("/apps/{appid}/queue")
+  @Path(RMWSConsts.APPS_APPID_QUEUE)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public AppQueue getAppQueue(@Context HttpServletRequest hsr,
-      @PathParam("appid") String appId) throws AuthorizationException {
+      @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException {
     init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     String userName = "UNKNOWN-USER";
@@ -1362,8 +1394,8 @@ public class RMWebServices extends WebServices {
       app = getRMAppForAppId(appId);
     } catch (NotFoundException e) {
       RMAuditLogger.logFailure(userName, AuditConstants.GET_APP_QUEUE,
-        "UNKNOWN", "RMWebService",
-        "Trying to get queue of an absent application " + appId);
+          "UNKNOWN", "RMWebService",
+          "Trying to get queue of an absent application " + appId);
       throw e;
     }
 
@@ -1374,14 +1406,15 @@ public class RMWebServices extends WebServices {
   }
 
   @PUT
-  @Path("/apps/{appid}/queue")
+  @Path(RMWSConsts.APPS_APPID_QUEUE)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
   public Response updateAppQueue(AppQueue targetQueue,
-      @Context HttpServletRequest hsr, @PathParam("appid") String appId)
-      throws AuthorizationException, YarnException, InterruptedException,
-      IOException {
+      @Context HttpServletRequest hsr,
+      @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException,
+      YarnException, InterruptedException, IOException {
 
     init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
@@ -1401,8 +1434,8 @@ public class RMWebServices extends WebServices {
       app = getRMAppForAppId(appId);
     } catch (NotFoundException e) {
       RMAuditLogger.logFailure(userName, AuditConstants.MOVE_APP_REQUEST,
-        "UNKNOWN", "RMWebService", "Trying to move an absent application "
-            + appId);
+          "UNKNOWN", "RMWebService",
+          "Trying to move an absent application " + appId);
       throw e;
     }
 
@@ -1427,18 +1460,16 @@ public class RMWebServices extends WebServices {
     final ApplicationId appid = app.getApplicationId();
     final String reqTargetQueue = targetQueue;
     try {
-      callerUGI
-        .doAs(new PrivilegedExceptionAction<Void>() {
-          @Override
-          public Void run() throws IOException,
-              YarnException {
-            MoveApplicationAcrossQueuesRequest req =
-                MoveApplicationAcrossQueuesRequest.newInstance(appid,
+      callerUGI.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
+        public Void run() throws IOException, YarnException {
+          MoveApplicationAcrossQueuesRequest req =
+              MoveApplicationAcrossQueuesRequest.newInstance(appid,
                   reqTargetQueue);
-            rm.getClientRMService().moveApplicationAcrossQueues(req);
-            return null;
-          }
-        });
+          rm.getClientRMService().moveApplicationAcrossQueues(req);
+          return null;
+        }
+      });
     } catch (UndeclaredThrowableException ue) {
       // if the root cause is a permissions issue
       // bubble that up to the user
@@ -1446,14 +1477,13 @@ public class RMWebServices extends WebServices {
         YarnException ye = (YarnException) ue.getCause();
         if (ye.getCause() instanceof AccessControlException) {
           String appId = app.getApplicationId().toString();
-          String msg =
-              "Unauthorized attempt to move appid " + appId
-                  + " by remote user " + userName;
+          String msg = "Unauthorized attempt to move appid " + appId
+              + " by remote user " + userName;
           return Response.status(Status.FORBIDDEN).entity(msg).build();
         } else if (ye.getMessage().startsWith("App in")
             && ye.getMessage().endsWith("state cannot be moved.")) {
           return Response.status(Status.BAD_REQUEST).entity(ye.getMessage())
-            .build();
+              .build();
         } else {
           throw ue;
         }
@@ -1496,32 +1526,22 @@ public class RMWebServices extends WebServices {
   private boolean isStaticUser(UserGroupInformation callerUGI) {
     String staticUser =
         conf.get(CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER,
-          CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER);
+            CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER);
     return staticUser.equals(callerUGI.getUserName());
   }
 
-  /**
-   * Generates a new ApplicationId which is then sent to the client
-   * 
-   * @param hsr
-   *          the servlet request
-   * @return Response containing the app id and the maximum resource
-   *         capabilities
-   * @throws AuthorizationException
-   * @throws IOException
-   * @throws InterruptedException
-   */
   @POST
-  @Path("/apps/new-application")
+  @Path(RMWSConsts.APPS_NEW_APPLICATION)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public Response createNewApplication(@Context HttpServletRequest hsr)
       throws AuthorizationException, IOException, InterruptedException {
     init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     if (callerUGI == null) {
-      throw new AuthorizationException("Unable to obtain user name, "
-          + "user not authenticated");
+      throw new AuthorizationException(
+          "Unable to obtain user name, " + "user not authenticated");
     }
     if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
       String msg = "The default static user cannot carry out this operation.";
@@ -1536,33 +1556,21 @@ public class RMWebServices extends WebServices {
   // reuse the code in ClientRMService to create new app
   // get the new app id and submit app
   // set location header with new app location
-  /**
-   * Function to submit an app to the RM
-   * 
-   * @param newApp
-   *          structure containing information to construct the
-   *          ApplicationSubmissionContext
-   * @param hsr
-   *          the servlet request
-   * @return Response containing the status code
-   * @throws AuthorizationException
-   * @throws IOException
-   * @throws InterruptedException
-   */
   @POST
-  @Path("/apps")
+  @Path(RMWSConsts.APPS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
   public Response submitApplication(ApplicationSubmissionContextInfo newApp,
-      @Context HttpServletRequest hsr) throws AuthorizationException,
-      IOException, InterruptedException {
+      @Context HttpServletRequest hsr)
+      throws AuthorizationException, IOException, InterruptedException {
 
     init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     if (callerUGI == null) {
-      throw new AuthorizationException("Unable to obtain user name, "
-          + "user not authenticated");
+      throw new AuthorizationException(
+          "Unable to obtain user name, " + "user not authenticated");
     }
 
     if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
@@ -1578,13 +1586,13 @@ public class RMWebServices extends WebServices {
 
     try {
       callerUGI
-        .doAs(new PrivilegedExceptionAction<SubmitApplicationResponse>() {
-          @Override
-          public SubmitApplicationResponse run() throws IOException,
-              YarnException {
-            return rm.getClientRMService().submitApplication(req);
-          }
-        });
+          .doAs(new PrivilegedExceptionAction<SubmitApplicationResponse>() {
+            @Override
+            public SubmitApplicationResponse run()
+                throws IOException, YarnException {
+              return rm.getClientRMService().submitApplication(req);
+            }
+          });
     } catch (UndeclaredThrowableException ue) {
       if (ue.getCause() instanceof YarnException) {
         throw new BadRequestException(ue.getCause().getMessage());
@@ -1595,7 +1603,7 @@ public class RMWebServices extends WebServices {
 
     String url = hsr.getRequestURL() + "/" + newApp.getApplicationId();
     return Response.status(Status.ACCEPTED).header(HttpHeaders.LOCATION, url)
-      .build();
+        .build();
   }
 
   /**
@@ -1618,7 +1626,7 @@ public class RMWebServices extends WebServices {
     }
     NewApplication appId =
         new NewApplication(resp.getApplicationId().toString(),
-          new ResourceInfo(resp.getMaximumResourceCapability()));
+            new ResourceInfo(resp.getMaximumResourceCapability()));
     return appId;
   }
 
@@ -1626,8 +1634,7 @@ public class RMWebServices extends WebServices {
    * Create the actual ApplicationSubmissionContext to be submitted to the RM
    * from the information provided by the user.
    * 
-   * @param newApp
-   *          the information provided by the user
+   * @param newApp the information provided by the user
    * @return returns the constructed ApplicationSubmissionContext
    * @throws IOException
    */
@@ -1644,28 +1651,27 @@ public class RMWebServices extends WebServices {
     } catch (Exception e) {
       throw new BadRequestException(error);
     }
-    ApplicationSubmissionContext appContext =
-        ApplicationSubmissionContext.newInstance(appid,
-          newApp.getApplicationName(), newApp.getQueue(),
-          Priority.newInstance(newApp.getPriority()),
-          createContainerLaunchContext(newApp), newApp.getUnmanagedAM(),
-          newApp.getCancelTokensWhenComplete(), newApp.getMaxAppAttempts(),
-          createAppSubmissionContextResource(newApp),
-          newApp.getApplicationType(),
-          newApp.getKeepContainersAcrossApplicationAttempts(),
-          newApp.getAppNodeLabelExpression(),
-          newApp.getAMContainerNodeLabelExpression());
+    ApplicationSubmissionContext appContext = ApplicationSubmissionContext
+        .newInstance(appid, newApp.getApplicationName(), newApp.getQueue(),
+            Priority.newInstance(newApp.getPriority()),
+            createContainerLaunchContext(newApp), newApp.getUnmanagedAM(),
+            newApp.getCancelTokensWhenComplete(), newApp.getMaxAppAttempts(),
+            createAppSubmissionContextResource(newApp),
+            newApp.getApplicationType(),
+            newApp.getKeepContainersAcrossApplicationAttempts(),
+            newApp.getAppNodeLabelExpression(),
+            newApp.getAMContainerNodeLabelExpression());
     appContext.setApplicationTags(newApp.getApplicationTags());
     appContext.setAttemptFailuresValidityInterval(
         newApp.getAttemptFailuresValidityInterval());
     if (newApp.getLogAggregationContextInfo() != null) {
-      appContext.setLogAggregationContext(createLogAggregationContext(
-          newApp.getLogAggregationContextInfo()));
+      appContext.setLogAggregationContext(
+          createLogAggregationContext(newApp.getLogAggregationContextInfo()));
     }
     String reservationIdStr = newApp.getReservationId();
     if (reservationIdStr != null && !reservationIdStr.isEmpty()) {
-      ReservationId reservationId = ReservationId.parseReservationId(
-          reservationIdStr);
+      ReservationId reservationId =
+          ReservationId.parseReservationId(reservationIdStr);
       appContext.setReservationID(reservationId);
     }
     return appContext;
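
createAppSubmissionContext above attaches a ReservationId only when the client supplied a non-empty string. A minimal sketch of that guard, with a hypothetical helper name:

    import java.io.IOException;
    import org.apache.hadoop.yarn.api.records.ReservationId;

    final class ReservationIdSketch {
      // Returns null when no reservation was requested.
      static ReservationId parseIfPresent(String idStr) throws IOException {
        if (idStr == null || idStr.isEmpty()) {
          return null;
        }
        return ReservationId.parseReservationId(idStr);
      }
    }
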
@@ -1674,20 +1680,19 @@ public class RMWebServices extends WebServices {
   protected Resource createAppSubmissionContextResource(
       ApplicationSubmissionContextInfo newApp) throws BadRequestException {
     if (newApp.getResource().getvCores() > rm.getConfig().getInt(
-      YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
-      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES)) {
+        YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES)) {
       String msg = "Requested more cores than configured max";
       throw new BadRequestException(msg);
     }
     if (newApp.getResource().getMemorySize() > rm.getConfig().getInt(
-      YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
-      YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB)) {
+        YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
+        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB)) {
       String msg = "Requested more memory than configured max";
       throw new BadRequestException(msg);
     }
-    Resource r =
-        Resource.newInstance(newApp.getResource().getMemorySize(), newApp
-          .getResource().getvCores());
+    Resource r = Resource.newInstance(newApp.getResource().getMemorySize(),
+        newApp.getResource().getvCores());
     return r;
   }
 
@@ -1696,21 +1701,20 @@ public class RMWebServices extends WebServices {
    * ApplicationSubmissionContext. This function takes the user information and
    * generates the ByteBuffer structures required by the ContainerLaunchContext
    * 
-   * @param newApp
-   *          the information provided by the user
+   * @param newApp the information provided by the user
    * @return created context
    * @throws BadRequestException
    * @throws IOException
    */
   protected ContainerLaunchContext createContainerLaunchContext(
-      ApplicationSubmissionContextInfo newApp) throws BadRequestException,
-      IOException {
+      ApplicationSubmissionContextInfo newApp)
+      throws BadRequestException, IOException {
 
     // create container launch context
 
     HashMap<String, ByteBuffer> hmap = new HashMap<String, ByteBuffer>();
     for (Map.Entry<String, String> entry : newApp
-      .getContainerLaunchContextInfo().getAuxillaryServiceData().entrySet()) {
+        .getContainerLaunchContextInfo().getAuxillaryServiceData().entrySet()) {
       if (entry.getValue().isEmpty() == false) {
         Base64 decoder = new Base64(0, null, true);
         byte[] data = decoder.decode(entry.getValue());
@@ -1720,27 +1724,23 @@ public class RMWebServices extends WebServices {
 
     HashMap<String, LocalResource> hlr = new HashMap<String, LocalResource>();
     for (Map.Entry<String, LocalResourceInfo> entry : newApp
-      .getContainerLaunchContextInfo().getResources().entrySet()) {
+        .getContainerLaunchContextInfo().getResources().entrySet()) {
       LocalResourceInfo l = entry.getValue();
-      LocalResource lr =
-          LocalResource.newInstance(
-              URL.fromURI(l.getUrl()), l.getType(),
-            l.getVisibility(), l.getSize(), l.getTimestamp());
+      LocalResource lr = LocalResource.newInstance(URL.fromURI(l.getUrl()),
+          l.getType(), l.getVisibility(), l.getSize(), l.getTimestamp());
       hlr.put(entry.getKey(), lr);
     }
 
     DataOutputBuffer out = new DataOutputBuffer();
-    Credentials cs =
-        createCredentials(newApp.getContainerLaunchContextInfo()
-          .getCredentials());
+    Credentials cs = createCredentials(
+        newApp.getContainerLaunchContextInfo().getCredentials());
     cs.writeTokenStorageToStream(out);
     ByteBuffer tokens = ByteBuffer.wrap(out.getData());
 
-    ContainerLaunchContext ctx =
-        ContainerLaunchContext.newInstance(hlr, newApp
-          .getContainerLaunchContextInfo().getEnvironment(), newApp
-          .getContainerLaunchContextInfo().getCommands(), hmap, tokens, newApp
-          .getContainerLaunchContextInfo().getAcls());
+    ContainerLaunchContext ctx = ContainerLaunchContext.newInstance(hlr,
+        newApp.getContainerLaunchContextInfo().getEnvironment(),
+        newApp.getContainerLaunchContextInfo().getCommands(), hmap, tokens,
+        newApp.getContainerLaunchContextInfo().getAcls());
 
     return ctx;
   }
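
The auxiliary-service data handled above arrives as URL-safe Base64; each entry is decoded and wrapped into a ByteBuffer for the ContainerLaunchContext. A minimal sketch of that step, assuming commons-codec:

    import java.nio.ByteBuffer;
    import org.apache.commons.codec.binary.Base64;

    final class AuxDataSketch {
      static ByteBuffer decode(String payload) {
        // lineLength=0 disables chunking; true selects the URL-safe alphabet
        Base64 decoder = new Base64(0, null, true);
        return ByteBuffer.wrap(decoder.decode(payload));
      }
    }
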
@@ -1749,20 +1749,21 @@ public class RMWebServices extends WebServices {
    * Generate a Credentials object from the information in the CredentialsInfo
    * object.
    * 
-   * @param credentials
-   *          the CredentialsInfo provided by the user.
+   * @param credentials the CredentialsInfo provided by the user.
   * @return the Credentials object assembled from the supplied tokens and
   *         secrets.
    */
   private Credentials createCredentials(CredentialsInfo credentials) {
     Credentials ret = new Credentials();
     try {
-      for (Map.Entry<String, String> entry : credentials.getTokens().entrySet()) {
+      for (Map.Entry<String, String> entry : credentials.getTokens()
+          .entrySet()) {
         Text alias = new Text(entry.getKey());
         Token<TokenIdentifier> token = new Token<TokenIdentifier>();
         token.decodeFromUrlString(entry.getValue());
         ret.addToken(alias, token);
       }
-      for (Map.Entry<String, String> entry : credentials.getSecrets().entrySet()) {
+      for (Map.Entry<String, String> entry : credentials.getSecrets()
+          .entrySet()) {
         Text alias = new Text(entry.getKey());
         Base64 decoder = new Base64(0, null, true);
         byte[] secret = decoder.decode(entry.getValue());
@@ -1770,8 +1771,8 @@ public class RMWebServices extends WebServices {
       }
     } catch (IOException ie) {
       throw new BadRequestException(
-        "Could not parse credentials data; exception message = "
-            + ie.getMessage());
+          "Could not parse credentials data; exception message = "
+              + ie.getMessage());
     }
     return ret;
   }
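
createCredentials above folds the client-supplied maps into a Hadoop Credentials bag: tokens are rebuilt from their URL-string form and secrets are Base64-decoded. A minimal sketch of the secret half, with hypothetical input:

    import java.util.Map;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.security.Credentials;

    final class CredentialsSketch {
      static Credentials fromSecrets(Map<String, byte[]> secrets) {
        Credentials creds = new Credentials();
        for (Map.Entry<String, byte[]> e : secrets.entrySet()) {
          creds.addSecretKey(new Text(e.getKey()), e.getValue());
        }
        return creds;
      }
    }
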
@@ -1787,14 +1788,13 @@ public class RMWebServices extends WebServices {
 
     String authType = hsr.getAuthType();
     if (!KerberosAuthenticationHandler.TYPE.equalsIgnoreCase(authType)) {
-      String msg =
-          "Delegation token operations can only be carried out on a "
-              + "Kerberos authenticated channel. Expected auth type is "
-              + KerberosAuthenticationHandler.TYPE + ", got type " + authType;
+      String msg = "Delegation token operations can only be carried out on a "
+          + "Kerberos authenticated channel. Expected auth type is "
+          + KerberosAuthenticationHandler.TYPE + ", got type " + authType;
       throw new YarnException(msg);
     }
-    if (hsr
-      .getAttribute(DelegationTokenAuthenticationHandler.DELEGATION_TOKEN_UGI_ATTRIBUTE) != null) {
+    if (hsr.getAttribute(
+        DelegationTokenAuthenticationHandler.DELEGATION_TOKEN_UGI_ATTRIBUTE) != null) {
       String msg =
           "Delegation token operations cannot be carried out using delegation"
               + " token authentication.";
@@ -1817,10 +1817,11 @@ public class RMWebServices extends WebServices {
   }
 
   @POST
-  @Path("/delegation-token")
+  @Path(RMWSConsts.DELEGATION_TOKEN)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
   public Response postDelegationToken(DelegationToken tokenData,
       @Context HttpServletRequest hsr) throws AuthorizationException,
       IOException, InterruptedException, Exception {
@@ -1836,14 +1837,14 @@ public class RMWebServices extends WebServices {
   }
 
   @POST
-  @Path("/delegation-token/expiration")
+  @Path(RMWSConsts.DELEGATION_TOKEN_EXPIRATION)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
-  public Response
-      postDelegationTokenExpiration(@Context HttpServletRequest hsr)
-          throws AuthorizationException, IOException, InterruptedException,
-          Exception {
+  @Override
+  public Response postDelegationTokenExpiration(@Context HttpServletRequest hsr)
+      throws AuthorizationException, IOException, InterruptedException,
+      Exception {
 
     init();
     UserGroupInformation callerUGI;
@@ -1866,35 +1867,33 @@ public class RMWebServices extends WebServices {
     final String renewer = tokenData.getRenewer();
     GetDelegationTokenResponse resp;
     try {
-      resp =
-          callerUGI
-            .doAs(new PrivilegedExceptionAction<GetDelegationTokenResponse>() {
-              @Override
-              public GetDelegationTokenResponse run() throws IOException,
-                  YarnException {
-                GetDelegationTokenRequest createReq =
-                    GetDelegationTokenRequest.newInstance(renewer);
-                return rm.getClientRMService().getDelegationToken(createReq);
-              }
-            });
+      resp = callerUGI
+          .doAs(new PrivilegedExceptionAction<GetDelegationTokenResponse>() {
+            @Override
+            public GetDelegationTokenResponse run()
+                throws IOException, YarnException {
+              GetDelegationTokenRequest createReq =
+                  GetDelegationTokenRequest.newInstance(renewer);
+              return rm.getClientRMService().getDelegationToken(createReq);
+            }
+          });
     } catch (Exception e) {
       LOG.info("Create delegation token request failed", e);
       throw e;
     }
 
     Token<RMDelegationTokenIdentifier> tk =
-        new Token<RMDelegationTokenIdentifier>(resp.getRMDelegationToken()
-          .getIdentifier().array(), resp.getRMDelegationToken().getPassword()
-          .array(), new Text(resp.getRMDelegationToken().getKind()), new Text(
-          resp.getRMDelegationToken().getService()));
+        new Token<RMDelegationTokenIdentifier>(
+            resp.getRMDelegationToken().getIdentifier().array(),
+            resp.getRMDelegationToken().getPassword().array(),
+            new Text(resp.getRMDelegationToken().getKind()),
+            new Text(resp.getRMDelegationToken().getService()));
     RMDelegationTokenIdentifier identifier = tk.decodeIdentifier();
-    long currentExpiration =
-        rm.getRMContext().getRMDelegationTokenSecretManager()
-          .getRenewDate(identifier);
-    DelegationToken respToken =
-        new DelegationToken(tk.encodeToUrlString(), renewer, identifier
-          .getOwner().toString(), tk.getKind().toString(), currentExpiration,
-          identifier.getMaxDate());
+    long currentExpiration = rm.getRMContext()
+        .getRMDelegationTokenSecretManager().getRenewDate(identifier);
+    DelegationToken respToken = new DelegationToken(tk.encodeToUrlString(),
+        renewer, identifier.getOwner().toString(), tk.getKind().toString(),
+        currentExpiration, identifier.getMaxDate());
     return Response.status(Status.OK).entity(respToken).build();
   }
 
@@ -1906,30 +1905,29 @@ public class RMWebServices extends WebServices {
     Token<RMDelegationTokenIdentifier> token =
         extractToken(tokenData.getToken());
 
-    org.apache.hadoop.yarn.api.records.Token dToken =
-        BuilderUtils.newDelegationToken(token.getIdentifier(), token.getKind()
-          .toString(), token.getPassword(), token.getService().toString());
+    org.apache.hadoop.yarn.api.records.Token dToken = BuilderUtils
+        .newDelegationToken(token.getIdentifier(), token.getKind().toString(),
+            token.getPassword(), token.getService().toString());
     final RenewDelegationTokenRequest req =
         RenewDelegationTokenRequest.newInstance(dToken);
 
     RenewDelegationTokenResponse resp;
     try {
-      resp =
-          callerUGI
-            .doAs(new PrivilegedExceptionAction<RenewDelegationTokenResponse>() {
-              @Override
-              public RenewDelegationTokenResponse run() throws IOException,
-                  YarnException {
-                return rm.getClientRMService().renewDelegationToken(req);
-              }
-            });
+      resp = callerUGI
+          .doAs(new PrivilegedExceptionAction<RenewDelegationTokenResponse>() {
+            @Override
+            public RenewDelegationTokenResponse run() throws YarnException {
+              return rm.getClientRMService().renewDelegationToken(req);
+            }
+          });
     } catch (UndeclaredThrowableException ue) {
       if (ue.getCause() instanceof YarnException) {
         if (ue.getCause().getCause() instanceof InvalidToken) {
           throw new BadRequestException(ue.getCause().getCause().getMessage());
-        } else if (ue.getCause().getCause() instanceof org.apache.hadoop.security.AccessControlException) {
+        } else if (ue.getCause()
+            .getCause() instanceof org.apache.hadoop.security.AccessControlException) {
           return Response.status(Status.FORBIDDEN)
-            .entity(ue.getCause().getCause().getMessage()).build();
+              .entity(ue.getCause().getCause().getMessage()).build();
         }
         LOG.info("Renew delegation token request failed", ue);
         throw ue;
@@ -1955,9 +1953,10 @@ public class RMWebServices extends WebServices {
   // since urls tend to get logged and anyone with access to
   // the logs can extract tokens which are meant to be secret
   @DELETE
-  @Path("/delegation-token")
+  @Path(RMWSConsts.DELEGATION_TOKEN)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public Response cancelDelegationToken(@Context HttpServletRequest hsr)
       throws AuthorizationException, IOException, InterruptedException,
       Exception {
@@ -1972,28 +1971,29 @@ public class RMWebServices extends WebServices {
 
     Token<RMDelegationTokenIdentifier> token = extractToken(hsr);
 
-    org.apache.hadoop.yarn.api.records.Token dToken =
-        BuilderUtils.newDelegationToken(token.getIdentifier(), token.getKind()
-          .toString(), token.getPassword(), token.getService().toString());
+    org.apache.hadoop.yarn.api.records.Token dToken = BuilderUtils
+        .newDelegationToken(token.getIdentifier(), token.getKind().toString(),
+            token.getPassword(), token.getService().toString());
     final CancelDelegationTokenRequest req =
         CancelDelegationTokenRequest.newInstance(dToken);
 
     try {
       callerUGI
-        .doAs(new PrivilegedExceptionAction<CancelDelegationTokenResponse>() {
-          @Override
-          public CancelDelegationTokenResponse run() throws IOException,
-              YarnException {
-            return rm.getClientRMService().cancelDelegationToken(req);
-          }
-        });
+          .doAs(new PrivilegedExceptionAction<CancelDelegationTokenResponse>() {
+            @Override
+            public CancelDelegationTokenResponse run()
+                throws IOException, YarnException {
+              return rm.getClientRMService().cancelDelegationToken(req);
+            }
+          });
     } catch (UndeclaredThrowableException ue) {
       if (ue.getCause() instanceof YarnException) {
         if (ue.getCause().getCause() instanceof InvalidToken) {
           throw new BadRequestException(ue.getCause().getCause().getMessage());
-        } else if (ue.getCause().getCause() instanceof org.apache.hadoop.security.AccessControlException) {
+        } else if (ue.getCause()
+            .getCause() instanceof org.apache.hadoop.security.AccessControlException) {
           return Response.status(Status.FORBIDDEN)
-            .entity(ue.getCause().getCause().getMessage()).build();
+              .entity(ue.getCause().getCause().getMessage()).build();
         }
         LOG.info("Renew delegation token request failed", ue);
         throw ue;
@@ -2012,9 +2012,8 @@ public class RMWebServices extends WebServices {
       HttpServletRequest request) {
     String encodedToken = request.getHeader(DELEGATION_TOKEN_HEADER);
     if (encodedToken == null) {
-      String msg =
-          "Header '" + DELEGATION_TOKEN_HEADER
-              + "' containing encoded token not found";
+      String msg = "Header '" + DELEGATION_TOKEN_HEADER
+          + "' containing encoded token not found";
       throw new BadRequestException(msg);
     }
     return extractToken(encodedToken);
@@ -2032,28 +2031,18 @@ public class RMWebServices extends WebServices {
     return token;
   }
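
extractToken above rebuilds the RM delegation token from its URL-safe string form before any renew or cancel RPC. A minimal sketch of that decoding step, reusing the real token types:

    import java.io.IOException;
    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;

    final class TokenDecodeSketch {
      static Token<RMDelegationTokenIdentifier> decode(String encoded)
          throws IOException {
        Token<RMDelegationTokenIdentifier> token =
            new Token<RMDelegationTokenIdentifier>();
        token.decodeFromUrlString(encoded);
        return token;
      }
    }
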
 
-  /**
-   * Generates a new ReservationId which is then sent to the client.
-   *
-   * @param hsr the servlet request
-   * @return Response containing the app id and the maximum resource
-   *         capabilities
-   * @throws AuthorizationException if the user is not authorized
-   *         to invoke this method.
-   * @throws IOException if creation fails.
-   * @throws InterruptedException if interrupted.
-   */
   @POST
-  @Path("/reservation/new-reservation")
+  @Path(RMWSConsts.RESERVATION_NEW)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public Response createNewReservation(@Context HttpServletRequest hsr)
-    throws AuthorizationException, IOException, InterruptedException {
+      throws AuthorizationException, IOException, InterruptedException {
     init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     if (callerUGI == null) {
-      throw new AuthorizationException("Unable to obtain user name, "
-        + "user not authenticated");
+      throw new AuthorizationException(
+          "Unable to obtain user name, " + "user not authenticated");
     }
     if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
       String msg = "The default static user cannot carry out this operation.";
@@ -2088,32 +2077,21 @@ public class RMWebServices extends WebServices {
     return reservationId;
   }
 
-  /**
-   * Function to submit a Reservation to the RM.
-   *
-   * @param resContext provides information to construct the
-   *          ReservationSubmissionRequest
-   * @param hsr the servlet request
-   * @return Response containing the status code
-   * @throws AuthorizationException
-   * @throws IOException
-   * @throws InterruptedException
-   */
   @POST
-  @Path("/reservation/submit")
+  @Path(RMWSConsts.RESERVATION_SUBMIT)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
-  public Response submitReservation(
-      ReservationSubmissionRequestInfo resContext,
-      @Context HttpServletRequest hsr) throws AuthorizationException,
-      IOException, InterruptedException {
+  @Override
+  public Response submitReservation(ReservationSubmissionRequestInfo resContext,
+      @Context HttpServletRequest hsr)
+      throws AuthorizationException, IOException, InterruptedException {
 
     init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     if (callerUGI == null) {
-      throw new AuthorizationException("Unable to obtain user name, "
-          + "user not authenticated");
+      throw new AuthorizationException(
+          "Unable to obtain user name, " + "user not authenticated");
     }
     if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
       String msg = "The default static user cannot carry out this operation.";
@@ -2126,11 +2104,11 @@ public class RMWebServices extends WebServices {
     try {
       callerUGI
           .doAs(new PrivilegedExceptionAction<ReservationSubmissionResponse>() {
-              @Override
-              public ReservationSubmissionResponse run() throws IOException,
-                  YarnException {
-                return rm.getClientRMService().submitReservation(reservation);
-              }
+            @Override
+            public ReservationSubmissionResponse run()
+                throws IOException, YarnException {
+              return rm.getClientRMService().submitReservation(reservation);
+            }
           });
     } catch (UndeclaredThrowableException ue) {
       if (ue.getCause() instanceof YarnException) {
@@ -2179,9 +2157,8 @@ public class RMWebServices extends WebServices {
       int numContainers = resReqInfo.getNumContainers();
       int minConcurrency = resReqInfo.getMinConcurrency();
       long duration = resReqInfo.getDuration();
-      ReservationRequest rr =
-          ReservationRequest.newInstance(capability, numContainers,
-              minConcurrency, duration);
+      ReservationRequest rr = ReservationRequest.newInstance(capability,
+          numContainers, minConcurrency, duration);
       list.add(rr);
     }
     ReservationRequests reqs = ReservationRequests.newInstance(list, resInt);
@@ -2189,40 +2166,29 @@ public class RMWebServices extends WebServices {
         ReservationDefinition.newInstance(resInfo.getArrival(),
             resInfo.getDeadline(), reqs, resInfo.getReservationName());
 
-    ReservationId reservationId = ReservationId.parseReservationId(resContext
-        .getReservationId());
-    ReservationSubmissionRequest request =
-        ReservationSubmissionRequest.newInstance(rDef, resContext.getQueue(),
-          reservationId);
+    ReservationId reservationId =
+        ReservationId.parseReservationId(resContext.getReservationId());
+    ReservationSubmissionRequest request = ReservationSubmissionRequest
+        .newInstance(rDef, resContext.getQueue(), reservationId);
 
     return request;
   }
 
-  /**
-   * Function to update a Reservation to the RM.
-   *
-   * @param resContext provides information to construct the
-   *          ReservationUpdateRequest
-   * @param hsr the servlet request
-   * @return Response containing the status code
-   * @throws AuthorizationException
-   * @throws IOException
-   * @throws InterruptedException
-   */
   @POST
-  @Path("/reservation/update")
+  @Path(RMWSConsts.RESERVATION_UPDATE)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
   public Response updateReservation(ReservationUpdateRequestInfo resContext,
-      @Context HttpServletRequest hsr) throws AuthorizationException,
-      IOException, InterruptedException {
+      @Context HttpServletRequest hsr)
+      throws AuthorizationException, IOException, InterruptedException {
 
     init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     if (callerUGI == null) {
-      throw new AuthorizationException("Unable to obtain user name, "
-          + "user not authenticated");
+      throw new AuthorizationException(
+          "Unable to obtain user name, " + "user not authenticated");
     }
     if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
       String msg = "The default static user cannot carry out this operation.";
@@ -2234,16 +2200,15 @@ public class RMWebServices extends WebServices {
 
     ReservationUpdateResponseInfo resRespInfo;
     try {
-      resRespInfo =
-          callerUGI.doAs(
-              new PrivilegedExceptionAction<ReservationUpdateResponseInfo>() {
-                @Override
-                public ReservationUpdateResponseInfo run() throws IOException,
-                    YarnException {
-                  rm.getClientRMService().updateReservation(reservation);
-                  return new ReservationUpdateResponseInfo();
-                }
-              });
+      resRespInfo = callerUGI
+          .doAs(new PrivilegedExceptionAction<ReservationUpdateResponseInfo>() {
+            @Override
+            public ReservationUpdateResponseInfo run()
+                throws IOException, YarnException {
+              rm.getClientRMService().updateReservation(reservation);
+              return new ReservationUpdateResponseInfo();
+            }
+          });
     } catch (UndeclaredThrowableException ue) {
       if (ue.getCause() instanceof YarnException) {
         throw new BadRequestException(ue.getCause().getMessage());
@@ -2293,51 +2258,35 @@ public class RMWebServices extends WebServices {
       int numContainers = resReqInfo.getNumContainers();
       int minConcurrency = resReqInfo.getMinConcurrency();
       long duration = resReqInfo.getDuration();
-      ReservationRequest rr =
-          ReservationRequest.newInstance(capability, numContainers,
-              minConcurrency, duration);
+      ReservationRequest rr = ReservationRequest.newInstance(capability,
+          numContainers, minConcurrency, duration);
       list.add(rr);
     }
     ReservationRequests reqs = ReservationRequests.newInstance(list, resInt);
     ReservationDefinition rDef =
         ReservationDefinition.newInstance(resInfo.getArrival(),
             resInfo.getDeadline(), reqs, resInfo.getReservationName());
-    ReservationUpdateRequest request =
-        ReservationUpdateRequest.newInstance(rDef, ReservationId
-            .parseReservationId(resContext.getReservationId()));
+    ReservationUpdateRequest request = ReservationUpdateRequest.newInstance(
+        rDef, ReservationId.parseReservationId(resContext.getReservationId()));
 
     return request;
   }
 
-  /**
-   * Function to delete a Reservation to the RM.
-   *
-   * @param resContext provides information to construct
-   *          the ReservationDeleteRequest
-   * @param hsr the servlet request
-   * @return Response containing the status code
-   * @throws AuthorizationException when the user group information cannot be
-   *           retrieved.
-   * @throws IOException when a {@link ReservationDeleteRequest} cannot be
-   *           created from the {@link ReservationDeleteRequestInfo}. This
-   *           exception is also thrown on
-   *           {@code ClientRMService.deleteReservation} invokation failure.
-   * @throws InterruptedException if doAs action throws an InterruptedException.
-   */
   @POST
-  @Path("/reservation/delete")
+  @Path(RMWSConsts.RESERVATION_DELETE)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
   public Response deleteReservation(ReservationDeleteRequestInfo resContext,
-      @Context HttpServletRequest hsr) throws AuthorizationException,
-      IOException, InterruptedException {
+      @Context HttpServletRequest hsr)
+      throws AuthorizationException, IOException, InterruptedException {
 
     init();
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     if (callerUGI == null) {
-      throw new AuthorizationException("Unable to obtain user name, "
-          + "user not authenticated");
+      throw new AuthorizationException(
+          "Unable to obtain user name, " + "user not authenticated");
     }
     if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
       String msg = "The default static user cannot carry out this operation.";
@@ -2349,16 +2298,15 @@ public class RMWebServices extends WebServices {
 
     ReservationDeleteResponseInfo resRespInfo;
     try {
-      resRespInfo =
-          callerUGI.doAs(
-              new PrivilegedExceptionAction<ReservationDeleteResponseInfo>() {
-                @Override
-                public ReservationDeleteResponseInfo run() throws IOException,
-                    YarnException {
-                  rm.getClientRMService().deleteReservation(reservation);
-                  return new ReservationDeleteResponseInfo();
-                }
-              });
+      resRespInfo = callerUGI
+          .doAs(new PrivilegedExceptionAction<ReservationDeleteResponseInfo>() {
+            @Override
+            public ReservationDeleteResponseInfo run()
+                throws IOException, YarnException {
+              rm.getClientRMService().deleteReservation(reservation);
+              return new ReservationDeleteResponseInfo();
+            }
+          });
     } catch (UndeclaredThrowableException ue) {
       if (ue.getCause() instanceof YarnException) {
         throw new BadRequestException(ue.getCause().getMessage());
@@ -2373,37 +2321,33 @@ public class RMWebServices extends WebServices {
   private ReservationDeleteRequest createReservationDeleteRequest(
       ReservationDeleteRequestInfo resContext) throws IOException {
 
-    ReservationDeleteRequest request =
-        ReservationDeleteRequest.newInstance(ReservationId
-            .parseReservationId(resContext.getReservationId()));
+    ReservationDeleteRequest request = ReservationDeleteRequest.newInstance(
+        ReservationId.parseReservationId(resContext.getReservationId()));
 
     return request;
   }
 
-  /**
-   * Function to retrieve a list of all the reservations.
-   */
   @GET
-  @Path("/reservation/list")
+  @Path(RMWSConsts.RESERVATION_LIST)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public Response listReservation(
-          @QueryParam("queue") @DefaultValue("default") String queue,
-          @QueryParam("reservation-id") @DefaultValue("") String reservationId,
-          @QueryParam("start-time") @DefaultValue("0") long startTime,
-          @QueryParam("end-time") @DefaultValue("-1") long endTime,
-          @QueryParam("include-resource-allocations") @DefaultValue("false")
-          boolean includeResourceAllocations, @Context HttpServletRequest hsr)
-          throws Exception {
+      @QueryParam(RMWSConsts.QUEUE) @DefaultValue(DEFAULT_QUEUE) String queue,
+      @QueryParam(RMWSConsts.RESERVATION_ID) @DefaultValue(DEFAULT_RESERVATION_ID) String reservationId,
+      @QueryParam(RMWSConsts.START_TIME) @DefaultValue(DEFAULT_START_TIME) long startTime,
+      @QueryParam(RMWSConsts.END_TIME) @DefaultValue(DEFAULT_END_TIME) long endTime,
+      @QueryParam(RMWSConsts.INCLUDE_RESOURCE) @DefaultValue(DEFAULT_INCLUDE_RESOURCE) boolean includeResourceAllocations,
+      @Context HttpServletRequest hsr) throws Exception {
     init();
 
     final ReservationListRequest request = ReservationListRequest.newInstance(
-          queue, reservationId, startTime, endTime, includeResourceAllocations);
+        queue, reservationId, startTime, endTime, includeResourceAllocations);
 
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
     if (callerUGI == null) {
-      throw new AuthorizationException("Unable to obtain user name, "
-              + "user not authenticated");
+      throw new AuthorizationException(
+          "Unable to obtain user name, " + "user not authenticated");
     }
     if (UserGroupInformation.isSecurityEnabled() && isStaticUser(callerUGI)) {
       String msg = "The default static user cannot carry out this operation.";
@@ -2412,11 +2356,11 @@ public class RMWebServices extends WebServices {
 
     ReservationListResponse resRespInfo;
     try {
-      resRespInfo = callerUGI.doAs(
-          new PrivilegedExceptionAction<ReservationListResponse>() {
+      resRespInfo = callerUGI
+          .doAs(new PrivilegedExceptionAction<ReservationListResponse>() {
             @Override
-            public ReservationListResponse run() throws IOException,
-                    YarnException {
+            public ReservationListResponse run()
+                throws IOException, YarnException {
               return rm.getClientRMService().listReservations(request);
             }
           });
@@ -2428,18 +2372,19 @@ public class RMWebServices extends WebServices {
       throw ue;
     }
 
-    ReservationListInfo resResponse = new ReservationListInfo(resRespInfo,
-            includeResourceAllocations);
+    ReservationListInfo resResponse =
+        new ReservationListInfo(resRespInfo, includeResourceAllocations);
     return Response.status(Status.OK).entity(resResponse).build();
   }
 
   @GET
-  @Path("/apps/{appid}/timeouts/{type}")
+  @Path(RMWSConsts.APPS_TIMEOUTS_TYPE)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public AppTimeoutInfo getAppTimeout(@Context HttpServletRequest hsr,
-      @PathParam("appid") String appId, @PathParam("type") String type)
-      throws AuthorizationException {
+      @PathParam(RMWSConsts.APPID) String appId,
+      @PathParam(RMWSConsts.TYPE) String type) throws AuthorizationException {
     init();
     RMApp app = validateAppTimeoutRequest(hsr, appId);
 
@@ -2478,11 +2423,12 @@ public class RMWebServices extends WebServices {
   }
 
   @GET
-  @Path("/apps/{appid}/timeouts")
+  @Path(RMWSConsts.APPS_TIMEOUTS)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
+  @Override
   public AppTimeoutsInfo getAppTimeouts(@Context HttpServletRequest hsr,
-      @PathParam("appid") String appId) throws AuthorizationException {
+      @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException {
     init();
 
     RMApp app = validateAppTimeoutRequest(hsr, appId);
@@ -2532,14 +2478,15 @@ public class RMWebServices extends WebServices {
   }
 
   @PUT
-  @Path("/apps/{appid}/timeout")
+  @Path(RMWSConsts.APPS_TIMEOUT)
   @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
       MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
   @Consumes({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  @Override
   public Response updateApplicationTimeout(AppTimeoutInfo appTimeout,
-      @Context HttpServletRequest hsr, @PathParam("appid") String appId)
-      throws AuthorizationException, YarnException, InterruptedException,
-      IOException {
+      @Context HttpServletRequest hsr,
+      @PathParam(RMWSConsts.APPID) String appId) throws AuthorizationException,
+      YarnException, InterruptedException, IOException {
     init();
 
     UserGroupInformation callerUGI = getCallerUserGroupInformation(hsr, true);
@@ -2601,8 +2548,8 @@ public class RMWebServices extends WebServices {
               + " by remote user " + userName;
           return Response.status(Status.FORBIDDEN).entity(msg).build();
         } else if (ye.getCause() instanceof ParseException) {
-          return Response.status(Status.BAD_REQUEST)
-              .entity(ye.getMessage()).build();
+          return Response.status(Status.BAD_REQUEST).entity(ye.getMessage())
+              .build();
         } else {
           throw ue;
         }


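The hunks above keep reshaping the same idiom: the REST handler resolves the caller's UserGroupInformation, runs the privileged ClientRMService call inside doAs, and unwraps UndeclaredThrowableException to translate the real cause. A minimal, self-contained sketch of that idiom follows — it assumes only a pre-resolved callerUGI, and ServiceException is a hypothetical stand-in for YarnException, not the real class:

    import java.io.IOException;
    import java.lang.reflect.UndeclaredThrowableException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      // Hypothetical stand-in for YarnException: any checked exception that
      // is not an IOException/InterruptedException comes back from doAs
      // wrapped in UndeclaredThrowableException.
      static class ServiceException extends Exception {
        ServiceException(String msg) { super(msg); }
      }

      static String callAs(UserGroupInformation callerUGI)
          throws IOException, InterruptedException {
        try {
          return callerUGI.doAs(new PrivilegedExceptionAction<String>() {
            @Override
            public String run() throws ServiceException {
              // The privileged work runs under the caller's credentials here.
              return "ok";
            }
          });
        } catch (UndeclaredThrowableException ue) {
          // Mirrors the handlers above: branch on ue.getCause() and translate.
          throw new IOException(ue.getCause());
        }
      }
    }

That wrapping behavior is why every handler in this patch inspects ue.getCause() rather than catching YarnException directly.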


[45/50] [abbrv] hadoop git commit: HDFS-11711. DN should not delete the block On "Too many open files" Exception. Contributed by Brahma Reddy Battula.

Posted by xy...@apache.org.
HDFS-11711. DN should not delete the block On "Too many open files" Exception. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d3bbf2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d3bbf2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d3bbf2d

Branch: refs/heads/HDFS-7240
Commit: 7d3bbf2d378e02588e1dcbf5b724a97aed9229f7
Parents: c58bd15
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed Jun 7 13:05:03 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:52 2017 -0700

----------------------------------------------------------------------
 .../hdfs/server/datanode/BlockSender.java       | 13 ++--
 .../server/datanode/DataNodeFaultInjector.java  |  4 ++
 .../server/datanode/TestDataNodeMetrics.java    | 62 ++++++++++++++++++++
 3 files changed, 75 insertions(+), 4 deletions(-)
----------------------------------------------------------------------

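In essence the fix narrows BlockSender's FileNotFoundException handler: a missing metadata file normally means the replica is still in the volume map but gone from disk, so the DataNode reports it deleted and invalidates it — but an fd-exhaustion error must not take that path, because the replica is still intact. A minimal sketch of the guard, with the DataNode calls reduced to placeholders (notifyNamenodeDeleted and invalidate are illustrative names, not the real API):

    import java.io.FileNotFoundException;

    public class MetaOpenGuard {
      // Illustrative stand-ins for the DataNode/NameNode calls in the patch.
      interface BlockActions {
        void notifyNamenodeDeleted();
        void invalidate();
      }

      static void handleMissingMeta(FileNotFoundException e, BlockActions block)
          throws FileNotFoundException {
        String msg = e.getMessage();
        // "Too many open files" means the process ran out of file
        // descriptors, not that the replica is gone from disk -- so the
        // block must not be reported deleted or invalidated.
        if (msg != null && !msg.contains("Too many open files")) {
          block.notifyNamenodeDeleted();
          block.invalidate();
        }
        throw e; // propagate either way, as the patch does
      }
    }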

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d3bbf2d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 203ee35..3ff5c75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -302,6 +302,7 @@ class BlockSender implements java.io.Closeable {
         LengthInputStream metaIn = null;
         boolean keepMetaInOpen = false;
         try {
+          DataNodeFaultInjector.get().throwTooManyOpenFiles();
           metaIn = datanode.data.getMetaDataInputStream(block);
           if (!corruptChecksumOk || metaIn != null) {
             if (metaIn == null) {
@@ -331,10 +332,14 @@ class BlockSender implements java.io.Closeable {
             LOG.warn("Could not find metadata file for " + block);
           }
         } catch (FileNotFoundException e) {
-          // The replica is on its volume map but not on disk
-          datanode.notifyNamenodeDeletedBlock(block, replica.getStorageUuid());
-          datanode.data.invalidate(block.getBlockPoolId(),
-              new Block[]{block.getLocalBlock()});
+          if ((e.getMessage() != null) && !(e.getMessage()
+              .contains("Too many open files"))) {
+            // The replica is on its volume map but not on disk
+            datanode
+                .notifyNamenodeDeletedBlock(block, replica.getStorageUuid());
+            datanode.data.invalidate(block.getBlockPoolId(),
+                new Block[] {block.getLocalBlock()});
+          }
           throw e;
         } finally {
           if (!keepMetaInOpen) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d3bbf2d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
index d2d557f..0a2a60b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeFaultInjector.java
@@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 
 /**
@@ -85,4 +86,7 @@ public class DataNodeFaultInjector {
   public void startOfferService() throws Exception {}
 
   public void endOfferService() throws Exception {}
+
+  public void throwTooManyOpenFiles() throws FileNotFoundException {
+  }
 }

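The hook added above is deliberately a no-op, so production code pays only a virtual call while a test swaps in an implementation that throws. Besides the Mockito stub used in TestDataNodeMetrics below, a plain subclass works the same way — a sketch, assuming it sits in the org.apache.hadoop.hdfs.server.datanode package so that it can extend the injector:

    package org.apache.hadoop.hdfs.server.datanode;

    import java.io.FileNotFoundException;

    // Hypothetical test helper, equivalent to the Mockito stub below: a
    // subclass whose hook always throws the fd-exhaustion error.
    class TooManyOpenFilesInjector extends DataNodeFaultInjector {
      @Override
      public void throwTooManyOpenFiles() throws FileNotFoundException {
        throw new FileNotFoundException("Too many open files");
      }
    }
    // Typical use in a test:
    //   DataNodeFaultInjector old = DataNodeFaultInjector.get();
    //   DataNodeFaultInjector.set(new TooManyOpenFilesInjector());
    //   try { /* exercise BlockSender */ } finally {
    //     DataNodeFaultInjector.set(old);
    //   }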
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d3bbf2d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 9abc19d..7b3dea7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -24,8 +24,10 @@ import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.*;
 
 import java.io.Closeable;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
+import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.List;
 
@@ -45,8 +47,10 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
@@ -365,4 +369,62 @@ public class TestDataNodeMetrics {
       }
     }
   }
+
+  @Test
+  public void testDNShouldNotDeleteBlockONTooManyOpenFiles()
+      throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    conf.setLong(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 1);
+    DataNodeFaultInjector oldInjector = DataNodeFaultInjector.get();
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    final DataNodeFaultInjector injector =
+        Mockito.mock(DataNodeFaultInjector.class);
+    try {
+      // wait until the cluster is up
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path p = new Path("/testShouldThrowTMP");
+      DFSTestUtil.writeFile(fs, p, new String("testdata"));
+      //Before DN throws too many open files
+      verifyBlockLocations(fs, p, 1);
+      Mockito.doThrow(new FileNotFoundException("Too many open files")).
+          when(injector).
+          throwTooManyOpenFiles();
+      DataNodeFaultInjector.set(injector);
+      ExtendedBlock b =
+          fs.getClient().getLocatedBlocks(p.toString(), 0).get(0).getBlock();
+      try {
+        new BlockSender(b, 0, -1, false, true, true,
+                cluster.getDataNodes().get(0), null,
+                CachingStrategy.newDefaultStrategy());
+        fail("Must throw FileNotFoundException");
+      } catch (FileNotFoundException fe) {
+        assertTrue("Should throw too many open files",
+                fe.getMessage().contains("Too many open files"));
+      }
+      cluster.triggerHeartbeats(); // IBR delete ack
+      //After DN throws too many open files
+      assertTrue(cluster.getDataNodes().get(0).getFSDataset().isValidBlock(b));
+      verifyBlockLocations(fs, p, 1);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      DataNodeFaultInjector.set(oldInjector);
+    }
+  }
+
+  private void verifyBlockLocations(DistributedFileSystem fs, Path p,
+      int expected) throws IOException, TimeoutException, InterruptedException {
+    final LocatedBlock lb =
+        fs.getClient().getLocatedBlocks(p.toString(), 0).get(0);
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return lb.getLocations().length == expected;
+      }
+    }, 1000, 6000);
+  }
 }




[28/50] [abbrv] hadoop git commit: HADOOP-14491. Azure has messed doc structure. Contributed by Mingliang Liu

Posted by xy...@apache.org.
HADOOP-14491. Azure has messed doc structure. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/974f33ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/974f33ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/974f33ad

Branch: refs/heads/HDFS-7240
Commit: 974f33add21f77fff920caee15d38526ffa5be79
Parents: 756ff41
Author: Mingliang Liu <li...@apache.org>
Authored: Mon Jun 5 18:24:40 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:51 2017 -0700

----------------------------------------------------------------------
 .../hadoop-azure/src/site/markdown/index.md     | 233 ++++++++++---------
 1 file changed, 123 insertions(+), 110 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/974f33ad/hadoop-tools/hadoop-azure/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/index.md b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
index 1dca3b9..9c57e60 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
@@ -14,20 +14,9 @@
 
 # Hadoop Azure Support: Azure Blob Storage
 
-* [Introduction](#Introduction)
-* [Features](#Features)
-* [Limitations](#Limitations)
-* [Usage](#Usage)
-    * [Concepts](#Concepts)
-    * [Configuring Credentials](#Configuring_Credentials)
-    * [Page Blob Support and Configuration](#Page_Blob_Support_and_Configuration)
-    * [Atomic Folder Rename](#Atomic_Folder_Rename)
-    * [Accessing wasb URLs](#Accessing_wasb_URLs)
-    * [Append API Support and Configuration](#Append_API_Support_and_Configuration)
-    * [Multithread Support](#Multithread_Support)
-* [Testing the hadoop-azure Module](#Testing_the_hadoop-azure_Module)
-
-## <a name="Introduction" />Introduction
+<!-- MACRO{toc|fromDepth=1|toDepth=3} -->
+
+## Introduction
 
 The hadoop-azure module provides support for integration with
 [Azure Blob Storage](http://azure.microsoft.com/en-us/documentation/services/storage/).
@@ -38,7 +27,7 @@ on the additional artifacts it requires, notably the
 To make it part of Apache Hadoop's default classpath, simply make sure that
 HADOOP_OPTIONAL_TOOLS in hadoop-env.sh has 'hadoop-azure' in the list.
 
-## <a name="Features" />Features
+## Features
 
 * Read and write data stored in an Azure Blob Storage account.
 * Present a hierarchical file system view by implementing the standard Hadoop
@@ -54,15 +43,15 @@ HADOOP_OPTIONAL_TOOLS in hadoop-env.sh has 'hadoop-azure' in the list.
 * Tested on both Linux and Windows.
 * Tested at scale.
 
-## <a name="Limitations" />Limitations
+## Limitations
 
 * File owner and group are persisted, but the permissions model is not enforced.
   Authorization occurs at the level of the entire Azure Blob Storage account.
 * File last access time is not tracked.
 
-## <a name="Usage" />Usage
+## Usage
 
-### <a name="Concepts" />Concepts
+### Concepts
 
 The Azure Blob Storage data model presents 3 core concepts:
 
@@ -76,7 +65,7 @@ The Azure Blob Storage data model presents 3 core concepts:
   The internal implementation also uses blobs to persist the file system
   hierarchy and other metadata.
 
-### <a name="Configuring_Credentials" />Configuring Credentials
+### Configuring Credentials
 
 Usage of Azure Blob Storage requires configuration of credentials.  Typically
 this is set in core-site.xml.  The configuration property name is of the form
@@ -87,11 +76,12 @@ untrusted party.**
 
 For example:
 
-    <property>
-      <name>fs.azure.account.key.youraccount.blob.core.windows.net</name>
-      <value>YOUR ACCESS KEY</value>
-    </property>
-
+```xml
+<property>
+  <name>fs.azure.account.key.youraccount.blob.core.windows.net</name>
+  <value>YOUR ACCESS KEY</value>
+</property>
+```
 In many Hadoop clusters, the core-site.xml file is world-readable. It is possible to
 protect the access key within a credential provider as well. This provides an encrypted
 file format along with protection with file permissions.
@@ -110,14 +100,14 @@ For additional reading on the credential provider API see:
 
 ###### provision
 
-```
+```bash
 % hadoop credential create fs.azure.account.key.youraccount.blob.core.windows.net -value 123
     -provider localjceks://file/home/lmccay/wasb.jceks
 ```
 
 ###### configure core-site.xml or command line system property
 
-```
+```xml
 <property>
   <name>hadoop.security.credential.provider.path</name>
   <value>localjceks://file/home/lmccay/wasb.jceks</value>
@@ -127,7 +117,7 @@ For additional reading on the credential provider API see:
 
 ###### distcp
 
-```
+```bash
 % hadoop distcp
     [-D hadoop.security.credential.provider.path=localjceks://file/home/lmccay/wasb.jceks]
     hdfs://hostname:9001/user/lmccay/007020615 wasb://yourcontainer@youraccount.blob.core.windows.net/testDir/
@@ -145,22 +135,25 @@ specifies an external program to be invoked by Hadoop processes to decrypt the
 key.  The encrypted key value is passed to this external program as a command
 line argument:
 
-    <property>
-      <name>fs.azure.account.keyprovider.youraccount</name>
-      <value>org.apache.hadoop.fs.azure.ShellDecryptionKeyProvider</value>
-    </property>
+```xml
+<property>
+  <name>fs.azure.account.keyprovider.youraccount</name>
+  <value>org.apache.hadoop.fs.azure.ShellDecryptionKeyProvider</value>
+</property>
 
-    <property>
-      <name>fs.azure.account.key.youraccount.blob.core.windows.net</name>
-      <value>YOUR ENCRYPTED ACCESS KEY</value>
-    </property>
+<property>
+  <name>fs.azure.account.key.youraccount.blob.core.windows.net</name>
+  <value>YOUR ENCRYPTED ACCESS KEY</value>
+</property>
 
-    <property>
-      <name>fs.azure.shellkeyprovider.script</name>
-      <value>PATH TO DECRYPTION PROGRAM</value>
-    </property>
+<property>
+  <name>fs.azure.shellkeyprovider.script</name>
+  <value>PATH TO DECRYPTION PROGRAM</value>
+</property>
+
+```
 
-### <a name="Page_Blob_Support_and_Configuration" />Page Blob Support and Configuration
+### Page Blob Support and Configuration
 
 The Azure Blob Storage interface for Hadoop supports two kinds of blobs,
 [block blobs and page blobs](http://msdn.microsoft.com/en-us/library/azure/ee691964.aspx).
@@ -182,10 +175,12 @@ folder names.
 
 For example:
 
-    <property>
-      <name>fs.azure.page.blob.dir</name>
-      <value>/hbase/WALs,/hbase/oldWALs,/data/mypageblobfiles</value>
-    </property>
+```xml
+<property>
+  <name>fs.azure.page.blob.dir</name>
+  <value>/hbase/WALs,/hbase/oldWALs,/data/mypageblobfiles</value>
+</property>
+```
 
 You can set this to simply / to make all files page blobs.
 
@@ -197,7 +192,7 @@ The configuration option `fs.azure.page.blob.extension.size` is the page blob
 extension size.  This defines the amount to extend a page blob if it starts to
 get full.  It must be 128MB or greater, specified as an integer number of bytes.
 
-### <a name="Atomic_Folder_Rename" />Atomic Folder Rename
+### Atomic Folder Rename
 
 Azure storage stores files as a flat key/value store without formal support
 for folders.  The hadoop-azure file system layer simulates folders on top
@@ -216,12 +211,14 @@ the intention of the rename operation, to allow redo in event of a failure.
 
 For example:
 
-    <property>
-      <name>fs.azure.atomic.rename.dir</name>
-      <value>/hbase,/data</value>
-    </property>
+```xml
+<property>
+  <name>fs.azure.atomic.rename.dir</name>
+  <value>/hbase,/data</value>
+</property>
+```
 
-### <a name="Accessing_wasb_URLs" />Accessing wasb URLs
+### Accessing wasb URLs
 
 After credentials are configured in core-site.xml, any Hadoop component may
 reference files in that Azure Blob Storage account by using URLs of the following
@@ -238,28 +235,32 @@ For example, the following
 commands demonstrate access to a storage account named `youraccount` and a
 container named `yourcontainer`.
 
-    > hadoop fs -mkdir wasb://yourcontainer@youraccount.blob.core.windows.net/testDir
+```bash
+% hadoop fs -mkdir wasb://yourcontainer@youraccount.blob.core.windows.net/testDir
 
-    > hadoop fs -put testFile wasb://yourcontainer@youraccount.blob.core.windows.net/testDir/testFile
+% hadoop fs -put testFile wasb://yourcontainer@youraccount.blob.core.windows.net/testDir/testFile
 
-    > hadoop fs -cat wasbs://yourcontainer@youraccount.blob.core.windows.net/testDir/testFile
-    test file content
+% hadoop fs -cat wasbs://yourcontainer@youraccount.blob.core.windows.net/testDir/testFile
+test file content
+```
 
 It's also possible to configure `fs.defaultFS` to use a `wasb` or `wasbs` URL.
 This causes all bare paths, such as `/testDir/testFile` to resolve automatically
 to that file system.
 
-### <a name="Append_API_Support_and_Configuration" />Append API Support and Configuration
+### Append API Support and Configuration
 
 The Azure Blob Storage interface for Hadoop has optional support for Append API for
 single writer by setting the configuration `fs.azure.enable.append.support` to true.
 
 For Example:
 
-    <property>
-      <name>fs.azure.enable.append.support</name>
-      <value>true</value>
-    </property>
+```xml
+<property>
+  <name>fs.azure.enable.append.support</name>
+  <value>true</value>
+</property>
+```
 
 It must be noted Append support in Azure Blob Storage interface DIFFERS FROM HDFS SEMANTICS. Append
 support does not enforce single writer internally but requires applications to guarantee this semantic.
@@ -267,25 +268,29 @@ It becomes a responsibility of the application either to ensure single-threaded
 file path, or rely on some external locking mechanism of its own.  Failure to do so will result in
 unexpected behavior.
 
-### <a name="Multithread_Support" />Multithread Support
+### Multithread Support
 
 Rename and Delete blob operations on directories with a large number of files and sub directories are currently very slow, as these operations are done one blob at a time, serially. These files and sub folders can be deleted or renamed in parallel. The following configurations can be used to enable threads to do the parallel processing
 
 To enable 10 threads for the Delete operation, use the following configuration. Set the value to 0 or 1 to disable threads. The default behavior is threads disabled.
 
-    <property>
-      <name>fs.azure.delete.threads</name>
-      <value>10</value>
-    </property>
+```xml
+<property>
+  <name>fs.azure.delete.threads</name>
+  <value>10</value>
+</property>
+```
 
 To enable 20 threads for the Rename operation, use the following configuration. Set the value to 0 or 1 to disable threads. The default behavior is threads disabled.
 
-    <property>
-      <name>fs.azure.rename.threads</name>
-      <value>20</value>
-    </property>
+```xml
+<property>
+  <name>fs.azure.rename.threads</name>
+  <value>20</value>
+</property>
+```
 
-### <a name="WASB_SECURE_MODE" />WASB Secure mode and configuration
+### WASB Secure mode and configuration
 
 WASB can operate in secure mode, where the Storage access keys required to communicate with Azure storage do not have to
 be in the same address space as the process using WASB. In this mode all interactions with Azure storage are performed using
@@ -295,30 +300,32 @@ Romote mode, however for testing purposes the local mode can be enabled to gener
 
 To enable Secure mode, the following property needs to be set to true.
 
-```
-    <property>
-      <name>fs.azure.secure.mode</name>
-      <value>true</value>
-    </property>
+```xml
+<property>
+  <name>fs.azure.secure.mode</name>
+  <value>true</value>
+</property>
 ```
 
 To enable SAS key generation locally, the following property needs to be set to true.
 
+```xml
+<property>
+  <name>fs.azure.local.sas.key.mode</name>
+  <value>true</value>
+</property>
 ```
-    <property>
-      <name>fs.azure.local.sas.key.mode</name>
-      <value>true</value>
-    </property>
-```
+
+To use the remote SAS key generation mode, an external REST service is expected to provide the required SAS keys.
+The following property can be used to provide the endpoint to use for remote SAS key generation:
 
+```xml
+<property>
+  <name>fs.azure.cred.service.url</name>
+  <value>{URL}</value>
+</property>
 ```
-    <property>
-      <name>fs.azure.cred.service.url</name>
-      <value>{URL}</value>
-    </property>
-```
+
 The remote service is expected to provide support for two REST calls ```{URL}/GET_CONTAINER_SAS``` and ```{URL}/GET_RELATIVE_BLOB_SAS```, for generating
 container and relative blob sas keys. An example request:
 
@@ -326,7 +333,8 @@ container and relative blob sas keys. An example requests
 ```{URL}/GET_CONTAINER_SAS?storage_account=<account_name>&container=<container>&relative_path=<relative path>&sas_expiry=<expiry period>&delegation_token=<delegation token>```
 
 The service is expected to return a response in JSON format:
-```
+
+```json
 {
   "responseCode" : 0 or non-zero <int>,
   "responseMessage" : relevant message on failure <String>,
@@ -334,40 +342,42 @@ The service is expected to return a response in JSON format:
 }
 ```
 
-## <a name="WASB Authorization" />Authorization Support in WASB.
+### Authorization Support in WASB
 
 Authorization support can be enabled in WASB using the following configuration:
 
+```xml
+<property>
+  <name>fs.azure.authorization</name>
+  <value>true</value>
+</property>
 ```
-    <property>
-      <name>fs.azure.authorization</name>
-      <value>true</value>
-    </property>
-```
-  The current implementation of authorization relies on the presence of an external service that can enforce
-  the authorization. The service is expected to be running on a URL provided by the following config.
 
-```
-    <property>
-      <name>fs.azure.authorization.remote.service.url</name>
-      <value>{URL}</value>
-    </property>
+The current implementation of authorization relies on the presence of an external service that can enforce
+the authorization. The service is expected to be running on a URL provided by the following config.
+
+```xml
+<property>
+  <name>fs.azure.authorization.remote.service.url</name>
+  <value>{URL}</value>
+</property>
 ```
 
-  The remote service is expected to provide support for the following REST call: ```{URL}/CHECK_AUTHORIZATION```
-  An example request:
+The remote service is expected to provide support for the following REST call: ```{URL}/CHECK_AUTHORIZATION```
+An example request:
   ```{URL}/CHECK_AUTHORIZATION?wasb_absolute_path=<absolute_path>&operation_type=<operation type>&delegation_token=<delegation token>```
 
-  The service is expected to return a response in JSON format:
-  ```
-  {
+The service is expected to return a response in JSON format:
+
+```json
+{
     "responseCode" : 0 or non-zero <int>,
     "responseMessage" : relevant message on failure <String>,
     "authorizationResult" : true/false <boolean>
-  }
-  ```
+}
+```
 
-## <a name="Testing_the_hadoop-azure_Module" />Testing the hadoop-azure Module
+## Testing the hadoop-azure Module
 
 The hadoop-azure module includes a full suite of unit tests.  Most of the tests
 will run without additional configuration by running `mvn test`.  This includes
@@ -382,10 +392,12 @@ that runs on a local machine.
 To use the emulator, install Azure SDK 2.3 and start the storage emulator.  Then,
 edit `src/test/resources/azure-test.xml` and add the following property:
 
-    <property>
-      <name>fs.azure.test.emulator</name>
-      <value>true</value>
-    </property>
+```xml
+<property>
+  <name>fs.azure.test.emulator</name>
+  <value>true</value>
+</property>
+```
 
 There is a known issue when running tests with the emulator.  You may see the
 following failure message:
@@ -399,6 +411,7 @@ file to `src/test/resources/azure-auth-keys.xml` and setting
 the name of the storage account and its access key.
 
 For example:
+
 ```xml
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>


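The shell commands in the restructured page have a direct Java equivalent through the standard FileSystem API. A minimal sketch, assuming credentials are already configured in core-site.xml as the page describes; the account and container names are placeholders:

    import java.io.OutputStream;
    import java.net.URI;
    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WasbExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration(); // picks up core-site.xml
        URI wasb = URI.create(
            "wasb://yourcontainer@youraccount.blob.core.windows.net/");
        try (FileSystem fs = FileSystem.get(wasb, conf)) {
          Path dir = new Path("/testDir");
          fs.mkdirs(dir); // same effect as `hadoop fs -mkdir wasb://...`
          try (OutputStream out = fs.create(new Path(dir, "testFile"))) {
            out.write("test file content".getBytes(StandardCharsets.UTF_8));
          }
        }
      }
    }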


[23/50] [abbrv] hadoop git commit: YARN-6458. Use yarn package manager to lock down dependency versions for new web UI. Contributed by Sreenath Somarajapuram.

Posted by xy...@apache.org.
YARN-6458. Use yarn package manager to lock down dependency versions for new web UI. Contributed by Sreenath Somarajapuram.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4fba3d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4fba3d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4fba3d0

Branch: refs/heads/HDFS-7240
Commit: f4fba3d0acd06f4b6617b82688d4904f45a08b2a
Parents: fe76c59
Author: Sunil G <su...@apache.org>
Authored: Sun Jun 4 22:05:14 2017 +0530
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:50 2017 -0700

----------------------------------------------------------------------
 .../hadoop-yarn/hadoop-yarn-ui/README.md        |   64 +
 .../hadoop-yarn/hadoop-yarn-ui/pom.xml          |  152 +-
 .../hadoop-yarn-ui/src/main/webapp/.bowerrc     |    5 +-
 .../src/main/webapp/WEB-INF/wro.xml             |    9 +
 .../src/main/webapp/bower-shrinkwrap.json       |   66 +
 .../src/main/webapp/ember-cli-build.js          |   16 +-
 .../hadoop-yarn-ui/src/main/webapp/package.json |   10 +-
 .../hadoop-yarn-ui/src/main/webapp/yarn.lock    | 4983 ++++++++++++++++++
 8 files changed, 5253 insertions(+), 52 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4fba3d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/README.md
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/README.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/README.md
new file mode 100644
index 0000000..f67f351
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/README.md
@@ -0,0 +1,64 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+# Yarn UI
+
+The Yarn UI is an Ember-based web app that provides visualization of the applications running on the Apache Hadoop YARN framework.
+
+## Configurations
+
+* You can point the UI to custom locations by setting the environment variables in `src/main/webapp/config/configs.env`
+
+## Development
+
+All the following commands must be run inside `src/main/webapp`.
+
+### Prerequisites
+
+You will need the following things properly installed on your computer.
+
+* Install [Yarn](https://yarnpkg.com) v0.21.3
+* Install [Bower](http://bower.io/) v1.7.7
+* Install all dependencies by running `yarn install` & `bower install`
+
+### Running UI
+
+* `yarn start`
+* Visit your app at [http://localhost:4200](http://localhost:4200).
+
+### Building
+
+* `yarn run build` (production)
+* Files would be stored in "dist/"
+
+### Adding new dependencies
+
+**Warning: Do not edit the _package.json_ or _bower.json_ files manually. This could make them out-of-sync with the respective lock or shrinkwrap files.**
+
+Yarn UI has replaced NPM with the Yarn package manager, which is hence used to manage the dependencies defined in package.json.
+
+* Please use the Yarn and Bower command-line tools to add new dependencies, and make sure the tool versions are the same as those defined in the Prerequisites section.
+* Once any dependency is added:
+  *  If it's in package.json, make sure that the respective changes, and only those, are reflected in the yarn.lock file.
+  *  If it's in bower.json, make sure that the respective changes, and only those, are reflected in the bower-shrinkwrap.json file.
+* Commands to add using CLI tools:
+  * Yarn: yarn add [package-name]
+  * Bower: bower install --save [package-name]
+
+### Adding new routes (pages), controllers, components etc.
+
+* Use ember-cli blueprint generator - [Ember CLI](http://ember-cli.com/extending/#generators-and-blueprints)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4fba3d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
index 2985a05..2823fa8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/pom.xml
@@ -26,15 +26,16 @@
   <artifactId>hadoop-yarn-ui</artifactId>
   <version>3.0.0-alpha4-SNAPSHOT</version>
   <name>Apache Hadoop YARN UI</name>
-  <packaging>${packaging.type}</packaging>
+  <packaging>${packagingType}</packaging>
 
   <properties>
-    <packaging.type>pom</packaging.type>
-    <webappTgtDir>${basedir}/target/src/main/webapp</webappTgtDir>
-    <node.executable>${basedir}/target/src/main/webapp/node/node</node.executable>
-    <nodeVersion>v5.7.1</nodeVersion>
-    <npmVersion>3.6.0</npmVersion>
-    <keep-ui-build-cache>false</keep-ui-build-cache>
+    <packagingType>pom</packagingType>
+
+    <webappDir>${basedir}/target/webapp</webappDir>
+    <nodeExecutable>${basedir}/target/webapp/node/node</nodeExecutable>
+    <packageManagerScript>node/yarn/dist/bin/yarn.js</packageManagerScript>
+
+    <keepUIBuildCache>false</keepUIBuildCache>
   </properties>
 
   <build>
@@ -47,7 +48,9 @@
           <excludes>
             <exclude>src/main/webapp/jsconfig.json</exclude>
             <exclude>src/main/webapp/bower.json</exclude>
+            <exclude>src/main/webapp/bower-shrinkwrap.json</exclude>
             <exclude>src/main/webapp/package.json</exclude>
+            <exclude>src/main/webapp/yarn.lock</exclude>
             <exclude>src/main/webapp/testem.json</exclude>
             <exclude>src/main/webapp/public/assets/images/**/*</exclude>
             <exclude>src/main/webapp/public/assets/images/*</exclude>
@@ -57,6 +60,7 @@
             <exclude>src/main/webapp/.ember-cli</exclude>
             <exclude>src/main/webapp/.jshintrc</exclude>
             <exclude>src/main/webapp/.watchmanconfig</exclude>
+            <exclude>src/main/webapp/WEB-INF/wro.xml</exclude>
           </excludes>
         </configuration>
       </plugin>
@@ -65,14 +69,14 @@
         <artifactId>maven-clean-plugin</artifactId>
         <version>3.0.0</version>
         <configuration>
-          <skip>${keep-ui-build-cache}</skip>
+          <skip>${keepUIBuildCache}</skip>
           <followSymLinks>false</followSymLinks>
           <filesets>
             <fileset>
-              <directory>${webappTgtDir}/bower_components</directory>
+              <directory>${webappDir}/bower_components</directory>
             </fileset>
             <fileset>
-              <directory>${webappTgtDir}/node_modules</directory>
+              <directory>${webappDir}/node_modules</directory>
             </fileset>
           </filesets>
         </configuration>
@@ -89,66 +93,79 @@
       </activation>
 
       <properties>
-        <packaging.type>war</packaging.type>
+        <packagingType>war</packagingType>
       </properties>
 
       <build>
         <plugins>
-          <!-- prepare source code -->
+          <!-- Copy files into target for build -->
           <plugin>
-            <groupId>org.apache.maven.plugins</groupId>
-            <artifactId>maven-antrun-plugin</artifactId>
+            <artifactId>maven-resources-plugin</artifactId>
             <executions>
               <execution>
-                <id>prepare-source-code</id>
-                <phase>generate-sources</phase>
+                <id>copy-resources</id>
+                <!-- here the phase you need -->
+                <phase>validate</phase>
                 <goals>
-                  <goal>run</goal>
+                  <goal>copy-resources</goal>
                 </goals>
                 <configuration>
-                  <target>
-                    <copy toDir="${basedir}/target/src/main/webapp">
-                      <fileset dir="${basedir}/src/main/webapp"/>
-                    </copy>
-
-                    <copy toDir="${basedir}/target/src/public">
-                      <fileset dir="${basedir}/public"/>
-                    </copy>
-                  </target>
+                  <outputDirectory>${webappDir}</outputDirectory>
+                  <resources>
+                    <resource>
+                      <directory>${basedir}/src/main/webapp</directory>
+                      <filtering>true</filtering>
+                      <excludes>
+                        <exclude>node_modules/**/*</exclude>
+                        <exclude>bower_components/**/*</exclude>
+                        <exclude>tmp/**/*</exclude>
+                        <exclude>dist/**/*</exclude>
+                      </excludes>
+                    </resource>
+                  </resources>
                 </configuration>
               </execution>
             </executions>
           </plugin>
 
+          <!-- Install Node, Yarn, Bower & dependencies -->
           <plugin>
             <groupId>com.github.eirslett</groupId>
             <artifactId>frontend-maven-plugin</artifactId>
-            <version>1.1</version>
+            <version>1.2</version>
             <configuration>
-              <workingDirectory>${webappTgtDir}</workingDirectory>
+              <workingDirectory>${webappDir}</workingDirectory>
             </configuration>
             <executions>
+
+              <!-- Install all dependencies -->
               <execution>
-                <phase>generate-sources</phase>
-                <id>install node and npm</id>
+                <phase>generate-resources</phase>
+                <id>install node and yarn</id>
                 <goals>
-                  <goal>install-node-and-npm</goal>
+                  <goal>install-node-and-yarn</goal>
                 </goals>
                 <configuration>
-                  <nodeVersion>${nodeVersion}</nodeVersion>
-                  <npmVersion>${npmVersion}</npmVersion>
+                  <nodeVersion>v5.12.0</nodeVersion>
+                  <yarnVersion>v0.21.3</yarnVersion>
                 </configuration>
               </execution>
               <execution>
-                <phase>generate-sources</phase>
-                <id>npm install</id>
+                <phase>generate-resources</phase>
+                <id>yarn install</id>
                 <goals>
-                  <goal>npm</goal>
+                  <goal>yarn</goal>
                 </goals>
+                <configuration>
+                  <arguments>install</arguments>
+                </configuration>
               </execution>
               <execution>
-                <phase>generate-sources</phase>
+                <phase>generate-resources</phase>
                 <id>bower install</id>
+                <configuration>
+                  <arguments>install</arguments>
+                </configuration>
                 <goals>
                   <goal>bower</goal>
                 </goals>
@@ -156,38 +173,80 @@
             </executions>
           </plugin>
 
-
-          <!-- Bower install & grunt build-->
           <plugin>
-            <artifactId>exec-maven-plugin</artifactId>
             <groupId>org.codehaus.mojo</groupId>
+            <artifactId>exec-maven-plugin</artifactId>
             <executions>
+
+              <!-- Build -->
               <execution>
                 <id>ember build</id>
-                <phase>generate-sources</phase>
+                <phase>generate-resources</phase>
                 <goals>
                   <goal>exec</goal>
                 </goals>
                 <configuration>
-                  <workingDirectory>${webappTgtDir}</workingDirectory>
-                  <executable>${node.executable}</executable>
+                  <workingDirectory>${webappDir}</workingDirectory>
+                  <executable>${nodeExecutable}</executable>
                   <arguments>
-                    <argument>node/node_modules/npm/bin/npm-cli</argument>
+                    <argument>${packageManagerScript}</argument>
                     <argument>run</argument>
                     <argument>build:mvn</argument>
                   </arguments>
                 </configuration>
               </execution>
+
             </executions>
           </plugin>
 
+          <!-- Asset minifier -->
+          <plugin>
+            <groupId>ro.isdc.wro4j</groupId>
+            <artifactId>wro4j-maven-plugin</artifactId>
+            <version>1.7.9</version>
+            <executions>
+              <execution>
+                <phase>prepare-package</phase>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+              </execution>
+            </executions>
+            <configuration>
+              <minimize>true</minimize>
+              <targetGroups>yarn-ui,vendor</targetGroups>
+              <destinationFolder>${basedir}/target/minified-resources/assets</destinationFolder>
+              <contextFolder>${webappDir}/dist/assets</contextFolder>
+              <wroFile>${webappDir}/WEB-INF/wro.xml</wroFile>
+            </configuration>
+          </plugin>
+
           <!-- Package into war -->
           <plugin>
             <groupId>org.apache.maven.plugins</groupId>
             <artifactId>maven-war-plugin</artifactId>
+            <executions>
+              <execution>
+                <phase>package</phase>
+              </execution>
+            </executions>
             <configuration>
-              <webXml>${basedir}/src/main/webapp/WEB-INF/web.xml</webXml>
-              <warSourceDirectory>${webappTgtDir}/dist</warSourceDirectory>
+              <webXml>${webappDir}/WEB-INF/web.xml</webXml>
+              <warSourceDirectory>${webappDir}/dist</warSourceDirectory>
+              <webResources>
+                <resource>
+                  <filtering>false</filtering>
+                  <directory>${basedir}/target/minified-resources</directory>
+                </resource>
+              </webResources>
+            </configuration>
+          </plugin>
+
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-jar-plugin</artifactId>
+            <configuration>
+              <skipIfEmpty>true</skipIfEmpty>
             </configuration>
           </plugin>
 
@@ -195,4 +254,5 @@
       </build>
     </profile>
   </profiles>
+
 </project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4fba3d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
index 959e169..5b0b07d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -1,4 +1,7 @@
 {
   "directory": "bower_components",
-  "analytics": false
+  "analytics": false,
+  "resolvers": [
+    "bower-shrinkwrap-resolver-ext"
+  ]
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4fba3d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/wro.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/wro.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/wro.xml
new file mode 100644
index 0000000..64c925a
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/WEB-INF/wro.xml
@@ -0,0 +1,9 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<groups xmlns="http://www.isdc.ro/wro">
+  <group name='vendor'>
+    <js>/vendor.js</js>
+  </group>
+  <group name='yarn-ui'>
+    <js>/yarn-ui.js</js>
+  </group>
+</groups>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4fba3d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower-shrinkwrap.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower-shrinkwrap.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower-shrinkwrap.json
new file mode 100644
index 0000000..b0f3aa3
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/bower-shrinkwrap.json
@@ -0,0 +1,66 @@
+{
+  "https://github.com/DataTables/DataTables.git": {
+    "1.10.15": "1.10.15"
+  },
+  "https://github.com/components/ember-data.git": {
+    "2.1.0": "d8b4d3092f67afe22d9d374c40d719d557915fa3"
+  },
+  "https://github.com/components/ember.git": {
+    "2.2.0": "49e042ca89922ed96b27488c2a98add280ae7123"
+  },
+  "https://github.com/components/jqueryui.git": {
+    "1.11.4": "c34f8dbf3ba57b3784b93f26119f436c0e8288e1"
+  },
+  "https://github.com/dockyard/ember-qunit-notifications.git": {
+    "0.1.0": "a83277aa7a1c0545c66e6d133caebb9a620e71ad"
+  },
+  "https://github.com/dockyard/qunit-notifications.git": {
+    "0.1.1": "7a13f6dba5a340e1cb9e0b64c1c711e4d7edaca1"
+  },
+  "https://github.com/ember-cli/ember-cli-shims.git": {
+    "0.0.6": "dcab43b58d5698690050bb9a46ead5c8663c7da1"
+  },
+  "https://github.com/ember-cli/ember-cli-test-loader.git": {
+    "0.2.1": "3348d801089279296c38f31ae14d9c4d115ce154"
+  },
+  "https://github.com/ember-cli/ember-load-initializers.git": {
+    "0.1.7": "7bb21488563bd1bba23e903a812bf5815beddd1a"
+  },
+  "https://github.com/fgnass/spin.js.git": {
+    "2.3.2": "2.3.2"
+  },
+  "https://github.com/ivaynberg/select2.git": {
+    "4.0.0": "4.0.0"
+  },
+  "https://github.com/jquery/jquery-dist.git": {
+    "2.1.4": "7751e69b615c6eca6f783a81e292a55725af6b85"
+  },
+  "https://github.com/jquery/qunit.git": {
+    "1.19.0": "467e7e34652ad7d5883ce9c568461cf8c5e172a8"
+  },
+  "https://github.com/mbostock-bower/d3-bower.git": {
+    "3.5.17": "3.5.17"
+  },
+  "https://github.com/moment/moment-timezone.git": {
+    "0.5.0": "74a2e9378ecf4a31a168f3049f086565c8d66814"
+  },
+  "https://github.com/moment/moment.git": {
+    "2.10.6": "2.10.6",
+    "2.12.0": "d3d7488b4d60632854181cb0a9af325d57fb3d51"
+  },
+  "https://github.com/rwjblue/ember-qunit-builds.git": {
+    "0.4.16": "142c4066a5458bef9dfcb92b70152b9c01d79188"
+  },
+  "https://github.com/sreenaths/more-js.git": {
+    "0.8.2": "0.8.2"
+  },
+  "https://github.com/sreenaths/snippet-ss.git": {
+    "1.11.0": "c1abc566f4e001b7f1939b6dbdd911eadc969cf9"
+  },
+  "https://github.com/stefanpenner/loader.js.git": {
+    "3.3.0": "ac909550c9544325632542bbea97531cc60bc628"
+  },
+  "https://github.com/twbs/bootstrap.git": {
+    "3.3.6": "81df608a40bf0629a1dc08e584849bb1e43e0b7a"
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4fba3d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
index b75a2e9..4799f92 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
@@ -21,8 +21,22 @@ var Funnel = require("broccoli-funnel");
 var EmberApp = require('ember-cli/lib/broccoli/ember-app');
 
 module.exports = function(defaults) {
+  var isProd = EmberApp.env() === 'production';
   var app = new EmberApp(defaults, {
-    hinting: true
+    storeConfigInMeta: false,
+    minifyCSS: {
+      enabled: isProd
+    },
+    minifyJS: {
+      // Will be minified by wro4j-maven-plugin for performance
+      enabled: false,
+    },
+    fingerprint: {
+      enabled: false
+    },
+    sourcemaps: {
+      enabled: !isProd
+    }
   });
 
   app.import("bower_components/datatables/media/css/jquery.dataTables.min.css");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4fba3d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
index 2964d33..2830be3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package.json
@@ -10,7 +10,6 @@
   "scripts": {
     "build": "TMPDIR=tmp node ./node_modules/ember-cli/bin/ember build",
     "start": "TMPDIR=tmp node ./node_modules/ember-cli/bin/ember server",
-
     "build:mvn": "TMPDIR=tmp node/node ./node_modules/ember-cli/bin/ember build -prod"
   },
   "repository": "",
@@ -21,12 +20,13 @@
   "license": "Apache",
   "devDependencies": {
     "bower": "1.7.7",
+    "bower-shrinkwrap-resolver-ext": "0.1.0",
     "broccoli-asset-rev": "2.4.2",
     "broccoli-funnel": "1.0.1",
     "broccoli-merge-trees": "1.1.1",
     "ember-array-contains-helper": "1.0.2",
     "ember-bootstrap": "0.5.1",
-    "ember-cli": "^1.13.13",
+    "ember-cli": "1.13.14",
     "ember-cli-app-version": "1.0.0",
     "ember-cli-babel": "5.1.6",
     "ember-cli-content-security-policy": "0.4.0",
@@ -49,8 +49,10 @@
     "ember-lodash": "0.0.10",
     "ember-resolver": "2.0.3",
     "ember-spin-spinner": "0.2.3",
-    "ember-truth-helpers": "1.2.0",
-    "select2": "4.0.0"
+    "ember-truth-helpers": "1.3.0",
+    "loader.js": "4.2.3",
+    "select2": "4.0.0",
+    "testem": "0.9.11"
   },
   "dependencies": {
     "em-helpers": "^0.8.0",




[16/50] [abbrv] hadoop git commit: HADOOP-13921. Remove log4j classes from JobConf.

Posted by xy...@apache.org.
HADOOP-13921. Remove log4j classes from JobConf.

Signed-off-by: Akira Ajisaka <aa...@apache.org>


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9d9e56c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9d9e56c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9d9e56c3

Branch: refs/heads/HDFS-7240
Commit: 9d9e56c39f848719814d1f25db726c0e9608c89f
Parents: 0618f49
Author: Sean Busbey <bu...@cloudera.com>
Authored: Thu May 4 11:16:25 2017 -0500
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:49 2017 -0700

----------------------------------------------------------------------
 hadoop-client-modules/hadoop-client-runtime/pom.xml           | 1 -
 .../main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java | 4 ++--
 .../src/main/java/org/apache/hadoop/mapred/JobConf.java       | 7 +++----
 3 files changed, 5 insertions(+), 7 deletions(-)
----------------------------------------------------------------------
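
A minimal sketch of what this buys callers (illustrative only, not part of
the commit): with DEFAULT_LOG_LEVEL now a plain String, user code can read
the configured task log level without log4j on the classpath.

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class TaskLogLevelSketch {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        // DEFAULT_LOG_LEVEL is now a String rather than a log4j Level, so
        // this compiles and runs without org.apache.log4j.Level.
        String mapLevel = conf.get(MRJobConfig.MAP_LOG_LEVEL,
            JobConf.DEFAULT_LOG_LEVEL);
        System.out.println("map task log level: " + mapLevel);
      }
    }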


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d9e56c3/hadoop-client-modules/hadoop-client-runtime/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index dc0f005..3c8364c 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -96,7 +96,6 @@
     </dependency>
     <!-- Move log4j to optional, since it is needed for some pieces folks might not use:
          * one of the three custom log4j appenders we have
-         * JobConf (?!) (so essentially any user of MapReduce)
       -->
     <dependency>
       <groupId>log4j</groupId>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d9e56c3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 5fd66ac..a43da65 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -650,12 +650,12 @@ public class MRApps extends Apps {
     if (isMap) {
       return conf.get(
           MRJobConfig.MAP_LOG_LEVEL,
-          JobConf.DEFAULT_LOG_LEVEL.toString()
+          JobConf.DEFAULT_LOG_LEVEL
       );
     } else {
       return conf.get(
           MRJobConfig.REDUCE_LOG_LEVEL,
-          JobConf.DEFAULT_LOG_LEVEL.toString()
+          JobConf.DEFAULT_LOG_LEVEL
       );
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9d9e56c3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index f286a96..be8fa9e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.util.ClassUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Tool;
-import org.apache.log4j.Level;
 
 /** 
  * A map/reduce job configuration.
@@ -333,7 +332,7 @@ public class JobConf extends Configuration {
   private Credentials credentials = new Credentials();
   
   /**
-   * Configuration key to set the logging {@link Level} for the map task.
+   * Configuration key to set the logging level for the map task.
    *
    * The allowed logging levels are:
    * OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
@@ -342,7 +341,7 @@ public class JobConf extends Configuration {
     JobContext.MAP_LOG_LEVEL;
   
   /**
-   * Configuration key to set the logging {@link Level} for the reduce task.
+   * Configuration key to set the logging level for the reduce task.
    *
    * The allowed logging levels are:
    * OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
@@ -353,7 +352,7 @@ public class JobConf extends Configuration {
   /**
    * Default logging level for map/reduce tasks.
    */
-  public static final Level DEFAULT_LOG_LEVEL = Level.INFO;
+  public static final String DEFAULT_LOG_LEVEL = JobContext.DEFAULT_LOG_LEVEL;
 
   /**
    * The variable is kept for M/R 1.x applications, M/R 2.x applications should




[08/50] [abbrv] hadoop git commit: HADOOP-14460. Azure: update doc for live and contract tests. Contributed by Mingliang Liu

Posted by xy...@apache.org.
HADOOP-14460. Azure: update doc for live and contract tests. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c6f22d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c6f22d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c6f22d6

Branch: refs/heads/HDFS-7240
Commit: 5c6f22d62ea9e6fbe4e5411d5934958fcbf15dac
Parents: 93f2aaf
Author: Mingliang Liu <li...@apache.org>
Authored: Wed May 31 14:55:39 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:49 2017 -0700

----------------------------------------------------------------------
 .../hadoop-azure/src/site/markdown/index.md     | 90 ++++++++++++--------
 1 file changed, 56 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c6f22d6/hadoop-tools/hadoop-azure/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/site/markdown/index.md b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
index 1d1274b..1dca3b9 100644
--- a/hadoop-tools/hadoop-azure/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure/src/site/markdown/index.md
@@ -396,42 +396,64 @@ To resolve this, restart the Azure Emulator.  Ensure it v3.2 or later.
 
 It's also possible to run tests against a live Azure Storage account by saving a
 file to `src/test/resources/azure-auth-keys.xml` and setting
-`fs.azure.test.account.name` to the name of the storage account.
+the name of the storage account and its access key.
 
 For example:
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>fs.azure.test.account.name</name>
+    <value>{ACCOUNTNAME}.blob.core.windows.net</value>
+  </property>
+  <property>
+    <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
+    <value>{ACCOUNT ACCESS KEY}</value>
+  </property>
+</configuration>
+```
 
-    <?xml version="1.0"?>
-    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-    <configuration>
-      <property>
-        <name>fs.azure.account.key.youraccount.blob.core.windows.net</name>
-        <value>YOUR ACCESS KEY</value>
-      </property>
-
-      <property>
-        <name>fs.azure.test.account.name</name>
-        <value>youraccount</value>
-      </property>
-    </configuration>
-
-To run contract tests add live Azure Storage account by saving a
-file to `src/test/resources/azure-auth-keys.xml`.
-For example:
+To run contract tests, set the WASB file system URI in `src/test/resources/azure-auth-keys.xml`
+and the account access key. For example:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>fs.contract.test.fs.wasb</name>
+    <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
+    <description>The name of the azure file system for testing.</description>
+  </property>
+  <property>
+    <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
+    <value>{ACCOUNT ACCESS KEY}</value>
+  </property>
+</configuration>
+```
 
-    <?xml version="1.0"?>
-    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-    <configuration>
-      <property>
-        <name>fs.contract.test.fs.wasb</name>
-        <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
-        <description>The name of the azure file system for testing.</description>
-      </property>
-
-      <property>
-        <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
-        <value>{ACCOUNTKEY}</value>
-      </property>
-    </configuration>
-
-DO NOT ADD azure-auth-keys.xml TO REVISION CONTROL.  The keys to your Azure
+Overall, to run all the tests using `mvn test`, a sample `azure-auth-keys.xml` looks like the following:
+
+```xml
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>fs.azure.test.account.name</name>
+    <value>{ACCOUNTNAME}.blob.core.windows.net</value>
+  </property>
+  <property>
+    <name>fs.azure.account.key.{ACCOUNTNAME}.blob.core.windows.net</name>
+    <value>{ACCOUNT ACCESS KEY}</value>
+  </property>
+  <property>
+    <name>fs.contract.test.fs.wasb</name>
+    <value>wasb://{CONTAINERNAME}@{ACCOUNTNAME}.blob.core.windows.net</value>
+  </property>
+</configuration>
+```
+
+DO NOT ADD `azure-auth-keys.xml` TO REVISION CONTROL.  The keys to your Azure
 Storage account are a secret and must not be shared.
+




[06/50] [abbrv] hadoop git commit: HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fail due to no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.

Posted by xy...@apache.org.
HDFS-11659. TestDataNodeHotSwapVolumes.testRemoveVolumeBeingWritten fail due to no DataNode available for pipeline recovery. Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/611d452b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/611d452b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/611d452b

Branch: refs/heads/HDFS-7240
Commit: 611d452bcf217c680ac7169848311c06d9ec0a23
Parents: 71c34c7
Author: Lei Xu <le...@apache.org>
Authored: Tue May 30 11:09:03 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:48 2017 -0700

----------------------------------------------------------------------
 .../datanode/TestDataNodeHotSwapVolumes.java    | 34 ++++++++++++++++++--
 1 file changed, 32 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
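
A condensed sketch of the idea behind the fix (assumes a standard
MiniDFSCluster test setup; the node counts are illustrative): when a volume
is removed from a DataNode in an active write pipeline, recovery needs a
spare DataNode to take over the partial block.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class SpareDataNodeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
        try {
          cluster.waitActive();
          // ... write a file and remove a volume from one DataNode here ...
          // Start one extra DataNode (the null arguments accept defaults)
          // so pipeline recovery has a replacement target, as the patch
          // below does.
          cluster.startDataNodes(conf, 1, true, null, null, null);
        } finally {
          cluster.shutdown();
        }
      }
    }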


http://git-wip-us.apache.org/repos/asf/hadoop/blob/611d452b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 2d87614..9d140a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -97,6 +97,7 @@ public class TestDataNodeHotSwapVolumes {
   private static final int BLOCK_SIZE = 512;
   private static final int DEFAULT_STORAGES_PER_DATANODE = 2;
   private MiniDFSCluster cluster;
+  private Configuration conf;
 
   @After
   public void tearDown() {
@@ -111,7 +112,7 @@ public class TestDataNodeHotSwapVolumes {
   private void startDFSCluster(int numNameNodes, int numDataNodes,
       int storagePerDataNode) throws IOException {
     shutdown();
-    Configuration conf = new Configuration();
+    conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
 
     /*
@@ -756,7 +757,7 @@ public class TestDataNodeHotSwapVolumes {
     }
   }
 
-  @Test(timeout=180000)
+  @Test(timeout=600000)
   public void testRemoveVolumeBeingWritten()
       throws InterruptedException, TimeoutException, ReconfigurationException,
       IOException, BrokenBarrierException {
@@ -848,6 +849,9 @@ public class TestDataNodeHotSwapVolumes {
           1, fsVolumeReferences.size());
     }
 
+    // Add a new DataNode to help with the pipeline recovery.
+    cluster.startDataNodes(conf, 1, true, null, null, null);
+
     // Verify the file has sufficient replications.
     DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
     // Read the content back
@@ -857,6 +861,32 @@ public class TestDataNodeHotSwapVolumes {
     if (!exceptions.isEmpty()) {
       throw new IOException(exceptions.get(0).getCause());
     }
+
+    // Write more files to make sure that the DataNode whose volume was
+    // removed is still alive to receive data.
+    for (int i = 0; i < 10; i++) {
+      final Path file = new Path("/after-" + i);
+      try (FSDataOutputStream fout = fs.create(file, REPLICATION)) {
+        rb.nextBytes(writeBuf);
+        fout.write(writeBuf);
+      }
+    }
+
+    try (FsDatasetSpi.FsVolumeReferences fsVolumeReferences = fsDatasetSpi
+        .getFsVolumeReferences()) {
+      assertEquals("Volume remove wasn't successful.",
+          1, fsVolumeReferences.size());
+      FsVolumeSpi volume = fsVolumeReferences.get(0);
+      String bpid = cluster.getNamesystem().getBlockPoolId();
+      FsVolumeSpi.BlockIterator blkIter = volume.newBlockIterator(bpid, "test");
+      int blockCount = 0;
+      while (!blkIter.atEnd()) {
+        blkIter.nextBlock();
+        blockCount++;
+      }
+      assertTrue(String.format("DataNode(%d) should have more than 1 blocks",
+          dataNodeIdx), blockCount > 1);
+    }
   }
 
   @Test(timeout=60000)




[11/50] [abbrv] hadoop git commit: HDFS-5042. Completed files lost after power failure. Contributed by Vinayakumar B.

Posted by xy...@apache.org.
HDFS-5042. Completed files lost after power failure. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a81e70c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a81e70c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a81e70c

Branch: refs/heads/HDFS-7240
Commit: 5a81e70c448cf9674323ed220c758726d51a1aec
Parents: fee8342
Author: Kihwal Lee <ki...@apache.org>
Authored: Wed May 31 10:55:03 2017 -0500
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:49 2017 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/io/IOUtils.java | 55 +++++++++++++++++++-
 .../hdfs/server/datanode/BlockReceiver.java     |  9 +++-
 .../hdfs/server/datanode/FileIoProvider.java    | 19 ++++++-
 .../hdfs/server/datanode/LocalReplica.java      | 13 +++++
 .../server/datanode/fsdataset/FsDatasetSpi.java |  4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 31 ++++++++---
 .../server/datanode/SimulatedFSDataset.java     |  3 +-
 .../datanode/TestDataNodeHotSwapVolumes.java    |  6 ++-
 .../server/datanode/TestSimulatedFSDataset.java |  4 +-
 .../extdataset/ExternalDatasetImpl.java         |  3 +-
 .../fsdataset/impl/TestFsDatasetImpl.java       |  2 +-
 11 files changed, 130 insertions(+), 19 deletions(-)
----------------------------------------------------------------------
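
The core durability trick, reduced to a standalone JDK-only sketch
(illustrative; the real implementation is the IOUtils.fsync() added below):
after a replica is renamed into the finalized directory, the directory
itself must be fsynced, or the rename can be lost when power fails before
the file system flushes its metadata.

    import java.io.File;
    import java.io.IOException;
    import java.nio.channels.FileChannel;
    import java.nio.file.StandardOpenOption;

    public class DirFsyncSketch {
      public static void fsyncDirectory(File dir) throws IOException {
        // A directory must be opened read-only; a regular file needs WRITE
        // access for force() to have an effect.
        try (FileChannel channel =
            FileChannel.open(dir.toPath(), StandardOpenOption.READ)) {
          // Flush the directory metadata (including the rename entry) to
          // disk. Some platforms disallow fsync on a directory; the patch
          // below ignores that failure for directories.
          channel.force(true);
        }
      }
    }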


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a81e70c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 0d2e797..ee7264b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -27,6 +27,7 @@ import java.nio.file.DirectoryStream;
 import java.nio.file.DirectoryIteratorException;
 import java.nio.file.Files;
 import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -36,7 +37,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.ChunkedArrayList;
+import org.apache.hadoop.util.Shell;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -357,4 +358,56 @@ public class IOUtils {
     }
     return list;
   }
+
+  /**
+   * Ensure that any writes to the given file are written to the storage
+   * device that contains it. This method opens a channel on the given File
+   * and closes it once the sync is done.<br>
+   * Borrowed from Uwe Schindler in LUCENE-5588
+   * @param fileToSync the file to fsync
+   */
+  public static void fsync(File fileToSync) throws IOException {
+    if (!fileToSync.exists()) {
+      throw new FileNotFoundException(
+          "File/Directory " + fileToSync.getAbsolutePath() + " does not exist");
+    }
+    boolean isDir = fileToSync.isDirectory();
+    // If the file is a directory we have to open read-only, for regular files
+    // we must open r/w for the fsync to have an effect. See
+    // http://blog.httrack.com/blog/2013/11/15/
+    // everything-you-always-wanted-to-know-about-fsync/
+    try(FileChannel channel = FileChannel.open(fileToSync.toPath(),
+        isDir ? StandardOpenOption.READ : StandardOpenOption.WRITE)){
+      fsync(channel, isDir);
+    }
+  }
+
+  /**
+   * Ensure that any writes to the given file are written to the storage
+   * device that contains it. This method opens a channel on the given File
+   * and closes it once the sync is done.
+   * Borrowed from Uwe Schindler in LUCENE-5588
+   * @param channel Channel to sync
+   * @param isDir if true, the given file is a directory (the channel should
+   *          be opened for read and IOExceptions ignored, because not all
+   *          file systems and operating systems allow fsync on a directory)
+   * @throws IOException
+   */
+  public static void fsync(FileChannel channel, boolean isDir)
+      throws IOException {
+    try {
+      channel.force(true);
+    } catch (IOException ioe) {
+      if (isDir) {
+        assert !(Shell.LINUX
+            || Shell.MAC) : "On Linux and MacOSX fsyncing a directory"
+                + " should not throw IOException, we just don't want to rely"
+                + " on that in production (undocumented)" + ". Got: " + ioe;
+        // Ignore exception if it is a directory
+        return;
+      }
+      // Throw original exception
+      throw ioe;
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a81e70c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index a0e646d..0077700 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -125,6 +125,7 @@ class BlockReceiver implements Closeable {
   private boolean isPenultimateNode = false;
 
   private boolean syncOnClose;
+  private volatile boolean dirSyncOnFinalize;
   private long restartBudget;
   /** the reference of the volume where the block receiver writes to */
   private ReplicaHandler replicaHandler;
@@ -547,6 +548,9 @@ class BlockReceiver implements Closeable {
     // avoid double sync'ing on close
     if (syncBlock && lastPacketInBlock) {
       this.syncOnClose = false;
+      // sync directory for finalize irrespective of syncOnClose config since
+      // sync is requested.
+      this.dirSyncOnFinalize = true;
     }
 
     // update received bytes
@@ -937,6 +941,7 @@ class BlockReceiver implements Closeable {
       boolean isReplaceBlock) throws IOException {
 
     syncOnClose = datanode.getDnConf().syncOnClose;
+    dirSyncOnFinalize = syncOnClose;
     boolean responderClosed = false;
     mirrorOut = mirrOut;
     mirrorAddr = mirrAddr;
@@ -979,7 +984,7 @@ class BlockReceiver implements Closeable {
           } else {
             // for isDatanode or TRANSFER_FINALIZED
             // Finalize the block.
-            datanode.data.finalizeBlock(block);
+            datanode.data.finalizeBlock(block, dirSyncOnFinalize);
           }
         }
         datanode.metrics.incrBlocksWritten();
@@ -1502,7 +1507,7 @@ class BlockReceiver implements Closeable {
         BlockReceiver.this.close();
         endTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
         block.setNumBytes(replicaInfo.getNumBytes());
-        datanode.data.finalizeBlock(block);
+        datanode.data.finalizeBlock(block, dirSyncOnFinalize);
       }
 
       if (pinning) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a81e70c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
index 694eadd..b8e08d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
@@ -149,7 +149,24 @@ public class FileIoProvider {
     final long begin = profilingEventHook.beforeFileIo(volume, SYNC, 0);
     try {
       faultInjectorEventHook.beforeFileIo(volume, SYNC, 0);
-      fos.getChannel().force(true);
+      IOUtils.fsync(fos.getChannel(), false);
+      profilingEventHook.afterFileIo(volume, SYNC, begin, 0);
+    } catch (Exception e) {
+      onFailure(volume, begin);
+      throw e;
+    }
+  }
+
+  /**
+   * Sync the given directory changes to durable device.
+   * @throws IOException
+   */
+  public void dirSync(@Nullable FsVolumeSpi volume, File dir)
+      throws IOException {
+    final long begin = profilingEventHook.beforeFileIo(volume, SYNC, 0);
+    try {
+      faultInjectorEventHook.beforeFileIo(volume, SYNC, 0);
+      IOUtils.fsync(dir);
       profilingEventHook.afterFileIo(volume, SYNC, begin, 0);
     } catch (Exception e) {
       onFailure(volume, begin);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a81e70c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
index 1d46ddd..2c5af11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/LocalReplica.java
@@ -510,4 +510,17 @@ abstract public class LocalReplica extends ReplicaInfo {
       metaRAF.write(b, 0, checksumsize);
     }
   }
+
+  /**
+   * Sync the parent directory changes to durable device.
+   * @throws IOException
+   */
+  public void fsyncDirectory() throws IOException {
+    File dir = getDir();
+    try {
+      getFileIoProvider().dirSync(getVolume(), getDir());
+    } catch (IOException e) {
+      throw new IOException("Failed to sync " + dir, e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a81e70c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index fd3af5d..7be42e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -394,12 +394,14 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
    * Finalizes the block previously opened for writing using writeToBlock.
    * The block size is what is in the parameter b and it must match the amount
    *  of data written
+   * @param block Block to be finalized
+   * @param fsyncDir whether to sync the directory changes to durable device.
    * @throws IOException
    * @throws ReplicaNotFoundException if the replica can not be found when the
    * block is been finalized. For instance, the block resides on an HDFS volume
    * that has been removed.
    */
-  void finalizeBlock(ExtendedBlock b) throws IOException;
+  void finalizeBlock(ExtendedBlock b, boolean fsyncDir) throws IOException;
 
   /**
    * Unfinalizes the block previously opened for writing using writeToBlock.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a81e70c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index eb4455b..11835a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -59,6 +59,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.server.datanode.FileIoProvider;
+import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
+import org.apache.hadoop.hdfs.server.datanode.LocalReplica;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -987,7 +989,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         replicaInfo, smallBufferSize, conf);
 
     // Finalize the copied files
-    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
+    newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo,
+        false);
     try (AutoCloseableLock lock = datasetLock.acquire()) {
       // Increment numBlocks here as this block moved without knowing to BPS
       FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
@@ -1290,7 +1293,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
           replicaInfo.bumpReplicaGS(newGS);
           // finalize the replica if RBW
           if (replicaInfo.getState() == ReplicaState.RBW) {
-            finalizeReplica(b.getBlockPoolId(), replicaInfo);
+            finalizeReplica(b.getBlockPoolId(), replicaInfo, false);
           }
           return replicaInfo;
         }
@@ -1604,7 +1607,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    * Complete the block write!
    */
   @Override // FsDatasetSpi
-  public void finalizeBlock(ExtendedBlock b) throws IOException {
+  public void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
+      throws IOException {
     try (AutoCloseableLock lock = datasetLock.acquire()) {
       if (Thread.interrupted()) {
         // Don't allow data modifications from interrupted threads
@@ -1616,12 +1620,12 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         // been opened for append but never modified
         return;
       }
-      finalizeReplica(b.getBlockPoolId(), replicaInfo);
+      finalizeReplica(b.getBlockPoolId(), replicaInfo, fsyncDir);
     }
   }
 
   private ReplicaInfo finalizeReplica(String bpid,
-      ReplicaInfo replicaInfo) throws IOException {
+      ReplicaInfo replicaInfo, boolean fsyncDir) throws IOException {
     try (AutoCloseableLock lock = datasetLock.acquire()) {
       ReplicaInfo newReplicaInfo = null;
       if (replicaInfo.getState() == ReplicaState.RUR &&
@@ -1636,6 +1640,19 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
         newReplicaInfo = v.addFinalizedBlock(
             bpid, replicaInfo, replicaInfo, replicaInfo.getBytesReserved());
+        /*
+         * Sync the directory after rename from tmp/rbw to Finalized if
+         * configured. Though rename should be an atomic operation, syncs on
+         * both the dest and src directories are done because IOUtils.fsync()
+         * calls the directory's channel sync, not the journal itself.
+         */
+        if (fsyncDir && newReplicaInfo instanceof FinalizedReplica
+            && replicaInfo instanceof LocalReplica) {
+          FinalizedReplica finalizedReplica = (FinalizedReplica) newReplicaInfo;
+          finalizedReplica.fsyncDirectory();
+          LocalReplica localReplica = (LocalReplica) replicaInfo;
+          localReplica.fsyncDirectory();
+        }
         if (v.isTransientStorage()) {
           releaseLockedMemory(
               replicaInfo.getOriginalBytesReserved()
@@ -2601,11 +2618,11 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 
         newReplicaInfo.setNumBytes(newlength);
         volumeMap.add(bpid, newReplicaInfo.getReplicaInfo());
-        finalizeReplica(bpid, newReplicaInfo.getReplicaInfo());
+        finalizeReplica(bpid, newReplicaInfo.getReplicaInfo(), false);
       }
     }
     // finalize the block
-    return finalizeReplica(bpid, rur);
+    return finalizeReplica(bpid, rur, false);
   }
 
   @Override // FsDatasetSpi

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a81e70c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index afa7a82..212f953 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -673,7 +673,8 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   }
 
   @Override // FsDatasetSpi
-  public synchronized void finalizeBlock(ExtendedBlock b) throws IOException {
+  public synchronized void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
+      throws IOException {
     final Map<Block, BInfo> map = getMap(b.getBlockPoolId());
     BInfo binfo = map.get(b.getLocalBlock());
     if (binfo == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a81e70c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 9d140a1..0c0440d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -802,10 +802,12 @@ public class TestDataNodeHotSwapVolumes {
             // Bypass the argument to FsDatasetImpl#finalizeBlock to verify that
             // the block is not removed, since the volume reference should not
             // be released at this point.
-            data.finalizeBlock((ExtendedBlock) invocation.getArguments()[0]);
+            data.finalizeBlock((ExtendedBlock) invocation.getArguments()[0],
+              (boolean) invocation.getArguments()[1]);
             return null;
           }
-        }).when(dn.data).finalizeBlock(any(ExtendedBlock.class));
+        }).when(dn.data).finalizeBlock(any(ExtendedBlock.class),
+            Mockito.anyBoolean());
 
     final CyclicBarrier barrier = new CyclicBarrier(2);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a81e70c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
index 469e249b..4775fc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
@@ -96,7 +96,7 @@ public class TestSimulatedFSDataset {
         out.close();
       }
       b.setNumBytes(blockIdToLen(i));
-      fsdataset.finalizeBlock(b);
+      fsdataset.finalizeBlock(b, false);
       assertEquals(blockIdToLen(i), fsdataset.getLength(b));
     }
     return bytesAdded;
@@ -295,7 +295,7 @@ public class TestSimulatedFSDataset {
     }
     
     try {
-      fsdataset.finalizeBlock(b);
+      fsdataset.finalizeBlock(b, false);
       assertTrue("Expected an IO exception", false);
     } catch (IOException e) {
       // ok - as expected

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a81e70c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
index d14bd72..13502d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalDatasetImpl.java
@@ -180,7 +180,8 @@ public class ExternalDatasetImpl implements FsDatasetSpi<ExternalVolumeImpl> {
   }
 
   @Override
-  public void finalizeBlock(ExtendedBlock b) throws IOException {
+  public void finalizeBlock(ExtendedBlock b, boolean fsyncDir)
+      throws IOException {
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a81e70c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 3293561..2a3bf79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -582,7 +582,7 @@ public class TestFsDatasetImpl {
           // Lets wait for the other thread finish getting block report
           blockReportReceivedLatch.await();
 
-          dataset.finalizeBlock(eb);
+          dataset.finalizeBlock(eb, false);
           LOG.info("FinalizeBlock finished");
         } catch (Exception e) {
           LOG.warn("Exception caught. This should not affect the test", e);




[41/50] [abbrv] hadoop git commit: MAPREDUCE-6676. NNBench should Throw IOException when rename and delete fails. Contributed by Brahma Reddy Battula.

Posted by xy...@apache.org.
MAPREDUCE-6676. NNBench should Throw IOException when rename and delete fails. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bea02d22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bea02d22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bea02d22

Branch: refs/heads/HDFS-7240
Commit: bea02d229cfdc54a29db3d9689c07fe5b5447e80
Parents: 0887355
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Wed Jun 7 16:34:47 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:52 2017 -0700

----------------------------------------------------------------------
 .../src/test/java/org/apache/hadoop/hdfs/NNBench.java     | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
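
The pattern the fix applies, as a small hedged sketch (the helper class and
method names are illustrative): FileSystem#rename() and #delete() report
failure through their boolean return value, so a caller that ignores it
silently counts failed operations as successes.

    import java.io.IOException;

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class CheckedFsOps {
      private CheckedFsOps() {
      }

      // Turn rename's boolean failure signal into an exception.
      public static void renameOrThrow(FileSystem fs, Path src, Path dst)
          throws IOException {
        if (!fs.rename(src, dst)) {
          throw new IOException("rename failed for " + src);
        }
      }

      // Turn delete's boolean failure signal into an exception.
      public static void deleteOrThrow(FileSystem fs, Path path)
          throws IOException {
        if (!fs.delete(path, true)) {
          throw new IOException("delete failed for " + path);
        }
      }
    }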


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bea02d22/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
index ee3cc00..29eac43 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
@@ -868,7 +868,10 @@ public class NNBench extends Configured implements Tool {
           try {
             // Set up timer for measuring AL
             startTimeAL = System.currentTimeMillis();
-            filesystem.rename(filePath, filePathR);
+            boolean result = filesystem.rename(filePath, filePathR);
+            if (!result) {
+              throw new IOException("rename failed for " + filePath);
+            }
             totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
             
             successfulOp = true;
@@ -901,7 +904,10 @@ public class NNBench extends Configured implements Tool {
           try {
             // Set up timer for measuring AL
             startTimeAL = System.currentTimeMillis();
-            filesystem.delete(filePath, true);
+            boolean result = filesystem.delete(filePath, true);
+            if (!result) {
+              throw new IOException("delete failed for " + filePath);
+            }
             totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
             
             successfulOp = true;




[15/50] [abbrv] hadoop git commit: HADOOP-9849. License information is missing for native CRC32 code (Contributed by Andrew Wang via Daniel Templeton)

Posted by xy...@apache.org.
HADOOP-9849. License information is missing for native CRC32 code
(Contributed by Andrew Wang via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/83b97f8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/83b97f8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/83b97f8a

Branch: refs/heads/HDFS-7240
Commit: 83b97f8a598944de0e90aab435a38e676e4393b3
Parents: dd7b6fb
Author: Daniel Templeton <te...@apache.org>
Authored: Wed May 31 15:57:48 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:49 2017 -0700

----------------------------------------------------------------------
 LICENSE.txt | 47 ++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/83b97f8a/LICENSE.txt
----------------------------------------------------------------------
diff --git a/LICENSE.txt b/LICENSE.txt
index 969708f..5391fd5 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -246,11 +246,48 @@ For the org.apache.hadoop.util.bloom.* classes:
 For portions of the native implementation of slicing-by-8 CRC calculation
 in src/main/native/src/org/apache/hadoop/util:
 
-/**
- *   Copyright 2008,2009,2010 Massachusetts Institute of Technology.
- *   All rights reserved. Use of this source code is governed by a
- *   BSD-style license that can be found in the LICENSE file.
- */
+Copyright (c) 2008,2009,2010 Massachusetts Institute of Technology.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+* Neither the name of the Massachusetts Institute of Technology nor
+  the names of its contributors may be used to endorse or promote
+  products derived from this software without specific prior written
+  permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Other portions are under the same license from Intel:
+http://sourceforge.net/projects/slicing-by-8/
+/*++
+ *
+ * Copyright (c) 2004-2006 Intel Corporation - All Rights Reserved
+ *
+ * This software program is licensed subject to the BSD License, 
+ * available at http://www.opensource.org/licenses/bsd-license.html
+ *
+ * Abstract: The main routine
+ * 
+ --*/
 
 For src/main/native/src/org/apache/hadoop/io/compress/lz4/{lz4.h,lz4.c,lz4hc.h,lz4hc.c},
 




[04/50] [abbrv] hadoop git commit: YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.

Posted by xy...@apache.org.
YARN-6366. Refactor the NodeManager DeletionService to support additional DeletionTask types. Contributed by Shane Kumpf.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c3685a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c3685a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c3685a4

Branch: refs/heads/HDFS-7240
Commit: 3c3685a4b86b263fcf716048523d5c82582107b3
Parents: 5511c4e
Author: Varun Vasudev <vv...@apache.org>
Authored: Wed May 31 16:15:35 2017 +0530
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:48 2017 -0700

----------------------------------------------------------------------
 .../server/nodemanager/DeletionService.java     | 468 ++++---------------
 .../nodemanager/api/impl/pb/NMProtoUtils.java   | 110 +++++
 .../nodemanager/api/impl/pb/package-info.java   |  25 +
 .../recovery/DeletionTaskRecoveryInfo.java      |  73 +++
 .../deletion/recovery/package-info.java         |  25 +
 .../deletion/task/DeletionTask.java             | 258 ++++++++++
 .../deletion/task/DeletionTaskType.java         |  24 +
 .../deletion/task/FileDeletionTask.java         | 202 ++++++++
 .../deletion/task/package-info.java             |  25 +
 .../localizer/LocalResourcesTrackerImpl.java    |  13 +-
 .../localizer/ResourceLocalizationService.java  |  40 +-
 .../logaggregation/AppLogAggregatorImpl.java    |  60 ++-
 .../loghandler/NonAggregatingLogHandler.java    |   7 +-
 .../yarn_server_nodemanager_recovery.proto      |   1 +
 .../server/nodemanager/TestDeletionService.java |  57 ++-
 .../nodemanager/TestNodeManagerReboot.java      |  99 +---
 .../api/impl/pb/TestNMProtoUtils.java           |  91 ++++
 .../BaseContainerManagerTest.java               |   7 +-
 .../deletion/task/FileDeletionMatcher.java      |  84 ++++
 .../deletion/task/TestFileDeletionTask.java     |  85 ++++
 .../TestLocalResourcesTrackerImpl.java          |   5 +-
 .../TestResourceLocalizationService.java        |  33 +-
 .../TestAppLogAggregatorImpl.java               |  15 +-
 .../TestLogAggregationService.java              |  17 +-
 .../TestNonAggregatingLogHandler.java           |   8 +-
 25 files changed, 1274 insertions(+), 558 deletions(-)
----------------------------------------------------------------------
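
To make the shape of the refactor concrete: callers no longer pass a
user/path tuple to DeletionService.delete(String, Path, Path...), they
construct a typed DeletionTask and submit it. A minimal caller-side
sketch, assuming only the FileDeletionTask constructor and
DeletionService.delete(DeletionTask) introduced in the diffs below (the
wrapper class name is illustrative):

    import java.util.Arrays;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;

    public class DeletionClientSketch {
      public static void schedule(DeletionService delService, String user,
          Path subDir, Path... baseDirs) {
        // Old style: delService.delete(user, subDir, baseDirs);
        // New style: build a typed task, then hand it to the service.
        FileDeletionTask task = new FileDeletionTask(delService, user, subDir,
            baseDirs == null ? null : Arrays.asList(baseDirs));
        delService.delete(task);
      }
    }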


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index aac0af9..38d69a3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -21,11 +21,8 @@ package org.apache.hadoop.yarn.server.nodemanager;
 import static java.util.concurrent.TimeUnit.SECONDS;
 
 import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.HashMap;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -38,461 +35,176 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.concurrent.HadoopScheduledThreadPoolExecutor;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
-import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
+import org.apache.hadoop.yarn.server.nodemanager.api.impl.pb.NMProtoUtils;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery.DeletionTaskRecoveryInfo;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMNullStateStoreService;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
-import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredDeletionServiceState;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 public class DeletionService extends AbstractService {
-  static final Log LOG = LogFactory.getLog(DeletionService.class);
+
+  private static final Log LOG = LogFactory.getLog(DeletionService.class);
+
   private int debugDelay;
-  private final ContainerExecutor exec;
-  private ScheduledThreadPoolExecutor sched;
-  private static final FileContext lfs = getLfs();
+  private final ContainerExecutor containerExecutor;
   private final NMStateStoreService stateStore;
+  private ScheduledThreadPoolExecutor sched;
   private AtomicInteger nextTaskId = new AtomicInteger(0);
 
-  static final FileContext getLfs() {
-    try {
-      return FileContext.getLocalFSFileContext();
-    } catch (UnsupportedFileSystemException e) {
-      throw new RuntimeException(e);
-    }
-  }
-
   public DeletionService(ContainerExecutor exec) {
     this(exec, new NMNullStateStoreService());
   }
 
-  public DeletionService(ContainerExecutor exec,
+  public DeletionService(ContainerExecutor containerExecutor,
       NMStateStoreService stateStore) {
     super(DeletionService.class.getName());
-    this.exec = exec;
+    this.containerExecutor = containerExecutor;
     this.debugDelay = 0;
     this.stateStore = stateStore;
   }
-  
-  /**
-   * Delete the path(s) as this user.
-   * @param user The user to delete as, or the JVM user if null
-   * @param subDir the sub directory name
-   * @param baseDirs the base directories which contains the subDir's
-   */
-  public void delete(String user, Path subDir, Path... baseDirs) {
-    // TODO if parent owned by NM, rename within parent inline
-    if (debugDelay != -1) {
-      List<Path> baseDirList = null;
-      if (baseDirs != null && baseDirs.length != 0) {
-        baseDirList = Arrays.asList(baseDirs);
-      }
-      FileDeletionTask task =
-          new FileDeletionTask(this, user, subDir, baseDirList);
-      recordDeletionTaskInStateStore(task);
-      sched.schedule(task, debugDelay, TimeUnit.SECONDS);
-    }
-  }
-  
-  public void scheduleFileDeletionTask(FileDeletionTask fileDeletionTask) {
-    if (debugDelay != -1) {
-      recordDeletionTaskInStateStore(fileDeletionTask);
-      sched.schedule(fileDeletionTask, debugDelay, TimeUnit.SECONDS);
-    }
-  }
-  
-  @Override
-  protected void serviceInit(Configuration conf) throws Exception {
-    ThreadFactory tf = new ThreadFactoryBuilder()
-      .setNameFormat("DeletionService #%d")
-      .build();
-    if (conf != null) {
-      sched = new HadoopScheduledThreadPoolExecutor(
-          conf.getInt(YarnConfiguration.NM_DELETE_THREAD_COUNT,
-          YarnConfiguration.DEFAULT_NM_DELETE_THREAD_COUNT), tf);
-      debugDelay = conf.getInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0);
-    } else {
-      sched = new HadoopScheduledThreadPoolExecutor(
-          YarnConfiguration.DEFAULT_NM_DELETE_THREAD_COUNT, tf);
-    }
-    sched.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
-    sched.setKeepAliveTime(60L, SECONDS);
-    if (stateStore.canRecover()) {
-      recover(stateStore.loadDeletionServiceState());
-    }
-    super.serviceInit(conf);
-  }
 
-  @Override
-  protected void serviceStop() throws Exception {
-    if (sched != null) {
-      sched.shutdown();
-      boolean terminated = false;
-      try {
-        terminated = sched.awaitTermination(10, SECONDS);
-      } catch (InterruptedException e) {
-      }
-      if (terminated != true) {
-        sched.shutdownNow();
-      }
-    }
-    super.serviceStop();
+  public int getDebugDelay() {
+    return debugDelay;
   }
 
-  /**
-   * Determine if the service has completely stopped.
-   * Used only by unit tests
-   * @return true if service has completely stopped
-   */
-  @Private
-  public boolean isTerminated() {
-    return getServiceState() == STATE.STOPPED && sched.isTerminated();
+  public ContainerExecutor getContainerExecutor() {
+    return containerExecutor;
   }
 
-  public static class FileDeletionTask implements Runnable {
-    public static final int INVALID_TASK_ID = -1;
-    private int taskId;
-    private final String user;
-    private final Path subDir;
-    private final List<Path> baseDirs;
-    private final AtomicInteger numberOfPendingPredecessorTasks;
-    private final Set<FileDeletionTask> successorTaskSet;
-    private final DeletionService delService;
-    // By default all tasks will start as success=true; however if any of
-    // the dependent task fails then it will be marked as false in
-    // fileDeletionTaskFinished().
-    private boolean success;
-    
-    private FileDeletionTask(DeletionService delService, String user,
-        Path subDir, List<Path> baseDirs) {
-      this(INVALID_TASK_ID, delService, user, subDir, baseDirs);
-    }
-
-    private FileDeletionTask(int taskId, DeletionService delService,
-        String user, Path subDir, List<Path> baseDirs) {
-      this.taskId = taskId;
-      this.delService = delService;
-      this.user = user;
-      this.subDir = subDir;
-      this.baseDirs = baseDirs;
-      this.successorTaskSet = new HashSet<FileDeletionTask>();
-      this.numberOfPendingPredecessorTasks = new AtomicInteger(0);
-      success = true;
-    }
-    
-    /**
-     * increments and returns pending predecessor task count
-     */
-    public int incrementAndGetPendingPredecessorTasks() {
-      return numberOfPendingPredecessorTasks.incrementAndGet();
-    }
-    
-    /**
-     * decrements and returns pending predecessor task count
-     */
-    public int decrementAndGetPendingPredecessorTasks() {
-      return numberOfPendingPredecessorTasks.decrementAndGet();
-    }
-    
-    @VisibleForTesting
-    public String getUser() {
-      return this.user;
-    }
-    
-    @VisibleForTesting
-    public Path getSubDir() {
-      return this.subDir;
-    }
-    
-    @VisibleForTesting
-    public List<Path> getBaseDirs() {
-      return this.baseDirs;
-    }
-    
-    public synchronized void setSuccess(boolean success) {
-      this.success = success;
-    }
-    
-    public synchronized boolean getSucess() {
-      return this.success;
-    }
-    
-    public synchronized FileDeletionTask[] getSuccessorTasks() {
-      FileDeletionTask[] successors =
-          new FileDeletionTask[successorTaskSet.size()];
-      return successorTaskSet.toArray(successors);
-    }
+  public NMStateStoreService getStateStore() {
+    return stateStore;
+  }
 
-    @Override
-    public void run() {
+  public void delete(DeletionTask deletionTask) {
+    if (debugDelay != -1) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug(this);
-      }
-      boolean error = false;
-      if (null == user) {
-        if (baseDirs == null || baseDirs.size() == 0) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("NM deleting absolute path : " + subDir);
-          }
-          try {
-            lfs.delete(subDir, true);
-          } catch (IOException e) {
-            error = true;
-            LOG.warn("Failed to delete " + subDir);
-          }
-        } else {
-          for (Path baseDir : baseDirs) {
-            Path del = subDir == null? baseDir : new Path(baseDir, subDir);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("NM deleting path : " + del);
-            }
-            try {
-              lfs.delete(del, true);
-            } catch (IOException e) {
-              error = true;
-              LOG.warn("Failed to delete " + subDir);
-            }
-          }
-        }
-      } else {
-        try {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(
-                "Deleting path: [" + subDir + "] as user: [" + user + "]");
-          }
-          if (baseDirs == null || baseDirs.size() == 0) {
-            delService.exec.deleteAsUser(new DeletionAsUserContext.Builder()
-                .setUser(user)
-                .setSubDir(subDir)
-                .build());
-          } else {
-            delService.exec.deleteAsUser(new DeletionAsUserContext.Builder()
-                .setUser(user)
-                .setSubDir(subDir)
-                .setBasedirs(baseDirs.toArray(new Path[0]))
-                .build());
-          }
-        } catch (IOException e) {
-          error = true;
-          LOG.warn("Failed to delete as user " + user, e);
-        } catch (InterruptedException e) {
-          error = true;
-          LOG.warn("Failed to delete as user " + user, e);
-        }
-      }
-      if (error) {
-        setSuccess(!error);        
-      }
-      fileDeletionTaskFinished();
-    }
-
-    @Override
-    public String toString() {
-      StringBuffer sb = new StringBuffer("\nFileDeletionTask : ");
-      sb.append("  user : ").append(this.user);
-      sb.append("  subDir : ").append(
-        subDir == null ? "null" : subDir.toString());
-      sb.append("  baseDir : ");
-      if (baseDirs == null || baseDirs.size() == 0) {
-        sb.append("null");
-      } else {
-        for (Path baseDir : baseDirs) {
-          sb.append(baseDir.toString()).append(',');
-        }
-      }
-      return sb.toString();
-    }
-    
-    /**
-     * If there is a task dependency between say tasks 1,2,3 such that
-     * task2 and task3 can be started only after task1 then we should define
-     * task2 and task3 as successor tasks for task1.
-     * Note:- Task dependency should be defined prior to
-     * @param successorTask
-     */
-    public synchronized void addFileDeletionTaskDependency(
-        FileDeletionTask successorTask) {
-      if (successorTaskSet.add(successorTask)) {
-        successorTask.incrementAndGetPendingPredecessorTasks();
+        String msg = String.format("Scheduling DeletionTask (delay %d) : %s",
+            debugDelay, deletionTask.toString());
+        LOG.debug(msg);
       }
+      recordDeletionTaskInStateStore(deletionTask);
+      sched.schedule(deletionTask, debugDelay, TimeUnit.SECONDS);
     }
-    
-    /*
-     * This is called when
-     * 1) Current file deletion task ran and finished.
-     * 2) This can be even directly called by predecessor task if one of the
-     * dependent tasks of it has failed marking its success = false.  
-     */
-    private synchronized void fileDeletionTaskFinished() {
-      try {
-        delService.stateStore.removeDeletionTask(taskId);
-      } catch (IOException e) {
-        LOG.error("Unable to remove deletion task " + taskId
-            + " from state store", e);
-      }
-      Iterator<FileDeletionTask> successorTaskI =
-          this.successorTaskSet.iterator();
-      while (successorTaskI.hasNext()) {
-        FileDeletionTask successorTask = successorTaskI.next();
-        if (!success) {
-          successorTask.setSuccess(success);
-        }
-        int count = successorTask.decrementAndGetPendingPredecessorTasks();
-        if (count == 0) {
-          if (successorTask.getSucess()) {
-            successorTask.delService.scheduleFileDeletionTask(successorTask);
-          } else {
-            successorTask.fileDeletionTaskFinished();
-          }
-        }
-      }
-    }
-  }
-  
-  /**
-   * Helper method to create file deletion task. To be used only if we need
-   * a way to define dependencies between deletion tasks.
-   * @param user user on whose behalf this task is suppose to run
-   * @param subDir sub directory as required in 
-   * {@link DeletionService#delete(String, Path, Path...)}
-   * @param baseDirs base directories as required in
-   * {@link DeletionService#delete(String, Path, Path...)}
-   */
-  public FileDeletionTask createFileDeletionTask(String user, Path subDir,
-      Path[] baseDirs) {
-    return new FileDeletionTask(this, user, subDir, Arrays.asList(baseDirs));
   }
 
-  private void recover(RecoveredDeletionServiceState state)
+  private void recover(NMStateStoreService.RecoveredDeletionServiceState state)
       throws IOException {
     List<DeletionServiceDeleteTaskProto> taskProtos = state.getTasks();
     Map<Integer, DeletionTaskRecoveryInfo> idToInfoMap =
-        new HashMap<Integer, DeletionTaskRecoveryInfo>(taskProtos.size());
-    Set<Integer> successorTasks = new HashSet<Integer>();
+        new HashMap<>(taskProtos.size());
+    Set<Integer> successorTasks = new HashSet<>();
     for (DeletionServiceDeleteTaskProto proto : taskProtos) {
-      DeletionTaskRecoveryInfo info = parseTaskProto(proto);
-      idToInfoMap.put(info.task.taskId, info);
-      nextTaskId.set(Math.max(nextTaskId.get(), info.task.taskId));
-      successorTasks.addAll(info.successorTaskIds);
+      DeletionTaskRecoveryInfo info =
+          NMProtoUtils.convertProtoToDeletionTaskRecoveryInfo(proto, this);
+      idToInfoMap.put(info.getTask().getTaskId(), info);
+      nextTaskId.set(Math.max(nextTaskId.get(), info.getTask().getTaskId()));
+      successorTasks.addAll(info.getSuccessorTaskIds());
     }
 
     // restore the task dependencies and schedule the deletion tasks that
     // have no predecessors
     final long now = System.currentTimeMillis();
     for (DeletionTaskRecoveryInfo info : idToInfoMap.values()) {
-      for (Integer successorId : info.successorTaskIds){
+      for (Integer successorId : info.getSuccessorTaskIds()){
         DeletionTaskRecoveryInfo successor = idToInfoMap.get(successorId);
         if (successor != null) {
-          info.task.addFileDeletionTaskDependency(successor.task);
+          info.getTask().addDeletionTaskDependency(successor.getTask());
         } else {
           LOG.error("Unable to locate dependency task for deletion task "
-              + info.task.taskId + " at " + info.task.getSubDir());
+              + info.getTask().getTaskId());
         }
       }
-      if (!successorTasks.contains(info.task.taskId)) {
-        long msecTilDeletion = info.deletionTimestamp - now;
-        sched.schedule(info.task, msecTilDeletion, TimeUnit.MILLISECONDS);
+      if (!successorTasks.contains(info.getTask().getTaskId())) {
+        long msecTilDeletion = info.getDeletionTimestamp() - now;
+        sched.schedule(info.getTask(), msecTilDeletion, TimeUnit.MILLISECONDS);
       }
     }
   }
 
-  private DeletionTaskRecoveryInfo parseTaskProto(
-      DeletionServiceDeleteTaskProto proto) throws IOException {
-    int taskId = proto.getId();
-    String user = proto.hasUser() ? proto.getUser() : null;
-    Path subdir = null;
-    List<Path> basePaths = null;
-    if (proto.hasSubdir()) {
-      subdir = new Path(proto.getSubdir());
-    }
-    List<String> basedirs = proto.getBasedirsList();
-    if (basedirs != null && basedirs.size() > 0) {
-      basePaths = new ArrayList<Path>(basedirs.size());
-      for (String basedir : basedirs) {
-        basePaths.add(new Path(basedir));
-      }
-    }
-
-    FileDeletionTask task = new FileDeletionTask(taskId, this, user,
-        subdir, basePaths);
-    return new DeletionTaskRecoveryInfo(task,
-        proto.getSuccessorIdsList(),
-        proto.getDeletionTime());
-  }
-
   private int generateTaskId() {
     // get the next ID but avoid an invalid ID
     int taskId = nextTaskId.incrementAndGet();
-    while (taskId == FileDeletionTask.INVALID_TASK_ID) {
+    while (taskId == DeletionTask.INVALID_TASK_ID) {
       taskId = nextTaskId.incrementAndGet();
     }
     return taskId;
   }
 
-  private void recordDeletionTaskInStateStore(FileDeletionTask task) {
+  private void recordDeletionTaskInStateStore(DeletionTask task) {
     if (!stateStore.canRecover()) {
       // optimize the case where we aren't really recording
       return;
     }
-    if (task.taskId != FileDeletionTask.INVALID_TASK_ID) {
+    if (task.getTaskId() != DeletionTask.INVALID_TASK_ID) {
       return;  // task already recorded
     }
 
-    task.taskId = generateTaskId();
-
-    FileDeletionTask[] successors = task.getSuccessorTasks();
+    task.setTaskId(generateTaskId());
 
     // store successors first to ensure task IDs have been generated for them
-    for (FileDeletionTask successor : successors) {
+    DeletionTask[] successors = task.getSuccessorTasks();
+    for (DeletionTask successor : successors) {
       recordDeletionTaskInStateStore(successor);
     }
 
-    DeletionServiceDeleteTaskProto.Builder builder =
-        DeletionServiceDeleteTaskProto.newBuilder();
-    builder.setId(task.taskId);
-    if (task.getUser() != null) {
-      builder.setUser(task.getUser());
-    }
-    if (task.getSubDir() != null) {
-      builder.setSubdir(task.getSubDir().toString());
-    }
-    builder.setDeletionTime(System.currentTimeMillis() +
-        TimeUnit.MILLISECONDS.convert(debugDelay, TimeUnit.SECONDS));
-    if (task.getBaseDirs() != null) {
-      for (Path dir : task.getBaseDirs()) {
-        builder.addBasedirs(dir.toString());
-      }
-    }
-    for (FileDeletionTask successor : successors) {
-      builder.addSuccessorIds(successor.taskId);
-    }
-
     try {
-      stateStore.storeDeletionTask(task.taskId, builder.build());
+      stateStore.storeDeletionTask(task.getTaskId(),
+          task.convertDeletionTaskToProto());
     } catch (IOException e) {
-      LOG.error("Unable to store deletion task " + task.taskId + " for "
-          + task.getSubDir(), e);
+      LOG.error("Unable to store deletion task " + task.getTaskId(), e);
     }
   }
 
-  private static class DeletionTaskRecoveryInfo {
-    FileDeletionTask task;
-    List<Integer> successorTaskIds;
-    long deletionTimestamp;
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    ThreadFactory tf = new ThreadFactoryBuilder()
+        .setNameFormat("DeletionService #%d")
+        .build();
+    if (conf != null) {
+      sched = new HadoopScheduledThreadPoolExecutor(
+          conf.getInt(YarnConfiguration.NM_DELETE_THREAD_COUNT,
+              YarnConfiguration.DEFAULT_NM_DELETE_THREAD_COUNT), tf);
+      debugDelay = conf.getInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 0);
+    } else {
+      sched = new HadoopScheduledThreadPoolExecutor(
+          YarnConfiguration.DEFAULT_NM_DELETE_THREAD_COUNT, tf);
+    }
+    sched.setExecuteExistingDelayedTasksAfterShutdownPolicy(false);
+    sched.setKeepAliveTime(60L, SECONDS);
+    if (stateStore.canRecover()) {
+      recover(stateStore.loadDeletionServiceState());
+    }
+    super.serviceInit(conf);
+  }
 
-    public DeletionTaskRecoveryInfo(FileDeletionTask task,
-        List<Integer> successorTaskIds, long deletionTimestamp) {
-      this.task = task;
-      this.successorTaskIds = successorTaskIds;
-      this.deletionTimestamp = deletionTimestamp;
+  @Override
+  public void serviceStop() throws Exception {
+    if (sched != null) {
+      sched.shutdown();
+      boolean terminated = false;
+      try {
+        terminated = sched.awaitTermination(10, SECONDS);
+      } catch (InterruptedException e) { }
+      if (!terminated) {
+        sched.shutdownNow();
+      }
     }
+    super.serviceStop();
+  }
+
+  /**
+   * Determine if the service has completely stopped.
+   * Used only by unit tests
+   * @return true if service has completely stopped
+   */
+  @Private
+  public boolean isTerminated() {
+    return getServiceState() == STATE.STOPPED && sched.isTerminated();
   }
 }
\ No newline at end of file
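
One behavioral detail in the refactored service carries over from the old
code: delete() is a no-op when debugDelay is -1, and otherwise schedules
the task debugDelay seconds in the future. A hedged setup sketch, assuming
the standard AbstractService init/start lifecycle (the helper class name
is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
    import org.apache.hadoop.yarn.server.nodemanager.DeletionService;

    public class DeletionServiceSetupSketch {
      public static DeletionService start(ContainerExecutor exec) {
        Configuration conf = new Configuration();
        // Keep to-be-deleted paths around for an hour while debugging a
        // container problem; -1 disables deletion scheduling entirely.
        conf.setInt(YarnConfiguration.DEBUG_NM_DELETE_DELAY_SEC, 3600);
        DeletionService del = new DeletionService(exec);
        del.init(conf);   // serviceInit() reads the delay, sizes the pool
        del.start();
        return del;
      }
    }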

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
new file mode 100644
index 0000000..e47b3ee
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/NMProtoUtils.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery.DeletionTaskRecoveryInfo;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTaskType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Utilities for converting from PB representations.
+ */
+public final class NMProtoUtils {
+
+  private static final Log LOG = LogFactory.getLog(NMProtoUtils.class);
+
+  private NMProtoUtils() { }
+
+  /**
+   * Convert the Protobuf representation into a {@link DeletionTask}.
+   *
+   * @param proto             the Protobuf representation for the DeletionTask
+   * @param deletionService   the {@link DeletionService}
+   * @return the converted {@link DeletionTask}
+   */
+  public static DeletionTask convertProtoToDeletionTask(
+      DeletionServiceDeleteTaskProto proto, DeletionService deletionService) {
+    int taskId = proto.getId();
+    if (proto.hasTaskType() && proto.getTaskType() != null) {
+      if (proto.getTaskType().equals(DeletionTaskType.FILE.name())) {
+        LOG.debug("Converting recovered FileDeletionTask");
+        return convertProtoToFileDeletionTask(proto, deletionService, taskId);
+      }
+    }
+    LOG.debug("Unable to get task type, trying FileDeletionTask");
+    return convertProtoToFileDeletionTask(proto, deletionService, taskId);
+  }
+
+  /**
+   * Convert the Protobuf representation into the {@link FileDeletionTask}.
+   *
+   * @param proto the Protobuf representation of the {@link FileDeletionTask}
+   * @param deletionService the {@link DeletionService}.
+   * @param taskId the ID of the {@link DeletionTask}.
+   * @return the populated {@link FileDeletionTask}.
+   */
+  public static FileDeletionTask convertProtoToFileDeletionTask(
+      DeletionServiceDeleteTaskProto proto, DeletionService deletionService,
+      int taskId) {
+    String user = proto.hasUser() ? proto.getUser() : null;
+    Path subdir = null;
+    if (proto.hasSubdir()) {
+      subdir = new Path(proto.getSubdir());
+    }
+    List<Path> basePaths = null;
+    List<String> basedirs = proto.getBasedirsList();
+    if (basedirs != null && basedirs.size() > 0) {
+      basePaths = new ArrayList<>(basedirs.size());
+      for (String basedir : basedirs) {
+        basePaths.add(new Path(basedir));
+      }
+    }
+    return new FileDeletionTask(taskId, deletionService, user, subdir,
+        basePaths);
+  }
+
+  /**
+   * Convert the Protobuf representation to the {@link DeletionTaskRecoveryInfo}
+   * representation.
+   *
+   * @param proto the Protobuf representation of the {@link DeletionTask}
+   * @param deletionService the {@link DeletionService}
+   * @return the populated {@link DeletionTaskRecoveryInfo}
+   */
+  public static DeletionTaskRecoveryInfo convertProtoToDeletionTaskRecoveryInfo(
+      DeletionServiceDeleteTaskProto proto, DeletionService deletionService) {
+    DeletionTask deletionTask =
+        NMProtoUtils.convertProtoToDeletionTask(proto, deletionService);
+    List<Integer> successorTaskIds = new ArrayList<>();
+    if (proto.getSuccessorIdsList() != null &&
+        !proto.getSuccessorIdsList().isEmpty()) {
+      successorTaskIds = proto.getSuccessorIdsList();
+    }
+    long deletionTimestamp = proto.getDeletionTime();
+    return new DeletionTaskRecoveryInfo(deletionTask, successorTaskIds,
+        deletionTimestamp);
+  }
+}
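
NMProtoUtils is effectively the recovery-side inverse of
DeletionTask.convertDeletionTaskToProto(): what the service persists at
schedule time, these helpers rebuild on restart. A round-trip sketch under
that assumption (imports elided; task and deletionService are assumed to
be in scope):

    // Persist: done by recordDeletionTaskInStateStore() at schedule time.
    DeletionServiceDeleteTaskProto proto = task.convertDeletionTaskToProto();

    // ... NM restarts, proto is loaded from the state store ...

    // Recover: rebuild the task; with a missing or unknown task type this
    // falls back to a FileDeletionTask, as shown above.
    DeletionTask recovered =
        NMProtoUtils.convertProtoToDeletionTask(proto, deletionService);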

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/package-info.java
new file mode 100644
index 0000000..006f49f
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/api/impl/pb/package-info.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Package containing classes for working with Protobuf.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.nodemanager.api.impl.pb;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/DeletionTaskRecoveryInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/DeletionTaskRecoveryInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/DeletionTaskRecoveryInfo.java
new file mode 100644
index 0000000..c62ea02
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/DeletionTaskRecoveryInfo.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery;
+
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
+
+import java.util.List;
+
+/**
+ * Encapsulates the recovery info needed to recover a DeletionTask from the NM
+ * state store.
+ */
+public class DeletionTaskRecoveryInfo {
+
+  private DeletionTask task;
+  private List<Integer> successorTaskIds;
+  private long deletionTimestamp;
+
+  /**
+   * Information needed for recovering the DeletionTask.
+   *
+   * @param task the DeletionTask
+   * @param successorTaskIds the ids of the dependent DeletionTasks.
+   * @param deletionTimestamp the scheduled time of deletion.
+   */
+  public DeletionTaskRecoveryInfo(DeletionTask task,
+      List<Integer> successorTaskIds, long deletionTimestamp) {
+    this.task = task;
+    this.successorTaskIds = successorTaskIds;
+    this.deletionTimestamp = deletionTimestamp;
+  }
+
+  /**
+   * Return the recovered DeletionTask.
+   *
+   * @return the recovered DeletionTask.
+   */
+  public DeletionTask getTask() {
+    return task;
+  }
+
+  /**
+   * Return the ids of all dependent DeletionTasks.
+   *
+   * @return the dependent DeletionTask ids.
+   */
+  public List<Integer> getSuccessorTaskIds() {
+    return successorTaskIds;
+  }
+
+  /**
+   * Return the deletion timestamp.
+   *
+   * @return the deletion timestamp.
+   */
+  public long getDeletionTimestamp() {
+    return deletionTimestamp;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/package-info.java
new file mode 100644
index 0000000..28d7f62
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/recovery/package-info.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Package containing classes for recovering DeletionTasks.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.recovery;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTask.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTask.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTask.java
new file mode 100644
index 0000000..635d7a9
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTask.java
@@ -0,0 +1,258 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService;
+
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * DeletionTasks are supplied to the {@link DeletionService} for deletion.
+ */
+public abstract class DeletionTask implements Runnable {
+
+  static final Log LOG = LogFactory.getLog(DeletionTask.class);
+
+  public static final int INVALID_TASK_ID = -1;
+
+  private int taskId;
+  private String user;
+  private DeletionTaskType deletionTaskType;
+  private DeletionService deletionService;
+  private final AtomicInteger numberOfPendingPredecessorTasks;
+  private final Set<DeletionTask> successorTaskSet;
+  // By default all tasks will start as success=true; however if any of
+  // the dependent task fails then it will be marked as false in
+  // deletionTaskFinished().
+  private boolean success;
+
+  /**
+   * Deletion task with taskId and default values.
+   *
+   * @param taskId              the ID of the task, if previously set.
+   * @param deletionService     the {@link DeletionService}.
+   * @param user                the user associated with the delete.
+   * @param deletionTaskType    the {@link DeletionTaskType}.
+   */
+  public DeletionTask(int taskId, DeletionService deletionService, String user,
+      DeletionTaskType deletionTaskType) {
+    this(taskId, deletionService, user, new AtomicInteger(0),
+        new HashSet<DeletionTask>(), deletionTaskType);
+  }
+
+  /**
+   * Deletion task with taskId and user-supplied values.
+   *
+   * @param taskId              the ID of the task, if previously set.
+   * @param deletionService     the {@link DeletionService}.
+   * @param user                the user associated with the delete.
+   * @param numberOfPendingPredecessorTasks  the number of pending
+   *                            predecessor tasks.
+   * @param successorTaskSet    the set of successor DeletionTasks.
+   * @param deletionTaskType    the {@link DeletionTaskType}.
+   */
+  public DeletionTask(int taskId, DeletionService deletionService, String user,
+      AtomicInteger numberOfPendingPredecessorTasks,
+      Set<DeletionTask> successorTaskSet, DeletionTaskType deletionTaskType) {
+    this.taskId = taskId;
+    this.deletionService = deletionService;
+    this.user = user;
+    this.numberOfPendingPredecessorTasks = numberOfPendingPredecessorTasks;
+    this.successorTaskSet = successorTaskSet;
+    this.deletionTaskType = deletionTaskType;
+    success = true;
+  }
+
+  /**
+   * Get the taskId for the DeletionTask.
+   *
+   * @return the taskId.
+   */
+  public int getTaskId() {
+    return taskId;
+  }
+
+  /**
+   * Set the taskId for the DeletionTask.
+   *
+   * @param taskId the taskId.
+   */
+  public void setTaskId(int taskId) {
+    this.taskId = taskId;
+  }
+
+  /**
+   * Get the user associated with the DeletionTask.
+   *
+   * @return the user name.
+   */
+  public String getUser() {
+    return user;
+  }
+
+  /**
+   * Get the {@link DeletionService} for this DeletionTask.
+   *
+   * @return the {@link DeletionService}.
+   */
+  public DeletionService getDeletionService() {
+    return deletionService;
+  }
+
+  /**
+   * Get the {@link DeletionTaskType} for this DeletionTask.
+   *
+   * @return the {@link DeletionTaskType}.
+   */
+  public DeletionTaskType getDeletionTaskType() {
+    return deletionTaskType;
+  }
+
+  /**
+   * Set the DeletionTask run status.
+   *
+   * @param success the status of the running DeletionTask.
+   */
+  public synchronized void setSuccess(boolean success) {
+    this.success = success;
+  }
+
+  /**
+   * Return the DeletionTask run status.
+   *
+   * @return the status of the running DeletionTask.
+   */
+  public synchronized boolean getSucess() {
+    return this.success;
+  }
+
+  /**
+   * Return the list of successor tasks for the DeletionTask.
+   *
+   * @return the list of successor tasks.
+   */
+  public synchronized DeletionTask[] getSuccessorTasks() {
+    DeletionTask[] successors = new DeletionTask[successorTaskSet.size()];
+    return successorTaskSet.toArray(successors);
+  }
+
+  /**
+   * Convert the DeletionTask to the Protobuf representation for storing in the
+   * state store and recovery.
+   *
+   * @return the protobuf representation of the DeletionTask.
+   */
+  public abstract DeletionServiceDeleteTaskProto convertDeletionTaskToProto();
+
+  /**
+   * Add a dependent DeletionTask.
+   *
+   * If there is a task dependency between say tasks 1,2,3 such that
+   * task2 and task3 can be started only after task1 then we should define
+   * task2 and task3 as successor tasks for task1.
+   * Note:- Task dependency should be defined prior to calling delete.
+   *
+   * @param successorTask the DeletionTask that depends on this DeletionTask.
+   */
+  public synchronized void addDeletionTaskDependency(
+      DeletionTask successorTask) {
+    if (successorTaskSet.add(successorTask)) {
+      successorTask.incrementAndGetPendingPredecessorTasks();
+    }
+  }
+
+  /**
+   * Increments and returns pending predecessor task count.
+   *
+   * @return the number of pending predecessor DeletionTasks.
+   */
+  public int incrementAndGetPendingPredecessorTasks() {
+    return numberOfPendingPredecessorTasks.incrementAndGet();
+  }
+
+  /**
+   * Decrements and returns pending predecessor task count.
+   *
+   * @return the number of pending predecessor DeletionTasks.
+   */
+  public int decrementAndGetPendingPredecessorTasks() {
+    return numberOfPendingPredecessorTasks.decrementAndGet();
+  }
+
+  /**
+   * Removes the DeletionTask from the state store and schedules any
+   * successor tasks that are now ready to run.
+   *
+   * This is called when:
+   * 1) The current deletion task has run and finished.
+   * 2) A predecessor task invokes it directly on a successor whose
+   * success has been marked false, so the failure propagates without
+   * running that task.
+   */
+  synchronized void deletionTaskFinished() {
+    try {
+      NMStateStoreService stateStore = deletionService.getStateStore();
+      stateStore.removeDeletionTask(taskId);
+    } catch (IOException e) {
+      LOG.error("Unable to remove deletion task " + taskId
+          + " from state store", e);
+    }
+    Iterator<DeletionTask> successorTaskI = this.successorTaskSet.iterator();
+    while (successorTaskI.hasNext()) {
+      DeletionTask successorTask = successorTaskI.next();
+      if (!success) {
+        successorTask.setSuccess(success);
+      }
+      int count = successorTask.decrementAndGetPendingPredecessorTasks();
+      if (count == 0) {
+        if (successorTask.getSucess()) {
+          successorTask.deletionService.delete(successorTask);
+        } else {
+          successorTask.deletionTaskFinished();
+        }
+      }
+    }
+  }
+
+  /**
+   * Return the Protobuf builder with the base DeletionTask attributes.
+   *
+   * @return pre-populated Builder with the base attributes.
+   */
+  DeletionServiceDeleteTaskProto.Builder getBaseDeletionTaskProtoBuilder() {
+    DeletionServiceDeleteTaskProto.Builder builder =
+        DeletionServiceDeleteTaskProto.newBuilder();
+    builder.setId(getTaskId());
+    if (getUser() != null) {
+      builder.setUser(getUser());
+    }
+    builder.setDeletionTime(System.currentTimeMillis() +
+        TimeUnit.MILLISECONDS.convert(getDeletionService().getDebugDelay(),
+            TimeUnit.SECONDS));
+    for (DeletionTask successor : getSuccessorTasks()) {
+      builder.addSuccessorIds(successor.getTaskId());
+    }
+    return builder;
+  }
+}
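
The predecessor/successor counters above are what give DeletionTasks
ordered execution: a successor only runs once every predecessor has
finished successfully, and a predecessor failure propagates instead of
running it. A wiring sketch (paths and variable names are illustrative):

    // task2 must not run until task1 completes, so register it as a
    // successor before scheduling task1.
    FileDeletionTask task1 =
        new FileDeletionTask(delService, user, appLogDir, null);
    FileDeletionTask task2 =
        new FileDeletionTask(delService, user, appDir, null);
    task1.addDeletionTaskDependency(task2); // task2 pending count -> 1
    delService.delete(task1); // on finish, task1 schedules task2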

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTaskType.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTaskType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTaskType.java
new file mode 100644
index 0000000..676c71b
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/DeletionTaskType.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
+
+/**
+ * Available types of {@link DeletionTask}s.
+ */
+public enum DeletionTaskType {
+  FILE
+}
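
This enum is the extension point the commit title refers to: a future
task type would add a constant here and a matching branch in
NMProtoUtils.convertProtoToDeletionTask(). A purely hypothetical sketch
(the LOG constant and convertProtoToLogDeletionTask helper are not part
of this patch):

    public enum DeletionTaskType {
      FILE, LOG    // LOG is illustrative only
    }

    // ...and the proto dispatch in NMProtoUtils would grow a branch:
    if (proto.getTaskType().equals(DeletionTaskType.LOG.name())) {
      // convertProtoToLogDeletionTask is a hypothetical helper
      return convertProtoToLogDeletionTask(proto, deletionService, taskId);
    }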

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java
new file mode 100644
index 0000000..fd07f16
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/FileDeletionTask.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
+
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.DeletionServiceDeleteTaskProto;
+import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
+import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
+
+import java.io.IOException;
+import java.util.List;
+
+/**
+ * {@link DeletionTask} handling the removal of files (and directories).
+ */
+public class FileDeletionTask extends DeletionTask implements Runnable {
+
+  private final Path subDir;
+  private final List<Path> baseDirs;
+  private static final FileContext lfs = getLfs();
+
+  private static FileContext getLfs() {
+    try {
+      return FileContext.getLocalFSFileContext();
+    } catch (UnsupportedFileSystemException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Construct a FileDeletionTask with the default INVALID_TASK_ID.
+   *
+   * @param deletionService     the {@link DeletionService}.
+   * @param user                the user deleting the file.
+   * @param subDir              the subdirectory to delete.
+   * @param baseDirs            the base directories containing the subdir.
+   */
+  public FileDeletionTask(DeletionService deletionService, String user,
+      Path subDir, List<Path> baseDirs) {
+    this(INVALID_TASK_ID, deletionService, user, subDir, baseDirs);
+  }
+
+  /**
+   * Construct a FileDeletionTask with the given taskId.
+   *
+   * @param taskId              the ID of the task, if previously set.
+   * @param deletionService     the {@link DeletionService}.
+   * @param user                the user deleting the file.
+   * @param subDir              the subdirectory to delete.
+   * @param baseDirs            the base directories containing the subdir.
+   */
+  public FileDeletionTask(int taskId, DeletionService deletionService,
+      String user, Path subDir, List<Path> baseDirs) {
+    super(taskId, deletionService, user, DeletionTaskType.FILE);
+    this.subDir = subDir;
+    this.baseDirs = baseDirs;
+  }
+
+  /**
+   * Get the subdirectory to delete.
+   *
+   * @return the subDir for the FileDeletionTask.
+   */
+  public Path getSubDir() {
+    return this.subDir;
+  }
+
+  /**
+   * Get the base directories containing the subdirectory.
+   *
+   * @return the base directories for the FileDeletionTask.
+   */
+  public List<Path> getBaseDirs() {
+    return this.baseDirs;
+  }
+
+  /**
+   * Delete the specified file/directory as the specified user.
+   */
+  @Override
+  public void run() {
+    if (LOG.isDebugEnabled()) {
+      String msg = String.format("Running DeletionTask : %s", toString());
+      LOG.debug(msg);
+    }
+    boolean error = false;
+    if (null == getUser()) {
+      if (baseDirs == null || baseDirs.size() == 0) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("NM deleting absolute path : " + subDir);
+        }
+        try {
+          lfs.delete(subDir, true);
+        } catch (IOException e) {
+          error = true;
+          LOG.warn("Failed to delete " + subDir);
+        }
+      } else {
+        for (Path baseDir : baseDirs) {
+          Path del = subDir == null? baseDir : new Path(baseDir, subDir);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("NM deleting path : " + del);
+          }
+          try {
+            lfs.delete(del, true);
+          } catch (IOException e) {
+            error = true;
+            LOG.warn("Failed to delete " + del);
+          }
+        }
+      }
+    } else {
+      try {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(
+              "Deleting path: [" + subDir + "] as user: [" + getUser() + "]");
+        }
+        if (baseDirs == null || baseDirs.size() == 0) {
+          getDeletionService().getContainerExecutor().deleteAsUser(
+              new DeletionAsUserContext.Builder()
+              .setUser(getUser())
+              .setSubDir(subDir)
+              .build());
+        } else {
+          getDeletionService().getContainerExecutor().deleteAsUser(
+              new DeletionAsUserContext.Builder()
+              .setUser(getUser())
+              .setSubDir(subDir)
+              .setBasedirs(baseDirs.toArray(new Path[0]))
+              .build());
+        }
+      } catch (IOException|InterruptedException e) {
+        error = true;
+        LOG.warn("Failed to delete as user " + getUser(), e);
+      }
+    }
+    if (error) {
+      setSuccess(!error);
+    }
+    deletionTaskFinished();
+  }
+
+  /**
+   * Convert the FileDeletionTask to a String representation.
+   *
+   * @return String representation of the FileDeletionTask.
+   */
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("FileDeletionTask :");
+    sb.append("  id : ").append(getTaskId());
+    sb.append("  user : ").append(getUser());
+    sb.append("  subDir : ").append(
+        subDir == null ? "null" : subDir.toString());
+    sb.append("  baseDir : ");
+    if (baseDirs == null || baseDirs.size() == 0) {
+      sb.append("null");
+    } else {
+      for (Path baseDir : baseDirs) {
+        sb.append(baseDir.toString()).append(',');
+      }
+    }
+    return sb.toString().trim();
+  }
+
+  /**
+   * Convert the FileDeletionTask to the Protobuf representation for storing
+   * in the state store and recovery.
+   *
+   * @return the protobuf representation of the FileDeletionTask.
+   */
+  public DeletionServiceDeleteTaskProto convertDeletionTaskToProto() {
+    DeletionServiceDeleteTaskProto.Builder builder =
+        getBaseDeletionTaskProtoBuilder();
+    builder.setTaskType(DeletionTaskType.FILE.name());
+    if (getSubDir() != null) {
+      builder.setSubdir(getSubDir().toString());
+    }
+    if (getBaseDirs() != null) {
+      for (Path dir : getBaseDirs()) {
+        builder.addBasedirs(dir.toString());
+      }
+    }
+    return builder.build();
+  }
+}
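
run() above branches on the user: a null user means the NodeManager
deletes the path itself through the local FileContext, while a non-null
user delegates to ContainerExecutor.deleteAsUser() so the delete happens
with that user's permissions. A usage sketch of both forms (all paths are
illustrative; imports elided):

    // NM-owned absolute path, deleted in-process as the NM user.
    delService.delete(new FileDeletionTask(delService, null,
        new Path("/tmp/nm-local-dir/nmPrivate/leftover"), null));

    // Container-owned dirs: subDir is resolved under each base dir and
    // removed as "alice" via the ContainerExecutor.
    delService.delete(new FileDeletionTask(delService, "alice",
        new Path("appcache/application_123"),
        Arrays.asList(
            new Path("/data1/usercache/alice"),
            new Path("/data2/usercache/alice"))));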

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/package-info.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/package-info.java
new file mode 100644
index 0000000..f1a3985
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/deletion/task/package-info.java
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Package containing DeletionTasks for use with the DeletionService.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
index af34e92..47e6a55 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/LocalResourcesTrackerImpl.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceProto;
 import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.LocalizedResourceProto;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ResourceRecoveredEvent;
@@ -113,9 +114,9 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
     this.useLocalCacheDirectoryManager = useLocalCacheDirectoryManager;
     if (this.useLocalCacheDirectoryManager) {
       directoryManagers =
-          new ConcurrentHashMap<Path, LocalCacheDirectoryManager>();
+          new ConcurrentHashMap<>();
       inProgressLocalResourcesMap =
-          new ConcurrentHashMap<LocalResourceRequest, Path>();
+          new ConcurrentHashMap<>();
     }
     this.conf = conf;
     this.stateStore = stateStore;
@@ -393,7 +394,9 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
       return false;
     } else { // ResourceState is LOCALIZED or INIT
       if (ResourceState.LOCALIZED.equals(rsrc.getState())) {
-        delService.delete(getUser(), getPathToDelete(rsrc.getLocalPath()));
+        FileDeletionTask deletionTask = new FileDeletionTask(delService,
+            getUser(), getPathToDelete(rsrc.getLocalPath()), null);
+        delService.delete(deletionTask);
       }
       removeResource(rem.getRequest());
       LOG.info("Removed " + rsrc.getLocalPath() + " from localized cache");
@@ -488,7 +491,9 @@ class LocalResourcesTrackerImpl implements LocalResourcesTracker {
       LOG.warn("Directory " + uniquePath + " already exists, " +
           "try next one.");
       if (delService != null) {
-        delService.delete(getUser(), uniquePath);
+        FileDeletionTask deletionTask = new FileDeletionTask(delService,
+            getUser(), uniquePath, null);
+        delService.delete(deletionTask);
       }
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index 663bad7..5bc0da7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -95,7 +95,6 @@ import org.apache.hadoop.yarn.proto.YarnServerNodemanagerRecoveryProtos.Localize
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
-import org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.DirectoryCollection.DirsChangeListener;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
@@ -113,6 +112,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerResourceFailedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalCacheCleaner.LocalCacheCleanerStats;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ApplicationLocalizationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationCleanupEvent;
@@ -604,7 +604,9 @@ public class ResourceLocalizationService extends CompositeService
   private void submitDirForDeletion(String userName, Path dir) {
     try {
       lfs.getFileStatus(dir);
-      delService.delete(userName, dir, new Path[] {});
+      FileDeletionTask deletionTask = new FileDeletionTask(delService, userName,
+          dir, null);
+      delService.delete(deletionTask);
     } catch (UnsupportedFileSystemException ue) {
       LOG.warn("Local dir " + dir + " is an unsupported filesystem", ue);
     } catch (IOException ie) {
@@ -1234,10 +1236,13 @@ public class ResourceLocalizationService extends CompositeService
           event.getResource().unlock();
         }
         if (!paths.isEmpty()) {
-          delService.delete(context.getUser(),
-              null, paths.toArray(new Path[paths.size()]));
+          FileDeletionTask deletionTask = new FileDeletionTask(delService,
+              context.getUser(), null, paths);
+          delService.delete(deletionTask);
         }
-        delService.delete(null, nmPrivateCTokensPath, new Path[] {});
+        FileDeletionTask deletionTask = new FileDeletionTask(delService, null,
+            nmPrivateCTokensPath, null);
+        delService.delete(deletionTask);
       }
     }
 
@@ -1456,7 +1461,9 @@ public class ResourceLocalizationService extends CompositeService
         String appName = fileStatus.getPath().getName();
         if (appName.matches("^application_\\d+_\\d+_DEL_\\d+$")) {
           LOG.info("delete app log dir," + appName);
-          del.delete(null, fileStatus.getPath());
+          FileDeletionTask deletionTask = new FileDeletionTask(del, null,
+              fileStatus.getPath(), null);
+          del.delete(deletionTask);
         }
       }
     }
@@ -1516,7 +1523,9 @@ public class ResourceLocalizationService extends CompositeService
               ||
               status.getPath().getName()
                   .matches(".*" + ContainerLocalizer.FILECACHE + "_DEL_.*")) {
-            del.delete(null, status.getPath(), new Path[] {});
+            FileDeletionTask deletionTask = new FileDeletionTask(del, null,
+                status.getPath(), null);
+            del.delete(deletionTask);
           }
         } catch (IOException ex) {
           // Do nothing, just give the warning
@@ -1530,24 +1539,25 @@ public class ResourceLocalizationService extends CompositeService
   private void cleanUpFilesPerUserDir(FileContext lfs, DeletionService del,
       Path userDirPath) throws IOException {
     RemoteIterator<FileStatus> userDirStatus = lfs.listStatus(userDirPath);
-    FileDeletionTask dependentDeletionTask =
-        del.createFileDeletionTask(null, userDirPath, new Path[] {});
+    FileDeletionTask dependentDeletionTask = new FileDeletionTask(del, null,
+        userDirPath, new ArrayList<Path>());
     if (userDirStatus != null && userDirStatus.hasNext()) {
       List<FileDeletionTask> deletionTasks = new ArrayList<FileDeletionTask>();
       while (userDirStatus.hasNext()) {
         FileStatus status = userDirStatus.next();
         String owner = status.getOwner();
-        FileDeletionTask deletionTask =
-            del.createFileDeletionTask(owner, null,
-              new Path[] { status.getPath() });
-        deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
+        List<Path> pathList = new ArrayList<>();
+        pathList.add(status.getPath());
+        FileDeletionTask deletionTask = new FileDeletionTask(del, owner, null,
+            pathList);
+        deletionTask.addDeletionTaskDependency(dependentDeletionTask);
         deletionTasks.add(deletionTask);
       }
       for (FileDeletionTask task : deletionTasks) {
-        del.scheduleFileDeletionTask(task);
+        del.delete(task);
       }
     } else {
-      del.scheduleFileDeletionTask(dependentDeletionTask);
+      del.delete(dependentDeletionTask);
     }
   }
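
The per-user cleanup above leans on task dependencies: each per-owner child
task registers the user-directory task as its dependent, so the parent
directory is removed only after every child task has run (this matches the
comment in the corresponding test change below). A compact sketch of the
same pattern, with illustrative names and java.util imports omitted:

    // Parent task for the enclosing directory; the empty base-dir list
    // means the path itself is the deletion target.
    FileDeletionTask parent =
        new FileDeletionTask(del, null, parentDir, new ArrayList<Path>());

    // Child task owned by "owner" for one sub-path; the dependency makes
    // the parent fire once the child completes.
    List<Path> childPaths = new ArrayList<>();
    childPaths.add(childPath);
    FileDeletionTask child = new FileDeletionTask(del, owner, null, childPaths);
    child.addDeletionTaskDependency(parent);
    del.delete(child);  // only the child is scheduled directly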
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
index f465534..0d9e686 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AppLogAggregatorImpl.java
@@ -69,6 +69,8 @@ import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.DeletionTask;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.Times;
@@ -258,19 +260,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
       return;
     }
 
-    if (UserGroupInformation.isSecurityEnabled()) {
-      Credentials systemCredentials =
-          context.getSystemCredentialsForApps().get(appId);
-      if (systemCredentials != null) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Adding new framework-token for " + appId
-              + " for log-aggregation: " + systemCredentials.getAllTokens()
-              + "; userUgi=" + userUgi);
-        }
-        // this will replace old token
-        userUgi.addCredentials(systemCredentials);
-      }
-    }
+    addCredentials();
 
     // Create a set of Containers whose logs will be uploaded in this cycle.
     // It includes:
@@ -332,9 +322,12 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
             finishedContainers.contains(container));
         if (uploadedFilePathsInThisCycle.size() > 0) {
           uploadedLogsInThisCycle = true;
-          this.delService.delete(this.userUgi.getShortUserName(), null,
-              uploadedFilePathsInThisCycle
-                  .toArray(new Path[uploadedFilePathsInThisCycle.size()]));
+          List<Path> uploadedFilePathsInThisCycleList = new ArrayList<>();
+          uploadedFilePathsInThisCycleList.addAll(uploadedFilePathsInThisCycle);
+          DeletionTask deletionTask = new FileDeletionTask(delService,
+              this.userUgi.getShortUserName(), null,
+              uploadedFilePathsInThisCycleList);
+          delService.delete(deletionTask);
         }
 
         // This container is finished, and all its logs have been uploaded,
@@ -352,11 +345,7 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
       }
 
       long currentTime = System.currentTimeMillis();
-      final Path renamedPath = this.rollingMonitorInterval <= 0
-              ? remoteNodeLogFileForApp : new Path(
-                remoteNodeLogFileForApp.getParent(),
-                remoteNodeLogFileForApp.getName() + "_"
-                    + currentTime);
+      final Path renamedPath = getRenamedPath(currentTime);
 
       final boolean rename = uploadedLogsInThisCycle;
       try {
@@ -396,6 +385,28 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
     }
   }
 
+  private Path getRenamedPath(long currentTime) {
+    return this.rollingMonitorInterval <= 0 ? remoteNodeLogFileForApp
+        : new Path(remoteNodeLogFileForApp.getParent(),
+        remoteNodeLogFileForApp.getName() + "_" + currentTime);
+  }
+
+  private void addCredentials() {
+    if (UserGroupInformation.isSecurityEnabled()) {
+      Credentials systemCredentials =
+          context.getSystemCredentialsForApps().get(appId);
+      if (systemCredentials != null) {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Adding new framework-token for " + appId
+              + " for log-aggregation: " + systemCredentials.getAllTokens()
+              + "; userUgi=" + userUgi);
+        }
+        // this will replace old token
+        userUgi.addCredentials(systemCredentials);
+      }
+    }
+  }
+
   @VisibleForTesting
   protected LogWriter createLogWriter() {
     return new LogWriter();
@@ -561,8 +572,11 @@ public class AppLogAggregatorImpl implements AppLogAggregator {
     }
 
     if (localAppLogDirs.size() > 0) {
-      this.delService.delete(this.userUgi.getShortUserName(), null,
-        localAppLogDirs.toArray(new Path[localAppLogDirs.size()]));
+      List<Path> localAppLogDirsList = new ArrayList<>();
+      localAppLogDirsList.addAll(localAppLogDirs);
+      DeletionTask deletionTask = new FileDeletionTask(delService,
+          this.userUgi.getShortUserName(), null, localAppLogDirsList);
+      this.delService.delete(deletionTask);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
index 2901743..9961748 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/loghandler/NonAggregatingLogHandler.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
 import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppFinishedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerAppStartedEvent;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.loghandler.event.LogHandlerEvent;
@@ -247,8 +248,10 @@ public class NonAggregatingLogHandler extends AbstractService implements
         new ApplicationEvent(this.applicationId,
           ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED));
       if (localAppLogDirs.size() > 0) {
-        NonAggregatingLogHandler.this.delService.delete(user, null,
-          (Path[]) localAppLogDirs.toArray(new Path[localAppLogDirs.size()]));
+        FileDeletionTask deletionTask = new FileDeletionTask(
+            NonAggregatingLogHandler.this.delService, user, null,
+            localAppLogDirs);
+        NonAggregatingLogHandler.this.delService.delete(deletionTask);
       }
       try {
         NonAggregatingLogHandler.this.stateStore.removeLogDeleter(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
index 7831711..7212953 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/proto/yarn_server_nodemanager_recovery.proto
@@ -41,6 +41,7 @@ message DeletionServiceDeleteTaskProto {
   optional int64 deletionTime = 4;
   repeated string basedirs = 5;
   repeated int32 successorIds = 6;
+  optional string taskType = 7;
 }
 
 message LocalizedResourceProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c3685a4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
index 2e0bbe0..87f4a1c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestDeletionService.java
@@ -33,13 +33,14 @@ import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.server.nodemanager.DeletionService.FileDeletionTask;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask;
 import org.apache.hadoop.yarn.server.nodemanager.executor.DeletionAsUserContext;
 import org.apache.hadoop.yarn.server.nodemanager.recovery.NMMemoryStateStoreService;
 import org.junit.AfterClass;
 import org.junit.Test;
 import org.mockito.Mockito;
 
+
 public class TestDeletionService {
 
   private static final FileContext lfs = getLfs();
@@ -123,8 +124,9 @@ public class TestDeletionService {
     del.start();
     try {
       for (Path p : dirs) {
-        del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",
-            p, null);
+        FileDeletionTask deletionTask = new FileDeletionTask(del,
+            (Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p, null);
+        del.delete(deletionTask);
       }
 
       int msecToWait = 20 * 1000;
@@ -159,8 +161,10 @@ public class TestDeletionService {
       del.start();
       for (Path p : content) {
         assertTrue(lfs.util().exists(new Path(baseDirs.get(0), p)));
-        del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",
-            p, baseDirs.toArray(new Path[4]));
+        FileDeletionTask deletionTask = new FileDeletionTask(del,
+            (Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p,
+            baseDirs);
+        del.delete(deletionTask);
       }
 
       int msecToWait = 20 * 1000;
@@ -196,8 +200,9 @@ public class TestDeletionService {
       del.init(conf);
       del.start();
       for (Path p : dirs) {
-        del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p,
-            null);
+        FileDeletionTask deletionTask = new FileDeletionTask(del,
+            (Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p, null);
+        del.delete(deletionTask);
       }
       int msecToWait = 20 * 1000;
       for (Path p : dirs) {
@@ -220,7 +225,9 @@ public class TestDeletionService {
     try {
       del.init(conf);
       del.start();
-      del.delete("dingo", new Path("/does/not/exist"));
+      FileDeletionTask deletionTask = new FileDeletionTask(del, "dingo",
+          new Path("/does/not/exist"), null);
+      del.delete(deletionTask);
     } finally {
       del.stop();
     }
@@ -247,18 +254,20 @@ public class TestDeletionService {
       // first we will try to delete sub directories which are present. This
       // should then trigger parent directory to be deleted.
       List<Path> subDirs = buildDirs(r, dirs.get(0), 2);
-      
+
       FileDeletionTask dependentDeletionTask =
-          del.createFileDeletionTask(null, dirs.get(0), new Path[] {});
+          new FileDeletionTask(del, null, dirs.get(0), new ArrayList<Path>());
       List<FileDeletionTask> deletionTasks = new ArrayList<FileDeletionTask>();
       for (Path subDir : subDirs) {
+        List<Path> subDirList = new ArrayList<>();
+        subDirList.add(subDir);
         FileDeletionTask deletionTask =
-            del.createFileDeletionTask(null, null, new Path[] { subDir });
-        deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
+            new FileDeletionTask(del, null, dirs.get(0), subDirList);
+        deletionTask.addDeletionTaskDependency(dependentDeletionTask);
         deletionTasks.add(deletionTask);
       }
       for (FileDeletionTask task : deletionTasks) {
-        del.scheduleFileDeletionTask(task);
+        del.delete(task);
       }
 
       int msecToWait = 20 * 1000;
@@ -274,19 +283,21 @@ public class TestDeletionService {
       subDirs = buildDirs(r, dirs.get(1), 2);
       subDirs.add(new Path(dirs.get(1), "absentFile"));
       
-      dependentDeletionTask =
-          del.createFileDeletionTask(null, dirs.get(1), new Path[] {});
+      dependentDeletionTask = new FileDeletionTask(del, null, dirs.get(1),
+          new ArrayList<Path>());
       deletionTasks = new ArrayList<FileDeletionTask>();
       for (Path subDir : subDirs) {
-        FileDeletionTask deletionTask =
-            del.createFileDeletionTask(null, null, new Path[] { subDir });
-        deletionTask.addFileDeletionTaskDependency(dependentDeletionTask);
+        List<Path> subDirList = new ArrayList<>();
+        subDirList.add(subDir);
+        FileDeletionTask deletionTask = new FileDeletionTask(del, null, null,
+            subDirList);
+        deletionTask.addDeletionTaskDependency(dependentDeletionTask);
         deletionTasks.add(deletionTask);
       }
       // marking one of the tasks as a failure.
       deletionTasks.get(2).setSuccess(false);
       for (FileDeletionTask task : deletionTasks) {
-        del.scheduleFileDeletionTask(task);
+        del.delete(task);
       }
 
       msecToWait = 20 * 1000;
@@ -327,8 +338,10 @@ public class TestDeletionService {
       del.start();
       for (Path p : content) {
         assertTrue(lfs.util().exists(new Path(baseDirs.get(0), p)));
-        del.delete((Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo",
-            p, baseDirs.toArray(new Path[4]));
+        FileDeletionTask deletionTask = new FileDeletionTask(del,
+            (Long.parseLong(p.getName()) % 2) == 0 ? null : "dingo", p,
+            baseDirs);
+        del.delete(deletionTask);
       }
 
       // restart the deletion service
@@ -341,8 +354,10 @@ public class TestDeletionService {
       // verify paths are still eventually deleted
       int msecToWait = 10 * 1000;
       for (Path p : baseDirs) {
+        System.out.println("TEST Basedir: " + p.getName());
         for (Path q : content) {
           Path fp = new Path(p, q);
+          System.out.println("TEST Path: " + fp.toString());
           while (msecToWait > 0 && lfs.util().exists(fp)) {
             Thread.sleep(100);
             msecToWait -= 100;




[50/50] [abbrv] hadoop git commit: HADOOP-14500. Azure: TestFileSystemOperationExceptionHandling{, MultiThreaded} fails. Contributed by Rajesh Balamohan

Posted by xy...@apache.org.
HADOOP-14500. Azure: TestFileSystemOperationExceptionHandling{,MultiThreaded} fails. Contributed by Rajesh Balamohan


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2f0ddc8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2f0ddc8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2f0ddc8

Branch: refs/heads/HDFS-7240
Commit: d2f0ddc8f6f5026dd5e1aa27d60b736d07d67a79
Parents: a5c15bc
Author: Mingliang Liu <li...@apache.org>
Authored: Wed Jun 7 13:54:52 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:53 2017 -0700

----------------------------------------------------------------------
 .../hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java   | 1 +
 .../TestFileSystemOperationsExceptionHandlingMultiThreaded.java     | 1 +
 2 files changed, 2 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2f0ddc8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
index f5a1754..9ac25dd 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationExceptionHandling.java
@@ -94,6 +94,7 @@ public class TestFileSystemOperationExceptionHandling
     AzureBlobStorageTestAccount testAccount = createTestAccount();
     setupInputStreamToTest(testAccount);
     inputStream.seek(5);
+    inputStream.read();
   }
 
   /*

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2f0ddc8/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java
index 4c56795..1cd18ee 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestFileSystemOperationsExceptionHandlingMultiThreaded.java
@@ -91,6 +91,7 @@ public class TestFileSystemOperationsExceptionHandlingMultiThreaded
     renameThread.join();
 
     inputStream.seek(5);
+    inputStream.read();
   }
 
   @Test(expected=FileNotFoundException.class)
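
(Both fixes follow the same pattern: a seek alone may not perform any I/O
against the store, so the exception these tests expect can only surface once
bytes are actually requested; hence the read() added after each seek. This
rationale is inferred from the diff itself.)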




[25/50] [abbrv] hadoop git commit: HADOOP-14440. Add metrics for connections dropped. Contributed by Eric Badger.

Posted by xy...@apache.org.
HADOOP-14440. Add metrics for connections dropped. Contributed by Eric Badger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/323f8bb6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/323f8bb6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/323f8bb6

Branch: refs/heads/HDFS-7240
Commit: 323f8bb6e47d25b98ceb3c1efa5ae184e1ff7858
Parents: f4fba3d
Author: Brahma Reddy Battula <br...@apache.org>
Authored: Tue Jun 6 00:21:03 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:50 2017 -0700

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/ipc/Server.java | 20 +++++++++++++++++++-
 .../apache/hadoop/ipc/metrics/RpcMetrics.java   |  4 ++++
 .../hadoop-common/src/site/markdown/Metrics.md  |  1 +
 .../java/org/apache/hadoop/ipc/TestIPC.java     |  8 +++++---
 4 files changed, 29 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/323f8bb6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 3ea5a24..f3b9a82 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -64,6 +64,7 @@ import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
 
 import javax.security.sasl.Sasl;
 import javax.security.sasl.SaslException;
@@ -1220,6 +1221,7 @@ public abstract class Server {
           if (channel.isOpen()) {
             IOUtils.cleanup(null, channel);
           }
+          connectionManager.droppedConnections.getAndIncrement();
           continue;
         }
         key.attach(c);  // so closeCurrentConnection can get the object
@@ -3161,6 +3163,16 @@ public abstract class Server {
   }
 
   /**
+   * The number of RPC connections dropped due to
+   * too many connections.
+   * @return the number of dropped rpc connections
+   */
+  public long getNumDroppedConnections() {
+    return connectionManager.getDroppedConnections();
+
+  }
+
+  /**
    * The number of rpc calls in the queue.
    * @return The number of rpc calls in the queue.
    */
@@ -3277,7 +3289,8 @@ public abstract class Server {
   }
   
   private class ConnectionManager {
-    final private AtomicInteger count = new AtomicInteger();    
+    final private AtomicInteger count = new AtomicInteger();
+    final private AtomicLong droppedConnections = new AtomicLong();
     final private Set<Connection> connections;
     /* Map to maintain the statistics per User */
     final private Map<String, Integer> userToConnectionsMap;
@@ -3364,6 +3377,11 @@ public abstract class Server {
       return userToConnectionsMap;
     }
 
+
+    long getDroppedConnections() {
+      return droppedConnections.get();
+    }
+
     int size() {
       return count.get();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/323f8bb6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
index e5dde10..8ce1379 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/metrics/RpcMetrics.java
@@ -121,6 +121,10 @@ public class RpcMetrics {
     return server.getCallQueueLen();
   }
 
+  @Metric("Number of dropped connections") public long numDroppedConnections() {
+    return server.getNumDroppedConnections();
+  }
+
   // Public instrumentation methods that could be extracted to an
   // abstract class if we decide to do custom instrumentation classes a la
   // JobTrackerInstrumentation. The methods with //@Override comment are

http://git-wip-us.apache.org/repos/asf/hadoop/blob/323f8bb6/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
index a14c86d..4b89bc2 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
@@ -79,6 +79,7 @@ Each metrics record contains tags such as Hostname and port (number to which ser
 | `RpcAuthorizationSuccesses` | Total number of authorization successes |
 | `NumOpenConnections` | Current number of open connections |
 | `CallQueueLength` | Current length of the call queue |
+| `numDroppedConnections` | Total number of dropped connections |
 | `rpcQueueTime`*num*`sNumOps` | Shows total number of RPC calls (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
 | `rpcQueueTime`*num*`s50thPercentileLatency` | Shows the 50th percentile of RPC queue time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |
 | `rpcQueueTime`*num*`s75thPercentileLatency` | Shows the 75th percentile of RPC queue time in milliseconds (*num* seconds granularity) if `rpc.metrics.quantile.enable` is set to true. *num* is specified by `rpc.metrics.percentiles.intervals`. |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/323f8bb6/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 4198e40..611000d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -1339,7 +1339,7 @@ public class TestIPC {
 
   @Test
   public void testMaxConnections() throws Exception {
-    conf.setInt("ipc.server.max.connections", 5);
+    conf.setInt("ipc.server.max.connections", 6);
     Server server = null;
     Thread connectors[] = new Thread[10];
 
@@ -1374,8 +1374,10 @@ public class TestIPC {
       }
 
       Thread.sleep(1000);
-      // server should only accept up to 5 connections
-      assertEquals(5, server.getNumOpenConnections());
+      // server should only accept up to 6 connections
+      assertEquals(6, server.getNumOpenConnections());
+      // server should drop the other 4 connections
+      assertEquals(4, server.getNumDroppedConnections());
 
       for (int i = 0; i < 10; i++) {
         connectors[i].join();
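
(The updated numbers are internally consistent: 10 connector threads against
a server capped at 6 open connections leaves 10 - 6 = 4 attempts to be
recorded by the new dropped-connections counter, matching the pair of
assertions above.)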




[20/50] [abbrv] hadoop git commit: HDFS-11359. DFSAdmin report command supports displaying maintenance state datanodes. Contributed by Yiqun Lin.

Posted by xy...@apache.org.
HDFS-11359. DFSAdmin report command supports displaying maintenance state datanodes. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60a7f57b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60a7f57b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60a7f57b

Branch: refs/heads/HDFS-7240
Commit: 60a7f57b61922581e1ced871e4a649cb5cd0ca0f
Parents: d48f2f6
Author: Yiqun Lin <yq...@apache.org>
Authored: Fri Jun 2 12:48:30 2017 +0800
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:50 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/protocol/HdfsConstants.java     |  2 +-
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  8 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  2 +
 .../server/blockmanagement/DatanodeManager.java |  7 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 88 +++++++++---------
 .../src/site/markdown/HDFSCommands.md           |  4 +-
 .../hadoop/hdfs/TestMaintenanceState.java       | 94 ++++++++++++++++++++
 .../src/test/resources/testHDFSConf.xml         |  2 +-
 8 files changed, 161 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60a7f57b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 0d31bc4..b636121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -141,7 +141,7 @@ public final class HdfsConstants {
 
   // type of the datanode report
   public enum DatanodeReportType {
-    ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE
+    ALL, LIVE, DEAD, DECOMMISSIONING, ENTERING_MAINTENANCE, IN_MAINTENANCE
   }
 
   /* Hidden constructor */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60a7f57b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index cc52f7c..928133f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -1725,6 +1725,10 @@ public class PBHelperClient {
     case LIVE: return DatanodeReportTypeProto.LIVE;
     case DEAD: return DatanodeReportTypeProto.DEAD;
     case DECOMMISSIONING: return DatanodeReportTypeProto.DECOMMISSIONING;
+    case ENTERING_MAINTENANCE:
+      return DatanodeReportTypeProto.ENTERING_MAINTENANCE;
+    case IN_MAINTENANCE:
+      return DatanodeReportTypeProto.IN_MAINTENANCE;
     default:
       throw new IllegalArgumentException("Unexpected data type report:" + t);
     }
@@ -2128,6 +2132,10 @@ public class PBHelperClient {
     case LIVE: return DatanodeReportType.LIVE;
     case DEAD: return DatanodeReportType.DEAD;
     case DECOMMISSIONING: return DatanodeReportType.DECOMMISSIONING;
+    case ENTERING_MAINTENANCE:
+      return DatanodeReportType.ENTERING_MAINTENANCE;
+    case IN_MAINTENANCE:
+      return DatanodeReportType.IN_MAINTENANCE;
     default:
       throw new IllegalArgumentException("Unexpected data type report:" + t);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60a7f57b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index a4b6f0b..fb42271 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -332,6 +332,8 @@ enum DatanodeReportTypeProto {  // type of the datanode report
   LIVE = 2;
   DEAD = 3;
   DECOMMISSIONING = 4;
+  ENTERING_MAINTENANCE = 5;
+  IN_MAINTENANCE = 6;
 }
 
 message GetDatanodeReportRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60a7f57b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index c303594..a786c6a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1441,6 +1441,9 @@ public class DatanodeManager {
     final boolean listEnteringMaintenanceNodes =
         type == DatanodeReportType.ALL ||
         type == DatanodeReportType.ENTERING_MAINTENANCE;
+    final boolean listInMaintenanceNodes =
+        type == DatanodeReportType.ALL ||
+        type == DatanodeReportType.IN_MAINTENANCE;
 
     ArrayList<DatanodeDescriptor> nodes;
     final HostSet foundNodes = new HostSet();
@@ -1453,11 +1456,13 @@ public class DatanodeManager {
         final boolean isDead = isDatanodeDead(dn);
         final boolean isDecommissioning = dn.isDecommissionInProgress();
         final boolean isEnteringMaintenance = dn.isEnteringMaintenance();
+        final boolean isInMaintenance = dn.isInMaintenance();
 
         if (((listLiveNodes && !isDead) ||
             (listDeadNodes && isDead) ||
             (listDecommissioningNodes && isDecommissioning) ||
-            (listEnteringMaintenanceNodes && isEnteringMaintenance)) &&
+            (listEnteringMaintenanceNodes && isEnteringMaintenance) ||
+            (listInMaintenanceNodes && isInMaintenance)) &&
             hostConfigManager.isIncluded(dn)) {
           nodes.add(dn);
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60a7f57b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 5375cd9..e59ea37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -421,7 +421,8 @@ public class DFSAdmin extends FsShell {
    * "hdfs dfsadmin"
    */
   private static final String commonUsageSummary =
-    "\t[-report [-live] [-dead] [-decommissioning]]\n" +
+    "\t[-report [-live] [-dead] [-decommissioning] " +
+    "[-enteringmaintenance] [-inmaintenance]]\n" +
     "\t[-safemode <enter | leave | get | wait>]\n" +
     "\t[-saveNamespace [-beforeShutdown]]\n" +
     "\t[-rollEdits]\n" +
@@ -544,48 +545,51 @@ public class DFSAdmin extends FsShell {
     final boolean listDead = StringUtils.popOption("-dead", args);
     final boolean listDecommissioning =
         StringUtils.popOption("-decommissioning", args);
+    final boolean listEnteringMaintenance =
+        StringUtils.popOption("-enteringmaintenance", args);
+    final boolean listInMaintenance =
+        StringUtils.popOption("-inmaintenance", args);
+
 
     // If no filter flags are found, then list all DN types
-    boolean listAll = (!listLive && !listDead && !listDecommissioning);
+    boolean listAll = (!listLive && !listDead && !listDecommissioning
+        && !listEnteringMaintenance && !listInMaintenance);
 
     if (listAll || listLive) {
-      DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
-      if (live.length > 0 || listLive) {
-        System.out.println("Live datanodes (" + live.length + "):\n");
-      }
-      if (live.length > 0) {
-        for (DatanodeInfo dn : live) {
-          System.out.println(dn.getDatanodeReport());
-          System.out.println();
-        }
-      }
+      printDataNodeReports(dfs, DatanodeReportType.LIVE, listLive, "Live");
     }
 
     if (listAll || listDead) {
-      DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
-      if (dead.length > 0 || listDead) {
-        System.out.println("Dead datanodes (" + dead.length + "):\n");
-      }
-      if (dead.length > 0) {
-        for (DatanodeInfo dn : dead) {
-          System.out.println(dn.getDatanodeReport());
-          System.out.println();
-        }
-      }
+      printDataNodeReports(dfs, DatanodeReportType.DEAD, listDead, "Dead");
     }
 
     if (listAll || listDecommissioning) {
-      DatanodeInfo[] decom =
-          dfs.getDataNodeStats(DatanodeReportType.DECOMMISSIONING);
-      if (decom.length > 0 || listDecommissioning) {
-        System.out.println("Decommissioning datanodes (" + decom.length
-            + "):\n");
-      }
-      if (decom.length > 0) {
-        for (DatanodeInfo dn : decom) {
-          System.out.println(dn.getDatanodeReport());
-          System.out.println();
-        }
+      printDataNodeReports(dfs, DatanodeReportType.DECOMMISSIONING,
+          listDecommissioning, "Decommissioning");
+    }
+
+    if (listAll || listEnteringMaintenance) {
+      printDataNodeReports(dfs, DatanodeReportType.ENTERING_MAINTENANCE,
+          listEnteringMaintenance, "Entering maintenance");
+    }
+
+    if (listAll || listInMaintenance) {
+      printDataNodeReports(dfs, DatanodeReportType.IN_MAINTENANCE,
+          listInMaintenance, "In maintenance");
+    }
+  }
+
+  private static void printDataNodeReports(DistributedFileSystem dfs,
+      DatanodeReportType type, boolean listNodes, String nodeState)
+      throws IOException {
+    DatanodeInfo[] nodes = dfs.getDataNodeStats(type);
+    if (nodes.length > 0 || listNodes) {
+      System.out.println(nodeState + " datanodes (" + nodes.length + "):\n");
+    }
+    if (nodes.length > 0) {
+      for (DatanodeInfo dn : nodes) {
+        System.out.println(dn.getDatanodeReport());
+        System.out.println();
       }
     }
   }
@@ -986,12 +990,13 @@ public class DFSAdmin extends FsShell {
       "hdfs dfsadmin\n" +
       commonUsageSummary;
 
-    String report ="-report [-live] [-dead] [-decommissioning]:\n" +
-      "\tReports basic filesystem information and statistics. \n" +
-      "\tThe dfs usage can be different from \"du\" usage, because it\n" +
-      "\tmeasures raw space used by replication, checksums, snapshots\n" +
-      "\tand etc. on all the DNs.\n" +
-      "\tOptional flags may be used to filter the list of displayed DNs.\n";
+    String report ="-report [-live] [-dead] [-decommissioning] "
+        + "[-enteringmaintenance] [-inmaintenance]:\n" +
+        "\tReports basic filesystem information and statistics. \n" +
+        "\tThe dfs usage can be different from \"du\" usage, because it\n" +
+        "\tmeasures raw space used by replication, checksums, snapshots\n" +
+        "\tand etc. on all the DNs.\n" +
+        "\tOptional flags may be used to filter the list of displayed DNs.\n";
 
     String safemode = "-safemode <enter|leave|get|wait|forceExit>:  Safe mode " +
         "maintenance command.\n" +
@@ -1779,7 +1784,8 @@ public class DFSAdmin extends FsShell {
   private static void printUsage(String cmd) {
     if ("-report".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
-          + " [-report] [-live] [-dead] [-decommissioning]");
+          + " [-report] [-live] [-dead] [-decommissioning]"
+          + " [-enteringmaintenance] [-inmaintenance]");
     } else if ("-safemode".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
           + " [-safemode enter | leave | get | wait | forceExit]");
@@ -1917,7 +1923,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if ("-report".equals(cmd)) {
-      if (argv.length > 4) {
+      if (argv.length > 6) {
         printUsage(cmd);
         return exitCode;
       }
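
With the new filters, the report can be narrowed to maintenance states. A
hedged invocation sketch, mirroring the TestMaintenanceState change further
below; the Configuration "conf" is assumed to point at the target cluster:

    // Command-line equivalent:
    //   hdfs dfsadmin -report -enteringmaintenance -inmaintenance
    int ret = ToolRunner.run(new DFSAdmin(conf),
        new String[] {"-report", "-enteringmaintenance", "-inmaintenance"});
    // ret == 0 on success; the output contains "Entering maintenance
    // datanodes (N):" and "In maintenance datanodes (N):" sections.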

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60a7f57b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index b8d1362..6765a8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -338,7 +338,7 @@ Runs a HDFS datanode.
 
 Usage:
 
-        hdfs dfsadmin [-report [-live] [-dead] [-decommissioning]]
+        hdfs dfsadmin [-report [-live] [-dead] [-decommissioning] [-enteringmaintenance] [-inmaintenance]]
         hdfs dfsadmin [-safemode enter | leave | get | wait | forceExit]
         hdfs dfsadmin [-saveNamespace [-beforeShutdown]]
         hdfs dfsadmin [-rollEdits]
@@ -374,7 +374,7 @@ Usage:
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
-| `-report` `[-live]` `[-dead]` `[-decommissioning]` | Reports basic filesystem information and statistics, The dfs usage can be different from "du" usage, because it measures raw space used by replication, checksums, snapshots and etc. on all the DNs. Optional flags may be used to filter the list of displayed DataNodes. |
+| `-report` `[-live]` `[-dead]` `[-decommissioning]` `[-enteringmaintenance]` `[-inmaintenance]` | Reports basic filesystem information and statistics, The dfs usage can be different from "du" usage, because it measures raw space used by replication, checksums, snapshots and etc. on all the DNs. Optional flags may be used to filter the list of displayed DataNodes. |
 | `-safemode` enter\|leave\|get\|wait\|forceExit | Safe mode maintenance command. Safe mode is a Namenode state in which it <br/>1. does not accept changes to the name space (read-only) <br/>2. does not replicate or delete blocks. <br/>Safe mode is entered automatically at Namenode startup, and leaves safe mode automatically when the configured minimum percentage of blocks satisfies the minimum replication condition. If Namenode detects any anomaly then it will linger in safe mode till that issue is resolved. If that anomaly is the consequence of a deliberate action, then administrator can use -safemode forceExit to exit safe mode. The cases where forceExit may be required are<br/> 1. Namenode metadata is not consistent. If Namenode detects that metadata has been modified out of band and can cause data loss, then Namenode will enter forceExit state. At that point user can either restart Namenode with correct metadata files or forceExit (if data loss is acceptable).<br/>2. Rollback causes metadata to be replaced and rarely it can trigger safe mode forceExit state in Namenode. In that case you may proceed by issuing -safemode forceExit.<br/> Safe mode can also be entered manually, but then it can only be turned off manually as well. |
 | `-saveNamespace` `[-beforeShutdown]` | Save current namespace into storage directories and reset edits log. Requires safe mode. If the "beforeShutdown" option is given, the NameNode does a checkpoint if and only if no checkpoint has been done during a time window (a configurable number of checkpoint periods). This is usually used before shutting down the NameNode to prevent potential fsimage/editlog corruption. |
 | `-rollEdits` | Rolls the edit log on the active NameNode. |

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60a7f57b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
index b49fba0..e0dfb4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMaintenanceState.java
@@ -17,11 +17,18 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.hamcrest.CoreMatchers.allOf;
+import static org.hamcrest.CoreMatchers.containsString;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.PrintStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
@@ -29,6 +36,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,8 +51,10 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.ToolRunner;
 import org.junit.Assert;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -1124,4 +1134,88 @@ public class TestMaintenanceState extends AdminStatesBaseTest {
       return null;
     }
   }
+
+  @Test(timeout = 120000)
+  public void testReportMaintenanceNodes() throws Exception {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    ByteArrayOutputStream err = new ByteArrayOutputStream();
+    System.setOut(new PrintStream(out));
+    System.setErr(new PrintStream(err));
+
+    LOG.info("Starting testReportMaintenanceNodes");
+    int expirationInMs = 30 * 1000;
+    int numNodes = 2;
+    setMinMaintenanceR(numNodes);
+
+    startCluster(1, numNodes);
+    getCluster().waitActive();
+
+    FileSystem fileSys = getCluster().getFileSystem(0);
+    getConf().set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        fileSys.getUri().toString());
+    DFSAdmin dfsAdmin = new DFSAdmin(getConf());
+
+    FSNamesystem fsn = getCluster().getNameNode().getNamesystem();
+    assertEquals(numNodes, fsn.getNumLiveDataNodes());
+
+    int ret = ToolRunner.run(dfsAdmin,
+        new String[] {"-report", "-enteringmaintenance", "-inmaintenance"});
+    assertEquals(0, ret);
+    assertThat(out.toString(),
+        is(allOf(containsString("Entering maintenance datanodes (0):"),
+            containsString("In maintenance datanodes (0):"),
+            not(containsString(
+                getCluster().getDataNodes().get(0).getDisplayName())),
+            not(containsString(
+                getCluster().getDataNodes().get(1).getDisplayName())))));
+
+    final Path file = new Path("/testReportMaintenanceNodes.dat");
+    writeFile(fileSys, file, numNodes, 1);
+
+    DatanodeInfo[] nodes = getFirstBlockReplicasDatanodeInfos(fileSys, file);
+    // Request maintenance for the first DataNode. It will not transition
+    // to the next state, AdminStates.IN_MAINTENANCE, immediately, since
+    // there are not enough candidate nodes to satisfy the min maintenance
+    // replication.
+    DatanodeInfo maintenanceDN = takeNodeOutofService(0,
+        nodes[0].getDatanodeUuid(), Time.now() + expirationInMs, null, null,
+        AdminStates.ENTERING_MAINTENANCE);
+    assertEquals(1, fsn.getNumEnteringMaintenanceDataNodes());
+
+    // reset stream
+    out.reset();
+    err.reset();
+
+    ret = ToolRunner.run(dfsAdmin,
+        new String[] {"-report", "-enteringmaintenance"});
+    assertEquals(0, ret);
+    assertThat(out.toString(),
+        is(allOf(containsString("Entering maintenance datanodes (1):"),
+            containsString(nodes[0].getXferAddr()),
+            not(containsString(nodes[1].getXferAddr())))));
+
+    // reset stream
+    out.reset();
+    err.reset();
+
+    // start a new datanode to make state transition to
+    // AdminStates.IN_MAINTENANCE
+    getCluster().startDataNodes(getConf(), 1, true, null, null);
+    getCluster().waitActive();
+
+    waitNodeState(maintenanceDN, AdminStates.IN_MAINTENANCE);
+    assertEquals(1, fsn.getNumInMaintenanceLiveDataNodes());
+
+    ret = ToolRunner.run(dfsAdmin,
+        new String[] {"-report", "-inmaintenance"});
+    assertEquals(0, ret);
+    assertThat(out.toString(),
+        is(allOf(containsString("In maintenance datanodes (1):"),
+            containsString(nodes[0].getXferAddr()),
+            not(containsString(nodes[1].getXferAddr())),
+            not(containsString(
+                getCluster().getDataNodes().get(2).getDisplayName())))));
+
+    cleanupFile(getCluster().getFileSystem(), file);
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60a7f57b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
index 88518bf..9302507 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
@@ -15665,7 +15665,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-report \[-live\] \[-dead\] \[-decommissioning\]:(.)*</expected-output>
+          <expected-output>^-report \[-live\] \[-dead\] \[-decommissioning\] \[-enteringmaintenance\] \[-inmaintenance\]:(.)*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>




[47/50] [abbrv] hadoop git commit: HDFS-11932. BPServiceActor thread name is not correctly set. Contributed by Chen Liang.

Posted by xy...@apache.org.
HDFS-11932. BPServiceActor thread name is not correctly set. Contributed by Chen Liang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df7d952f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df7d952f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df7d952f

Branch: refs/heads/HDFS-7240
Commit: df7d952f12da040275035dc0e060817bf2bce4a8
Parents: 974f33a
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Jun 6 13:51:02 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:52 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/server/datanode/BPOfferService.java   | 4 ++--
 .../org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java   | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df7d952f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 042169a..2644c0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -189,8 +189,8 @@ class BPOfferService {
       if (bpNSInfo != null) {
         return bpNSInfo.getBlockPoolID();
       } else {
-        LOG.warn("Block pool ID needed, but service not yet registered with NN",
-            new Exception("trace"));
+        LOG.warn("Block pool ID needed, but service not yet registered with " +
+                "NN, trace:", new Exception());
         return null;
       }
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/df7d952f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index fcf7d5e..0896844 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -541,7 +541,7 @@ class BPServiceActor implements Runnable {
       //Thread is started already
       return;
     }
-    bpThread = new Thread(this, formatThreadName("heartbeating", nnAddr));
+    bpThread = new Thread(this);
     bpThread.setDaemon(true); // needed for JUnit testing
     bpThread.start();
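
The hunk above stops naming the actor thread at construction time; together with the BPOfferService change, it suggests the eager name computation could ask for state (such as the block pool ID) before the actor had registered with the NameNode. A hedged sketch of the general pattern of naming a worker thread lazily from inside run(), once the state the name depends on exists. This is an illustration only, not the actual HDFS-11932 patch:

// Illustrative sketch, not the real BPServiceActor: defer naming the
// worker thread until run(), when registration has filled in the state
// the name is built from.
class ServiceActor implements Runnable {
  private volatile String registeredId;  // hypothetical; set during handshake
  private Thread worker;

  void start() {
    worker = new Thread(this);  // no name yet; nothing half-initialized is read
    worker.setDaemon(true);
    worker.start();
  }

  @Override
  public void run() {
    register();  // hypothetical handshake that fills in registeredId
    Thread.currentThread().setName("heartbeating for " + registeredId);
    // ... service loop ...
  }

  private void register() {
    registeredId = "BP-0000000000-127.0.0.1-0";  // stand-in value for the sketch
  }
}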
 




[09/50] [abbrv] hadoop git commit: YARN-6246. Identifying starved apps does not need the scheduler writelock (Contributed by Karthik Kambatla via Daniel Templeton)

Posted by xy...@apache.org.
YARN-6246. Identifying starved apps does not need the scheduler writelock
(Contributed by Karthik Kambatla via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd7b6fb3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd7b6fb3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd7b6fb3

Branch: refs/heads/HDFS-7240
Commit: dd7b6fb3cd072776d50e8828d0c8d2cdda0c20cc
Parents: 177c0c1
Author: Daniel Templeton <te...@apache.org>
Authored: Wed May 31 15:48:04 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:49 2017 -0700

----------------------------------------------------------------------
 .../scheduler/fair/FSLeafQueue.java             |  9 +++----
 .../scheduler/fair/FSParentQueue.java           |  4 +--
 .../resourcemanager/scheduler/fair/FSQueue.java | 19 +++++++++-----
 .../scheduler/fair/FairScheduler.java           | 27 ++++++++++++++------
 4 files changed, 38 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd7b6fb3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 10f1e28..1de0e30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -198,13 +198,10 @@ public class FSLeafQueue extends FSQueue {
   }
 
   @Override
-  public void updateInternal(boolean checkStarvation) {
+  void updateInternal() {
     readLock.lock();
     try {
       policy.computeShares(runnableApps, getFairShare());
-      if (checkStarvation) {
-        updateStarvedApps();
-      }
     } finally {
       readLock.unlock();
     }
@@ -283,8 +280,10 @@ public class FSLeafQueue extends FSQueue {
    * If this queue is starving due to fairshare, there must be at least
    * one application that is starved. And, even if the queue is not
    * starved due to fairshare, there might still be starved applications.
+   *
+   * Caller does not need read/write lock on the leaf queue.
    */
-  private void updateStarvedApps() {
+  void updateStarvedApps() {
     // Fetch apps with pending demand
     TreeSet<FSAppAttempt> appsWithDemand = fetchAppsWithDemand(false);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd7b6fb3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index b062c58..5b4e4dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -79,13 +79,13 @@ public class FSParentQueue extends FSQueue {
   }
 
   @Override
-  public void updateInternal(boolean checkStarvation) {
+  void updateInternal() {
     readLock.lock();
     try {
       policy.computeShares(childQueues, getFairShare());
       for (FSQueue childQueue : childQueues) {
         childQueue.getMetrics().setFairShare(childQueue.getFairShare());
-        childQueue.updateInternal(checkStarvation);
+        childQueue.updateInternal();
       }
     } finally {
       readLock.unlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd7b6fb3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index e131140..12b1b83 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -326,16 +326,23 @@ public abstract class FSQueue implements Queue, Schedulable {
 
   /**
    * Recomputes the shares for all child queues and applications based on this
-   * queue's current share, and checks for starvation.
+   * queue's current share.
    *
-   * @param checkStarvation whether to check for fairshare or minshare
-   *                        starvation on update
+   * To be called holding the scheduler writelock.
    */
-  abstract void updateInternal(boolean checkStarvation);
+  abstract void updateInternal();
 
-  public void update(Resource fairShare, boolean checkStarvation) {
+  /**
+   * Set the queue's fairshare and update the demand/fairshare of child
+   * queues/applications.
+   *
+   * To be called holding the scheduler writelock.
+   *
+   * @param fairShare the fair share to assign to this queue
+   */
+  public void update(Resource fairShare) {
     setFairShare(fairShare);
-    updateInternal(checkStarvation);
+    updateInternal();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd7b6fb3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index d1a237a..d779159 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -366,20 +366,31 @@ public class FairScheduler extends
    */
   @VisibleForTesting
   public void update() {
-    try {
-      writeLock.lock();
-
-      FSQueue rootQueue = queueMgr.getRootQueue();
+    FSQueue rootQueue = queueMgr.getRootQueue();
 
+    // Update demands and fairshares
+    writeLock.lock();
+    try {
       // Recursively update demands for all queues
       rootQueue.updateDemand();
-
-      Resource clusterResource = getClusterResource();
-      rootQueue.update(clusterResource, shouldAttemptPreemption());
+      rootQueue.update(getClusterResource());
 
       // Update metrics
       updateRootQueueMetrics();
+    } finally {
+      writeLock.unlock();
+    }
+
+    readLock.lock();
+    try {
+      // Update starvation stats and identify starved applications
+      if (shouldAttemptPreemption()) {
+        for (FSLeafQueue queue : queueMgr.getLeafQueues()) {
+          queue.updateStarvedApps();
+        }
+      }
 
+      // Log debug information
       if (LOG.isDebugEnabled()) {
         if (--updatesToSkipForDebug < 0) {
           updatesToSkipForDebug = UPDATE_DEBUG_FREQUENCY;
@@ -387,7 +398,7 @@ public class FairScheduler extends
         }
       }
     } finally {
-      writeLock.unlock();
+      readLock.unlock();
     }
   }
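
The reworked update() above splits one critical section in two: recomputing demands and fair shares mutates queue state and stays under the scheduler write lock, while the starvation scan only reads the freshly computed shares and moves under the read lock, so it no longer excludes concurrent readers. A minimal sketch of that lock split, assuming a ReentrantReadWriteLock like the scheduler's:

import java.util.concurrent.locks.ReentrantReadWriteLock;

class SchedulerLockSplit {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  void update() {
    lock.writeLock().lock();
    try {
      // Phase 1: recompute demands and fair shares. This mutates queue
      // state, so exclusive access is required.
    } finally {
      lock.writeLock().unlock();
    }

    lock.readLock().lock();
    try {
      // Phase 2: scan leaf queues for starved apps. This only reads the
      // shares computed above, so the shared read lock is enough and
      // other readers proceed concurrently.
    } finally {
      lock.readLock().unlock();
    }
  }
}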
 




[30/50] [abbrv] hadoop git commit: HADOOP-14428. s3a: mkdir appears to be broken. Contributed by Mingliang Liu

Posted by xy...@apache.org.
HADOOP-14428. s3a: mkdir appears to be broken. Contributed by Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce634881
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce634881
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce634881

Branch: refs/heads/HDFS-7240
Commit: ce634881ced7ff14118a7789cb70ff6428710e00
Parents: 323f8bb
Author: Mingliang Liu <li...@apache.org>
Authored: Wed May 24 14:44:27 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:51 2017 -0700

----------------------------------------------------------------------
 .../fs/contract/AbstractContractMkdirTest.java  | 31 ++++++++++++--------
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  5 +++-
 2 files changed, 23 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce634881/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
index 71d2706..c5a546d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMkdirTest.java
@@ -113,18 +113,25 @@ public abstract class AbstractContractMkdirTest extends AbstractFSContractTestBa
     describe("verify mkdir slash handling");
     FileSystem fs = getFileSystem();
 
-    // No trailing slash
-    assertTrue(fs.mkdirs(path("testmkdir/a")));
-    assertPathExists("mkdir without trailing slash failed",
-        path("testmkdir/a"));
-
-    // With trailing slash
-    assertTrue(fs.mkdirs(path("testmkdir/b/")));
-    assertPathExists("mkdir with trailing slash failed", path("testmkdir/b/"));
-
-    // Mismatched slashes
-    assertPathExists("check path existence without trailing slash failed",
-        path("testmkdir/b"));
+    final Path[] paths = new Path[] {
+        path("testMkdirSlashHandling/a"), // w/o trailing slash
+        path("testMkdirSlashHandling/b/"), // w/ trailing slash
+        // unqualified w/o trailing slash
+        new Path(getContract().getTestPath() + "/testMkdirSlashHandling/c"),
+        // unqualified w/ trailing slash
+        new Path(getContract().getTestPath() + "/testMkdirSlashHandling/d/"),
+        // unqualified w/ multiple trailing slashes
+        new Path(getContract().getTestPath() + "/testMkdirSlashHandling/e///")
+    };
+    for (Path path : paths) {
+      assertTrue(fs.mkdirs(path));
+      assertPathExists(path + " does not exist after mkdirs", path);
+      assertIsDirectory(path);
+      if (path.toString().endsWith("/")) {
+        String s = path.toString().substring(0, path.toString().length() - 1);
+        assertIsDirectory(new Path(s));
+      }
+    }
   }
 
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce634881/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 25f2671..872dd5f 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -1587,7 +1587,9 @@ public class S3AFileSystem extends FileSystem {
 
       String key = pathToKey(f);
       createFakeDirectory(key);
-      deleteUnnecessaryFakeDirectories(f.getParent());
+      // this is complicated because getParent(a/b/c/) returns a/b/c, but
+      // we want a/b. See HADOOP-14428 for more details.
+      deleteUnnecessaryFakeDirectories(new Path(f.toString()).getParent());
       return true;
     }
   }
@@ -1971,6 +1973,7 @@ public class S3AFileSystem extends FileSystem {
     while (!path.isRoot()) {
       String key = pathToKey(path);
       key = (key.endsWith("/")) ? key : (key + "/");
+      LOG.trace("To delete unnecessary fake directory {} for {}", key, path);
       keysToRemove.add(new DeleteObjectsRequest.KeyVersion(key));
       path = path.getParent();
     }
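
The fix above leans on the Path(String) constructor normalizing trailing slashes before getParent() is asked for the parent. A small sketch of that round trip; the bucket URI is a placeholder, and the exact construction route by which an unnormalized path reached innerMkdirs is described in HADOOP-14428:

import org.apache.hadoop.fs.Path;

public class NormalizeBeforeGetParent {
  public static void main(String[] args) {
    Path f = new Path("s3a://bucket/a/b/c/");  // placeholder path, trailing slash
    // Path(String) strips the trailing slash, so the round trip below
    // yields a normalized path and getParent() prints .../a/b rather
    // than treating the slash-terminated "c/" as its own parent.
    Path parent = new Path(f.toString()).getParent();
    System.out.println(parent);
  }
}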




[19/50] [abbrv] hadoop git commit: HDFS-11383. Intern strings in BlockLocation and ExtendedBlock. Contributed by Misha Dmitriev.

Posted by xy...@apache.org.
HDFS-11383. Intern strings in BlockLocation and ExtendedBlock. Contributed by Misha Dmitriev.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a09e1f2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a09e1f2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a09e1f2

Branch: refs/heads/HDFS-7240
Commit: 0a09e1f29deb21d73b6e6a74767f82072b213c11
Parents: 36914a7
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Jun 1 15:20:18 2017 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Thu Jun 8 10:44:50 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/BlockLocation.java     | 21 +++++------
 .../org/apache/hadoop/util/StringInterner.java  | 37 ++++++++++----------
 .../hadoop/hdfs/protocol/ExtendedBlock.java     | 16 +++++----
 3 files changed, 40 insertions(+), 34 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a09e1f2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
index b8cad3a..591febf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
@@ -22,6 +22,7 @@ import java.io.Serializable;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.util.StringInterner;
 
 /**
  * Represents the network location of a block, information about the hosts
@@ -114,27 +115,27 @@ public class BlockLocation implements Serializable {
     if (names == null) {
       this.names = EMPTY_STR_ARRAY;
     } else {
-      this.names = names;
+      this.names = StringInterner.internStringsInArray(names);
     }
     if (hosts == null) {
       this.hosts = EMPTY_STR_ARRAY;
     } else {
-      this.hosts = hosts;
+      this.hosts = StringInterner.internStringsInArray(hosts);
     }
     if (cachedHosts == null) {
       this.cachedHosts = EMPTY_STR_ARRAY;
     } else {
-      this.cachedHosts = cachedHosts;
+      this.cachedHosts = StringInterner.internStringsInArray(cachedHosts);
     }
     if (topologyPaths == null) {
       this.topologyPaths = EMPTY_STR_ARRAY;
     } else {
-      this.topologyPaths = topologyPaths;
+      this.topologyPaths = StringInterner.internStringsInArray(topologyPaths);
     }
     if (storageIds == null) {
       this.storageIds = EMPTY_STR_ARRAY;
     } else {
-      this.storageIds = storageIds;
+      this.storageIds = StringInterner.internStringsInArray(storageIds);
     }
     if (storageTypes == null) {
       this.storageTypes = EMPTY_STORAGE_TYPE_ARRAY;
@@ -238,7 +239,7 @@ public class BlockLocation implements Serializable {
     if (hosts == null) {
       this.hosts = EMPTY_STR_ARRAY;
     } else {
-      this.hosts = hosts;
+      this.hosts = StringInterner.internStringsInArray(hosts);
     }
   }
 
@@ -249,7 +250,7 @@ public class BlockLocation implements Serializable {
     if (cachedHosts == null) {
       this.cachedHosts = EMPTY_STR_ARRAY;
     } else {
-      this.cachedHosts = cachedHosts;
+      this.cachedHosts = StringInterner.internStringsInArray(cachedHosts);
     }
   }
 
@@ -260,7 +261,7 @@ public class BlockLocation implements Serializable {
     if (names == null) {
       this.names = EMPTY_STR_ARRAY;
     } else {
-      this.names = names;
+      this.names = StringInterner.internStringsInArray(names);
     }
   }
 
@@ -271,7 +272,7 @@ public class BlockLocation implements Serializable {
     if (topologyPaths == null) {
       this.topologyPaths = EMPTY_STR_ARRAY;
     } else {
-      this.topologyPaths = topologyPaths;
+      this.topologyPaths = StringInterner.internStringsInArray(topologyPaths);
     }
   }
 
@@ -279,7 +280,7 @@ public class BlockLocation implements Serializable {
     if (storageIds == null) {
       this.storageIds = EMPTY_STR_ARRAY;
     } else {
-      this.storageIds = storageIds;
+      this.storageIds = StringInterner.internStringsInArray(storageIds);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a09e1f2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java
index d74f810..028e49a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringInterner.java
@@ -25,8 +25,9 @@ import com.google.common.collect.Interner;
 import com.google.common.collect.Interners;
 
 /**
- * Provides equivalent behavior to String.intern() to optimize performance, 
- * whereby does not consume memory in the permanent generation.
+ * Provides string interning utility methods. For weak interning,
+ * we use the standard String.intern() call, that performs very well
+ * (no problems with PermGen overflowing, etc.) starting from JDK 7.
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
@@ -35,20 +36,9 @@ public class StringInterner {
   /**
    * Retains a strong reference to each string instance it has interned.
    */
-  private final static Interner<String> strongInterner;
-  
-  /**
-   * Retains a weak reference to each string instance it has interned. 
-   */
-  private final static Interner<String> weakInterner;
-  
-  
-  
-  static {
-    strongInterner = Interners.newStrongInterner();
-    weakInterner = Interners.newWeakInterner();
-  }
-  
+  private final static Interner<String> STRONG_INTERNER =
+      Interners.newStrongInterner();
+
   /**
    * Interns and returns a reference to the representative instance 
    * for any of a collection of string instances that are equal to each other.
@@ -62,7 +52,7 @@ public class StringInterner {
     if (sample == null) {
       return null;
     }
-    return strongInterner.intern(sample);
+    return STRONG_INTERNER.intern(sample);
   }
   
   /**
@@ -78,7 +68,18 @@ public class StringInterner {
     if (sample == null) {
       return null;
     }
-    return weakInterner.intern(sample);
+    return sample.intern();
+  }
+
+  /**
+   * Interns all the strings in the given array in place,
+   * returning the same array.
+   */
+  public static String[] internStringsInArray(String[] strings) {
+    for (int i = 0; i < strings.length; i++) {
+      strings[i] = weakIntern(strings[i]);
+    }
+    return strings;
   }
 
 }
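
The reworked interner is easy to exercise on its own. A short sketch of both entry points, weakIntern() collapsing equal strings into one instance and the new internStringsInArray() helper deduplicating an array in place:

import org.apache.hadoop.util.StringInterner;

public class InternDemo {
  public static void main(String[] args) {
    String a = new String("datanode-01");  // distinct instances, equal contents
    String b = new String("datanode-01");
    System.out.println(a == b);            // false
    System.out.println(
        StringInterner.weakIntern(a) == StringInterner.weakIntern(b));  // true

    // The new helper interns every element in place and returns the array.
    String[] hosts = {new String("h1"), new String("h1"), new String("h2")};
    StringInterner.internStringsInArray(hosts);
    System.out.println(hosts[0] == hosts[1]);  // true
  }
}

This is the property BlockLocation now exploits for hosts, names, topology paths and storage IDs, which repeat heavily across the block locations of a large listing.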

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a09e1f2/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
index af24909..7939662 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -42,13 +43,13 @@ public class ExtendedBlock {
   }
 
   public ExtendedBlock(String poolId, Block b) {
-    this.poolId = poolId;
+    this.poolId = poolId != null ? poolId.intern() : null;
     this.block = b;
   }
 
   public ExtendedBlock(final String poolId, final long blkid, final long len,
       final long genstamp) {
-    this.poolId = poolId;
+    this.poolId = poolId != null ? poolId.intern() : null;
     block = new Block(blkid, len, genstamp);
   }
 
@@ -86,7 +87,7 @@ public class ExtendedBlock {
   }
 
   public void set(String poolId, Block blk) {
-    this.poolId = poolId;
+    this.poolId = poolId != null ? poolId.intern() : null;
     this.block = blk;
   }
 
@@ -107,13 +108,16 @@ public class ExtendedBlock {
       return false;
     }
     ExtendedBlock b = (ExtendedBlock)o;
-    return b.block.equals(block) && b.poolId.equals(poolId);
+    return b.block.equals(block) &&
+        (b.poolId != null ? b.poolId.equals(poolId) : poolId == null);
   }
 
   @Override // Object
   public int hashCode() {
-    int result = 31 + poolId.hashCode();
-    return (31 * result + block.hashCode());
+    return new HashCodeBuilder(31, 17).
+        append(poolId).
+        append(block).
+        toHashCode();
   }
 
   @Override // Object
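
The ExtendedBlock hunk replaces a hashCode() that would throw NullPointerException for a null poolId with HashCodeBuilder, which tolerates null fields, and makes equals() null-safe to match. A compact sketch of the same pattern; BlockKey is a made-up class for illustration only:

import java.util.Objects;

import org.apache.commons.lang.builder.HashCodeBuilder;

class BlockKey {
  private final String poolId;  // may be null, so hashing must be null-safe
  private final long blockId;

  BlockKey(String poolId, long blockId) {
    this.poolId = poolId != null ? poolId.intern() : null;
    this.blockId = blockId;
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof BlockKey)) {
      return false;
    }
    BlockKey other = (BlockKey) o;
    // Objects.equals() handles null on either side.
    return blockId == other.blockId && Objects.equals(poolId, other.poolId);
  }

  @Override
  public int hashCode() {
    // Unlike poolId.hashCode(), HashCodeBuilder accepts null fields.
    return new HashCodeBuilder(31, 17).append(poolId).append(blockId).toHashCode();
  }
}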

