Posted to common-commits@hadoop.apache.org by st...@apache.org on 2017/02/27 15:14:57 UTC

[01/31] hadoop git commit: HDFS-11438. Fix typo in error message of StoragePolicyAdmin tool. Contributed by Alison Yu.

Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-13345 95e014323 -> 0abbb7029


HDFS-11438. Fix typo in error message of StoragePolicyAdmin tool. Contributed by Alison Yu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d150f061
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d150f061
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d150f061

Branch: refs/heads/HADOOP-13345
Commit: d150f061f4ebde923fda28ea898a9606b8789758
Parents: 0013090
Author: Andrew Wang <wa...@apache.org>
Authored: Wed Feb 22 15:16:09 2017 -0800
Committer: Andrew Wang <wa...@apache.org>
Committed: Wed Feb 22 15:16:09 2017 -0800

----------------------------------------------------------------------
 .../main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d150f061/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index 4e4f018..f0643b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -259,7 +259,7 @@ public class StoragePolicyAdmin extends Configured implements Tool {
       final String path = StringUtils.popOptionWithArgument("-path", args);
       if (path == null) {
         System.err.println("Please specify the path from which "
-            + "the storage policy will be unsetd.\nUsage: " + getLongUsage());
+            + "the storage policy will be unset.\nUsage: " + getLongUsage());
         return 1;
       }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[27/31] hadoop git commit: YARN-6172. FSLeafQueue demand update needs to be atomic. (Miklos Szegedi via kasha)

Posted by st...@apache.org.
YARN-6172. FSLeafQueue demand update needs to be atomic. (Miklos Szegedi via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbfe86de
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbfe86de
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbfe86de

Branch: refs/heads/HADOOP-13345
Commit: fbfe86deea5f2aa857cd13fee913b7becee57f93
Parents: 815d535
Author: Karthik Kambatla <ka...@apache.org>
Authored: Sun Feb 26 20:36:33 2017 -0800
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Sun Feb 26 20:36:33 2017 -0800

----------------------------------------------------------------------
 .../scheduler/fair/FSLeafQueue.java             | 21 +++++------------
 .../scheduler/fair/TestFSAppStarvation.java     | 24 ++++++++++++++++----
 2 files changed, 26 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbfe86de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index d0e0961..aad2916 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -331,20 +331,22 @@ public class FSLeafQueue extends FSQueue {
   public void updateDemand() {
     // Compute demand by iterating through apps in the queue
     // Limit demand to maxResources
-    demand = Resources.createResource(0);
+    Resource tmpDemand = Resources.createResource(0);
     readLock.lock();
     try {
       for (FSAppAttempt sched : runnableApps) {
-        updateDemandForApp(sched);
+        sched.updateDemand();
+        Resources.addTo(tmpDemand, sched.getDemand());
       }
       for (FSAppAttempt sched : nonRunnableApps) {
-        updateDemandForApp(sched);
+        sched.updateDemand();
+        Resources.addTo(tmpDemand, sched.getDemand());
       }
     } finally {
       readLock.unlock();
     }
     // Cap demand to maxShare to limit allocation to maxShare
-    demand = Resources.componentwiseMin(demand, maxShare);
+    demand = Resources.componentwiseMin(tmpDemand, maxShare);
     if (LOG.isDebugEnabled()) {
       LOG.debug("The updated demand for " + getName() + " is " + demand
           + "; the max is " + maxShare);
@@ -352,17 +354,6 @@ public class FSLeafQueue extends FSQueue {
           + getFairShare());
     }
   }
-  
-  private void updateDemandForApp(FSAppAttempt sched) {
-    sched.updateDemand();
-    Resource toAdd = sched.getDemand();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Counting resource from " + sched.getName() + " " + toAdd
-          + "; Total resource demand for " + getName() + " now "
-          + demand);
-    }
-    demand = Resources.add(demand, toAdd);
-  }
 
   @Override
   public Resource assignContainer(FSSchedulerNode node) {
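
The FSLeafQueue change above follows a compute-then-publish pattern: demand is summed into a local Resource and only assigned to the shared field once, capped at maxShare, so concurrent readers never observe a partially accumulated value. Below is a minimal, self-contained sketch of that pattern using plain JDK types and hypothetical names; it is not Hadoop code, just an illustration of the idea.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class DemandSketch {
  private final List<Integer> appDemands = new ArrayList<>();
  private final ReadWriteLock rwLock = new ReentrantReadWriteLock();
  private volatile int demand;       // published value: readers see the old or new total, never a partial sum
  private final int maxShare = 100;

  void updateDemand() {
    int tmpDemand = 0;               // accumulate locally instead of mutating 'demand' in place
    rwLock.readLock().lock();
    try {
      for (int d : appDemands) {
        tmpDemand += d;
      }
    } finally {
      rwLock.readLock().unlock();
    }
    demand = Math.min(tmpDemand, maxShare);   // single write publishes the capped result
  }

  public static void main(String[] args) {
    DemandSketch queue = new DemandSketch();
    queue.appDemands.add(40);
    queue.appDemands.add(80);
    queue.updateDemand();
    System.out.println("demand = " + queue.demand);  // prints 100: 120 capped at maxShare
  }
}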

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbfe86de/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppStarvation.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppStarvation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppStarvation.java
index 2eacc9e..0712b4c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppStarvation.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppStarvation.java
@@ -96,6 +96,14 @@ public class TestFSAppStarvation extends FairSchedulerTestBase {
   public void testPreemptionEnabled() throws Exception {
     setupClusterAndSubmitJobs();
 
+    // Wait for apps to be processed by MockPreemptionThread
+    for (int i = 0; i < 6000; ++i) {
+      if (preemptionThread.uniqueAppsAdded() >= 3) {
+        break;
+      }
+      Thread.sleep(10);
+    }
+
     assertNotNull("FSContext does not have an FSStarvedApps instance",
         scheduler.getContext().getStarvedApps());
     assertEquals("Expecting 3 starved applications, one each for the "
@@ -113,8 +121,19 @@ public class TestFSAppStarvation extends FairSchedulerTestBase {
     clock.tickMsec(
         FairSchedulerWithMockPreemption.DELAY_FOR_NEXT_STARVATION_CHECK_MS);
     scheduler.update();
+
+    // Wait for apps to be processed by MockPreemptionThread
+    for (int i = 0; i < 6000; ++i) {
+      if(preemptionThread.totalAppsAdded() >
+          preemptionThread.uniqueAppsAdded()) {
+        break;
+      }
+      Thread.sleep(10);
+    }
+
     assertTrue("Each app is marked as starved exactly once",
-        preemptionThread.totalAppsAdded() > preemptionThread.uniqueAppsAdded());
+        preemptionThread.totalAppsAdded() >
+            preemptionThread.uniqueAppsAdded());
   }
 
   /*
@@ -154,9 +173,6 @@ public class TestFSAppStarvation extends FairSchedulerTestBase {
 
     // Scheduler update to populate starved apps
     scheduler.update();
-
-    // Wait for apps to be processed by MockPreemptionThread
-    Thread.yield();
   }
 
   /**


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[09/31] hadoop git commit: HADOOP-14100. Upgrade Jsch jar to latest version to fix vulnerability in old versions. Contributed by Vinayakumar B

Posted by st...@apache.org.
HADOOP-14100. Upgrade Jsch jar to latest version to fix vulnerability in old versions. Contributed by Vinayakumar B


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/159d6c56
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/159d6c56
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/159d6c56

Branch: refs/heads/HADOOP-13345
Commit: 159d6c56e7f3aa3ebe45750cf88735287f047b42
Parents: 82607fc
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Feb 23 14:25:08 2017 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Thu Feb 23 14:25:08 2017 -0800

----------------------------------------------------------------------
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/159d6c56/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 47e21d8..c8aa857 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -963,7 +963,7 @@
       <dependency>
         <groupId>com.jcraft</groupId>
         <artifactId>jsch</artifactId>
-        <version>0.1.51</version>
+        <version>0.1.54</version>
       </dependency>
       <dependency>
         <groupId>org.apache.htrace</groupId>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[31/31] hadoop git commit: Merge branch 'trunk' into HADOOP-13345 branch

Posted by st...@apache.org.
Merge branch 'trunk' into HADOOP-13345 branch


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0abbb702
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0abbb702
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0abbb702

Branch: refs/heads/HADOOP-13345
Commit: 0abbb7029a2f7dcd91fda067921335a535b4c9dd
Parents: 95e0143 5f5b031
Author: Steve Loughran <st...@apache.org>
Authored: Mon Feb 27 15:13:34 2017 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Mon Feb 27 15:13:34 2017 +0000

----------------------------------------------------------------------
 .../apache/hadoop/fs/ChecksumFileSystem.java    |   2 +
 .../fs/CommonConfigurationKeysPublic.java       |  15 +
 .../java/org/apache/hadoop/fs/FileSystem.java   |  11 +
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java |   1 +
 .../apache/hadoop/io/compress/GzipCodec.java    |  78 +--
 .../apache/hadoop/io/erasurecode/CodecUtil.java |  28 +-
 .../io/erasurecode/ErasureCodeConstants.java    |   8 +-
 .../erasurecode/coder/HHXORErasureDecoder.java  |   2 +-
 .../erasurecode/coder/HHXORErasureEncoder.java  |   2 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  |   2 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |   2 +-
 .../apache/hadoop/io/retry/RetryPolicies.java   |   3 +-
 .../security/ShellBasedUnixGroupsMapping.java   | 114 +++-
 .../main/java/org/apache/hadoop/util/Shell.java |  19 +-
 .../src/main/resources/core-default.xml         |  17 +-
 .../org/apache/hadoop/cli/CLITestHelper.java    |  15 +-
 .../erasurecode/TestCodecRawCoderMapping.java   |  10 +-
 .../coder/TestHHXORErasureCoder.java            |   2 +-
 .../erasurecode/coder/TestRSErasureCoder.java   |   2 +-
 .../hadoop/security/TestGroupsCaching.java      |  19 +-
 .../TestShellBasedUnixGroupsMapping.java        | 135 ++++-
 .../hdfs/client/HdfsClientConfigKeys.java       |   2 +
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      |   6 +-
 .../hdfs/web/resources/AclPermissionParam.java  |  17 +-
 .../hadoop-hdfs/src/main/bin/hdfs               |   2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  16 +
 .../qjournal/client/QuorumJournalManager.java   |  38 +-
 .../hadoop/hdfs/qjournal/server/JNStorage.java  |   9 +-
 .../hadoop/hdfs/qjournal/server/Journal.java    |  19 +
 .../hdfs/qjournal/server/JournalNode.java       |  23 +-
 .../hdfs/qjournal/server/JournalNodeSyncer.java | 413 +++++++++++++++
 .../BlockPlacementPolicyDefault.java            |   2 +-
 .../blockmanagement/DatanodeStorageInfo.java    |   5 +
 .../hadoop/hdfs/server/common/Storage.java      |   9 +
 .../apache/hadoop/hdfs/server/common/Util.java  |  46 +-
 .../datanode/web/webhdfs/WebHdfsHandler.java    |  10 +-
 .../hadoop/hdfs/server/namenode/NNStorage.java  |   5 +-
 .../server/namenode/NameNodeHttpServer.java     |   4 +
 .../hdfs/server/namenode/TransferFsImage.java   |   3 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   | 320 ++++++++++++
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |   4 +-
 .../hadoop/hdfs/tools/erasurecode/ECCli.java    |  62 ---
 .../hdfs/tools/erasurecode/ECCommand.java       | 248 ---------
 .../OfflineImageReconstructor.java              |  85 ++-
 .../offlineImageViewer/PBImageXmlWriter.java    |  30 +-
 .../src/main/resources/hdfs-default.xml         |  49 ++
 .../src/site/markdown/HDFSErasureCoding.md      |  22 +-
 .../src/site/markdown/HdfsEditsViewer.md        |  73 ++-
 .../hadoop/cli/CLITestCmdErasureCoding.java     |   4 +-
 .../cli/util/ErasureCodingCliCmdExecutor.java   |   6 +-
 .../TestDFSRSDefault10x4StripedInputStream.java |   2 +-
 ...TestDFSRSDefault10x4StripedOutputStream.java |   2 +-
 ...fault10x4StripedOutputStreamWithFailure.java |   4 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |   2 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   2 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |   2 +-
 .../hadoop/hdfs/TestReconstructStripedFile.java |   2 +-
 .../TestUnsetAndChangeDirectoryEcPolicy.java    |   2 +-
 .../hdfs/qjournal/MiniJournalCluster.java       |   8 +
 .../hadoop/hdfs/qjournal/MiniQJMHACluster.java  |   1 +
 .../hdfs/qjournal/TestJournalNodeSync.java      | 264 ++++++++++
 .../blockmanagement/TestReplicationPolicy.java  |  35 +-
 .../TestReplicationPolicyWithNodeGroup.java     |  23 +-
 .../namenode/TestNameNodeMetricsLogger.java     |  11 +-
 .../TestOfflineImageViewer.java                 |  15 +-
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java |  29 +-
 .../hadoop/hdfs/web/resources/TestParam.java    |  34 ++
 .../test/resources/testErasureCodingConf.xml    | 137 +++--
 .../src/site/markdown/MapReduceTutorial.md      |   2 +-
 .../org/apache/hadoop/mapred/YARNRunner.java    | 141 +++--
 hadoop-project/pom.xml                          |   2 +-
 .../hadoop/fs/s3a/S3ABlockOutputStream.java     |  68 ++-
 .../org/apache/hadoop/fs/s3a/S3ADataBlocks.java | 514 +++++++++++--------
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  80 ++-
 .../hadoop/fs/s3a/S3AInstrumentation.java       |  40 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  26 +
 .../hadoop/fs/s3native/S3xLoginHelper.java      |  15 +-
 .../hadoop/fs/s3a/ITestS3ABlockOutputArray.java |  76 ++-
 .../fs/s3a/ITestS3ABlockOutputByteBuffer.java   |   5 +-
 .../hadoop/fs/s3a/ITestS3ABlockOutputDisk.java  |  12 +
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  13 +
 .../apache/hadoop/fs/s3a/TestDataBlocks.java    |  45 +-
 .../fs/s3a/scale/AbstractSTestS3AHugeFiles.java |  28 +-
 .../hadoop/fs/s3native/TestS3xLoginHelper.java  |  28 +
 .../src/site/markdown/index.md                  | 237 +++++----
 .../java/org/apache/hadoop/fs/azure/Wasbs.java  |  47 ++
 .../fs/azure/TestWasbUriAndConfiguration.java   |  57 ++
 .../fs/swift/snative/SwiftNativeFileSystem.java |   2 +
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |  20 +-
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  |  89 ++--
 .../yarn/sls/appmaster/MRAMSimulator.java       | 218 ++++----
 .../sls/resourcemanager/MockAMLauncher.java     | 115 +++++
 .../sls/scheduler/SLSCapacityScheduler.java     |  24 +
 .../hadoop/yarn/conf/YarnConfiguration.java     |   3 +
 .../resource/DefaultResourceCalculator.java     |   3 +-
 .../resource/DominantResourceCalculator.java    |  13 +-
 .../yarn/util/resource/ResourceCalculator.java  |  32 +-
 .../src/main/resources/yarn-default.xml         |   5 +
 .../server/resourcemanager/ClientRMService.java |   6 +-
 .../server/resourcemanager/RMAppManager.java    |   5 +-
 .../reservation/AbstractReservationSystem.java  |   8 +-
 .../planning/AlignedPlannerWithGreedy.java      |  16 +-
 .../planning/GreedyReservationAgent.java        |  19 +-
 .../reservation/planning/PlanningAlgorithm.java |   4 +
 .../reservation/planning/ReservationAgent.java  |   8 +
 .../planning/TryManyReservationAgents.java      |   5 +-
 .../scheduler/fair/FSAppAttempt.java            |  61 ++-
 .../scheduler/fair/FSContext.java               |  21 +-
 .../scheduler/fair/FSLeafQueue.java             |  24 +-
 .../scheduler/fair/FSPreemptionThread.java      |  12 +-
 .../resourcemanager/scheduler/fair/FSQueue.java |   2 +-
 .../scheduler/fair/FairScheduler.java           |  11 +-
 .../scheduler/fair/SchedulingPolicy.java        |  19 +-
 .../DominantResourceFairnessPolicy.java         |  24 +-
 .../fair/policies/FairSharePolicy.java          |   3 +-
 .../planning/TestAlignedPlanner.java            |   3 +
 .../planning/TestGreedyReservationAgent.java    |   4 +-
 .../scheduler/fair/TestFSAppStarvation.java     |  24 +-
 .../scheduler/fair/TestFairScheduler.java       | 136 +++--
 .../fair/TestFairSchedulerPreemption.java       |  44 +-
 .../TestDominantResourceFairnessPolicy.java     |  25 +-
 .../yarn/server/timeline/EntityCacheItem.java   |   9 +-
 122 files changed, 3602 insertions(+), 1445 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0abbb702/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0abbb702/hadoop-project/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0abbb702/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0abbb702/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0abbb702/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0abbb702/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0abbb702/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
----------------------------------------------------------------------
diff --cc hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index 72fb3be,9528967..ec0952d
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@@ -20,10 -20,9 +20,11 @@@ package org.apache.hadoop.fs.s3a
  
  import org.apache.commons.lang.StringUtils;
  import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FSDataOutputStream;
  import org.apache.hadoop.fs.FileContext;
 +import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.Path;
 +import org.apache.hadoop.fs.permission.FsPermission;
  import org.junit.Assert;
  import org.junit.Assume;
  import org.junit.internal.AssumptionViolatedException;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[23/31] hadoop git commit: HDFS-11462. Fix occasional BindException in TestNameNodeMetricsLogger.

Posted by st...@apache.org.
HDFS-11462. Fix occasional BindException in TestNameNodeMetricsLogger.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/120bef7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/120bef7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/120bef7d

Branch: refs/heads/HADOOP-13345
Commit: 120bef7de81de96854156da192c855035b7d3e7a
Parents: 4a58870
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Feb 24 19:53:39 2017 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Feb 24 19:53:39 2017 -0800

----------------------------------------------------------------------
 .../hdfs/server/namenode/TestNameNodeMetricsLogger.java  | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/120bef7d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
index 6968bc4..9a0e67c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java
@@ -25,7 +25,6 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Appender;
 import org.apache.log4j.AppenderSkeleton;
@@ -38,7 +37,6 @@ import org.junit.rules.Timeout;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
-import java.util.Random;
 import java.util.concurrent.TimeoutException;
 import java.util.regex.Pattern;
 
@@ -51,7 +49,6 @@ import static org.mockito.Mockito.mock;
  */
 public class TestNameNodeMetricsLogger {
   static final Log LOG = LogFactory.getLog(TestNameNodeMetricsLogger.class);
-  static final Random random = new Random(System.currentTimeMillis());
 
   @Rule
   public Timeout timeout = new Timeout(300000);
@@ -112,17 +109,13 @@ public class TestNameNodeMetricsLogger {
   private NameNode makeNameNode(boolean enableMetricsLogging)
       throws IOException {
     Configuration conf = new HdfsConfiguration();
-    conf.set(FS_DEFAULT_NAME_KEY, "hdfs://localhost:" + getRandomPort());
-    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:" + getRandomPort());
+    conf.set(FS_DEFAULT_NAME_KEY, "hdfs://localhost:0");
+    conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
     conf.setInt(DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_KEY,
         enableMetricsLogging ? 1 : 0);  // If enabled, log early and log often
     return new TestNameNode(conf);
   }
 
-  private int getRandomPort() throws IOException {
-    return ServerSocketUtil.getPort(0, 10);
-  }
-
   private void addAppender(Log log, Appender appender) {
     org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger();
     @SuppressWarnings("unchecked")
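
The fix drops the randomly chosen ports and binds to port 0, letting the kernel hand out a free ephemeral port, which is what removes the occasional BindException. A minimal sketch of that idea with a plain ServerSocket (not the NameNode code):

import java.io.IOException;
import java.net.ServerSocket;

public class EphemeralPortSketch {
  public static void main(String[] args) throws IOException {
    // Port 0 asks the OS for any free port, so concurrent tests cannot collide
    // the way randomly chosen fixed ports occasionally did.
    try (ServerSocket socket = new ServerSocket(0)) {
      System.out.println("bound to free port " + socket.getLocalPort());
    }
  }
}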


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[18/31] hadoop git commit: YARN-6228: EntityGroupFSTimelineStore should allow configurable cache stores. Contributed by Li Lu

Posted by st...@apache.org.
YARN-6228: EntityGroupFSTimelineStore should allow configurable cache
stores. Contributed by Li Lu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53d372a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53d372a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53d372a2

Branch: refs/heads/HADOOP-13345
Commit: 53d372a2550c970f3dd3c49738af3c1789ae589b
Parents: c1a52b0
Author: Xuan <xg...@apache.org>
Authored: Fri Feb 24 10:58:48 2017 -0800
Committer: Xuan <xg...@apache.org>
Committed: Fri Feb 24 10:59:35 2017 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 3 +++
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml  | 5 +++++
 .../apache/hadoop/yarn/server/timeline/EntityCacheItem.java | 9 +++++++--
 3 files changed, 15 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53d372a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 094a424..cdccec6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1890,6 +1890,9 @@ public class YarnConfiguration extends Configuration {
   public static final String TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX =
       TIMELINE_SERVICE_PREFIX + "entity-group-fs-store.";
 
+  public static final String TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CACHE_STORE =
+      TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "cache-store-class";
+
   public static final String TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_ACTIVE_DIR =
       TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_PREFIX + "active-dir";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53d372a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 53beb5e..368946e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2096,6 +2096,11 @@
   </property>
 
   <!-- Timeline Service v1.5 Configuration -->
+  <property>
+    <name>yarn.timeline-service.entity-group-fs-store.cache-store-class</name>
+    <value>org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore</value>
+    <description>Caching storage timeline server v1.5 is using. </description>
+  </property>
 
   <property>
     <name>yarn.timeline-service.entity-group-fs-store.active-dir</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53d372a2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
index 7ed7c4a..8df60ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/main/java/org/apache/hadoop/yarn/server/timeline/EntityCacheItem.java
@@ -17,8 +17,10 @@
 package org.apache.hadoop.yarn.server.timeline;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -95,8 +97,11 @@ public class EntityCacheItem {
       }
       if (!appLogs.getDetailLogs().isEmpty()) {
         if (store == null) {
-          store = new LevelDBCacheTimelineStore(groupId.toString(),
-              "LeveldbCache." + groupId);
+          store = ReflectionUtils.newInstance(config.getClass(
+              YarnConfiguration
+                  .TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_CACHE_STORE,
+              MemoryTimelineStore.class, TimelineStore.class),
+              config);
           store.init(config);
           store.start();
         } else {
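
The patch replaces the hard-coded LevelDBCacheTimelineStore with a store class read from configuration (defaulting to MemoryTimelineStore) and instantiated reflectively. A minimal, plain-JDK sketch of that load-by-configured-class-name pattern follows; the interface and class names are hypothetical stand-ins, and only the property name comes from yarn-default.xml.

import java.util.Properties;

public class CacheStoreLoaderSketch {
  interface TimelineCacheStore { void start(); }

  static class MemoryStore implements TimelineCacheStore {
    public void start() { System.out.println("in-memory cache store started"); }
  }

  static TimelineCacheStore load(Properties conf) throws Exception {
    // read the implementation class name, falling back to the in-memory default
    String clazz = conf.getProperty(
        "yarn.timeline-service.entity-group-fs-store.cache-store-class",
        MemoryStore.class.getName());
    // the configured class must implement the interface and have a no-arg constructor
    return (TimelineCacheStore) Class.forName(clazz)
        .getDeclaredConstructor().newInstance();
  }

  public static void main(String[] args) throws Exception {
    TimelineCacheStore store = load(new Properties()); // no override -> MemoryStore
    store.start();
  }
}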


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[30/31] hadoop git commit: YARN-5703. ReservationAgents are not correctly configured. Contributed by Manikandan R.

Posted by st...@apache.org.
YARN-5703. ReservationAgents are not correctly configured. Contributed by Manikandan R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f5b031d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f5b031d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f5b031d

Branch: refs/heads/HADOOP-13345
Commit: 5f5b031d1f20cb7f621db41979e963eaa42cf52f
Parents: 4d33683
Author: Naganarasimha <na...@apache.org>
Authored: Mon Feb 27 20:38:29 2017 +0530
Committer: Naganarasimha <na...@apache.org>
Committed: Mon Feb 27 20:38:29 2017 +0530

----------------------------------------------------------------------
 .../reservation/AbstractReservationSystem.java   |  8 ++++++--
 .../planning/AlignedPlannerWithGreedy.java       | 16 +++++++++-------
 .../planning/GreedyReservationAgent.java         | 19 ++++++-------------
 .../reservation/planning/PlanningAlgorithm.java  |  4 ++++
 .../reservation/planning/ReservationAgent.java   |  8 ++++++++
 .../planning/TryManyReservationAgents.java       |  5 ++++-
 .../reservation/planning/TestAlignedPlanner.java |  3 +++
 .../planning/TestGreedyReservationAgent.java     |  4 ++--
 8 files changed, 42 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f5b031d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractReservationSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractReservationSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractReservationSystem.java
index 8769ca1..5ef4912 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractReservationSystem.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractReservationSystem.java
@@ -451,12 +451,16 @@ public abstract class AbstractReservationSystem extends AbstractService
     try {
       Class<?> agentClazz = conf.getClassByName(agentClassName);
       if (ReservationAgent.class.isAssignableFrom(agentClazz)) {
-        return (ReservationAgent) ReflectionUtils.newInstance(agentClazz, conf);
+        ReservationAgent resevertionAgent =
+            (ReservationAgent) agentClazz.newInstance();
+        resevertionAgent.init(conf);
+        return resevertionAgent;
       } else {
         throw new YarnRuntimeException("Class: " + agentClassName
             + " not instance of " + ReservationAgent.class.getCanonicalName());
       }
-    } catch (ClassNotFoundException e) {
+    } catch (ClassNotFoundException | InstantiationException
+        | IllegalAccessException e) {
       throw new YarnRuntimeException("Could not instantiate Agent: "
           + agentClassName + " for queue: " + queueName, e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f5b031d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/AlignedPlannerWithGreedy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/AlignedPlannerWithGreedy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/AlignedPlannerWithGreedy.java
index b23cf1e..00c2333 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/AlignedPlannerWithGreedy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/AlignedPlannerWithGreedy.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
 import java.util.LinkedList;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
@@ -35,22 +36,25 @@ import org.slf4j.LoggerFactory;
 public class AlignedPlannerWithGreedy implements ReservationAgent {
 
   // Default smoothness factor
-  private static final int DEFAULT_SMOOTHNESS_FACTOR = 10;
+  public static final int DEFAULT_SMOOTHNESS_FACTOR = 10;
+  public static final String SMOOTHNESS_FACTOR =
+      "yarn.resourcemanager.reservation-system.smoothness-factor";
 
   // Log
   private static final Logger LOG = LoggerFactory
       .getLogger(AlignedPlannerWithGreedy.class);
 
   // Smoothness factor
-  private final ReservationAgent planner;
+  private ReservationAgent planner;
 
   // Constructor
   public AlignedPlannerWithGreedy() {
-    this(DEFAULT_SMOOTHNESS_FACTOR);
   }
 
-  // Constructor
-  public AlignedPlannerWithGreedy(int smoothnessFactor) {
+  @Override
+  public void init(Configuration conf) {
+    int smoothnessFactor =
+        conf.getInt(SMOOTHNESS_FACTOR, DEFAULT_SMOOTHNESS_FACTOR);
 
     // List of algorithms
     List<ReservationAgent> listAlg = new LinkedList<ReservationAgent>();
@@ -71,7 +75,6 @@ public class AlignedPlannerWithGreedy implements ReservationAgent {
     // 1. Attempt to execute algAligned
     // 2. If failed, fall back to algGreedy
     planner = new TryManyReservationAgents(listAlg);
-
   }
 
   @Override
@@ -119,5 +122,4 @@ public class AlignedPlannerWithGreedy implements ReservationAgent {
     return planner.deleteReservation(reservationId, user, plan);
 
   }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f5b031d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/GreedyReservationAgent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/GreedyReservationAgent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/GreedyReservationAgent.java
index 915a834..1559b97 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/GreedyReservationAgent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/GreedyReservationAgent.java
@@ -46,25 +46,19 @@ public class GreedyReservationAgent implements ReservationAgent {
       .getLogger(GreedyReservationAgent.class);
 
   // Greedy planner
-  private final ReservationAgent planner;
-
+  private ReservationAgent planner;
   public final static String GREEDY_FAVOR_EARLY_ALLOCATION =
       "yarn.resourcemanager.reservation-system.favor-early-allocation";
-
   public final static boolean DEFAULT_GREEDY_FAVOR_EARLY_ALLOCATION = true;
-
-  private final boolean allocateLeft;
+  private boolean allocateLeft;
 
   public GreedyReservationAgent() {
-    this(new Configuration());
   }
 
-  public GreedyReservationAgent(Configuration yarnConfiguration) {
-
-    allocateLeft =
-        yarnConfiguration.getBoolean(GREEDY_FAVOR_EARLY_ALLOCATION,
-            DEFAULT_GREEDY_FAVOR_EARLY_ALLOCATION);
-
+  @Override
+  public void init(Configuration conf) {
+    allocateLeft = conf.getBoolean(GREEDY_FAVOR_EARLY_ALLOCATION,
+        DEFAULT_GREEDY_FAVOR_EARLY_ALLOCATION);
     if (allocateLeft) {
       LOG.info("Initializing the GreedyReservationAgent to favor \"early\""
           + " (left) allocations (controlled by parameter: "
@@ -78,7 +72,6 @@ public class GreedyReservationAgent implements ReservationAgent {
     planner =
         new IterativePlanner(new StageEarliestStartByJobArrival(),
             new StageAllocatorGreedyRLE(allocateLeft), allocateLeft);
-
   }
 
   public boolean isAllocateLeft(){

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f5b031d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/PlanningAlgorithm.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/PlanningAlgorithm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/PlanningAlgorithm.java
index e1b508d..199bfa5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/PlanningAlgorithm.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/PlanningAlgorithm.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
 import java.util.Map;
 import java.util.Map.Entry;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.api.records.Resource;
@@ -206,4 +207,7 @@ public abstract class PlanningAlgorithm implements ReservationAgent {
 
   }
 
+  @Override
+  public void init(Configuration conf) {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f5b031d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/ReservationAgent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/ReservationAgent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/ReservationAgent.java
index bdea2f4..52e7055 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/ReservationAgent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/ReservationAgent.java
@@ -17,6 +17,7 @@
  *******************************************************************************/
 package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
@@ -70,4 +71,11 @@ public interface ReservationAgent {
   public boolean deleteReservation(ReservationId reservationId, String user,
       Plan plan) throws PlanningException;
 
+  /**
+   * Init configuration.
+   *
+   * @param conf Configuration
+   */
+  void init(Configuration conf);
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f5b031d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TryManyReservationAgents.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TryManyReservationAgents.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TryManyReservationAgents.java
index 1d37ce5..ab2e1e0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TryManyReservationAgents.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TryManyReservationAgents.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.reservation.planning;
 import java.util.LinkedList;
 import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.api.records.ReservationDefinition;
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.server.resourcemanager.reservation.Plan;
@@ -110,5 +111,7 @@ public class TryManyReservationAgents implements ReservationAgent {
     return plan.deleteReservation(reservationId);
 
   }
-
+  @Override
+  public void init(Configuration conf) {
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f5b031d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
index 4b01eb9..2645366 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestAlignedPlanner.java
@@ -726,8 +726,11 @@ public class TestAlignedPlanner {
     QueueMetrics queueMetrics = mock(QueueMetrics.class);
     RMContext context = ReservationSystemTestUtil.createMockRMContext();
 
+    conf.setInt(AlignedPlannerWithGreedy.SMOOTHNESS_FACTOR,
+        AlignedPlannerWithGreedy.DEFAULT_SMOOTHNESS_FACTOR);
     // Set planning agent
     agent = new AlignedPlannerWithGreedy();
+    agent.init(conf);
 
     // Create Plan
     plan =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f5b031d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
index ec11ffe..6d1cfa8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestGreedyReservationAgent.java
@@ -110,8 +110,8 @@ public class TestGreedyReservationAgent {
     // setting conf to
     conf.setBoolean(GreedyReservationAgent.GREEDY_FAVOR_EARLY_ALLOCATION,
         allocateLeft);
-
-    agent = new GreedyReservationAgent(conf);
+    agent = new GreedyReservationAgent();
+    agent.init(conf);
 
     QueueMetrics queueMetrics = mock(QueueMetrics.class);
     RMContext context = ReservationSystemTestUtil.createMockRMContext();
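
The agents now use two-phase construction: a no-argument constructor plus an init(Configuration) call, which is what lets AbstractReservationSystem instantiate whatever agent class is configured and then hand it the configuration. A minimal, plain-JDK sketch of that pattern, where Properties stands in for Hadoop's Configuration and the class names are hypothetical:

import java.util.Properties;

public class AgentInitSketch {
  interface ReservationAgent { void init(Properties conf); }

  static class GreedyAgent implements ReservationAgent {
    private boolean allocateLeft;
    public void init(Properties conf) {
      allocateLeft = Boolean.parseBoolean(
          conf.getProperty("favor-early-allocation", "true"));
    }
    @Override public String toString() {
      return "GreedyAgent(allocateLeft=" + allocateLeft + ")";
    }
  }

  static ReservationAgent createAgent(String className, Properties conf) throws Exception {
    // phase 1: reflective construction through the no-arg constructor
    ReservationAgent agent = (ReservationAgent)
        Class.forName(className).getDeclaredConstructor().newInstance();
    // phase 2: configuration, mirroring ReservationAgent#init(Configuration)
    agent.init(conf);
    return agent;
  }

  public static void main(String[] args) throws Exception {
    Properties conf = new Properties();
    conf.setProperty("favor-early-allocation", "false");
    System.out.println(createAgent(GreedyAgent.class.getName(), conf));
  }
}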


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[24/31] hadoop git commit: HADOOP-14028. S3A BlockOutputStreams doesn't delete temporary files in multipart uploads or handle part upload failures. Contributed by Steve Loughran.

Posted by st...@apache.org.
HADOOP-14028. S3A BlockOutputStreams doesn't delete temporary files in multipart uploads or handle part upload failures.
Contributed by Steve Loughran.

(cherry picked from commit 29fe5af017b945d8750c074ca39031b5b777eddd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dab00da1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dab00da1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dab00da1

Branch: refs/heads/HADOOP-13345
Commit: dab00da19f25619ccc71c7f803a235b21766bf1e
Parents: 120bef7
Author: Steve Loughran <st...@apache.org>
Authored: Sat Feb 25 15:35:19 2017 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Sat Feb 25 15:35:19 2017 +0000

----------------------------------------------------------------------
 .../hadoop/fs/s3a/S3ABlockOutputStream.java     |  68 ++-
 .../org/apache/hadoop/fs/s3a/S3ADataBlocks.java | 514 +++++++++++--------
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  78 ++-
 .../hadoop/fs/s3a/S3AInstrumentation.java       |  40 +-
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |  26 +
 .../hadoop/fs/s3a/ITestS3ABlockOutputArray.java |  76 ++-
 .../fs/s3a/ITestS3ABlockOutputByteBuffer.java   |   5 +-
 .../hadoop/fs/s3a/ITestS3ABlockOutputDisk.java  |  12 +
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  13 +
 .../apache/hadoop/fs/s3a/TestDataBlocks.java    |  45 +-
 .../fs/s3a/scale/AbstractSTestS3AHugeFiles.java |  28 +-
 11 files changed, 639 insertions(+), 266 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dab00da1/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
index 89b9b29..1b0929b 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ABlockOutputStream.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.fs.s3a;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.List;
@@ -48,7 +47,6 @@ import org.slf4j.LoggerFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.util.Progressable;
@@ -178,10 +176,10 @@ class S3ABlockOutputStream extends OutputStream {
     if (activeBlock == null) {
       blockCount++;
       if (blockCount>= Constants.MAX_MULTIPART_COUNT) {
-        LOG.error("Number of partitions in stream exceeds limit for S3: " +
+        LOG.error("Number of partitions in stream exceeds limit for S3: "
              + Constants.MAX_MULTIPART_COUNT +  " write may fail.");
       }
-      activeBlock = blockFactory.create(this.blockSize);
+      activeBlock = blockFactory.create(blockCount, this.blockSize, statistics);
     }
     return activeBlock;
   }
@@ -206,7 +204,9 @@ class S3ABlockOutputStream extends OutputStream {
    * Clear the active block.
    */
   private void clearActiveBlock() {
-    LOG.debug("Clearing active block");
+    if (activeBlock != null) {
+      LOG.debug("Clearing active block");
+    }
     synchronized (this) {
       activeBlock = null;
     }
@@ -356,11 +356,9 @@ class S3ABlockOutputStream extends OutputStream {
       writeOperationHelper.writeFailed(ioe);
       throw ioe;
     } finally {
-      LOG.debug("Closing block and factory");
-      IOUtils.closeStream(block);
-      IOUtils.closeStream(blockFactory);
+      closeAll(LOG, block, blockFactory);
       LOG.debug("Statistics: {}", statistics);
-      IOUtils.closeStream(statistics);
+      closeAll(LOG, statistics);
       clearActiveBlock();
     }
     // All end of write operations, including deleting fake parent directories
@@ -378,10 +376,10 @@ class S3ABlockOutputStream extends OutputStream {
 
     final S3ADataBlocks.DataBlock block = getActiveBlock();
     int size = block.dataSize();
-    final PutObjectRequest putObjectRequest =
-        writeOperationHelper.newPutRequest(
-            block.startUpload(),
-            size);
+    final S3ADataBlocks.BlockUploadData uploadData = block.startUpload();
+    final PutObjectRequest putObjectRequest = uploadData.hasFile() ?
+        writeOperationHelper.newPutRequest(uploadData.getFile())
+        : writeOperationHelper.newPutRequest(uploadData.getUploadStream(), size);
     fs.setOptionalPutRequestParameters(putObjectRequest);
     long transferQueueTime = now();
     BlockUploadProgress callback =
@@ -393,8 +391,14 @@ class S3ABlockOutputStream extends OutputStream {
         executorService.submit(new Callable<PutObjectResult>() {
           @Override
           public PutObjectResult call() throws Exception {
-            PutObjectResult result = fs.putObjectDirect(putObjectRequest);
-            block.close();
+            PutObjectResult result;
+            try {
+              // the putObject call automatically closes the input
+              // stream afterwards.
+              result = writeOperationHelper.putObject(putObjectRequest);
+            } finally {
+              closeAll(LOG, uploadData, block);
+            }
             return result;
           }
         });
@@ -438,13 +442,21 @@ class S3ABlockOutputStream extends OutputStream {
   }
 
   /**
+   * Get the statistics for this stream.
+   * @return stream statistics
+   */
+  S3AInstrumentation.OutputStreamStatistics getStatistics() {
+    return statistics;
+  }
+
+  /**
    * Multiple partition upload.
    */
   private class MultiPartUpload {
     private final String uploadId;
     private final List<ListenableFuture<PartETag>> partETagsFutures;
 
-    public MultiPartUpload() throws IOException {
+    MultiPartUpload() throws IOException {
       this.uploadId = writeOperationHelper.initiateMultiPartUpload();
       this.partETagsFutures = new ArrayList<>(2);
       LOG.debug("Initiated multi-part upload for {} with " +
@@ -461,14 +473,16 @@ class S3ABlockOutputStream extends OutputStream {
         throws IOException {
       LOG.debug("Queueing upload of {}", block);
       final int size = block.dataSize();
-      final InputStream uploadStream = block.startUpload();
+      final S3ADataBlocks.BlockUploadData uploadData = block.startUpload();
       final int currentPartNumber = partETagsFutures.size() + 1;
       final UploadPartRequest request =
           writeOperationHelper.newUploadPartRequest(
               uploadId,
-              uploadStream,
               currentPartNumber,
-              size);
+              size,
+              uploadData.getUploadStream(),
+              uploadData.getFile());
+
       long transferQueueTime = now();
       BlockUploadProgress callback =
           new BlockUploadProgress(
@@ -483,12 +497,16 @@ class S3ABlockOutputStream extends OutputStream {
               LOG.debug("Uploading part {} for id '{}'", currentPartNumber,
                   uploadId);
               // do the upload
-              PartETag partETag = fs.uploadPart(request).getPartETag();
-              LOG.debug("Completed upload of {}", block);
-              LOG.debug("Stream statistics of {}", statistics);
-
-              // close the block
-              block.close();
+              PartETag partETag;
+              try {
+                partETag = fs.uploadPart(request).getPartETag();
+                LOG.debug("Completed upload of {} to part {}", block,
+                    partETag.getETag());
+                LOG.debug("Stream statistics of {}", statistics);
+              } finally {
+                // close the stream and block
+                closeAll(LOG, uploadData, block);
+              }
               return partETag;
             }
           });
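
The async PUT above hands the block to an executor and releases the buffered data in a finally clause whether or not the upload succeeds. A minimal, self-contained sketch of that submit-then-always-close pattern (BlockSource and doUpload are invented stand-ins, not classes added by this patch):

import java.io.Closeable;
import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class AsyncUploadSketch {

  /** Stand-in for buffered block data; closing releases the buffer or file. */
  static class BlockSource implements Closeable {
    private final String name;
    BlockSource(String name) {
      this.name = name;
    }
    @Override
    public void close() {
      System.out.println("released " + name);
    }
    @Override
    public String toString() {
      return name;
    }
  }

  /** Stand-in for the remote PUT call. */
  static String doUpload(BlockSource source) throws IOException {
    return "etag-for-" + source;
  }

  public static void main(String[] args) throws Exception {
    ExecutorService executor = Executors.newSingleThreadExecutor();
    final BlockSource source = new BlockSource("block-0001");
    Future<String> etag = executor.submit(new Callable<String>() {
      @Override
      public String call() throws Exception {
        try {
          // the upload may fail; the source is still released
          return doUpload(source);
        } finally {
          source.close();
        }
      }
    });
    System.out.println("uploaded, etag=" + etag.get());
    executor.shutdown();
  }
}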

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dab00da1/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
index 05f8efe..9bc8dcd 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3ADataBlocks.java
@@ -24,10 +24,8 @@ import java.io.ByteArrayOutputStream;
 import java.io.Closeable;
 import java.io.EOFException;
 import java.io.File;
-import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
-import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
@@ -42,10 +40,11 @@ import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.util.DirectBufferPool;
 
 import static org.apache.hadoop.fs.s3a.S3ADataBlocks.DataBlock.DestState.*;
+import static org.apache.hadoop.fs.s3a.S3AUtils.closeAll;
 
 /**
  * Set of classes to support output streaming into blocks which are then
- * uploaded as partitions.
+ * uploaded to S3 as a single PUT, or as part of a multipart request.
  */
 final class S3ADataBlocks {
 
@@ -97,6 +96,70 @@ final class S3ADataBlocks {
   }
 
   /**
+   * The output information for an upload.
+   * It can be one of a file or an input stream.
+   * When closed, any stream is closed. Any source file is untouched.
+   */
+  static final class BlockUploadData implements Closeable {
+    private final File file;
+    private final InputStream uploadStream;
+
+    /**
+     * File constructor; input stream will be null.
+     * @param file file to upload
+     */
+    BlockUploadData(File file) {
+      Preconditions.checkArgument(file.exists(), "No file: " + file);
+      this.file = file;
+      this.uploadStream = null;
+    }
+
+    /**
+     * Stream constructor, file field will be null.
+     * @param uploadStream stream to upload
+     */
+    BlockUploadData(InputStream uploadStream) {
+      Preconditions.checkNotNull(uploadStream, "rawUploadStream");
+      this.uploadStream = uploadStream;
+      this.file = null;
+    }
+
+    /**
+     * Predicate: does this instance contain a file reference.
+     * @return true if there is a file.
+     */
+    boolean hasFile() {
+      return file != null;
+    }
+
+    /**
+     * Get the file, if there is one.
+     * @return the file for uploading, or null.
+     */
+    File getFile() {
+      return file;
+    }
+
+    /**
+     * Get the raw upload stream, if the object was
+     * created with one.
+     * @return the upload stream or null.
+     */
+    InputStream getUploadStream() {
+      return uploadStream;
+    }
+
+    /**
+     * Close: closes any upload stream provided in the constructor.
+     * @throws IOException inherited exception
+     */
+    @Override
+    public void close() throws IOException {
+      closeAll(LOG, uploadStream);
+    }
+  }
+
+  /**
    * Base class for block factories.
    */
   static abstract class BlockFactory implements Closeable {
@@ -110,15 +173,21 @@ final class S3ADataBlocks {
 
     /**
      * Create a block.
+     *
+     * @param index index of block
      * @param limit limit of the block.
+     * @param statistics stats to work with
      * @return a new block.
      */
-    abstract DataBlock create(int limit) throws IOException;
+    abstract DataBlock create(long index, int limit,
+        S3AInstrumentation.OutputStreamStatistics statistics)
+        throws IOException;
 
     /**
      * Implement any close/cleanup operation.
      * Base class is a no-op
-     * @throws IOException -ideally, it shouldn't.
+     * @throws IOException Inherited exception; implementations should
+     * avoid raising it.
      */
     @Override
     public void close() throws IOException {
@@ -140,6 +209,14 @@ final class S3ADataBlocks {
     enum DestState {Writing, Upload, Closed}
 
     private volatile DestState state = Writing;
+    protected final long index;
+    protected final S3AInstrumentation.OutputStreamStatistics statistics;
+
+    protected DataBlock(long index,
+        S3AInstrumentation.OutputStreamStatistics statistics) {
+      this.index = index;
+      this.statistics = statistics;
+    }
 
     /**
      * Atomically enter a state, verifying current state.
@@ -243,8 +320,8 @@ final class S3ADataBlocks {
      * @return the stream
      * @throws IOException trouble
      */
-    InputStream startUpload() throws IOException {
-      LOG.debug("Start datablock upload");
+    BlockUploadData startUpload() throws IOException {
+      LOG.debug("Start datablock[{}] upload", index);
       enterState(Writing, Upload);
       return null;
     }
@@ -278,6 +355,23 @@ final class S3ADataBlocks {
 
     }
 
+    /**
+     * A block has been allocated.
+     */
+    protected void blockAllocated() {
+      if (statistics != null) {
+        statistics.blockAllocated();
+      }
+    }
+
+    /**
+     * A block has been released.
+     */
+    protected void blockReleased() {
+      if (statistics != null) {
+        statistics.blockReleased();
+      }
+    }
   }
 
   // ====================================================================
@@ -292,8 +386,10 @@ final class S3ADataBlocks {
     }
 
     @Override
-    DataBlock create(int limit) throws IOException {
-      return new ByteArrayBlock(limit);
+    DataBlock create(long index, int limit,
+        S3AInstrumentation.OutputStreamStatistics statistics)
+        throws IOException {
+      return new ByteArrayBlock(index, limit, statistics);
     }
 
   }
@@ -334,9 +430,13 @@ final class S3ADataBlocks {
     // cache data size so that it is consistent after the buffer is reset.
     private Integer dataSize;
 
-    ByteArrayBlock(int limit) {
+    ByteArrayBlock(long index,
+        int limit,
+        S3AInstrumentation.OutputStreamStatistics statistics) {
+      super(index, statistics);
       this.limit = limit;
       buffer = new S3AByteArrayOutputStream(limit);
+      blockAllocated();
     }
 
     /**
@@ -349,12 +449,12 @@ final class S3ADataBlocks {
     }
 
     @Override
-    InputStream startUpload() throws IOException {
+    BlockUploadData startUpload() throws IOException {
       super.startUpload();
       dataSize = buffer.size();
       ByteArrayInputStream bufferData = buffer.getInputStream();
       buffer = null;
-      return bufferData;
+      return new BlockUploadData(bufferData);
     }
 
     @Override
@@ -378,12 +478,14 @@ final class S3ADataBlocks {
     @Override
     protected void innerClose() {
       buffer = null;
+      blockReleased();
     }
 
     @Override
     public String toString() {
-      return "ByteArrayBlock{" +
-          "state=" + getState() +
+      return "ByteArrayBlock{"
+          +"index=" + index +
+          ", state=" + getState() +
           ", limit=" + limit +
           ", dataSize=" + dataSize +
           '}';
@@ -395,12 +497,6 @@ final class S3ADataBlocks {
   /**
    * Stream via Direct ByteBuffers; these are allocated off heap
    * via {@link DirectBufferPool}.
-   * This is actually the most complex of all the block factories,
-   * due to the need to explicitly recycle buffers; in comparison, the
-   * {@link DiskBlock} buffer delegates the work of deleting files to
-   * the {@link DiskBlock.FileDeletingInputStream}. Here the
-   * input stream {@link ByteBufferInputStream} has a similar task, along
-   * with the foundational work of streaming data from a byte array.
    */
 
   static class ByteBufferBlockFactory extends BlockFactory {
@@ -413,8 +509,10 @@ final class S3ADataBlocks {
     }
 
     @Override
-    ByteBufferBlock create(int limit) throws IOException {
-      return new ByteBufferBlock(limit);
+    ByteBufferBlock create(long index, int limit,
+        S3AInstrumentation.OutputStreamStatistics statistics)
+        throws IOException {
+      return new ByteBufferBlock(index, limit, statistics);
     }
 
     private ByteBuffer requestBuffer(int limit) {
@@ -446,21 +544,27 @@ final class S3ADataBlocks {
 
     /**
      * A DataBlock which requests a buffer from pool on creation; returns
-     * it when the output stream is closed.
+     * it when it is closed.
      */
     class ByteBufferBlock extends DataBlock {
-      private ByteBuffer buffer;
+      private ByteBuffer blockBuffer;
       private final int bufferSize;
       // cache data size so that it is consistent after the buffer is reset.
       private Integer dataSize;
 
       /**
        * Instantiate. This will request a ByteBuffer of the desired size.
+       * @param index block index
        * @param bufferSize buffer size
+       * @param statistics statistics to update
        */
-      ByteBufferBlock(int bufferSize) {
+      ByteBufferBlock(long index,
+          int bufferSize,
+          S3AInstrumentation.OutputStreamStatistics statistics) {
+        super(index, statistics);
         this.bufferSize = bufferSize;
-        buffer = requestBuffer(bufferSize);
+        blockBuffer = requestBuffer(bufferSize);
+        blockAllocated();
       }
 
       /**
@@ -473,13 +577,14 @@ final class S3ADataBlocks {
       }
 
       @Override
-      ByteBufferInputStream startUpload() throws IOException {
+      BlockUploadData startUpload() throws IOException {
         super.startUpload();
         dataSize = bufferCapacityUsed();
         // set the buffer up from reading from the beginning
-        buffer.limit(buffer.position());
-        buffer.position(0);
-        return new ByteBufferInputStream(dataSize, buffer);
+        blockBuffer.limit(blockBuffer.position());
+        blockBuffer.position(0);
+        return new BlockUploadData(
+            new ByteBufferInputStream(dataSize, blockBuffer));
       }
 
       @Override
@@ -489,182 +594,190 @@ final class S3ADataBlocks {
 
       @Override
       public int remainingCapacity() {
-        return buffer != null ? buffer.remaining() : 0;
+        return blockBuffer != null ? blockBuffer.remaining() : 0;
       }
 
       private int bufferCapacityUsed() {
-        return buffer.capacity() - buffer.remaining();
+        return blockBuffer.capacity() - blockBuffer.remaining();
       }
 
       @Override
       int write(byte[] b, int offset, int len) throws IOException {
         super.write(b, offset, len);
         int written = Math.min(remainingCapacity(), len);
-        buffer.put(b, offset, written);
+        blockBuffer.put(b, offset, written);
         return written;
       }
 
+      /**
+       * Closing the block will release the buffer.
+       */
       @Override
       protected void innerClose() {
-        buffer = null;
+        if (blockBuffer != null) {
+          blockReleased();
+          releaseBuffer(blockBuffer);
+          blockBuffer = null;
+        }
       }
 
       @Override
       public String toString() {
         return "ByteBufferBlock{"
-            + "state=" + getState() +
+            + "index=" + index +
+            ", state=" + getState() +
             ", dataSize=" + dataSize() +
             ", limit=" + bufferSize +
             ", remainingCapacity=" + remainingCapacity() +
             '}';
       }
 
-    }
-
-    /**
-     * Provide an input stream from a byte buffer; supporting
-     * {@link #mark(int)}, which is required to enable replay of failed
-     * PUT attempts.
-     * This input stream returns the buffer to the pool afterwards.
-     */
-    class ByteBufferInputStream extends InputStream {
+      /**
+       * Provide an input stream from a byte buffer; supporting
+       * {@link #mark(int)}, which is required to enable replay of failed
+       * PUT attempts.
+       */
+      class ByteBufferInputStream extends InputStream {
 
-      private final int size;
-      private ByteBuffer byteBuffer;
+        private final int size;
+        private ByteBuffer byteBuffer;
 
-      ByteBufferInputStream(int size, ByteBuffer byteBuffer) {
-        LOG.debug("Creating ByteBufferInputStream of size {}", size);
-        this.size = size;
-        this.byteBuffer = byteBuffer;
-      }
+        ByteBufferInputStream(int size,
+            ByteBuffer byteBuffer) {
+          LOG.debug("Creating ByteBufferInputStream of size {}", size);
+          this.size = size;
+          this.byteBuffer = byteBuffer;
+        }
 
-      /**
-       * Return the buffer to the pool after the stream is closed.
-       */
-      @Override
-      public synchronized void close() {
-        if (byteBuffer != null) {
-          LOG.debug("releasing buffer");
-          releaseBuffer(byteBuffer);
+        /**
+         * After the stream is closed, set the local reference to the byte
+         * buffer to null; this guarantees that future attempts to use
+         * stream methods will fail.
+         */
+        @Override
+        public synchronized void close() {
+          LOG.debug("ByteBufferInputStream.close() for {}",
+              ByteBufferBlock.super.toString());
           byteBuffer = null;
         }
-      }
 
-      /**
-       * Verify that the stream is open.
-       * @throws IOException if the stream is closed
-       */
-      private void verifyOpen() throws IOException {
-        if (byteBuffer == null) {
-          throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+        /**
+         * Verify that the stream is open.
+         * @throws IOException if the stream is closed
+         */
+        private void verifyOpen() throws IOException {
+          if (byteBuffer == null) {
+            throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED);
+          }
         }
-      }
 
-      public synchronized int read() throws IOException {
-        if (available() > 0) {
-          return byteBuffer.get() & 0xFF;
-        } else {
-          return -1;
+        public synchronized int read() throws IOException {
+          if (available() > 0) {
+            return byteBuffer.get() & 0xFF;
+          } else {
+            return -1;
+          }
         }
-      }
 
-      @Override
-      public synchronized long skip(long offset) throws IOException {
-        verifyOpen();
-        long newPos = position() + offset;
-        if (newPos < 0) {
-          throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
+        @Override
+        public synchronized long skip(long offset) throws IOException {
+          verifyOpen();
+          long newPos = position() + offset;
+          if (newPos < 0) {
+            throw new EOFException(FSExceptionMessages.NEGATIVE_SEEK);
+          }
+          if (newPos > size) {
+            throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
+          }
+          byteBuffer.position((int) newPos);
+          return newPos;
         }
-        if (newPos > size) {
-          throw new EOFException(FSExceptionMessages.CANNOT_SEEK_PAST_EOF);
+
+        @Override
+        public synchronized int available() {
+          Preconditions.checkState(byteBuffer != null,
+              FSExceptionMessages.STREAM_IS_CLOSED);
+          return byteBuffer.remaining();
         }
-        byteBuffer.position((int) newPos);
-        return newPos;
-      }
 
-      @Override
-      public synchronized int available() {
-        Preconditions.checkState(byteBuffer != null,
-            FSExceptionMessages.STREAM_IS_CLOSED);
-        return byteBuffer.remaining();
-      }
+        /**
+         * Get the current buffer position.
+         * @return the buffer position
+         */
+        public synchronized int position() {
+          return byteBuffer.position();
+        }
 
-      /**
-       * Get the current buffer position.
-       * @return the buffer position
-       */
-      public synchronized int position() {
-        return byteBuffer.position();
-      }
+        /**
+         * Check if there is data left.
+         * @return true if there is data remaining in the buffer.
+         */
+        public synchronized boolean hasRemaining() {
+          return byteBuffer.hasRemaining();
+        }
 
-      /**
-       * Check if there is data left.
-       * @return true if there is data remaining in the buffer.
-       */
-      public synchronized boolean hasRemaining() {
-        return byteBuffer.hasRemaining();
-      }
+        @Override
+        public synchronized void mark(int readlimit) {
+          LOG.debug("mark at {}", position());
+          byteBuffer.mark();
+        }
 
-      @Override
-      public synchronized void mark(int readlimit) {
-        LOG.debug("mark at {}", position());
-        byteBuffer.mark();
-      }
+        @Override
+        public synchronized void reset() throws IOException {
+          LOG.debug("reset");
+          byteBuffer.reset();
+        }
 
-      @Override
-      public synchronized void reset() throws IOException {
-        LOG.debug("reset");
-        byteBuffer.reset();
-      }
+        @Override
+        public boolean markSupported() {
+          return true;
+        }
 
-      @Override
-      public boolean markSupported() {
-        return true;
-      }
+        /**
+         * Read in data.
+         * @param b destination buffer
+         * @param offset offset within the buffer
+         * @param length length of bytes to read
+         * @throws EOFException if the position is negative
+         * @throws IndexOutOfBoundsException if there isn't space for the
+         * amount of data requested.
+         * @throws IllegalArgumentException other arguments are invalid.
+         */
+        @SuppressWarnings("NullableProblems")
+        public synchronized int read(byte[] b, int offset, int length)
+            throws IOException {
+          Preconditions.checkArgument(length >= 0, "length is negative");
+          Preconditions.checkArgument(b != null, "Null buffer");
+          if (b.length - offset < length) {
+            throw new IndexOutOfBoundsException(
+                FSExceptionMessages.TOO_MANY_BYTES_FOR_DEST_BUFFER
+                    + ": request length =" + length
+                    + ", with offset =" + offset
+                    + "; buffer capacity =" + (b.length - offset));
+          }
+          verifyOpen();
+          if (!hasRemaining()) {
+            return -1;
+          }
 
-      /**
-       * Read in data.
-       * @param buffer destination buffer
-       * @param offset offset within the buffer
-       * @param length length of bytes to read
-       * @throws EOFException if the position is negative
-       * @throws IndexOutOfBoundsException if there isn't space for the
-       * amount of data requested.
-       * @throws IllegalArgumentException other arguments are invalid.
-       */
-      @SuppressWarnings("NullableProblems")
-      public synchronized int read(byte[] buffer, int offset, int length)
-          throws IOException {
-        Preconditions.checkArgument(length >= 0, "length is negative");
-        Preconditions.checkArgument(buffer != null, "Null buffer");
-        if (buffer.length - offset < length) {
-          throw new IndexOutOfBoundsException(
-              FSExceptionMessages.TOO_MANY_BYTES_FOR_DEST_BUFFER
-                  + ": request length =" + length
-                  + ", with offset =" + offset
-                  + "; buffer capacity =" + (buffer.length - offset));
+          int toRead = Math.min(length, available());
+          byteBuffer.get(b, offset, toRead);
+          return toRead;
         }
-        verifyOpen();
-        if (!hasRemaining()) {
-          return -1;
-        }
-
-        int toRead = Math.min(length, available());
-        byteBuffer.get(buffer, offset, toRead);
-        return toRead;
-      }
 
-      @Override
-      public String toString() {
-        final StringBuilder sb = new StringBuilder(
-            "ByteBufferInputStream{");
-        sb.append("size=").append(size);
-        ByteBuffer buffer = this.byteBuffer;
-        if (buffer != null) {
-          sb.append(", available=").append(buffer.remaining());
+        @Override
+        public String toString() {
+          final StringBuilder sb = new StringBuilder(
+              "ByteBufferInputStream{");
+          sb.append("size=").append(size);
+          ByteBuffer buf = this.byteBuffer;
+          if (buf != null) {
+            sb.append(", available=").append(buf.remaining());
+          }
+          sb.append(", ").append(ByteBufferBlock.super.toString());
+          sb.append('}');
+          return sb.toString();
         }
-        sb.append('}');
-        return sb.toString();
       }
     }
   }
@@ -681,22 +794,29 @@ final class S3ADataBlocks {
     }
 
     /**
-     * Create a temp file and a block which writes to it.
+     * Create a temp file and a {@link DiskBlock} instance to manage it.
+     *
+     * @param index block index
      * @param limit limit of the block.
+     * @param statistics statistics to update
      * @return the new block
      * @throws IOException IO problems
      */
     @Override
-    DataBlock create(int limit) throws IOException {
+    DataBlock create(long index,
+        int limit,
+        S3AInstrumentation.OutputStreamStatistics statistics)
+        throws IOException {
       File destFile = getOwner()
-          .createTmpFileForWrite("s3ablock", limit, getOwner().getConf());
-      return new DiskBlock(destFile, limit);
+          .createTmpFileForWrite(String.format("s3ablock-%04d-", index),
+              limit, getOwner().getConf());
+      return new DiskBlock(destFile, limit, index, statistics);
     }
   }
 
   /**
    * Stream to a file.
-   * This will stop at the limit; the caller is expected to create a new block
+   * This will stop at the limit; the caller is expected to create a new block.
    */
   static class DiskBlock extends DataBlock {
 
@@ -704,12 +824,17 @@ final class S3ADataBlocks {
     private final File bufferFile;
     private final int limit;
     private BufferedOutputStream out;
-    private InputStream uploadStream;
+    private final AtomicBoolean closed = new AtomicBoolean(false);
 
-    DiskBlock(File bufferFile, int limit)
+    DiskBlock(File bufferFile,
+        int limit,
+        long index,
+        S3AInstrumentation.OutputStreamStatistics statistics)
         throws FileNotFoundException {
+      super(index, statistics);
       this.limit = limit;
       this.bufferFile = bufferFile;
+      blockAllocated();
       out = new BufferedOutputStream(new FileOutputStream(bufferFile));
     }
 
@@ -738,7 +863,7 @@ final class S3ADataBlocks {
     }
 
     @Override
-    InputStream startUpload() throws IOException {
+    BlockUploadData startUpload() throws IOException {
       super.startUpload();
       try {
         out.flush();
@@ -746,8 +871,7 @@ final class S3ADataBlocks {
         out.close();
         out = null;
       }
-      uploadStream = new FileInputStream(bufferFile);
-      return new FileDeletingInputStream(uploadStream);
+      return new BlockUploadData(bufferFile);
     }
 
     /**
@@ -755,6 +879,7 @@ final class S3ADataBlocks {
      * exists.
      * @throws IOException IO problems
      */
+    @SuppressWarnings("UnnecessaryDefault")
     @Override
     protected void innerClose() throws IOException {
       final DestState state = getState();
@@ -763,20 +888,19 @@ final class S3ADataBlocks {
       case Writing:
         if (bufferFile.exists()) {
           // file was not uploaded
-          LOG.debug("Deleting buffer file as upload did not start");
-          boolean deleted = bufferFile.delete();
-          if (!deleted && bufferFile.exists()) {
-            LOG.warn("Failed to delete buffer file {}", bufferFile);
-          }
+          LOG.debug("Block[{}]: Deleting buffer file as upload did not start",
+              index);
+          closeBlock();
         }
         break;
 
       case Upload:
-        LOG.debug("Buffer file {} exists \u2014close upload stream", bufferFile);
+        LOG.debug("Block[{}]: Buffer file {} exists \u2014close upload stream",
+            index, bufferFile);
         break;
 
       case Closed:
-        // no-op
+        closeBlock();
         break;
 
       default:
@@ -798,7 +922,8 @@ final class S3ADataBlocks {
     @Override
     public String toString() {
       String sb = "FileBlock{"
-          + "destFile=" + bufferFile +
+          + "index=" + index
+          + ", destFile=" + bufferFile +
           ", state=" + getState() +
           ", dataSize=" + dataSize() +
           ", limit=" + limit +
@@ -807,31 +932,20 @@ final class S3ADataBlocks {
     }
 
     /**
-     * An input stream which deletes the buffer file when closed.
+     * Close the block.
+     * This will delete the block's buffer file if the block has
+     * not previously been closed.
      */
-    private final class FileDeletingInputStream extends FilterInputStream {
-      private final AtomicBoolean closed = new AtomicBoolean(false);
-
-      FileDeletingInputStream(InputStream source) {
-        super(source);
-      }
-
-      /**
-       * Delete the input file when closed.
-       * @throws IOException IO problem
-       */
-      @Override
-      public void close() throws IOException {
-        try {
-          super.close();
-        } finally {
-          if (!closed.getAndSet(true)) {
-            if (!bufferFile.delete()) {
-              LOG.warn("delete({}) returned false",
-                  bufferFile.getAbsoluteFile());
-            }
-          }
+    void closeBlock() {
+      LOG.debug("block[{}]: closeBlock()", index);
+      if (!closed.getAndSet(true)) {
+        blockReleased();
+        if (!bufferFile.delete() && bufferFile.exists()) {
+          LOG.warn("delete({}) returned false",
+              bufferFile.getAbsoluteFile());
         }
+      } else {
+        LOG.debug("block[{}]: skipping re-entrant closeBlock()", index);
       }
     }
   }
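
The new BlockUploadData type carries exactly one of a file or an input stream, and closing it only closes the stream, leaving any source file for the block itself to delete. A stripped-down, hypothetical equivalent of that wrapper (UploadPayload is an invented name; the real class adds Preconditions checks and javadoc):

import java.io.ByteArrayInputStream;
import java.io.Closeable;
import java.io.File;
import java.io.IOException;
import java.io.InputStream;

/** Holds exactly one of a file or a stream; close() only closes the stream. */
public final class UploadPayload implements Closeable {
  private final File file;
  private final InputStream stream;

  UploadPayload(File file) {
    this.file = file;
    this.stream = null;
  }

  UploadPayload(InputStream stream) {
    this.stream = stream;
    this.file = null;
  }

  boolean hasFile() {
    return file != null;
  }

  File getFile() {
    return file;
  }

  InputStream getUploadStream() {
    return stream;
  }

  @Override
  public void close() throws IOException {
    if (stream != null) {
      stream.close();  // any source file is left untouched for the block to delete
    }
  }

  public static void main(String[] args) throws IOException {
    UploadPayload inMemory =
        new UploadPayload(new ByteArrayInputStream(new byte[]{1, 2, 3}));
    UploadPayload onDisk = new UploadPayload(new File("/tmp/s3ablock-0001"));
    System.out.println("in-memory has file? " + inMemory.hasFile()); // false: PUT from the stream
    System.out.println("on-disk has file?   " + onDisk.hasFile());   // true: PUT from the file
    inMemory.close();
    onDisk.close();
  }
}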

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dab00da1/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index bc47918..1786e68 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -1022,6 +1022,7 @@ public class S3AFileSystem extends FileSystem {
    */
   public PutObjectRequest newPutObjectRequest(String key,
       ObjectMetadata metadata, File srcfile) {
+    Preconditions.checkNotNull(srcfile);
     PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
         srcfile);
     setOptionalPutRequestParameters(putObjectRequest);
@@ -1039,8 +1040,9 @@ public class S3AFileSystem extends FileSystem {
    * @param inputStream source data.
    * @return the request
    */
-  PutObjectRequest newPutObjectRequest(String key,
+  private PutObjectRequest newPutObjectRequest(String key,
       ObjectMetadata metadata, InputStream inputStream) {
+    Preconditions.checkNotNull(inputStream);
     PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key,
         inputStream, metadata);
     setOptionalPutRequestParameters(putObjectRequest);
@@ -1077,12 +1079,16 @@ public class S3AFileSystem extends FileSystem {
   }
 
   /**
-   * PUT an object, incrementing the put requests and put bytes
+   * Start a transfer-manager managed async PUT of an object,
+   * incrementing the put requests and put bytes
    * counters.
    * It does not update the other counters,
    * as existing code does that as progress callbacks come in.
    * Byte length is calculated from the file length, or, if there is no
    * file, from the content length of the header.
+   * Because the operation is async, any stream supplied in the request
+   * must reference data (files, buffers) which stay valid until the upload
+   * completes.
    * @param putObjectRequest the request
    * @return the upload initiated
    */
@@ -1108,6 +1114,7 @@ public class S3AFileSystem extends FileSystem {
    * PUT an object directly (i.e. not via the transfer manager).
    * Byte length is calculated from the file length, or, if there is no
    * file, from the content length of the header.
+   * <i>Important: this call will close any input stream in the request.</i>
    * @param putObjectRequest the request
    * @return the upload initiated
    * @throws AmazonClientException on problems
@@ -1133,7 +1140,8 @@ public class S3AFileSystem extends FileSystem {
 
   /**
    * Upload part of a multi-partition file.
-   * Increments the write and put counters
+   * Increments the write and put counters.
+   * <i>Important: this call does not close any input stream in the request.</i>
    * @param request request
    * @return the result of the operation.
    * @throws AmazonClientException on problems
@@ -2309,14 +2317,28 @@ public class S3AFileSystem extends FileSystem {
 
     /**
      * Create a {@link PutObjectRequest} request.
-     * The metadata is assumed to have been configured with the size of the
-     * operation.
+     * If {@code length} is set, the metadata is configured with the size of
+     * the upload.
      * @param inputStream source data.
      * @param length size, if known. Use -1 for not known
      * @return the request
      */
     PutObjectRequest newPutRequest(InputStream inputStream, long length) {
-      return newPutObjectRequest(key, newObjectMetadata(length), inputStream);
+      PutObjectRequest request = newPutObjectRequest(key,
+          newObjectMetadata(length), inputStream);
+      return request;
+    }
+
+    /**
+     * Create a {@link PutObjectRequest} request to upload a file.
+     * @param sourceFile source file
+     * @return the request
+     */
+    PutObjectRequest newPutRequest(File sourceFile) {
+      int length = (int) sourceFile.length();
+      PutObjectRequest request = newPutObjectRequest(key,
+          newObjectMetadata(length), sourceFile);
+      return request;
     }
 
     /**
@@ -2379,6 +2401,8 @@ public class S3AFileSystem extends FileSystem {
       Preconditions.checkNotNull(partETags);
       Preconditions.checkArgument(!partETags.isEmpty(),
           "No partitions have been uploaded");
+      LOG.debug("Completing multipart upload {} with {} parts",
+          uploadId, partETags.size());
       return s3.completeMultipartUpload(
           new CompleteMultipartUploadRequest(bucket,
               key,
@@ -2389,42 +2413,51 @@ public class S3AFileSystem extends FileSystem {
     /**
      * Abort a multipart upload operation.
      * @param uploadId multipart operation Id
-     * @return the result
      * @throws AmazonClientException on problems.
      */
     void abortMultipartUpload(String uploadId) throws AmazonClientException {
+      LOG.debug("Aborting multipart upload {}", uploadId);
       s3.abortMultipartUpload(
           new AbortMultipartUploadRequest(bucket, key, uploadId));
     }
 
     /**
      * Create and initialize a part request of a multipart upload.
+     * Exactly one of: {@code uploadStream} or {@code sourceFile}
+     * must be specified.
      * @param uploadId ID of ongoing upload
-     * @param uploadStream source of data to upload
      * @param partNumber current part number of the upload
      * @param size amount of data
+     * @param uploadStream source of data to upload
+     * @param sourceFile optional source file.
      * @return the request.
      */
     UploadPartRequest newUploadPartRequest(String uploadId,
-        InputStream uploadStream,
-        int partNumber,
-        int size) {
+        int partNumber, int size, InputStream uploadStream, File sourceFile) {
       Preconditions.checkNotNull(uploadId);
-      Preconditions.checkNotNull(uploadStream);
+      // exactly one source must be set; xor verifies this
+      Preconditions.checkArgument((uploadStream != null) ^ (sourceFile != null),
+          "Data source");
       Preconditions.checkArgument(size > 0, "Invalid partition size %s", size);
-      Preconditions.checkArgument(partNumber> 0 && partNumber <=10000,
+      Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000,
           "partNumber must be between 1 and 10000 inclusive, but is %s",
           partNumber);
 
       LOG.debug("Creating part upload request for {} #{} size {}",
           uploadId, partNumber, size);
-      return new UploadPartRequest()
+      UploadPartRequest request = new UploadPartRequest()
           .withBucketName(bucket)
           .withKey(key)
           .withUploadId(uploadId)
-          .withInputStream(uploadStream)
           .withPartNumber(partNumber)
           .withPartSize(size);
+      if (uploadStream != null) {
+        // there's an upload stream. Bind to it.
+        request.setInputStream(uploadStream);
+      } else {
+        request.setFile(sourceFile);
+      }
+      return request;
     }
 
     /**
@@ -2439,6 +2472,21 @@ public class S3AFileSystem extends FileSystem {
       sb.append('}');
       return sb.toString();
     }
+
+    /**
+     * PUT an object directly (i.e. not via the transfer manager).
+     * @param putObjectRequest the request
+     * @return the upload initiated
+     * @throws IOException on problems
+     */
+    PutObjectResult putObject(PutObjectRequest putObjectRequest)
+        throws IOException {
+      try {
+        return putObjectDirect(putObjectRequest);
+      } catch (AmazonClientException e) {
+        throw translateException("put", putObjectRequest.getKey(), e);
+      }
+    }
   }
 
 }
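
newUploadPartRequest now accepts both an input stream and a file parameter and uses an xor over the two null checks to require that exactly one of them is set. A tiny sketch of that validation idiom outside the AWS SDK types (bindSource is an invented name):

import java.io.File;
import java.io.InputStream;

public class ExactlyOneSource {

  /** Require exactly one of stream or file to be supplied. */
  static String bindSource(InputStream stream, File file) {
    if (!((stream != null) ^ (file != null))) {
      throw new IllegalArgumentException(
          "exactly one of the upload stream or the source file must be set");
    }
    return stream != null
        ? "uploading from a stream"
        : "uploading from file " + file;
  }

  public static void main(String[] args) {
    System.out.println(bindSource(null, new File("/tmp/part-0001")));
    // bindSource(null, null) and bindSource(stream, file) would both throw.
  }
}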

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dab00da1/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
index fb8c852..d2e7a88 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AInstrumentation.java
@@ -36,6 +36,7 @@ import java.net.URI;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 
@@ -428,7 +429,7 @@ public class S3AInstrumentation {
     if (gauge != null) {
       gauge.decr(count);
     } else {
-      LOG.debug("No Gauge: " + op);
+      LOG.debug("No Gauge: {}", op);
     }
   }
 
@@ -676,6 +677,8 @@ public class S3AInstrumentation {
     private final AtomicLong transferDuration = new AtomicLong(0);
     private final AtomicLong queueDuration = new AtomicLong(0);
     private final AtomicLong exceptionsInMultipartFinalize = new AtomicLong(0);
+    private final AtomicInteger blocksAllocated = new AtomicInteger(0);
+    private final AtomicInteger blocksReleased = new AtomicInteger(0);
 
     private Statistics statistics;
 
@@ -684,6 +687,20 @@ public class S3AInstrumentation {
     }
 
     /**
+     * A block has been allocated.
+     */
+    void blockAllocated() {
+      blocksAllocated.incrementAndGet();
+    }
+
+    /**
+     * A block has been released.
+     */
+    void blockReleased() {
+      blocksReleased.incrementAndGet();
+    }
+
+    /**
      * Block is queued for upload.
      */
     void blockUploadQueued(int blockSize) {
@@ -778,6 +795,24 @@ public class S3AInstrumentation {
       return queueDuration.get() + transferDuration.get();
     }
 
+    public int blocksAllocated() {
+      return blocksAllocated.get();
+    }
+
+    public int blocksReleased() {
+      return blocksReleased.get();
+    }
+
+    /**
+     * Get the count of blocks actively allocated; may be inaccurate
+     * if the numbers change during the (non-synchronized) calculation.
+     * @return the number of actively allocated blocks.
+     */
+    public int blocksActivelyAllocated() {
+      return blocksAllocated.get() - blocksReleased.get();
+    }
+
+
     @Override
     public String toString() {
       final StringBuilder sb = new StringBuilder(
@@ -789,6 +824,9 @@ public class S3AInstrumentation {
       sb.append(", blockUploadsFailed=").append(blockUploadsFailed);
       sb.append(", bytesPendingUpload=").append(bytesPendingUpload);
       sb.append(", bytesUploaded=").append(bytesUploaded);
+      sb.append(", blocksAllocated=").append(blocksAllocated);
+      sb.append(", blocksReleased=").append(blocksReleased);
+      sb.append(", blocksActivelyAllocated=").append(blocksActivelyAllocated());
       sb.append(", exceptionsInMultipartFinalize=").append(
           exceptionsInMultipartFinalize);
       sb.append(", transferDuration=").append(transferDuration).append(" ms");
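
The stream statistics derive the number of actively allocated blocks as the difference of two monotonically increasing counters, accepting that the unsynchronized snapshot may be momentarily stale. A minimal sketch of that gauge pattern (BlockGauge is an invented name, not the production class):

import java.util.concurrent.atomic.AtomicInteger;

public class BlockGauge {
  private final AtomicInteger allocated = new AtomicInteger(0);
  private final AtomicInteger released = new AtomicInteger(0);

  void blockAllocated() {
    allocated.incrementAndGet();
  }

  void blockReleased() {
    released.incrementAndGet();
  }

  /** May be momentarily stale if blocks are allocated or released concurrently. */
  int activelyAllocated() {
    return allocated.get() - released.get();
  }

  public static void main(String[] args) {
    BlockGauge gauge = new BlockGauge();
    gauge.blockAllocated();
    gauge.blockAllocated();
    gauge.blockReleased();
    System.out.println("active blocks = " + gauge.activelyAllocated()); // 1
  }
}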

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dab00da1/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
index 5311211..84f3c99 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AUtils.java
@@ -733,4 +733,30 @@ public final class S3AUtils {
     }
     return null;
   }
+
+  /**
+   * Close the Closeable objects and <b>ignore</b> any Exception or
+   * null pointers.
+   * (This is the SLF4J equivalent of that in {@code IOUtils}).
+   * @param log the log to log at debug level. Can be null.
+   * @param closeables the objects to close
+   */
+  public static void closeAll(Logger log,
+      java.io.Closeable... closeables) {
+    for (java.io.Closeable c : closeables) {
+      if (c != null) {
+        try {
+          if (log != null) {
+            log.debug("Closing {}", c);
+          }
+          c.close();
+        } catch (Exception e) {
+          if (log != null && log.isDebugEnabled()) {
+            log.debug("Exception in closing {}", c, e);
+          }
+        }
+      }
+    }
+  }
+
 }
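
closeAll() tolerates null entries and swallows exceptions, so callers can release several resources from a single finally block. A hedged usage sketch with a simplified local helper (closeQuietly stands in for the production closeAll and drops the logger for brevity):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.Closeable;
import java.io.InputStream;
import java.io.OutputStream;

public class QuietClose {

  /** Close everything passed in, ignoring nulls and any exception raised. */
  static void closeQuietly(Closeable... closeables) {
    for (Closeable c : closeables) {
      if (c != null) {
        try {
          c.close();
        } catch (Exception ignored) {
          // the production helper logs this at debug level
        }
      }
    }
  }

  public static void main(String[] args) {
    InputStream in = new ByteArrayInputStream(new byte[]{1, 2, 3});
    OutputStream out = new ByteArrayOutputStream();
    Closeable maybeNull = null;
    try {
      // ... copy bytes, upload, etc. ...
    } finally {
      // one call releases everything, even when some entries are null
      closeQuietly(in, out, maybeNull);
    }
  }
}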

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dab00da1/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java
index 74cad00..87f676c 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputArray.java
@@ -24,9 +24,12 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.io.IOUtils;
 
+import org.junit.BeforeClass;
 import org.junit.Test;
 
 import java.io.IOException;
+import java.io.InputStream;
+import java.net.URI;
 
 import static org.apache.hadoop.fs.s3a.Constants.*;
 
@@ -38,6 +41,14 @@ import static org.apache.hadoop.fs.s3a.Constants.*;
  * multipart tests are kept in scale tests.
  */
 public class ITestS3ABlockOutputArray extends AbstractS3ATestBase {
+  private static final int BLOCK_SIZE = 256 * 1024;
+
+  private static byte[] dataset;
+
+  @BeforeClass
+  public static void setupDataset() {
+    dataset = ContractTestUtils.dataset(BLOCK_SIZE, 0, 256);
+  }
 
   @Override
   protected Configuration createConfiguration() {
@@ -65,9 +76,9 @@ public class ITestS3ABlockOutputArray extends AbstractS3ATestBase {
   }
 
   @Test(expected = IOException.class)
-  public void testDoubleStreamClose() throws Throwable {
-    Path dest = path("testDoubleStreamClose");
-    describe(" testDoubleStreamClose");
+  public void testWriteAfterStreamClose() throws Throwable {
+    Path dest = path("testWriteAfterStreamClose");
+    describe(" testWriteAfterStreamClose");
     FSDataOutputStream stream = getFileSystem().create(dest, true);
     byte[] data = ContractTestUtils.dataset(16, 'a', 26);
     try {
@@ -79,7 +90,25 @@ public class ITestS3ABlockOutputArray extends AbstractS3ATestBase {
     }
   }
 
-  public void verifyUpload(String name, int fileSize) throws IOException {
+  @Test
+  public void testBlocksClosed() throws Throwable {
+    Path dest = path("testBlocksClosed");
+    describe(" testBlocksClosed");
+    FSDataOutputStream stream = getFileSystem().create(dest, true);
+    S3AInstrumentation.OutputStreamStatistics statistics
+        = S3ATestUtils.getOutputStreamStatistics(stream);
+    byte[] data = ContractTestUtils.dataset(16, 'a', 26);
+    stream.write(data);
+    LOG.info("closing output stream");
+    stream.close();
+    assertEquals("total allocated blocks in " + statistics,
+        1, statistics.blocksAllocated());
+    assertEquals("actively allocated blocks in " + statistics,
+        0, statistics.blocksActivelyAllocated());
+    LOG.info("end of test case");
+  }
+
+  private void verifyUpload(String name, int fileSize) throws IOException {
     Path dest = path(name);
     describe(name + " upload to " + dest);
     ContractTestUtils.createAndVerifyFile(
@@ -87,4 +116,43 @@ public class ITestS3ABlockOutputArray extends AbstractS3ATestBase {
         dest,
         fileSize);
   }
+
+  /**
+   * Create a factory for use in mark/reset tests.
+   * @param fileSystem source FS
+   * @return the factory
+   */
+  protected S3ADataBlocks.BlockFactory createFactory(S3AFileSystem fileSystem) {
+    return new S3ADataBlocks.ArrayBlockFactory(fileSystem);
+  }
+
+  private void markAndResetDatablock(S3ADataBlocks.BlockFactory factory)
+      throws Exception {
+    S3AInstrumentation instrumentation =
+        new S3AInstrumentation(new URI("s3a://example"));
+    S3AInstrumentation.OutputStreamStatistics outstats
+        = instrumentation.newOutputStreamStatistics(null);
+    S3ADataBlocks.DataBlock block = factory.create(1, BLOCK_SIZE, outstats);
+    block.write(dataset, 0, dataset.length);
+    S3ADataBlocks.BlockUploadData uploadData = block.startUpload();
+    InputStream stream = uploadData.getUploadStream();
+    assertNotNull(stream);
+    assertTrue("Mark not supported in " + stream, stream.markSupported());
+    assertEquals(0, stream.read());
+    stream.mark(BLOCK_SIZE);
+    // read a lot
+    long l = 0;
+    while (stream.read() != -1) {
+      // do nothing
+      l++;
+    }
+    stream.reset();
+    assertEquals(1, stream.read());
+  }
+
+  @Test
+  public void testMarkReset() throws Throwable {
+    markAndResetDatablock(createFactory(getFileSystem()));
+  }
+
 }
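
The mark/reset support exercised by markAndResetDatablock() is what allows a failed PUT to be replayed from the buffered block without regenerating the data. A small sketch of that replay pattern over a plain ByteArrayInputStream (attemptUpload is an invented stand-in for the actual PUT call):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

public class ReplayableUpload {

  /** Stand-in for a PUT attempt: consumes the stream, then succeeds or fails. */
  static boolean attemptUpload(InputStream data, boolean fail) throws IOException {
    while (data.read() != -1) {
      // pretend to send the bytes over the wire
    }
    return !fail;
  }

  public static void main(String[] args) throws IOException {
    InputStream block = new ByteArrayInputStream(
        "block contents".getBytes(StandardCharsets.UTF_8));
    if (!block.markSupported()) {
      throw new IllegalStateException("buffer must support mark/reset for replay");
    }
    block.mark(Integer.MAX_VALUE);        // remember the start of the block
    if (!attemptUpload(block, true)) {    // first attempt drains the stream and fails
      block.reset();                      // rewind to the mark...
      attemptUpload(block, false);        // ...and replay exactly the same bytes
    }
    System.out.println("upload replayed after reset()");
  }
}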

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dab00da1/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputByteBuffer.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputByteBuffer.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputByteBuffer.java
index 504426b..02f3de0 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputByteBuffer.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputByteBuffer.java
@@ -17,7 +17,6 @@
  */
 
 package org.apache.hadoop.fs.s3a;
-
 /**
  * Use {@link Constants#FAST_UPLOAD_BYTEBUFFER} for buffering.
  */
@@ -27,4 +26,8 @@ public class ITestS3ABlockOutputByteBuffer extends ITestS3ABlockOutputArray {
     return Constants.FAST_UPLOAD_BYTEBUFFER;
   }
 
+  protected S3ADataBlocks.BlockFactory createFactory(S3AFileSystem fileSystem) {
+    return new S3ADataBlocks.ByteBufferBlockFactory(fileSystem);
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dab00da1/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputDisk.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputDisk.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputDisk.java
index 550706d..abe8656 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputDisk.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ABlockOutputDisk.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.fs.s3a;
 
+import org.junit.Assume;
+
 /**
  * Use {@link Constants#FAST_UPLOAD_BUFFER_DISK} for buffering.
  */
@@ -27,4 +29,14 @@ public class ITestS3ABlockOutputDisk extends ITestS3ABlockOutputArray {
     return Constants.FAST_UPLOAD_BUFFER_DISK;
   }
 
+  /**
+   * The disk stream doesn't support mark/reset; calls
+   * {@code Assume} to skip the test.
+   * @param fileSystem source FS
+   * @return null
+   */
+  protected S3ADataBlocks.BlockFactory createFactory(S3AFileSystem fileSystem) {
+    Assume.assumeTrue("mark/reset not supported", false);
+    return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dab00da1/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
index 567bacb..9528967 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.s3a;
 
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.junit.Assert;
@@ -544,4 +545,16 @@ public final class S3ATestUtils {
     }
     Assume.assumeTrue(message, condition);
   }
+
+  /**
+   * Get the statistics from a wrapped block output stream.
+   * @param out output stream
+   * @return the (active) stats of the write
+   */
+  public static S3AInstrumentation.OutputStreamStatistics
+      getOutputStreamStatistics(FSDataOutputStream out) {
+    S3ABlockOutputStream blockOutputStream
+        = (S3ABlockOutputStream) out.getWrappedStream();
+    return blockOutputStream.getStatistics();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dab00da1/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestDataBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestDataBlocks.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestDataBlocks.java
index 9fa95fd..700ef5c 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestDataBlocks.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/TestDataBlocks.java
@@ -51,9 +51,8 @@ public class TestDataBlocks extends Assert {
              new S3ADataBlocks.ByteBufferBlockFactory(null)) {
       int limit = 128;
       S3ADataBlocks.ByteBufferBlockFactory.ByteBufferBlock block
-          = factory.create(limit);
-      assertEquals("outstanding buffers in " + factory,
-          1, factory.getOutstandingBufferCount());
+          = factory.create(1, limit, null);
+      assertOutstandingBuffers(factory, 1);
 
       byte[] buffer = ContractTestUtils.toAsciiByteArray("test data");
       int bufferLen = buffer.length;
@@ -66,24 +65,23 @@ public class TestDataBlocks extends Assert {
           block.hasCapacity(limit - bufferLen));
 
       // now start the write
-      S3ADataBlocks.ByteBufferBlockFactory.ByteBufferInputStream
-          stream = block.startUpload();
+      S3ADataBlocks.BlockUploadData blockUploadData = block.startUpload();
+      S3ADataBlocks.ByteBufferBlockFactory.ByteBufferBlock.ByteBufferInputStream
+          stream =
+          (S3ADataBlocks.ByteBufferBlockFactory.ByteBufferBlock.ByteBufferInputStream)
+              blockUploadData.getUploadStream();
+      assertTrue("Mark not supported in " + stream, stream.markSupported());
       assertTrue("!hasRemaining() in " + stream, stream.hasRemaining());
       int expected = bufferLen;
       assertEquals("wrong available() in " + stream,
           expected, stream.available());
 
       assertEquals('t', stream.read());
+      stream.mark(limit);
       expected--;
       assertEquals("wrong available() in " + stream,
           expected, stream.available());
 
-      // close the block. The buffer must remain outstanding here;
-      // the stream manages the lifecycle of it now
-      block.close();
-      assertEquals("outstanding buffers in " + factory,
-          1, factory.getOutstandingBufferCount());
-      block.close();
 
       // read into a byte array with an offset
       int offset = 5;
@@ -109,16 +107,31 @@ public class TestDataBlocks extends Assert {
           0, stream.available());
       assertTrue("hasRemaining() in " + stream, !stream.hasRemaining());
 
+      // go back to the mark point
+      stream.reset();
+      assertEquals('e', stream.read());
+
       // when the stream is closed, the data should be returned
       stream.close();
-      assertEquals("outstanding buffers in " + factory,
-          0, factory.getOutstandingBufferCount());
+      assertOutstandingBuffers(factory, 1);
+      block.close();
+      assertOutstandingBuffers(factory, 0);
       stream.close();
-      assertEquals("outstanding buffers in " + factory,
-          0, factory.getOutstandingBufferCount());
-
+      assertOutstandingBuffers(factory, 0);
     }
 
   }
 
+  /**
+   * Assert the number of buffers active for a block factory.
+   * @param factory factory
+   * @param expectedCount expected count.
+   */
+  private static void assertOutstandingBuffers(
+      S3ADataBlocks.ByteBufferBlockFactory factory,
+      int expectedCount) {
+    assertEquals("outstanding buffers in " + factory,
+        expectedCount, factory.getOutstandingBufferCount());
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dab00da1/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
index fcb6444..89fae82 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/scale/AbstractSTestS3AHugeFiles.java
@@ -34,11 +34,13 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageStatistics;
 import org.apache.hadoop.fs.contract.ContractTestUtils;
 import org.apache.hadoop.fs.s3a.S3AFileStatus;
 import org.apache.hadoop.fs.s3a.S3AFileSystem;
+import org.apache.hadoop.fs.s3a.S3AInstrumentation;
 import org.apache.hadoop.fs.s3a.Statistic;
 import org.apache.hadoop.util.Progressable;
 
@@ -159,13 +161,20 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
     Statistic putBytesPending = Statistic.OBJECT_PUT_BYTES_PENDING;
 
     ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
-
+    S3AInstrumentation.OutputStreamStatistics streamStatistics;
     long blocksPer10MB = blocksPerMB * 10;
     ProgressCallback progress = new ProgressCallback(timer);
     try (FSDataOutputStream out = fs.create(hugefile,
         true,
         uploadBlockSize,
         progress)) {
+      try {
+        streamStatistics = getOutputStreamStatistics(out);
+      } catch (ClassCastException e) {
+        LOG.info("Wrapped output stream is not block stream: {}",
+            out.getWrappedStream());
+        streamStatistics = null;
+      }
 
       for (long block = 1; block <= blocks; block++) {
         out.write(data);
@@ -190,7 +199,8 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
         }
       }
       // now close the file
-      LOG.info("Closing file and completing write operation");
+      LOG.info("Closing stream {}", out);
+      LOG.info("Statistics : {}", streamStatistics);
       ContractTestUtils.NanoTimer closeTimer
           = new ContractTestUtils.NanoTimer();
       out.close();
@@ -201,6 +211,7 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
         filesizeMB, uploadBlockSize);
     logFSState();
     bandwidth(timer, filesize);
+    LOG.info("Statistics after stream closed: {}", streamStatistics);
     long putRequestCount = storageStatistics.getLong(putRequests);
     Long putByteCount = storageStatistics.getLong(putBytes);
     LOG.info("PUT {} bytes in {} operations; {} MB/operation",
@@ -214,7 +225,14 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
     S3AFileStatus status = fs.getFileStatus(hugefile);
     ContractTestUtils.assertIsFile(hugefile, status);
     assertEquals("File size in " + status, filesize, status.getLen());
-    progress.verifyNoFailures("Put file " + hugefile + " of size " + filesize);
+    if (progress != null) {
+      progress.verifyNoFailures("Put file " + hugefile
+          + " of size " + filesize);
+    }
+    if (streamStatistics != null) {
+      assertEquals("actively allocated blocks in " + streamStatistics,
+          0, streamStatistics.blocksActivelyAllocated());
+    }
   }
 
   /**
@@ -285,7 +303,9 @@ public abstract class AbstractSTestS3AHugeFiles extends S3AScaleTestBase {
   void assumeHugeFileExists() throws IOException {
     S3AFileSystem fs = getFileSystem();
     ContractTestUtils.assertPathExists(fs, "huge file not created", hugefile);
-    ContractTestUtils.assertIsFile(fs, hugefile);
+    FileStatus status = fs.getFileStatus(hugefile);
+    ContractTestUtils.assertIsFile(hugefile, status);
+    assertTrue("File " + hugefile + " is empty", status.getLen() > 0);
   }
 
   private void logFSState() {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[28/31] hadoop git commit: HADOOP-14119. Remove unused imports from GzipCodec.java. Contributed by Yiqun Lin.

Posted by st...@apache.org.
HADOOP-14119. Remove unused imports from GzipCodec.java. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9db2e0c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9db2e0c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9db2e0c8

Branch: refs/heads/HADOOP-13345
Commit: 9db2e0c8d4b29971da60e2628ff55b5dabed2f7b
Parents: fbfe86d
Author: Yiqun Lin <yq...@apache.org>
Authored: Mon Feb 27 18:39:14 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Mon Feb 27 18:39:14 2017 +0800

----------------------------------------------------------------------
 .../org/apache/hadoop/io/compress/GzipCodec.java | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9db2e0c8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
index d079412..11fcf60 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
@@ -18,18 +18,21 @@
 
 package org.apache.hadoop.io.compress;
 
-import java.io.*;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.util.zip.GZIPOutputStream;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.compress.DefaultCodec;
-import org.apache.hadoop.io.compress.zlib.*;
-import org.apache.hadoop.io.compress.zlib.ZlibDecompressor.ZlibDirectDecompressor;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
+import org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor;
+import org.apache.hadoop.io.compress.zlib.ZlibCompressor;
+import org.apache.hadoop.io.compress.zlib.ZlibDecompressor;
+import org.apache.hadoop.io.compress.zlib.ZlibFactory;
 
 /**
  * This class creates gzip compressors/decompressors. 




[06/31] hadoop git commit: YARN-6211. Synchronization improvement for moveApplicationAcrossQueues and updateApplicationPriority. Contributed by Bibin A Chundatt.

Posted by st...@apache.org.
YARN-6211. Synchronization improvement for moveApplicationAcrossQueues and updateApplicationPriority. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a207aa99
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a207aa99
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a207aa99

Branch: refs/heads/HADOOP-13345
Commit: a207aa9930e7ee4f10228e2db4b4e733794eb8ea
Parents: 13d4bcf
Author: Sunil G <su...@apache.org>
Authored: Thu Feb 23 14:19:07 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Thu Feb 23 14:19:07 2017 +0530

----------------------------------------------------------------------
 .../hadoop/yarn/server/resourcemanager/ClientRMService.java    | 6 ++++--
 .../hadoop/yarn/server/resourcemanager/RMAppManager.java       | 5 ++---
 2 files changed, 6 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a207aa99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
index 0c87ede..48bccfb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java
@@ -1228,7 +1228,8 @@ public class ClientRMService extends AbstractService implements
     }
 
     try {
-      this.rmAppManager.moveApplicationAcrossQueue(applicationId,
+      this.rmAppManager.moveApplicationAcrossQueue(
+          application.getApplicationId(),
           request.getTargetQueue());
     } catch (YarnException ex) {
       RMAuditLogger.logFailure(callerUGI.getShortUserName(),
@@ -1662,7 +1663,8 @@ public class ClientRMService extends AbstractService implements
     }
 
     try {
-      rmAppManager.updateApplicationPriority(callerUGI, applicationId,
+      rmAppManager.updateApplicationPriority(callerUGI,
+          application.getApplicationId(),
           newAppPriority);
     } catch (YarnException ex) {
       RMAuditLogger.logFailure(callerUGI.getShortUserName(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a207aa99/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index cc796e3..e211867 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -53,7 +53,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppRecoverEvent;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
@@ -612,7 +611,7 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
     RMApp app = this.rmContext.getRMApps().get(applicationId);
 
     synchronized (applicationId) {
-      if (app.isAppInCompletedStates()) {
+      if (app == null || app.isAppInCompletedStates()) {
         return;
       }
 
@@ -658,7 +657,7 @@ public class RMAppManager implements EventHandler<RMAppManagerEvent>,
     // 2. Update this information to state-store
     // 3. Perform real move operation and update in-memory data structures.
     synchronized (applicationId) {
-      if (app.isAppInCompletedStates()) {
+      if (app == null || app.isAppInCompletedStates()) {
         return;
       }
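
Both RMAppManager hunks above share one defensive shape: look the application up, synchronize on the application id, and re-check inside the lock that the app still exists and has not completed before mutating state; the ClientRMService hunks pass application.getApplicationId() so that every caller locks the same id instance. A minimal sketch of that shape, using simplified stand-ins for ApplicationId and RMApp rather than the actual ResourceManager classes:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class MoveAcrossQueuesSketch {

  /** Simplified stand-in for ApplicationId. */
  static final class AppId { }

  /** Simplified stand-in for RMApp. */
  static final class App {
    volatile boolean completed;
    volatile String queue;
  }

  private final Map<AppId, App> apps = new ConcurrentHashMap<>();

  /**
   * Same shape as the patched updateApplicationPriority /
   * moveApplicationAcrossQueue: callers pass the canonical id instance held
   * by the registered application (hence application.getApplicationId() in
   * the ClientRMService hunks) so that every caller locks the same object,
   * and the null/completed check is repeated inside the synchronized block.
   */
  public void moveAcrossQueues(AppId canonicalId, String targetQueue) {
    App app = apps.get(canonicalId);
    synchronized (canonicalId) {
      if (app == null || app.completed) {
        // The app finished or was removed between lookup and locking.
        return;
      }
      // 1. validate the move, 2. persist to the state store,
      // 3. update in-memory structures (elided in this sketch).
      app.queue = targetQueue;
    }
  }
}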
 




[15/31] hadoop git commit: HADOOP-13817. Add a finite shell command timeout to ShellBasedUnixGroupsMapping. (harsh)

Posted by st...@apache.org.
HADOOP-13817. Add a finite shell command timeout to ShellBasedUnixGroupsMapping. (harsh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8694deb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8694deb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8694deb

Branch: refs/heads/HADOOP-13345
Commit: e8694deb6ad180449f8ce6c1c8b4f84873c0587a
Parents: 50decd3
Author: Harsh J <ha...@cloudera.com>
Authored: Mon Nov 14 15:59:58 2016 +0530
Committer: Harsh J <ha...@cloudera.com>
Committed: Fri Feb 24 21:34:00 2017 +0530

----------------------------------------------------------------------
 .../fs/CommonConfigurationKeysPublic.java       |  15 +++
 .../security/ShellBasedUnixGroupsMapping.java   | 114 +++++++++++++---
 .../main/java/org/apache/hadoop/util/Shell.java |  19 ++-
 .../src/main/resources/core-default.xml         |  13 ++
 .../hadoop/security/TestGroupsCaching.java      |  19 +--
 .../TestShellBasedUnixGroupsMapping.java        | 135 ++++++++++++++++++-
 6 files changed, 277 insertions(+), 38 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8694deb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index f23dd51..e1feda1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -517,6 +517,21 @@ public class CommonConfigurationKeysPublic {
    * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
    * core-default.xml</a>
    */
+  public static final String HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS =
+      "hadoop.security.groups.shell.command.timeout";
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
+  public static final long
+          HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT =
+          0L;
+  /**
+   * @see
+   * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/core-default.xml">
+   * core-default.xml</a>
+   */
   public static final String  HADOOP_SECURITY_AUTHENTICATION =
     "hadoop.security.authentication";
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8694deb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
index 9b80be9..4146e7b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ShellBasedUnixGroupsMapping.java
@@ -18,17 +18,25 @@
 package org.apache.hadoop.security;
 
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.StringTokenizer;
+import java.util.concurrent.TimeUnit;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ExitCodeException;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A simple shell-based implementation of {@link GroupMappingServiceProvider} 
@@ -37,11 +45,28 @@ import org.apache.hadoop.util.Shell.ShellCommandExecutor;
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
-public class ShellBasedUnixGroupsMapping
+public class ShellBasedUnixGroupsMapping extends Configured
   implements GroupMappingServiceProvider {
-  
-  private static final Log LOG =
-    LogFactory.getLog(ShellBasedUnixGroupsMapping.class);
+
+  @VisibleForTesting
+  protected static final Logger LOG =
+      LoggerFactory.getLogger(ShellBasedUnixGroupsMapping.class);
+
+  private long timeout = 0L;
+  private static final List<String> EMPTY_GROUPS = new LinkedList<>();
+
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    if (conf != null) {
+      timeout = conf.getTimeDuration(
+          CommonConfigurationKeys.
+              HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS,
+          CommonConfigurationKeys.
+              HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT,
+          TimeUnit.SECONDS);
+    }
+  }
 
   @SuppressWarnings("serial")
   private static class PartialGroupNameException extends IOException {
@@ -98,7 +123,17 @@ public class ShellBasedUnixGroupsMapping
    */
   protected ShellCommandExecutor createGroupExecutor(String userName) {
     return new ShellCommandExecutor(
-        Shell.getGroupsForUserCommand(userName), null, null, 0L);
+        getGroupsForUserCommand(userName), null, null, timeout);
+  }
+
+  /**
+   * Returns just the shell command to be used to fetch a user's groups list.
+   * This is mainly separate to make some tests easier.
+   * @param userName The username that needs to be passed into the command built
+   * @return An appropriate shell command with arguments
+   */
+  protected String[] getGroupsForUserCommand(String userName) {
+    return Shell.getGroupsForUserCommand(userName);
   }
 
   /**
@@ -109,7 +144,17 @@ public class ShellBasedUnixGroupsMapping
    */
   protected ShellCommandExecutor createGroupIDExecutor(String userName) {
     return new ShellCommandExecutor(
-        Shell.getGroupsIDForUserCommand(userName), null, null, 0L);
+        getGroupsIDForUserCommand(userName), null, null, timeout);
+  }
+
+  /**
+   * Returns just the shell command to be used to fetch a user's group IDs list.
+   * This is mainly separate to make some tests easier.
+   * @param userName The username that needs to be passed into the command built
+   * @return An appropriate shell command with arguments
+   */
+  protected String[] getGroupsIDForUserCommand(String userName) {
+    return Shell.getGroupsIDForUserCommand(userName);
   }
 
   /**
@@ -133,8 +178,26 @@ public class ShellBasedUnixGroupsMapping
         groups = resolvePartialGroupNames(user, e.getMessage(),
             executor.getOutput());
       } catch (PartialGroupNameException pge) {
-        LOG.warn("unable to return groups for user " + user, pge);
-        return new LinkedList<>();
+        LOG.warn("unable to return groups for user {}", user, pge);
+        return EMPTY_GROUPS;
+      }
+    } catch (IOException ioe) {
+      // If its a shell executor timeout, indicate so in the message
+      // but treat the result as empty instead of throwing it up,
+      // similar to how partial resolution failures are handled above
+      if (executor.isTimedOut()) {
+        LOG.warn(
+            "Unable to return groups for user '{}' as shell group lookup " +
+            "command '{}' ran longer than the configured timeout limit of " +
+            "{} seconds.",
+            user,
+            Joiner.on(' ').join(executor.getExecString()),
+            timeout
+        );
+        return EMPTY_GROUPS;
+      } else {
+        // If its not an executor timeout, we should let the caller handle it
+        throw ioe;
       }
     }
 
@@ -196,7 +259,7 @@ public class ShellBasedUnixGroupsMapping
    * @param errMessage error message from the shell command
    * @param groupNames the incomplete list of group names
    * @return a list of resolved group names
-   * @throws PartialGroupNameException
+   * @throws PartialGroupNameException if the resolution fails or times out
    */
   private List<String> resolvePartialGroupNames(String userName,
       String errMessage, String groupNames) throws PartialGroupNameException {
@@ -212,21 +275,29 @@ public class ShellBasedUnixGroupsMapping
       throw new PartialGroupNameException("The user name '" + userName
           + "' is not found. " + errMessage);
     } else {
-      LOG.warn("Some group names for '" + userName + "' are not resolvable. "
-          + errMessage);
+      LOG.warn("Some group names for '{}' are not resolvable. {}",
+          userName, errMessage);
       // attempt to partially resolve group names
+      ShellCommandExecutor partialResolver = createGroupIDExecutor(userName);
       try {
-        ShellCommandExecutor exec2 = createGroupIDExecutor(userName);
-        exec2.execute();
-        return parsePartialGroupNames(groupNames, exec2.getOutput());
+        partialResolver.execute();
+        return parsePartialGroupNames(
+            groupNames, partialResolver.getOutput());
       } catch (ExitCodeException ece) {
         // If exception is thrown trying to get group id list,
         // something is terribly wrong, so give up.
-        throw new PartialGroupNameException("failed to get group id list for " +
-        "user '" + userName + "'", ece);
+        throw new PartialGroupNameException(
+            "failed to get group id list for user '" + userName + "'", ece);
       } catch (IOException ioe) {
-        throw new PartialGroupNameException("can't execute the shell command to"
-        + " get the list of group id for user '" + userName + "'", ioe);
+        String message =
+            "Can't execute the shell command to " +
+            "get the list of group id for user '" + userName + "'";
+        if (partialResolver.isTimedOut()) {
+          message +=
+              " because of the command taking longer than " +
+              "the configured timeout: " + timeout + " seconds";
+        }
+        throw new PartialGroupNameException(message, ioe);
       }
     }
   }
@@ -237,7 +308,8 @@ public class ShellBasedUnixGroupsMapping
    * @param groupNames a string representing the user's group names
    * @return a linked list of group names
    */
-  private List<String> resolveFullGroupNames(String groupNames) {
+  @VisibleForTesting
+  protected List<String> resolveFullGroupNames(String groupNames) {
     StringTokenizer tokenizer =
         new StringTokenizer(groupNames, Shell.TOKEN_SEPARATOR_REGEX);
     List<String> groups = new LinkedList<String>();
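
Pulling the scattered hunks together, the new getGroups() handling of a timed-out lookup looks roughly like the sketch below: ShellCommandExecutor.isTimedOut() distinguishes a timeout, which is logged and degraded to an empty group list, from other I/O failures, which still propagate. Output parsing and the partial group-name fallback are simplified here.

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import org.apache.hadoop.util.Shell.ShellCommandExecutor;

public class GroupLookupTimeoutSketch {

  /**
   * Runs a group-lookup executor built with a finite timeout (as
   * createGroupExecutor(user) now does) and applies the new error handling:
   * a timed-out command degrades to an empty group list, anything else is
   * rethrown for the caller to handle.
   */
  static List<String> runGroupLookup(ShellCommandExecutor executor, String user)
      throws IOException {
    try {
      executor.execute();
    } catch (IOException ioe) {
      if (executor.isTimedOut()) {
        // The command ran longer than the configured timeout and was killed:
        // warn and treat the result as "no groups" rather than failing.
        System.err.println("Group lookup for '" + user
            + "' ran longer than the configured timeout limit");
        return Collections.emptyList();
      }
      // Not a timeout: surface the failure to the caller.
      throw ioe;
    }
    // The real class tokenizes with StringTokenizer/Shell.TOKEN_SEPARATOR_REGEX
    // and also attempts partial group-name resolution; simplified here.
    String out = executor.getOutput().trim();
    if (out.isEmpty()) {
      return Collections.emptyList();
    }
    return Arrays.asList(out.split("\\s+"));
  }
}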

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8694deb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
index ca59b0e..bfb8183 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
@@ -955,7 +955,15 @@ public abstract class Shell {
             line = errReader.readLine();
           }
         } catch(IOException ioe) {
-          LOG.warn("Error reading the error stream", ioe);
+          // Its normal to observe a "Stream closed" I/O error on
+          // command timeouts destroying the underlying process
+          // so only log a WARN if the command didn't time out
+          if (!isTimedOut()) {
+            LOG.warn("Error reading the error stream", ioe);
+          } else {
+            LOG.debug("Error reading the error stream due to shell "
+                + "command timeout", ioe);
+          }
         }
       }
     };
@@ -1181,6 +1189,15 @@ public abstract class Shell {
     }
 
     /**
+     * Returns the timeout value set for the executor's sub-commands.
+     * @return The timeout value in seconds
+     */
+    @VisibleForTesting
+    public long getTimeoutInterval() {
+      return timeOutInterval;
+    }
+
+    /**
      * Execute the shell command.
      * @throws IOException if the command fails, or if the command is
      * not well constructed.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8694deb/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 1941bec..d8136ee 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -188,6 +188,19 @@
 </property>
 
 <property>
+  <name>hadoop.security.groups.shell.command.timeout</name>
+  <value>0s</value>
+  <description>
+    Used by the ShellBasedUnixGroupsMapping class, this property controls how
+    long to wait for the underlying shell command that is run to fetch groups.
+    Expressed in seconds (e.g. 10s, 1m, etc.), if the running command takes
+    longer than the value configured, the command is aborted and the groups
+    resolver would return a result of no groups found. A value of 0s (default)
+    would mean an infinite wait (i.e. wait until the command exits on its own).
+  </description>
+</property>
+
+<property>
   <name>hadoop.security.group.mapping.ldap.connection.timeout.ms</name>
   <value>60000</value>
   <description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8694deb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
index 58c2d1a..930c45e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestGroupsCaching.java
@@ -50,7 +50,7 @@ import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 
 
 public class TestGroupsCaching {
-  public static final Log LOG = LogFactory.getLog(TestGroupsCaching.class);
+  public static final Log TESTLOG = LogFactory.getLog(TestGroupsCaching.class);
   private static String[] myGroups = {"grp1", "grp2"};
   private Configuration conf;
 
@@ -76,7 +76,7 @@ public class TestGroupsCaching {
 
     @Override
     public List<String> getGroups(String user) throws IOException {
-      LOG.info("Getting groups for " + user);
+      TESTLOG.info("Getting groups for " + user);
       delayIfNecessary();
 
       requestCount++;
@@ -115,18 +115,18 @@ public class TestGroupsCaching {
 
     @Override
     public void cacheGroupsRefresh() throws IOException {
-      LOG.info("Cache is being refreshed.");
+      TESTLOG.info("Cache is being refreshed.");
       clearBlackList();
       return;
     }
 
     public static void clearBlackList() throws IOException {
-      LOG.info("Clearing the blacklist");
+      TESTLOG.info("Clearing the blacklist");
       blackList.clear();
     }
 
     public static void clearAll() throws IOException {
-      LOG.info("Resetting FakeGroupMapping");
+      TESTLOG.info("Resetting FakeGroupMapping");
       blackList.clear();
       allGroups.clear();
       requestCount = 0;
@@ -137,12 +137,12 @@ public class TestGroupsCaching {
 
     @Override
     public void cacheGroupsAdd(List<String> groups) throws IOException {
-      LOG.info("Adding " + groups + " to groups.");
+      TESTLOG.info("Adding " + groups + " to groups.");
       allGroups.addAll(groups);
     }
 
     public static void addToBlackList(String user) throws IOException {
-      LOG.info("Adding " + user + " to the blacklist");
+      TESTLOG.info("Adding " + user + " to the blacklist");
       blackList.add(user);
     }
 
@@ -226,11 +226,12 @@ public class TestGroupsCaching {
 
     // ask for a negative entry
     try {
-      LOG.error("We are not supposed to get here." + groups.getGroups("user1").toString());
+      TESTLOG.error("We are not supposed to get here."
+          + groups.getGroups("user1").toString());
       fail();
     } catch (IOException ioe) {
       if(!ioe.getMessage().startsWith("No groups found")) {
-        LOG.error("Got unexpected exception: " + ioe.getMessage());
+        TESTLOG.error("Got unexpected exception: " + ioe.getMessage());
         fail();
       }
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8694deb/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
index f28cc62..6d9ea08 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedUnixGroupsMapping.java
@@ -22,9 +22,15 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ExitCodeException;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.junit.Test;
+
 import static org.junit.Assert.*;
 import static org.mockito.Mockito.doNothing;
 import static org.mockito.Mockito.doThrow;
@@ -32,9 +38,13 @@ import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
 public class TestShellBasedUnixGroupsMapping {
-  private static final Log LOG =
+  private static final Log TESTLOG =
       LogFactory.getLog(TestShellBasedUnixGroupsMapping.class);
 
+  private final GenericTestUtils.LogCapturer shellMappingLog =
+      GenericTestUtils.LogCapturer.captureLogs(
+          ShellBasedUnixGroupsMapping.LOG);
+
   private class TestGroupUserNotExist
       extends ShellBasedUnixGroupsMapping {
     /**
@@ -55,7 +65,7 @@ public class TestShellBasedUnixGroupsMapping {
 
         when(executor.getOutput()).thenReturn("");
       } catch (IOException e) {
-        LOG.warn(e.getMessage());
+        TESTLOG.warn(e.getMessage());
       }
       return executor;
     }
@@ -90,7 +100,7 @@ public class TestShellBasedUnixGroupsMapping {
 
         when(executor.getOutput()).thenReturn("9999\n9999 abc def");
       } catch (IOException e) {
-        LOG.warn(e.getMessage());
+        TESTLOG.warn(e.getMessage());
       }
       return executor;
     }
@@ -133,7 +143,7 @@ public class TestShellBasedUnixGroupsMapping {
         doNothing().when(executor).execute();
         when(executor.getOutput()).thenReturn("23\n23 groupname zzz");
       } catch (IOException e) {
-        LOG.warn(e.getMessage());
+        TESTLOG.warn(e.getMessage());
       }
       return executor;
     }
@@ -146,7 +156,7 @@ public class TestShellBasedUnixGroupsMapping {
         doNothing().when(executor).execute();
         when(executor.getOutput()).thenReturn("111\n111 112 113");
       } catch (IOException e) {
-        LOG.warn(e.getMessage());
+        TESTLOG.warn(e.getMessage());
       }
       return executor;
     }
@@ -179,7 +189,7 @@ public class TestShellBasedUnixGroupsMapping {
         doNothing().when(executor).execute();
         when(executor.getOutput()).thenReturn("abc\ndef abc hij");
       } catch (IOException e) {
-        LOG.warn(e.getMessage());
+        TESTLOG.warn(e.getMessage());
       }
       return executor;
     }
@@ -192,7 +202,7 @@ public class TestShellBasedUnixGroupsMapping {
         doNothing().when(executor).execute();
         when(executor.getOutput()).thenReturn("1\n1 2 3");
       } catch (IOException e) {
-        LOG.warn(e.getMessage());
+        TESTLOG.warn(e.getMessage());
       }
       return executor;
     }
@@ -208,6 +218,117 @@ public class TestShellBasedUnixGroupsMapping {
     assertTrue(groups.contains("def"));
     assertTrue(groups.contains("hij"));
   }
+
+  private static class TestDelayedGroupCommand
+      extends ShellBasedUnixGroupsMapping {
+
+    private Long timeoutSecs = 2L;
+
+    TestDelayedGroupCommand() {
+      super();
+    }
+
+    @Override
+    protected String[] getGroupsForUserCommand(String userName) {
+      // Sleeps 2 seconds when executed and writes no output
+      if (Shell.WINDOWS) {
+        return new String[]{"timeout", timeoutSecs.toString()};
+      }
+      return new String[]{"sleep", timeoutSecs.toString()};
+    }
+
+    @Override
+    protected String[] getGroupsIDForUserCommand(String userName) {
+      return getGroupsForUserCommand(userName);
+    }
+  }
+
+  @Test(timeout=4000)
+  public void testFiniteGroupResolutionTime() throws Exception {
+    Configuration conf = new Configuration();
+    String userName = "foobarnonexistinguser";
+    String commandTimeoutMessage =
+        "ran longer than the configured timeout limit";
+    long testTimeout = 1L;
+
+    // Test a 1 second max-runtime timeout
+    conf.setLong(
+        CommonConfigurationKeys.
+            HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS,
+        testTimeout);
+
+    TestDelayedGroupCommand mapping =
+        ReflectionUtils.newInstance(TestDelayedGroupCommand.class, conf);
+
+    ShellCommandExecutor executor = mapping.createGroupExecutor(userName);
+    assertEquals(
+        "Expected the group names executor to carry the configured timeout",
+        testTimeout,
+        executor.getTimeoutInterval());
+
+    executor = mapping.createGroupIDExecutor(userName);
+    assertEquals(
+        "Expected the group ID executor to carry the configured timeout",
+        testTimeout,
+        executor.getTimeoutInterval());
+
+    assertEquals(
+        "Expected no groups to be returned given a shell command timeout",
+        0,
+        mapping.getGroups(userName).size());
+    assertTrue(
+        "Expected the logs to carry " +
+            "a message about command timeout but was: " +
+            shellMappingLog.getOutput(),
+        shellMappingLog.getOutput().contains(commandTimeoutMessage));
+    shellMappingLog.clearOutput();
+
+    // Test also the parent Groups framework for expected behaviour
+    conf.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
+        TestDelayedGroupCommand.class,
+        GroupMappingServiceProvider.class);
+    Groups groups = new Groups(conf);
+    try {
+      groups.getGroups(userName);
+      fail(
+          "The groups framework call should " +
+              "have failed with a command timeout");
+    } catch (IOException e) {
+      assertTrue(
+          "Expected the logs to carry " +
+              "a message about command timeout but was: " +
+              shellMappingLog.getOutput(),
+          shellMappingLog.getOutput().contains(commandTimeoutMessage));
+    }
+    shellMappingLog.clearOutput();
+
+    // Test the no-timeout (default) configuration
+    conf = new Configuration();
+    long defaultTimeout =
+        CommonConfigurationKeys.
+            HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS_DEFAULT;
+
+    mapping =
+        ReflectionUtils.newInstance(TestDelayedGroupCommand.class, conf);
+
+    executor = mapping.createGroupExecutor(userName);
+    assertEquals(
+        "Expected the group names executor to carry the default timeout",
+        defaultTimeout,
+        executor.getTimeoutInterval());
+
+    executor = mapping.createGroupIDExecutor(userName);
+    assertEquals(
+        "Expected the group ID executor to carry the default timeout",
+        defaultTimeout,
+        executor.getTimeoutInterval());
+
+    mapping.getGroups(userName);
+    assertFalse(
+        "Didn't expect a timeout of command in execution but logs carry it: " +
+            shellMappingLog.getOutput(),
+        shellMappingLog.getOutput().contains(commandTimeoutMessage));
+  }
 }
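
A short usage sketch for the hadoop.security.groups.shell.command.timeout property added to core-default.xml above: set a finite timeout on the Configuration and build the mapping through ReflectionUtils so that setConf() reads the value, the same route the new test takes. The 10s value and the printed output are examples only.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
import org.apache.hadoop.util.ReflectionUtils;

public class GroupTimeoutConfigSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Abort the group lookup command after 10 seconds (example value);
    // the default of 0s keeps the old behaviour of waiting indefinitely.
    conf.set(CommonConfigurationKeys
        .HADOOP_SECURITY_GROUP_SHELL_COMMAND_TIMEOUT_SECS, "10s");

    // newInstance() calls setConf(), which is where the key is read.
    ShellBasedUnixGroupsMapping mapping = ReflectionUtils.newInstance(
        ShellBasedUnixGroupsMapping.class, conf);

    // On a timeout the mapping logs a warning and returns an empty list
    // instead of blocking forever.
    List<String> groups = mapping.getGroups(System.getProperty("user.name"));
    System.out.println("groups: " + groups);
  }
}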
 
 




[10/31] hadoop git commit: YARN-6222. TestFairScheduler.testReservationMetrics is flaky. (Yufei Gu via kasha)

Posted by st...@apache.org.
YARN-6222. TestFairScheduler.testReservationMetrics is flaky. (Yufei Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/694e680d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/694e680d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/694e680d

Branch: refs/heads/HADOOP-13345
Commit: 694e680d20dc07f634b539537021b09d9316601c
Parents: 159d6c5
Author: Karthik Kambatla <ka...@cloudera.com>
Authored: Thu Feb 23 15:21:52 2017 -0800
Committer: Karthik Kambatla <ka...@cloudera.com>
Committed: Thu Feb 23 15:21:52 2017 -0800

----------------------------------------------------------------------
 .../yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java   | 3 +--
 .../server/resourcemanager/scheduler/fair/TestFairScheduler.java  | 1 +
 2 files changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/694e680d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
index 59bde5b..d0e0961 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
@@ -407,8 +407,7 @@ public class FSLeafQueue extends FSQueue {
     readLock.lock();
     try {
       for (FSAppAttempt app : runnableApps) {
-        Resource pending = app.getAppAttemptResourceUsage().getPending();
-        if (!Resources.isNone(pending) &&
+        if (!Resources.isNone(app.getPendingDemand()) &&
             (assignment || app.shouldCheckForStarvation())) {
           pendingForResourceApps.add(app);
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/694e680d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 62430bf..31dd7fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -5079,6 +5079,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     scheduler.handle(updateEvent);
 
     createSchedulingRequestExistingApplication(1024, 1, 1, appAttemptId);
+    scheduler.update();
     scheduler.handle(updateEvent);
 
     // no reservation yet




[11/31] hadoop git commit: HDFS-11426. Refactor EC CLI to be similar to storage policies CLI.

Posted by st...@apache.org.
HDFS-11426. Refactor EC CLI to be similar to storage policies CLI.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/132f758e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/132f758e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/132f758e

Branch: refs/heads/HADOOP-13345
Commit: 132f758e3dbe3a3f11c0d9b2de8edbee594fb475
Parents: 694e680
Author: Andrew Wang <wa...@apache.org>
Authored: Thu Feb 23 16:00:11 2017 -0800
Committer: Andrew Wang <wa...@apache.org>
Committed: Thu Feb 23 16:00:11 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/cli/CLITestHelper.java    |  15 +-
 .../hadoop-hdfs/src/main/bin/hdfs               |   2 +-
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   | 320 +++++++++++++++++++
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |   2 +-
 .../hadoop/hdfs/tools/erasurecode/ECCli.java    |  62 ----
 .../hdfs/tools/erasurecode/ECCommand.java       | 248 --------------
 .../src/site/markdown/HDFSErasureCoding.md      |  16 +-
 .../hadoop/cli/CLITestCmdErasureCoding.java     |   4 +-
 .../cli/util/ErasureCodingCliCmdExecutor.java   |   6 +-
 .../test/resources/testErasureCodingConf.xml    | 135 ++++----
 10 files changed, 405 insertions(+), 405 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
index b08af16..89d4e30 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java
@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.cli;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.cli.util.*;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
 import org.apache.hadoop.conf.Configuration;
@@ -28,6 +26,9 @@ import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.xml.sax.Attributes;
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.DefaultHandler;
@@ -41,9 +42,9 @@ import java.util.ArrayList;
  * Tests for the Command Line Interface (CLI)
  */
 public class CLITestHelper {
-  private static final Log LOG =
-    LogFactory.getLog(CLITestHelper.class.getName());
-  
+  private static final Logger LOG = LoggerFactory.getLogger(CLITestHelper
+      .class);
+
   // In this mode, it runs the command and compares the actual output
   // with the expected output  
   public static final String TESTMODE_TEST = "test"; // Run the tests
@@ -62,7 +63,6 @@ public class CLITestHelper {
   // Storage for tests read in from the config file
   protected ArrayList<CLITestData> testsFromConfigFile = null;
   protected ArrayList<ComparatorData> testComparators = null;
-  protected String thisTestCaseName = null;
   protected ComparatorData comparatorData = null;
   protected Configuration conf = null;
   protected String clitestDataDir = null;
@@ -80,7 +80,8 @@ public class CLITestHelper {
         p.parse(testConfigFile, getConfigParser());
         success = true;
       } catch (Exception e) {
-        LOG.info("File: " + testConfigFile + " not found");
+        LOG.info("Exception while reading test config file {}:",
+            testConfigFile, e);
         success = false;
       }
       assertTrue("Error reading test config file", success);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 617adbe..cf6d94a 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -130,7 +130,7 @@ function hdfscmd_case
       exit 0
     ;;
     ec)
-      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.erasurecode.ECCli
+      HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.ECAdmin
     ;;
     fetchdt)
       HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
new file mode 100644
index 0000000..29c65b1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
@@ -0,0 +1,320 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.tools.TableListing;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * CLI for the erasure code encoding operations.
+ */
+@InterfaceAudience.Private
+public class ECAdmin extends Configured implements Tool {
+
+  public static final String NAME = "ec";
+
+  public static void main(String[] args) throws Exception {
+    final ECAdmin admin = new ECAdmin(new Configuration());
+    int res = ToolRunner.run(admin, args);
+    System.exit(res);
+  }
+
+  public ECAdmin(Configuration conf) {
+    super(conf);
+  }
+
+  @Override
+  public int run(String[] args) throws Exception {
+    if (args.length == 0) {
+      AdminHelper.printUsage(false, NAME, COMMANDS);
+      ToolRunner.printGenericCommandUsage(System.err);
+      return 1;
+    }
+    final AdminHelper.Command command = AdminHelper.determineCommand(args[0],
+        COMMANDS);
+    if (command == null) {
+      System.err.println("Can't understand command '" + args[0] + "'");
+      if (!args[0].startsWith("-")) {
+        System.err.println("Command names must start with dashes.");
+      }
+      AdminHelper.printUsage(false, NAME, COMMANDS);
+      ToolRunner.printGenericCommandUsage(System.err);
+      return 1;
+    }
+    final List<String> argsList = new LinkedList<>();
+    argsList.addAll(Arrays.asList(args).subList(1, args.length));
+    try {
+      return command.run(getConf(), argsList);
+    } catch (IllegalArgumentException e) {
+      System.err.println(AdminHelper.prettifyException(e));
+      return -1;
+    }
+  }
+
+  /** Command to list the set of available erasure coding policies */
+  private static class ListECPoliciesCommand
+      implements AdminHelper.Command {
+    @Override
+    public String getName() {
+      return "-listPolicies";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName() + "]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      return getShortUsage() + "\n" +
+          "Get the list of supported erasure coding policies.\n";
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      if (args.size() > 0) {
+        System.err.println(getName() + ": Too many arguments");
+        return 1;
+      }
+
+      final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+      try {
+        Collection<ErasureCodingPolicy> policies =
+            dfs.getAllErasureCodingPolicies();
+        System.out.println("Erasure Coding Policies:");
+        for (ErasureCodingPolicy policy : policies) {
+          if (policy != null) {
+            System.out.println("\t" + policy.getName());
+          }
+        }
+      } catch (IOException e) {
+        System.err.println(AdminHelper.prettifyException(e));
+        return 2;
+      }
+      return 0;
+    }
+  }
+
+  /** Command to get the erasure coding policy for a file or directory */
+  private static class GetECPolicyCommand implements AdminHelper.Command {
+    @Override
+    public String getName() {
+      return "-getPolicy";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName() + " -path <path>]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      final TableListing listing = AdminHelper.getOptionDescriptionListing();
+      listing.addRow("<path>",
+          "The path of the file/directory for getting the erasure coding " +
+              "policy");
+      return getShortUsage() + "\n" +
+          "Get the erasure coding policy of a file/directory.\n\n" +
+          listing.toString();
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      final String path = StringUtils.popOptionWithArgument("-path", args);
+      if (path == null) {
+        System.err.println("Please specify the path with -path.\nUsage: " +
+            getLongUsage());
+        return 1;
+      }
+
+      if (args.size() > 0) {
+        System.err.println(getName() + ": Too many arguments");
+        return 1;
+      }
+
+      final Path p = new Path(path);
+      final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
+      try {
+        ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(p);
+        if (ecPolicy != null) {
+          System.out.println(ecPolicy.getName());
+        } else {
+          System.out.println("The erasure coding policy of " + path + " is " +
+              "unspecified");
+        }
+      } catch (Exception e) {
+        System.err.println(AdminHelper.prettifyException(e));
+        return 2;
+      }
+      return 0;
+    }
+  }
+
+  /** Command to set the erasure coding policy to a file/directory */
+  private static class SetECPolicyCommand implements AdminHelper.Command {
+    @Override
+    public String getName() {
+      return "-setPolicy";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName() + " -path <path> -policy <policy>]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      TableListing listing = AdminHelper.getOptionDescriptionListing();
+      listing.addRow("<path>", "The path of the file/directory to set " +
+          "the erasure coding policy");
+      listing.addRow("<policy>", "The name of the erasure coding policy");
+      return getShortUsage() + "\n" +
+          "Set the erasure coding policy for a file/directory.\n\n" +
+          listing.toString();
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      final String path = StringUtils.popOptionWithArgument("-path", args);
+      if (path == null) {
+        System.err.println("Please specify the path for setting the EC " +
+            "policy.\nUsage: " + getLongUsage());
+        return 1;
+      }
+
+      final String ecPolicyName = StringUtils.popOptionWithArgument("-policy",
+          args);
+      if (ecPolicyName == null) {
+        System.err.println("Please specify the policy name.\nUsage: " +
+            getLongUsage());
+        return 1;
+      }
+
+      if (args.size() > 0) {
+        System.err.println(getName() + ": Too many arguments");
+        return 1;
+      }
+
+      final Path p = new Path(path);
+      final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
+      try {
+        ErasureCodingPolicy ecPolicy = null;
+        ErasureCodingPolicy[] ecPolicies =
+            dfs.getClient().getErasureCodingPolicies();
+        for (ErasureCodingPolicy policy : ecPolicies) {
+          if (ecPolicyName.equals(policy.getName())) {
+            ecPolicy = policy;
+            break;
+          }
+        }
+        if (ecPolicy == null) {
+          StringBuilder sb = new StringBuilder();
+          sb.append("Policy '");
+          sb.append(ecPolicyName);
+          sb.append("' does not match any of the supported policies.");
+          sb.append(" Please select any one of ");
+          List<String> ecPolicyNames = new ArrayList<String>();
+          for (ErasureCodingPolicy policy : ecPolicies) {
+            ecPolicyNames.add(policy.getName());
+          }
+          sb.append(ecPolicyNames);
+          System.err.println(sb.toString());
+          return 3;
+        }
+        dfs.setErasureCodingPolicy(p, ecPolicy);
+        System.out.println("Set erasure coding policy " + ecPolicyName +
+            " on " + path);
+      } catch (Exception e) {
+        System.err.println(AdminHelper.prettifyException(e));
+        return 2;
+      }
+      return 0;
+    }
+  }
+
+  /** Command to unset the erasure coding policy set for a file/directory */
+  private static class UnsetECPolicyCommand
+      implements AdminHelper.Command {
+
+    @Override
+    public String getName() {
+      return "-unsetPolicy";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName() + " -path <path>]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      TableListing listing = AdminHelper.getOptionDescriptionListing();
+      listing.addRow("<path>", "The path of the directory "
+          + "from which the erasure coding policy will be unset.");
+      return getShortUsage() + "\n"
+          + "Unset the erasure coding policy for a directory.\n\n"
+          + listing.toString();
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      final String path = StringUtils.popOptionWithArgument("-path", args);
+      if (path == null) {
+        System.err.println("Please specify a path.\nUsage: " + getLongUsage());
+        return 1;
+      }
+
+      if (args.size() > 0) {
+        System.err.println(getName() + ": Too many arguments");
+        return 1;
+      }
+
+      final Path p = new Path(path);
+      final DistributedFileSystem dfs = AdminHelper.getDFS(p.toUri(), conf);
+      try {
+        dfs.unsetErasureCodingPolicy(p);
+        System.out.println("Unset erasure coding policy from " + path);
+      } catch (Exception e) {
+        System.err.println(AdminHelper.prettifyException(e));
+        return 2;
+      }
+      return 0;
+    }
+  }
+
+  private static final AdminHelper.Command[] COMMANDS = {
+      new ListECPoliciesCommand(),
+      new GetECPolicyCommand(),
+      new SetECPolicyCommand(),
+      new UnsetECPolicyCommand()
+  };
+}
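
For quick reference, a hedged sketch of driving the new ECAdmin tool programmatically through ToolRunner, mirroring its own main(); the /data/warm path is an example only, and the command names used are the ones added above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.ECAdmin;
import org.apache.hadoop.util.ToolRunner;

public class ECAdminSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    ECAdmin admin = new ECAdmin(conf);

    // Equivalent to "hdfs ec -listPolicies" with the new bin/hdfs mapping.
    int rc = ToolRunner.run(admin, new String[] {"-listPolicies"});

    // Equivalent to "hdfs ec -getPolicy -path /data/warm" (example path).
    if (rc == 0) {
      rc = ToolRunner.run(admin, new String[] {"-getPolicy", "-path", "/data/warm"});
    }
    System.exit(rc);
  }
}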

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index f0643b2..9c7d048 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -143,7 +143,7 @@ public class StoragePolicyAdmin extends Configured implements Tool {
     public int run(Configuration conf, List<String> args) throws IOException {
       final String path = StringUtils.popOptionWithArgument("-path", args);
       if (path == null) {
-        System.err.println("Please specify the path with -path.\nUsage:" +
+        System.err.println("Please specify the path with -path.\nUsage: " +
             getLongUsage());
         return 1;
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCli.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCli.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCli.java
deleted file mode 100644
index 89dd4ee..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCli.java
+++ /dev/null
@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdfs.tools.erasurecode;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.shell.CommandFactory;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.util.ToolRunner;
-
-import java.io.IOException;
-
-/**
- * CLI for the erasure code encoding operations.
- */
-@InterfaceAudience.Private
-public class ECCli extends FsShell {
-
-  private final static String usagePrefix =
-      "Usage: hdfs ec [generic options]";
-
-  @Override
-  protected String getUsagePrefix() {
-    return usagePrefix;
-  }
-
-  @Override
-  protected void init() throws IOException {
-    getConf().setQuietMode(true);
-    if (commandFactory == null) {
-      commandFactory = new CommandFactory(getConf());
-      commandFactory.addObject(getHelp(), "-help");
-      registerCommands(commandFactory);
-    }
-  }
-
-  @Override
-  protected void registerCommands(CommandFactory factory) {
-    factory.registerCommands(ECCommand.class);
-  }
-
-  public static void main(String[] args) throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    int res = ToolRunner.run(conf, new ECCli(), args);
-    System.exit(res);
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java
deleted file mode 100644
index fc732e0..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/erasurecode/ECCommand.java
+++ /dev/null
@@ -1,248 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdfs.tools.erasurecode;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.shell.Command;
-import org.apache.hadoop.fs.shell.CommandFactory;
-import org.apache.hadoop.fs.shell.PathData;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * Erasure Coding CLI commands
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public abstract class ECCommand extends Command {
-
-  public static void registerCommands(CommandFactory factory) {
-    // Register all commands of Erasure CLI, with a '-' at the beginning in name
-    // of the command.
-    factory.addClass(SetECPolicyCommand.class, "-" + SetECPolicyCommand.NAME);
-    factory.addClass(GetECPolicyCommand.class, "-"
-        + GetECPolicyCommand.NAME);
-    factory.addClass(UnsetECPolicyCommand.class, "-"
-        + UnsetECPolicyCommand.NAME);
-    factory.addClass(ListPolicies.class, "-" + ListPolicies.NAME);
-  }
-
-  @Override
-  public String getCommandName() {
-    return getName();
-  }
-
-  @Override
-  protected void run(Path path) throws IOException {
-    throw new RuntimeException("Not suppose to get here");
-  }
-
-  @Deprecated
-  @Override
-  public int runAll() {
-    return run(args);
-  }
-
-  @Override
-  protected void processPath(PathData item) throws IOException {
-    if (!(item.fs instanceof DistributedFileSystem)) {
-      throw new UnsupportedActionException(
-          "Erasure commands are only supported for the HDFS paths");
-    }
-  }
-
-  /**
-   * A command to set the erasure coding policy for a directory, with the name
-   * of the policy.
-   */
-  static class SetECPolicyCommand extends ECCommand {
-    public static final String NAME = "setPolicy";
-    public static final String USAGE = "[-p <policyName>] <path>";
-    public static final String DESCRIPTION = 
-        "Set a specified erasure coding policy to a directory\n"
-        + "Options :\n"
-        + "  -p <policyName> : erasure coding policy name to encode files. "
-        + "If not passed the default policy will be used\n"
-        + "  <path>  : Path to a directory. Under this directory "
-        + "files will be encoded using specified erasure coding policy";
-    private String ecPolicyName;
-    private ErasureCodingPolicy ecPolicy = null;
-
-    @Override
-    protected void processOptions(LinkedList<String> args) throws IOException {
-      ecPolicyName = StringUtils.popOptionWithArgument("-p", args);
-      if (args.isEmpty()) {
-        throw new HadoopIllegalArgumentException("<path> is missing");
-      }
-      if (args.size() > 1) {
-        throw new HadoopIllegalArgumentException("Too many arguments");
-      }
-    }
-
-    @Override
-    protected void processPath(PathData item) throws IOException {
-      super.processPath(item);
-      DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
-      try {
-        if (ecPolicyName != null) {
-          ErasureCodingPolicy[] ecPolicies = dfs.getClient().getErasureCodingPolicies();
-          for (ErasureCodingPolicy ecPolicy : ecPolicies) {
-            if (ecPolicyName.equals(ecPolicy.getName())) {
-              this.ecPolicy = ecPolicy;
-              break;
-            }
-          }
-          if (ecPolicy == null) {
-            StringBuilder sb = new StringBuilder();
-            sb.append("Policy '");
-            sb.append(ecPolicyName);
-            sb.append("' does not match any of the supported policies.");
-            sb.append(" Please select any one of ");
-            List<String> ecPolicyNames = new ArrayList<String>();
-            for (ErasureCodingPolicy ecPolicy : ecPolicies) {
-              ecPolicyNames.add(ecPolicy.getName());
-            }
-            sb.append(ecPolicyNames);
-            throw new HadoopIllegalArgumentException(sb.toString());
-          }
-        }
-        dfs.setErasureCodingPolicy(item.path, ecPolicy);
-        out.println("EC policy set successfully at " + item.path);
-      } catch (IOException e) {
-        throw new IOException("Unable to set EC policy for the path "
-            + item.path + ". " + e.getMessage());
-      }
-    }
-  }
-
-  /**
-   * Get the erasure coding policy of a file or directory
-   */
-  static class GetECPolicyCommand extends ECCommand {
-    public static final String NAME = "getPolicy";
-    public static final String USAGE = "<path>";
-    public static final String DESCRIPTION =
-        "Get erasure coding policy information about at specified path\n";
-
-    @Override
-    protected void processOptions(LinkedList<String> args) throws IOException {
-      if (args.isEmpty()) {
-        throw new HadoopIllegalArgumentException("<path> is missing");
-      }
-      if (args.size() > 1) {
-        throw new HadoopIllegalArgumentException("Too many arguments");
-      }
-    }
-
-    @Override
-    protected void processPath(PathData item) throws IOException {
-      super.processPath(item);
-      DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
-      try {
-        ErasureCodingPolicy ecPolicy = dfs.getErasureCodingPolicy(item.path);
-        if (ecPolicy != null) {
-          out.println(ecPolicy.toString());
-        } else {
-          out.println("Path " + item.path + " is not erasure coded.");
-        }
-      } catch (IOException e) {
-        throw new IOException("Unable to get EC policy for the path "
-            + item.path + ". " + e.getMessage());
-      }
-    }
-  }
-
-  /**
-   * List all supported erasure coding policies
-   */
-  static class ListPolicies extends ECCommand {
-    public static final String NAME = "listPolicies";
-    public static final String USAGE = "";
-    public static final String DESCRIPTION = 
-        "Get the list of erasure coding policies supported\n";
-
-    @Override
-    protected void processOptions(LinkedList<String> args) throws IOException {
-      if (!args.isEmpty()) {
-        throw new HadoopIllegalArgumentException("Too many parameters");
-      }
-
-      FileSystem fs = FileSystem.get(getConf());
-      if (fs instanceof DistributedFileSystem == false) {
-        throw new UnsupportedActionException(
-            "Erasure commands are only supported for the HDFS");
-      }
-      DistributedFileSystem dfs = (DistributedFileSystem) fs;
-
-      ErasureCodingPolicy[] ecPolicies = dfs.getClient().getErasureCodingPolicies();
-      StringBuilder sb = new StringBuilder();
-      int i = 0;
-      while (i < ecPolicies.length) {
-        ErasureCodingPolicy ecPolicy = ecPolicies[i];
-        sb.append(ecPolicy.getName());
-        i++;
-        if (i < ecPolicies.length) {
-          sb.append(", ");
-        }
-      }
-      out.println(sb.toString());
-    }
-  }
-
-  /**
-   * Unset the erasure coding policy from a directory.
-   */
-  static class UnsetECPolicyCommand extends ECCommand {
-    public static final String NAME = "unsetPolicy";
-    public static final String USAGE = "<path>";
-    public static final String DESCRIPTION =
-        "Unset erasure coding policy from a directory\n";
-
-    @Override
-    protected void processOptions(LinkedList<String> args) throws IOException {
-      if (args.isEmpty()) {
-        throw new HadoopIllegalArgumentException("<path> is missing");
-      }
-      if (args.size() > 1) {
-        throw new HadoopIllegalArgumentException("Too many arguments");
-      }
-    }
-
-    @Override
-    protected void processPath(PathData item) throws IOException {
-      super.processPath(item);
-      DistributedFileSystem dfs = (DistributedFileSystem) item.fs;
-      try {
-        dfs.unsetErasureCodingPolicy(item.path);
-      } catch (IOException e) {
-        throw new IOException("Unable to unset EC policy from directory "
-            + item.path + ". " + e.getMessage());
-      }
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 6e4891f..0283e2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -119,30 +119,30 @@ Deployment
   HDFS provides an `ec` subcommand to perform administrative commands related to erasure coding.
 
        hdfs ec [generic options]
-         [-setPolicy [-p <policyName>] <path>]
-         [-getPolicy <path>]
-         [-unsetPolicy <path>]
+         [-setPolicy -policy <policyName> -path <path>]
+         [-getPolicy -path <path>]
+         [-unsetPolicy -path <path>]
          [-listPolicies]
          [-usage [cmd ...]]
          [-help [cmd ...]]
 
 Below are the details about each command.
 
- *  `[-setPolicy [-p <policyName>] <path>]`
+ *  `[-setPolicy -policy <policyName> -path <path>]`
 
     Sets an ErasureCoding policy on a directory at the specified path.
 
       `path`: A directory in HDFS. This is a mandatory parameter. Setting a policy only affects newly created files, and does not affect existing files.
 
-      `policyName`: The ErasureCoding policy to be used for files under this directory. This is an optional parameter, specified using ‘-p’ flag. If no policy is specified, the system default ErasureCodingPolicy will be used.
+      `policyName`: The ErasureCoding policy to be used for files under this directory.
 
- *  `[-getPolicy <path>]`
+ *  `[-getPolicy -path <path>]`
 
      Get details of the ErasureCoding policy of a file or directory at the specified path.
 
- *  `[-unsetPolicy <path>]`
+ *  `[-unsetPolicy -path <path>]`
 
-     Unset an ErasureCoding policy from a directory at the specified path when previously user sets the ErasureCoding policy on this directory via "setPolicy" command. If the directory inherits the ErasureCoding policy from its parent group, "unsetPolicy" command on this directory will not have any effect. Unset ErasureCoding policy on a directory which doesn't have ErasureCoding policy will not return an error.
+     Unset an ErasureCoding policy set by a previous call to "setPolicy" on a directory. If the directory inherits the ErasureCoding policy from an ancestor directory, "unsetPolicy" is a no-op. Unsetting the policy on a directory which doesn't have an explicit policy set will not return an error.
 
  *  `[-listPolicies]`
 

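For reference, the flag-based commands documented above correspond to DistributedFileSystem calls that ECAdmin invokes (setErasureCodingPolicy, getErasureCodingPolicy, unsetErasureCodingPolicy). A minimal programmatic sketch, assuming a running HDFS cluster with RS-DEFAULT-6-3-64k among the enabled policies; the class name and the /ecdir path are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        Path dir = new Path("/ecdir");                        // illustrative path
        FileSystem fs = dir.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          throw new IllegalStateException("Erasure coding policies are HDFS-only");
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        dfs.mkdirs(dir);

        // Resolve the policy by name, the same way setPolicy does.
        ErasureCodingPolicy target = null;
        for (ErasureCodingPolicy p : dfs.getClient().getErasureCodingPolicies()) {
          if ("RS-DEFAULT-6-3-64k".equals(p.getName())) {
            target = p;
          }
        }
        if (target == null) {
          throw new IllegalStateException("Policy not enabled on this cluster");
        }

        dfs.setErasureCodingPolicy(dir, target);              // hdfs ec -setPolicy
        ErasureCodingPolicy current = dfs.getErasureCodingPolicy(dir); // -getPolicy
        System.out.println(current == null ? "unspecified" : current.getName());
        dfs.unsetErasureCodingPolicy(dir);                    // hdfs ec -unsetPolicy
      }
    }

The CLI equivalents of these three calls are the -setPolicy/-getPolicy/-unsetPolicy invocations exercised in testErasureCodingConf.xml below.
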
http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdErasureCoding.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdErasureCoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdErasureCoding.java
index 0499a2b..28e5f98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdErasureCoding.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdErasureCoding.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.cli.util.CLITestCmd;
 import org.apache.hadoop.cli.util.CommandExecutor;
 import org.apache.hadoop.cli.util.ErasureCodingCliCmdExecutor;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.tools.erasurecode.ECCli;
+import org.apache.hadoop.hdfs.tools.ECAdmin;
 
 public class CLITestCmdErasureCoding extends CLITestCmd {
   public CLITestCmdErasureCoding(String str, CLICommandTypes type) {
@@ -33,7 +33,7 @@ public class CLITestCmdErasureCoding extends CLITestCmd {
   @Override
   public CommandExecutor getExecutor(String tag, Configuration conf) throws IllegalArgumentException {
     if (getType() instanceof CLICommandErasureCodingCli)
-      return new ErasureCodingCliCmdExecutor(tag, new ECCli());
+      return new ErasureCodingCliCmdExecutor(tag, new ECAdmin(conf));
     return super.getExecutor(tag, conf);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/ErasureCodingCliCmdExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/ErasureCodingCliCmdExecutor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/ErasureCodingCliCmdExecutor.java
index e993313..59b2a73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/ErasureCodingCliCmdExecutor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/util/ErasureCodingCliCmdExecutor.java
@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.cli.util;
 
-import org.apache.hadoop.hdfs.tools.erasurecode.ECCli;
+import org.apache.hadoop.hdfs.tools.ECAdmin;
 import org.apache.hadoop.util.ToolRunner;
 
 public class ErasureCodingCliCmdExecutor extends CommandExecutor {
   protected String namenode = null;
-  protected ECCli admin = null;
+  protected ECAdmin admin = null;
 
-  public ErasureCodingCliCmdExecutor(String namenode, ECCli admin) {
+  public ErasureCodingCliCmdExecutor(String namenode, ECAdmin admin) {
     this.namenode = namenode;
     this.admin = admin;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/132f758e/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 0753d4d..82b71def 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -42,7 +42,11 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>-usage: Unknown command</expected-output>
+          <expected-output>Can't understand command '-usage'</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Usage: bin/hdfs ec [COMMAND]</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -58,7 +62,11 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Usage: hdfs ec [generic options]</expected-output>
+          <expected-output>[-listPolicies]</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>[-unsetPolicy -path</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -72,12 +80,12 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^[ \t]*Set a specified erasure coding policy to a directory( )*</expected-output>
+          <type>SubstringComparator</type>
+          <expected-output>Set the erasure coding policy for a file/directory.</expected-output>
         </comparator>
         <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^-setPolicy \[-p &lt;policyName&gt;\] &lt;path&gt;(.)*</expected-output>
+          <type>SubstringComparator</type>
+          <expected-output>[-setPolicy -path &lt;path&gt; -policy &lt;policy&gt;]</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -92,11 +100,11 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Get erasure coding policy information about at specified path</expected-output>
+          <expected-output>Get the erasure coding policy of a file/directory</expected-output>
         </comparator>
         <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^-getPolicy &lt;path&gt;(.)*</expected-output>
+          <type>SubstringComparator</type>
+          <expected-output>[-getPolicy -path &lt;path&gt;]</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -111,11 +119,11 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Get the list of erasure coding policies supported</expected-output>
+          <expected-output>Get the list of supported erasure coding policies</expected-output>
         </comparator>
         <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^-listPolicies (.)*</expected-output>
+          <type>SubstringComparator</type>
+          <expected-output>[-listPolicies]</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -125,7 +133,7 @@
       <description>setPolicy : set erasure coding policy on a directory to encode files</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -p RS-DEFAULT-6-3-64k /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -133,7 +141,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>EC policy set successfully at NAMENODE/ecdir</expected-output>
+          <expected-output>Set erasure coding policy RS-DEFAULT-6-3-64k on /ecdir</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -142,8 +150,8 @@
       <description>setPolicy : set a policy twice</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -151,25 +159,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>EC policy set successfully at NAMENODE/ecdir</expected-output>
-        </comparator>
-      </comparators>
-    </test>
-
-    <test>
-      <description>setPolicy : default policy</description>
-      <test-commands>
-        <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir</ec-admin-command>
-      </test-commands>
-      <cleanup-commands>
-        <command>-fs NAMENODE -rmdir /ecdir</command>
-      </cleanup-commands>
-      <comparators>
-        <comparator>
-          <type>SubstringComparator</type>
-          <expected-output>ErasureCodingPolicy=[Name=</expected-output>
+          <expected-output>Set erasure coding policy RS-DEFAULT-6-3-64k on /ecdir</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -178,9 +168,9 @@
       <description>unsetPolicy : unset policy and get</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -unsetPolicy /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -unsetPolicy -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -188,7 +178,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>is not erasure coded.</expected-output>
+          <expected-output>is unspecified</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -197,10 +187,9 @@
       <description>setPolicy : change different policy and get</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -p RS-DEFAULT-3-2-64k
-          /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-3-2-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -208,7 +197,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>ErasureCodingPolicy=[Name=RS-DEFAULT-3-2-64k</expected-output>
+          <expected-output>RS-DEFAULT-3-2-64k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -218,10 +207,10 @@
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
         <command>-fs NAMENODE -mkdir /ecdir/child</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -unsetPolicy /ecdir/child</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -unsetPolicy -path /ecdir/child</ec-admin-command>
         <command>-fs NAMENODE -touchz /ecdir/child/ecfile</command>
-        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir/child/ecfile</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir/child/ecfile</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm /ecdir/child/ecfile</command>
@@ -231,7 +220,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>ErasureCodingPolicy=[Name=RS-DEFAULT-6-3-64k</expected-output>
+          <expected-output>RS-DEFAULT-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -240,7 +229,7 @@
       <description>getPolicy : get EC policy information at specified path, which doesn't have an EC policy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /noec</command>
-        <ec-admin-command>-fs NAMENODE -getPolicy /noec</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy -path /noec</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /noec</command>
@@ -248,7 +237,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Path NAMENODE/noec is not erasure coded</expected-output>
+          <expected-output>is unspecified</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -257,8 +246,8 @@
       <description>getPolicy : get EC policy information at specified path, which doesn't have an EC policy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -p RS-DEFAULT-6-3-64k /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -266,7 +255,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>ErasureCodingPolicy=[Name=RS-DEFAULT-6-3-64k</expected-output>
+          <expected-output>RS-DEFAULT-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -275,9 +264,9 @@
       <description>getPolicy : get EC policy information at specified path, which doesn't have an EC policy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -p RS-DEFAULT-6-3-64k /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
         <command>-fs NAMENODE -touchz /ecdir/ecfile</command>
-        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir/ecfile</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir/ecfile</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm /ecdir/ecfile</command>
@@ -286,7 +275,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>ErasureCodingPolicy=[Name=RS-DEFAULT-6-3-64k</expected-output>
+          <expected-output>RS-DEFAULT-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -318,8 +307,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^-setPolicy: &lt;path&gt; is missing(.)*</expected-output>
+          <type>SubstringComparator</type>
+          <expected-output>Please specify the path for setting the EC policy.</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -328,15 +317,15 @@
       <description>setPolicy : illegal parameters - policy name is missing</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -p</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^-setPolicy: option -p requires 1 argument(.)*</expected-output>
+          <type>SubstringComparator</type>
+          <expected-output>option -path requires 1 argument</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -345,7 +334,7 @@
       <description>setPolicy : illegal parameters - too many arguments</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir1 /ecdir2</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir1 -policy RS-DEFAULT-3-2-64k /ecdir2</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -362,7 +351,7 @@
       <description>setPolicy : illegal parameters - invalidpolicy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -p invalidpolicy /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy invalidpolicy -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -378,14 +367,14 @@
     <test>
       <description>setPolicy : illegal parameters - no such file</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -setPolicy /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-DEFAULT-3-2-64k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^setPolicy: `/ecdir': No such file or directory(.)*</expected-output>
+          <type>SubstringComparator</type>
+          <expected-output>Path not found: /ecdir</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -399,8 +388,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^-getPolicy: &lt;path&gt; is missing(.)*</expected-output>
+          <type>SubstringComparator</type>
+          <expected-output>Please specify the path with -path</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -408,7 +397,7 @@
     <test>
       <description>getPolicy : illegal parameters - too many arguments</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm /ecdir</command>
@@ -424,14 +413,14 @@
     <test>
       <description>getPolicy : illegal parameters - no such file</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -getPolicy /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^getPolicy: `/ecdir': No such file or directory(.)*</expected-output>
+          <type>SubstringComparator</type>
+          <expected-output>Path not found: /ecdir</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -446,7 +435,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>-listPolicies: Too many parameters</expected-output>
+          <expected-output>-listPolicies: Too many arguments</expected-output>
         </comparator>
       </comparators>
     </test>




[12/31] hadoop git commit: HADOOP-14114 S3A can no longer handle unencoded + in URIs. Contributed by Sean Mackrory.

Posted by st...@apache.org.
HADOOP-14114 S3A can no longer handle unencoded + in URIs. Contributed by Sean Mackrory.

(cherry picked from commit ff87ca84418a710c6dc884fe8c70947fcc6489d5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c22a916
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c22a916
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c22a916

Branch: refs/heads/HADOOP-13345
Commit: 9c22a91662af24569191ce45289ef8266e8755cc
Parents: 132f758
Author: Steve Loughran <st...@apache.org>
Authored: Fri Feb 24 10:41:36 2017 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Fri Feb 24 10:41:36 2017 +0000

----------------------------------------------------------------------
 .../hadoop/fs/s3native/S3xLoginHelper.java      | 15 ++++++++++-
 .../hadoop/fs/s3native/TestS3xLoginHelper.java  | 28 ++++++++++++++++++++
 2 files changed, 42 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
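
The underlying issue is that URLDecoder treats a bare '+' as the encoding of a space, so a secret key containing '+' is silently corrupted unless the character is first rewritten to '%2B', which is what the change below does. A minimal standalone sketch of the behaviour (the class name is illustrative, not part of the patch):

    import java.net.URLDecoder;

    public class PlusDecodingSketch {
      public static void main(String[] args) throws Exception {
        // A raw '+' decodes to a space, mangling the secret:
        System.out.println(URLDecoder.decode("pa+ss", "UTF-8"));   // prints "pa ss"
        // Pre-encoding it as %2B, as S3xLoginHelper now does, round-trips correctly:
        System.out.println(URLDecoder.decode("pa%2Bss", "UTF-8")); // prints "pa+ss"
      }
    }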


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c22a916/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
index 97ece37..862ce6b 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/S3xLoginHelper.java
@@ -48,6 +48,13 @@ public final class S3xLoginHelper {
       "The Filesystem URI contains login details."
       +" This is insecure and may be unsupported in future.";
 
+  public static final String PLUS_WARNING =
+      "Secret key contains a special character that should be URL encoded! " +
+          "Attempting to resolve...";
+
+  public static final String PLUS_UNENCODED = "+";
+  public static final String PLUS_ENCODED = "%2B";
+
   /**
    * Build the filesystem URI. This can include stripping down of part
    * of the URI.
@@ -112,7 +119,13 @@ public final class S3xLoginHelper {
       int loginSplit = login.indexOf(':');
       if (loginSplit > 0) {
         String user = login.substring(0, loginSplit);
-        String password = URLDecoder.decode(login.substring(loginSplit + 1),
+        String encodedPassword = login.substring(loginSplit + 1);
+        if (encodedPassword.contains(PLUS_UNENCODED)) {
+          LOG.warn(PLUS_WARNING);
+          encodedPassword = encodedPassword.replaceAll("\\" + PLUS_UNENCODED,
+              PLUS_ENCODED);
+        }
+        String password = URLDecoder.decode(encodedPassword,
             "UTF-8");
         return new Login(user, password);
       } else if (loginSplit == 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c22a916/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3xLoginHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3xLoginHelper.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3xLoginHelper.java
index bd2ac1e..3761cb7 100644
--- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3xLoginHelper.java
+++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3native/TestS3xLoginHelper.java
@@ -32,9 +32,13 @@ public class TestS3xLoginHelper extends Assert {
   public static final String BUCKET = "s3a://bucket";
   private static final URI ENDPOINT = uri(BUCKET);
   public static final String S = "%2f";
+  public static final String P = "%2b";
+  public static final String P_RAW = "+";
   public static final String USER = "user";
   public static final String PASS = "pass";
   public static final String PASLASHSLASH = "pa" + S + S;
+  public static final String PAPLUS = "pa" + P;
+  public static final String PAPLUS_RAW = "pa" + P_RAW;
 
   public static final URI WITH_USER_AND_PASS = uri("s3a://user:pass@bucket");
   public static final Path PATH_WITH_LOGIN =
@@ -42,6 +46,10 @@ public class TestS3xLoginHelper extends Assert {
 
   public static final URI WITH_SLASH_IN_PASS = uri(
       "s3a://user:" + PASLASHSLASH + "@bucket");
+  public static final URI WITH_PLUS_IN_PASS = uri(
+      "s3a://user:" + PAPLUS + "@bucket");
+  public static final URI WITH_PLUS_RAW_IN_PASS = uri(
+      "s3a://user:" + PAPLUS_RAW + "@bucket");
   public static final URI USER_NO_PASS = uri("s3a://user@bucket");
   public static final URI WITH_USER_AND_COLON = uri("s3a://user:@bucket");
   public static final URI NO_USER = uri("s3a://:pass@bucket");
@@ -117,6 +125,16 @@ public class TestS3xLoginHelper extends Assert {
   }
 
   @Test
+  public void testLoginWithPlusInPass() throws Throwable {
+    assertMatchesLogin(USER, "pa+", WITH_PLUS_IN_PASS);
+  }
+
+  @Test
+  public void testLoginWithPlusRawInPass() throws Throwable {
+    assertMatchesLogin(USER, "pa+", WITH_PLUS_RAW_IN_PASS);
+  }
+
+  @Test
   public void testLoginWithUser() throws Throwable {
     assertMatchesLogin(USER, "", USER_NO_PASS);
   }
@@ -152,6 +170,16 @@ public class TestS3xLoginHelper extends Assert {
   }
 
   @Test
+  public void testFsUriWithPlusInPass() throws Throwable {
+    assertMatchesEndpoint(WITH_PLUS_IN_PASS);
+  }
+
+  @Test
+  public void testFsUriWithPlusRawInPass() throws Throwable {
+    assertMatchesEndpoint(WITH_PLUS_RAW_IN_PASS);
+  }
+
+  @Test
   public void testFsUriWithUser() throws Throwable {
     assertMatchesEndpoint(USER_NO_PASS);
   }




[26/31] hadoop git commit: YARN-6215. FairScheduler preemption and update should not run concurrently. (Tao Jie via kasha)

Posted by st...@apache.org.
YARN-6215. FairScheduler preemption and update should not run concurrently. (Tao Jie via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/815d5350
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/815d5350
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/815d5350

Branch: refs/heads/HADOOP-13345
Commit: 815d53506fb0c5ca029c993d6b094db2ac0ca6eb
Parents: 05391c1
Author: Karthik Kambatla <ka...@apache.org>
Authored: Sun Feb 26 20:16:36 2017 -0800
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Sun Feb 26 20:16:36 2017 -0800

----------------------------------------------------------------------
 .../scheduler/fair/FSPreemptionThread.java              | 12 +++++++++++-
 .../resourcemanager/scheduler/fair/FairScheduler.java   |  5 +++++
 2 files changed, 16 insertions(+), 1 deletion(-)
----------------------------------------------------------------------
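
The change below has the preemption thread hold the scheduler's read lock around container identification and preemption, so it cannot interleave with the update thread; the exclusion assumes the update path takes the matching write lock. A minimal sketch of the ReentrantReadWriteLock pattern being relied on (illustrative only, not the FairScheduler code itself):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class SchedulerLockSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      /** Update thread: the write lock excludes all readers while shares are recomputed. */
      void update() {
        lock.writeLock().lock();
        try {
          // recompute fair shares and starvation here
        } finally {
          lock.writeLock().unlock();
        }
      }

      /** Preemption thread: the read lock guarantees no overlap with update(). */
      void preemptForStarvedApp() {
        lock.readLock().lock();
        try {
          // identify containers to preempt and preempt them here
        } finally {
          lock.readLock().unlock();
        }
      }
    }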


http://git-wip-us.apache.org/repos/asf/hadoop/blob/815d5350/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
index af73c10..65df0c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java
@@ -32,6 +32,7 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Timer;
 import java.util.TimerTask;
+import java.util.concurrent.locks.Lock;
 
 /**
  * Thread that handles FairScheduler preemption.
@@ -43,6 +44,7 @@ class FSPreemptionThread extends Thread {
   private final long warnTimeBeforeKill;
   private final long delayBeforeNextStarvationCheck;
   private final Timer preemptionTimer;
+  private final Lock schedulerReadLock;
 
   FSPreemptionThread(FairScheduler scheduler) {
     setDaemon(true);
@@ -61,6 +63,7 @@ class FSPreemptionThread extends Thread {
         : 4 * scheduler.getNMHeartbeatInterval()); // 4 heartbeats
     delayBeforeNextStarvationCheck = warnTimeBeforeKill + allocDelay +
         fsConf.getWaitTimeBeforeNextStarvationCheck();
+    schedulerReadLock = scheduler.getSchedulerReadLock();
   }
 
   public void run() {
@@ -68,7 +71,14 @@ class FSPreemptionThread extends Thread {
       FSAppAttempt starvedApp;
       try{
         starvedApp = context.getStarvedApps().take();
-        preemptContainers(identifyContainersToPreempt(starvedApp));
+        // Hold the scheduler readlock so this is not concurrent with the
+        // update thread.
+        schedulerReadLock.lock();
+        try {
+          preemptContainers(identifyContainersToPreempt(starvedApp));
+        } finally {
+          schedulerReadLock.unlock();
+        }
         starvedApp.preemptionTriggered(delayBeforeNextStarvationCheck);
       } catch (InterruptedException e) {
         LOG.info("Preemption thread interrupted! Exiting.");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/815d5350/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index c946bfb..3246778 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -104,6 +104,7 @@ import java.util.Map.Entry;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 
 /**
  * A scheduler that schedules resources between a set of queues. The scheduler
@@ -1782,4 +1783,8 @@ public class FairScheduler extends
   long getNMHeartbeatInterval() {
     return nmHeartbeatInterval;
   }
+
+  ReadLock getSchedulerReadLock() {
+    return this.readLock;
+  }
 }




[03/31] hadoop git commit: YARN-6210. FairScheduler: Node reservations can interfere with preemption. (kasha)

Posted by st...@apache.org.
YARN-6210. FairScheduler: Node reservations can interfere with preemption. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/718ad9f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/718ad9f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/718ad9f6

Branch: refs/heads/HADOOP-13345
Commit: 718ad9f6ee93d4145f2bb19b7582ce4e1174feaf
Parents: 732ee6f
Author: Karthik Kambatla <ka...@cloudera.com>
Authored: Wed Feb 22 15:45:45 2017 -0800
Committer: Karthik Kambatla <ka...@cloudera.com>
Committed: Wed Feb 22 15:46:07 2017 -0800

----------------------------------------------------------------------
 .../resource/DefaultResourceCalculator.java     |   3 +-
 .../resource/DominantResourceCalculator.java    |  13 +-
 .../yarn/util/resource/ResourceCalculator.java  |  32 ++++-
 .../scheduler/fair/FSAppAttempt.java            |  61 ++++++---
 .../DominantResourceFairnessPolicy.java         |   8 +-
 .../fair/policies/FairSharePolicy.java          |   3 +-
 .../scheduler/fair/TestFairScheduler.java       | 127 ++++++++-----------
 .../fair/TestFairSchedulerPreemption.java       |  44 +++++--
 8 files changed, 180 insertions(+), 111 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/718ad9f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index 42c45ad..ef7229c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -30,7 +30,8 @@ public class DefaultResourceCalculator extends ResourceCalculator {
       LogFactory.getLog(DefaultResourceCalculator.class);
 
   @Override
-  public int compare(Resource unused, Resource lhs, Resource rhs) {
+  public int compare(Resource unused, Resource lhs, Resource rhs,
+      boolean singleType) {
     // Only consider memory
     return Long.compare(lhs.getMemorySize(), rhs.getMemorySize());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/718ad9f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 9f1c8d7..032aa02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -51,17 +51,18 @@ public class DominantResourceCalculator extends ResourceCalculator {
       LogFactory.getLog(DominantResourceCalculator.class);
 
   @Override
-  public int compare(Resource clusterResource, Resource lhs, Resource rhs) {
+  public int compare(Resource clusterResource, Resource lhs, Resource rhs,
+      boolean singleType) {
     
     if (lhs.equals(rhs)) {
       return 0;
     }
     
     if (isInvalidDivisor(clusterResource)) {
-      if ((lhs.getMemorySize() < rhs.getMemorySize() && lhs.getVirtualCores() > rhs
-          .getVirtualCores())
-          || (lhs.getMemorySize() > rhs.getMemorySize() && lhs.getVirtualCores() < rhs
-              .getVirtualCores())) {
+      if ((lhs.getMemorySize() < rhs.getMemorySize() &&
+          lhs.getVirtualCores() > rhs.getVirtualCores()) ||
+          (lhs.getMemorySize() > rhs.getMemorySize() &&
+          lhs.getVirtualCores() < rhs.getVirtualCores())) {
         return 0;
       } else if (lhs.getMemorySize() > rhs.getMemorySize()
           || lhs.getVirtualCores() > rhs.getVirtualCores()) {
@@ -79,7 +80,7 @@ public class DominantResourceCalculator extends ResourceCalculator {
       return -1;
     } else if (l > r) {
       return 1;
-    } else {
+    } else if (!singleType) {
       l = getResourceAsValue(clusterResource, lhs, false);
       r = getResourceAsValue(clusterResource, rhs, false);
       if (l < r) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/718ad9f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 50ce04c..a2f85b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -28,8 +28,36 @@ import org.apache.hadoop.yarn.api.records.Resource;
 @Unstable
 public abstract class ResourceCalculator {
 
-  public abstract int 
-  compare(Resource clusterResource, Resource lhs, Resource rhs);
+  /**
+   * On a cluster with capacity {@code clusterResource}, compare {@code lhs}
+   * and {@code rhs}. Consider all resources unless {@code singleType} is set
+   * to true. When {@code singleType} is set to true, consider only one
+   * resource as per the {@link ResourceCalculator} implementation; the
+   * {@link DefaultResourceCalculator} considers memory and
+   * {@link DominantResourceCalculator} considers the dominant resource.
+   *
+   * @param clusterResource cluster capacity
+   * @param lhs First {@link Resource} to compare
+   * @param rhs Second {@link Resource} to compare
+   * @param singleType Whether to consider a single resource type or all
+   *                   resource types
+   * @return -1 if {@code lhs} is smaller, 0 if equal and 1 if it is larger
+   */
+  public abstract int compare(
+      Resource clusterResource, Resource lhs, Resource rhs, boolean singleType);
+
+  /**
+   * On a cluster with capacity {@code clusterResource}, compare {@code lhs}
+   * and {@code rhs} considering all resources.
+   *
+   * @param clusterResource cluster capacity
+   * @param lhs First {@link Resource} to compare
+   * @param rhs Second {@link Resource} to compare
+   * @return -1 if {@code lhs} is smaller, 0 if equal and 1 if it is larger
+   */
+  public int compare(Resource clusterResource, Resource lhs, Resource rhs) {
+    return compare(clusterResource, lhs, rhs, false);
+  }
 
   public static int divideAndCeil(int a, int b) {
     if (b == 0) {
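
To make the new singleType contract concrete, a worked example with illustrative numbers, assuming DominantResourceCalculator's usual definition of the dominant share as the larger of the per-resource shares:

    cluster = <8192 MB, 8 vcores>
    lhs     = <3072 MB, 2 vcores>  ->  dominant share = max(3072/8192, 2/8) = 0.375
    rhs     = <4096 MB, 1 vcore>   ->  dominant share = max(4096/8192, 1/8) = 0.500
    compare(cluster, lhs, rhs, true)  == -1   (only the dominant shares are compared)
    compare(cluster, lhs, rhs, false) == -1   (the second resource is consulted only when the dominant shares tie)

FSAppAttempt#isUsageBelowShare passes singleType=true, so an app is treated as below its fair share exactly when the dominant share of its usage is below that of the share, which is what the starvation and reservation checks in this patch key off.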

http://git-wip-us.apache.org/repos/asf/hadoop/blob/718ad9f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 6ed0660..6c61b45 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -605,8 +604,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
     Resource usageAfterPreemption = Resources.subtract(
         getResourceUsage(), container.getAllocatedResource());
 
-    return !Resources.lessThan(fsQueue.getPolicy().getResourceCalculator(),
-        scheduler.getClusterResource(), usageAfterPreemption, getFairShare());
+    return !isUsageBelowShare(usageAfterPreemption, getFairShare());
   }
 
   /**
@@ -833,9 +831,9 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
     }
 
     // The desired container won't fit here, so reserve
-    if (isReservable(capability) && reserve(
-        pendingAsk.getPerAllocationResource(), node, reservedContainer, type,
-        schedulerKey)) {
+    if (isReservable(capability) &&
+        reserve(pendingAsk.getPerAllocationResource(), node, reservedContainer,
+            type, schedulerKey)) {
       if (isWaitingForAMContainer()) {
         updateAMDiagnosticMsg(capability,
             " exceed the available resources of the node and the request is"
@@ -857,8 +855,11 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
   }
 
   private boolean isReservable(Resource capacity) {
-    return scheduler.isAtLeastReservationThreshold(
-        getQueue().getPolicy().getResourceCalculator(), capacity);
+    // Reserve only when the app is starved and the requested container size
+    // is larger than the configured threshold
+    return isStarved() &&
+        scheduler.isAtLeastReservationThreshold(
+            getQueue().getPolicy().getResourceCalculator(), capacity);
   }
 
   /**
@@ -1089,34 +1090,51 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
    * @return freshly computed fairshare starvation
    */
   Resource fairShareStarvation() {
+    long now = scheduler.getClock().getTime();
     Resource threshold = Resources.multiply(
         getFairShare(), fsQueue.getFairSharePreemptionThreshold());
-    Resource starvation = Resources.componentwiseMin(threshold, demand);
-    Resources.subtractFromNonNegative(starvation, getResourceUsage());
+    Resource fairDemand = Resources.componentwiseMin(threshold, demand);
 
-    long now = scheduler.getClock().getTime();
-    boolean starved = !Resources.isNone(starvation);
+    // Check if the queue is starved for fairshare
+    boolean starved = isUsageBelowShare(getResourceUsage(), fairDemand);
 
     if (!starved) {
       lastTimeAtFairShare = now;
     }
 
-    if (starved &&
-        (now - lastTimeAtFairShare > fsQueue.getFairSharePreemptionTimeout())) {
-      this.fairshareStarvation = starvation;
+    if (!starved ||
+        now - lastTimeAtFairShare < fsQueue.getFairSharePreemptionTimeout()) {
+      fairshareStarvation = Resources.none();
     } else {
-      this.fairshareStarvation = Resources.none();
+      // The app has been starved for longer than preemption-timeout.
+      fairshareStarvation =
+          Resources.subtractFromNonNegative(fairDemand, getResourceUsage());
     }
-    return this.fairshareStarvation;
+    return fairshareStarvation;
+  }
+
+  /**
+   * Helper method that checks if {@code usage} is strictly less than
+   * {@code share}.
+   */
+  private boolean isUsageBelowShare(Resource usage, Resource share) {
+    return fsQueue.getPolicy().getResourceCalculator().compare(
+        scheduler.getClusterResource(), usage, share, true) < 0;
   }
 
   /**
    * Helper method that captures if this app is identified to be starved.
    * @return true if the app is starved for fairshare, false otherwise
    */
-  @VisibleForTesting
   boolean isStarvedForFairShare() {
-    return !Resources.isNone(fairshareStarvation);
+    return isUsageBelowShare(getResourceUsage(), getFairShare());
+  }
+
+  /**
+   * Is application starved for fairshare or minshare
+   */
+  private boolean isStarved() {
+    return isStarvedForFairShare() || !Resources.isNone(minshareStarvation);
   }
 
   /**
@@ -1333,6 +1351,11 @@ public class FSAppAttempt extends SchedulerApplicationAttempt
   }
 
   @Override
+  public String toString() {
+    return getApplicationAttemptId() + " Alloc: " + getCurrentConsumption();
+  }
+
+  @Override
   public boolean isPreemptable() {
     return getQueue().isPreemptable();
   }
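
For readers skimming the FSAppAttempt diff above, the new fairshare-starvation computation reduces to: cap the fair share by the preemption threshold, cap that again by what the app actually demands, and report starvation only if usage has stayed below that capped demand for longer than the preemption timeout. A minimal sketch, with plain longs standing in for the Resource/ResourceCalculator types (illustrative only, not the real FSAppAttempt code):

    // Simplified sketch of the fairshare-starvation check in the diff above.
    // Resources are reduced to single long values; the threshold, timeout and
    // lastTimeAtFairShare bookkeeping are passed in rather than held as state.
    public class FairShareStarvationSketch {
      static long fairShareStarvation(long fairShare, double preemptionThreshold,
          long demand, long usage, long now, long lastTimeAtFairShare,
          long preemptionTimeout) {
        // An app can only be starved for what it actually asks for.
        long fairDemand = Math.min((long) (fairShare * preemptionThreshold), demand);
        boolean starved = usage < fairDemand;
        if (!starved || now - lastTimeAtFairShare < preemptionTimeout) {
          return 0L;                    // not starved, or not starved long enough
        }
        return fairDemand - usage;      // unmet portion of the capped fair demand
      }

      public static void main(String[] args) {
        // Usage 2 GB, fair share 8 GB, threshold 0.5 -> fair demand 4 GB;
        // starved by 2 GB once the preemption timeout has expired.
        System.out.println(fairShareStarvation(8192, 0.5, 10000, 2048,
            60_000, 0, 30_000));        // prints 2048
      }
    }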

http://git-wip-us.apache.org/repos/asf/hadoop/blob/718ad9f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
index 6f04cb7..369b8a1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
@@ -155,8 +155,12 @@ public class DominantResourceFairnessPolicy extends SchedulingPolicy {
             resourceOrder1, resourceOrder2);
       }
       if (res == 0) {
-        // Apps are tied in fairness ratio. Break the tie by submit time.
-        res = (int)(s1.getStartTime() - s2.getStartTime());
+        // Apps are tied in fairness ratio. Break the tie by submit time and job
+        // name to get a deterministic ordering, which is useful for unit tests.
+        res = (int) Math.signum(s1.getStartTime() - s2.getStartTime());
+        if (res == 0) {
+          res = s1.getName().compareTo(s2.getName());
+        }
       }
       return res;
     }
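
Both scheduling policies now break fairness ties the same way: first by submit time, then by application name, so the ordering is deterministic for unit tests. Casting the raw long difference of start times to int can overflow, which is why the diff switches to Math.signum. A rough illustration with a made-up AppStub type (not the real Schedulable interface):

    import java.util.Comparator;

    public class TieBreakSketch {
      // Hypothetical stand-in for the real Schedulable type.
      static class AppStub {
        final long startTime;
        final String name;
        AppStub(long startTime, String name) {
          this.startTime = startTime;
          this.name = name;
        }
      }

      // Compare by start time first; Math.signum avoids int overflow when the
      // long difference is narrowed to int. Fall back to the name so equal
      // start times still produce a stable order.
      static final Comparator<AppStub> TIE_BREAK = new Comparator<AppStub>() {
        @Override
        public int compare(AppStub s1, AppStub s2) {
          int res = (int) Math.signum(s1.startTime - s2.startTime);
          if (res == 0) {
            res = s1.name.compareTo(s2.name);
          }
          return res;
        }
      };

      public static void main(String[] args) {
        System.out.println(TIE_BREAK.compare(
            new AppStub(100L, "app-a"), new AppStub(100L, "app-b")) < 0); // true
      }
    }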

http://git-wip-us.apache.org/repos/asf/hadoop/blob/718ad9f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index 9036a03..f8cdb45 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -131,8 +131,9 @@ public class FairSharePolicy extends SchedulingPolicy {
         // Apps are tied in fairness ratio. Break the tie by submit time and job
         // name to get a deterministic ordering, which is useful for unit tests.
         res = (int) Math.signum(s1.getStartTime() - s2.getStartTime());
-        if (res == 0)
+        if (res == 0) {
           res = s1.getName().compareTo(s2.getName());
+        }
       }
       return res;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/718ad9f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 0c3a614..4def53f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -119,6 +119,7 @@ import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Mock;
 import org.mockito.Mockito;
 import org.xml.sax.SAXException;
 
@@ -2627,71 +2628,57 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     assertEquals(1, scheduler.getSchedulerApp(attId4).getLiveContainers().size());
   }
 
+  /**
+   * Reserve at a lower priority and verify the lower priority request gets
+   * allocated
+   */
   @Test (timeout = 5000)
-  public void testReservationWhileMultiplePriorities() throws IOException {
+  public void testReservationWithMultiplePriorities() throws IOException {
     scheduler.init(conf);
     scheduler.start();
     scheduler.reinitialize(conf, resourceManager.getRMContext());
 
     // Add a node
-    RMNode node1 =
-        MockNodes
-            .newNodeInfo(1, Resources.createResource(1024, 4), 1, "127.0.0.1");
+    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 2));
     NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
     scheduler.handle(nodeEvent1);
-
-    ApplicationAttemptId attId = createSchedulingRequest(1024, 4, "queue1",
-        "user1", 1, 2);
-    scheduler.update();
     NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1);
-    scheduler.handle(updateEvent);
-    
-    FSAppAttempt app = scheduler.getSchedulerApp(attId);
-    assertEquals(1, app.getLiveContainers().size());
-    
-    ContainerId containerId = scheduler.getSchedulerApp(attId)
-        .getLiveContainers().iterator().next().getContainerId();
 
-    // Cause reservation to be created
-    createSchedulingRequestExistingApplication(1024, 4, 2, attId);
+    // Create first app and take up half resources so the second app that asks
+    // for the entire node won't have enough.
+    FSAppAttempt app1 = scheduler.getSchedulerApp(
+        createSchedulingRequest(1024, 1, "queue", "user", 1));
     scheduler.update();
     scheduler.handle(updateEvent);
+    assertEquals("Basic allocation failed", 1, app1.getLiveContainers().size());
 
-    assertEquals(1, app.getLiveContainers().size());
-    assertEquals(0, scheduler.getRootQueueMetrics().getAvailableMB());
-    assertEquals(0, scheduler.getRootQueueMetrics().getAvailableVirtualCores());
-    
-    // Create request at higher priority
-    createSchedulingRequestExistingApplication(1024, 4, 1, attId);
+    // Create another app and reserve at a lower priority first
+    ApplicationAttemptId attId =
+        createSchedulingRequest(2048, 2, "queue1", "user1", 1, 2);
+    FSAppAttempt app2 = scheduler.getSchedulerApp(attId);
     scheduler.update();
     scheduler.handle(updateEvent);
-    
-    assertEquals(1, app.getLiveContainers().size());
-    // Reserved container should still be at lower priority
-    for (RMContainer container : app.getReservedContainers()) {
-      assertEquals(2,
-          container.getReservedSchedulerKey().getPriority().getPriority());
-    }
-    
-    // Complete container
-    scheduler.allocate(attId, new ArrayList<ResourceRequest>(),
+    assertEquals("Reservation at lower priority failed",
+        1, app2.getReservedContainers().size());
+
+    // Request container on the second app at a higher priority
+    createSchedulingRequestExistingApplication(2048, 2, 1, attId);
+
+    // Complete the first container so we can trigger allocation for app2
+    ContainerId containerId =
+        app1.getLiveContainers().iterator().next().getContainerId();
+    scheduler.allocate(app1.getApplicationAttemptId(), new ArrayList<>(),
         Arrays.asList(containerId), null, null, NULL_UPDATE_REQUESTS);
-    assertEquals(1024, scheduler.getRootQueueMetrics().getAvailableMB());
-    assertEquals(4, scheduler.getRootQueueMetrics().getAvailableVirtualCores());
-    
-    // Schedule at opening
-    scheduler.update();
+
+    // Trigger allocation for app2
     scheduler.handle(updateEvent);
-    
+
     // Reserved container (at lower priority) should be run
-    Collection<RMContainer> liveContainers = app.getLiveContainers();
-    assertEquals(1, liveContainers.size());
-    for (RMContainer liveContainer : liveContainers) {
-      Assert.assertEquals(2, liveContainer.getContainer().getPriority()
-          .getPriority());
-    }
-    assertEquals(0, scheduler.getRootQueueMetrics().getAvailableMB());
-    assertEquals(0, scheduler.getRootQueueMetrics().getAvailableVirtualCores());
+    Collection<RMContainer> liveContainers = app2.getLiveContainers();
+    assertEquals("Allocation post completion failed", 1, liveContainers.size());
+    assertEquals("High prio container allocated against low prio reservation",
+        2, liveContainers.iterator().next().getContainer().
+            getPriority().getPriority());
   }
   
   @Test
@@ -3222,8 +3209,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
   }
 
   /**
-   * If we update our ask to strictly request a node, it doesn't make sense to keep
-   * a reservation on another.
+   * Strict locality requests shouldn't reserve resources on another node.
    */
   @Test
   public void testReservationsStrictLocality() throws IOException {
@@ -3231,40 +3217,39 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     scheduler.start();
     scheduler.reinitialize(conf, resourceManager.getRMContext());
 
-    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1");
-    RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024), 2, "127.0.0.2");
+    // Add two nodes
+    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024, 1));
     NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
     scheduler.handle(nodeEvent1);
+    RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024, 1));
+    NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
+    scheduler.handle(nodeEvent2);
 
-    ApplicationAttemptId attId = createSchedulingRequest(1024, "queue1",
-        "user1", 0);
+    // Submit application without container requests
+    ApplicationAttemptId attId =
+        createSchedulingRequest(1024, "queue1", "user1", 0);
     FSAppAttempt app = scheduler.getSchedulerApp(attId);
-    
-    ResourceRequest nodeRequest = createResourceRequest(1024, node2.getHostName(), 1, 2, true);
-    ResourceRequest rackRequest = createResourceRequest(1024, "rack1", 1, 2, true);
-    ResourceRequest anyRequest = createResourceRequest(1024, ResourceRequest.ANY,
-        1, 2, false);
+
+    // Request a container on node2
+    ResourceRequest nodeRequest =
+        createResourceRequest(1024, node2.getHostName(), 1, 1, true);
+    ResourceRequest rackRequest =
+        createResourceRequest(1024, "rack1", 1, 1, false);
+    ResourceRequest anyRequest =
+        createResourceRequest(1024, ResourceRequest.ANY, 1, 1, false);
     createSchedulingRequestExistingApplication(nodeRequest, attId);
     createSchedulingRequestExistingApplication(rackRequest, attId);
     createSchedulingRequestExistingApplication(anyRequest, attId);
-    
     scheduler.update();
 
+    // Heartbeat from node1. App shouldn't get an allocation or reservation
     NodeUpdateSchedulerEvent nodeUpdateEvent = new NodeUpdateSchedulerEvent(node1);
     scheduler.handle(nodeUpdateEvent);
-    assertEquals(1, app.getLiveContainers().size());
-    scheduler.handle(nodeUpdateEvent);
-    assertEquals(1, app.getReservedContainers().size());
-    
-    // now, make our request node-specific (on a different node)
-    rackRequest = createResourceRequest(1024, "rack1", 1, 1, false);
-    anyRequest = createResourceRequest(1024, ResourceRequest.ANY,
-        1, 1, false);
-    scheduler.allocate(attId, Arrays.asList(rackRequest, anyRequest),
-        new ArrayList<ContainerId>(), null, null, NULL_UPDATE_REQUESTS);
-
+    assertEquals("App assigned a container on the wrong node",
+        0, app.getLiveContainers().size());
     scheduler.handle(nodeUpdateEvent);
-    assertEquals(0, app.getReservedContainers().size());
+    assertEquals("App reserved a container on the wrong node",
+        0, app.getReservedContainers().size());
   }
   
   @Test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/718ad9f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
index 480a329..322ad5b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java
@@ -72,7 +72,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
         {"MinSharePreemptionWithDRF", 1},
         {"FairSharePreemption", 2},
         {"FairSharePreemptionWithDRF", 3}
-        });
+    });
   }
 
   public TestFairSchedulerPreemption(String name, int mode)
@@ -110,6 +110,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
      * |--- preemptable
      *      |--- child-1
      *      |--- child-2
+     * |--- preemptable-sibling
      * |--- nonpreemptible
      *      |--- child-1
      *      |--- child-2
@@ -133,6 +134,10 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
 
     out.println("</queue>"); // end of preemptable queue
 
+    out.println("<queue name=\"preemptable-sibling\">");
+    writePreemptionParams(out);
+    out.println("</queue>");
+
     // Queue with preemption disallowed
     out.println("<queue name=\"nonpreemptable\">");
     out.println("<allowPreemptionFrom>false" +
@@ -269,10 +274,11 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     preemptHalfResources(queue2);
   }
 
-  private void verifyPreemption() throws InterruptedException {
+  private void verifyPreemption(int numStarvedAppContainers)
+      throws InterruptedException {
     // Sleep long enough for four containers to be preempted.
     for (int i = 0; i < 1000; i++) {
-      if (greedyApp.getLiveContainers().size() == 4) {
+      if (greedyApp.getLiveContainers().size() == 2 * numStarvedAppContainers) {
         break;
       }
       Thread.sleep(10);
@@ -280,13 +286,13 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
 
     // Verify the right amount of containers are preempted from greedyApp
     assertEquals("Incorrect number of containers on the greedy app",
-        4, greedyApp.getLiveContainers().size());
+        2 * numStarvedAppContainers, greedyApp.getLiveContainers().size());
 
     sendEnoughNodeUpdatesToAssignFully();
 
     // Verify the preempted containers are assigned to starvingApp
     assertEquals("Starved app is not assigned the right number of containers",
-        2, starvingApp.getLiveContainers().size());
+        numStarvedAppContainers, starvingApp.getLiveContainers().size());
   }
 
   private void verifyNoPreemption() throws InterruptedException {
@@ -305,7 +311,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     String queue = "root.preemptable.child-1";
     submitApps(queue, queue);
     if (fairsharePreemption) {
-      verifyPreemption();
+      verifyPreemption(2);
     } else {
       verifyNoPreemption();
     }
@@ -314,13 +320,13 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
   @Test
   public void testPreemptionBetweenTwoSiblingLeafQueues() throws Exception {
     submitApps("root.preemptable.child-1", "root.preemptable.child-2");
-    verifyPreemption();
+    verifyPreemption(2);
   }
 
   @Test
   public void testPreemptionBetweenNonSiblingQueues() throws Exception {
     submitApps("root.preemptable.child-1", "root.nonpreemptable.child-1");
-    verifyPreemption();
+    verifyPreemption(2);
   }
 
   @Test
@@ -354,7 +360,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     setNumAMContainersPerNode(2);
     preemptHalfResources("root.preemptable.child-2");
 
-    verifyPreemption();
+    verifyPreemption(2);
 
     ArrayList<RMContainer> containers =
         (ArrayList<RMContainer>) starvingApp.getLiveContainers();
@@ -365,4 +371,24 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     assertTrue("Preempted containers should come from two different "
         + "nodes.", !host0.equals(host1));
   }
+
+  @Test
+  public void testPreemptionBetweenSiblingQueuesWithParentAtFairShare()
+      throws InterruptedException {
+    // Run this test only for fairshare preemption
+    if (!fairsharePreemption) {
+      return;
+    }
+
+    // Let one of the child queues take over the entire cluster
+    takeAllResources("root.preemptable.child-1");
+
+    // Submit a job so half the resources go to parent's sibling
+    preemptHalfResources("root.preemptable-sibling");
+    verifyPreemption(2);
+
+    // Submit a job to the child's sibling to force preemption from the child
+    preemptHalfResources("root.preemptable.child-2");
+    verifyPreemption(1);
+  }
 }
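
The reworked verifyPreemption() above waits for preemption by polling the greedy app's live-container count every 10 ms, up to 1000 iterations, before asserting. The same bounded-polling idea in a generic, self-contained form (names here are made up, not Hadoop test utilities):

    import java.util.function.BooleanSupplier;

    // Generic bounded polling wait in the spirit of the loop in
    // verifyPreemption(): re-check a condition every intervalMs until it holds
    // or maxIterations is exhausted, then report the final state.
    public final class PollingWait {
      public static boolean waitFor(BooleanSupplier condition, int maxIterations,
          long intervalMs) throws InterruptedException {
        for (int i = 0; i < maxIterations; i++) {
          if (condition.getAsBoolean()) {
            return true;
          }
          Thread.sleep(intervalMs);
        }
        return condition.getAsBoolean();
      }

      public static void main(String[] args) throws InterruptedException {
        final long deadline = System.currentTimeMillis() + 50;
        // Condition becomes true after ~50 ms, well inside the 10 s bound.
        System.out.println(waitFor(
            () -> System.currentTimeMillis() >= deadline, 1000, 10));
      }
    }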


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[22/31] hadoop git commit: HDFS-11433. Document missing usages of OfflineEditsViewer processors. Contributed by Yiqun Lin.

Posted by st...@apache.org.
HDFS-11433. Document missing usages of OfflineEditsViewer processors. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a58870a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a58870a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a58870a

Branch: refs/heads/HADOOP-13345
Commit: 4a58870a0427ecdf4c5c895088b96a0b4d1c0c19
Parents: e24ed47
Author: Yiqun Lin <yq...@apache.org>
Authored: Sat Feb 25 10:00:05 2017 +0800
Committer: Yiqun Lin <yq...@apache.org>
Committed: Sat Feb 25 10:00:05 2017 +0800

----------------------------------------------------------------------
 .../src/site/markdown/HdfsEditsViewer.md        | 73 +++++++++++++++++++-
 1 file changed, 72 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a58870a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsEditsViewer.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsEditsViewer.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsEditsViewer.md
index 7a46eb0..5e069bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsEditsViewer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsEditsViewer.md
@@ -40,9 +40,80 @@ The Offline Edits Viewer provides several output processors (unless stated other
 Usage
 -----
 
+### XML Processor
+
+XML processor can create an XML file that contains the edits log information. Users can specify input and output file via -i and -o command-line.
+
+       bash$ bin/hdfs oev -p xml -i edits -o edits.xml
+
+XML processor is the default processor in Offline Edits Viewer, users can also use the following command:
+
        bash$ bin/hdfs oev -i edits -o edits.xml
 
-|                                       Flag | Description |
+This would result in the following output:
+
+       <?xml version="1.0" encoding="UTF-8"?>
+       <EDITS>
+         <EDITS_VERSION>-64</EDITS_VERSION>
+         <RECORD>
+           <OPCODE>OP_START_LOG_SEGMENT</OPCODE>
+           <DATA>
+             <TXID>1</TXID>
+           </DATA>
+         </RECORD>
+         <RECORD>
+           <OPCODE>OP_UPDATE_MASTER_KEY</OPCODE>
+           <DATA>
+             <TXID>2</TXID>
+             <DELEGATION_KEY>
+               <KEY_ID>1</KEY_ID>
+               <EXPIRY_DATE>1487921580728</EXPIRY_DATE>
+               <KEY>2e127ca41c7de215</KEY>
+             </DELEGATION_KEY>
+           </DATA>
+         </RECORD>
+         <RECORD>
+       ...remaining output omitted...
+
+### Binary Processor
+
+Binary processor is the opposite of the XML processor. Users can specify input XML file and output file via -i and -o command-line.
+
+       bash$ bin/hdfs oev -p binary -i edits.xml -o edits
+
+This will reconstruct an edits log file from an XML file.
+
+### Stats Processor
+
+Stats processor is used to aggregate counts of op codes contained in the edits log file. Users can specify this processor by -p option.
+
+       bash$ bin/hdfs oev -p stats -i edits -o edits.stats
+
+The output result of this processor should be like the following output:
+
+       VERSION                             : -64
+       OP_ADD                         (  0): 8
+       OP_RENAME_OLD                  (  1): 1
+       OP_DELETE                      (  2): 1
+       OP_MKDIR                       (  3): 1
+       OP_SET_REPLICATION             (  4): 1
+       OP_DATANODE_ADD                (  5): 0
+       OP_DATANODE_REMOVE             (  6): 0
+       OP_SET_PERMISSIONS             (  7): 1
+       OP_SET_OWNER                   (  8): 1
+       OP_CLOSE                       (  9): 9
+       OP_SET_GENSTAMP_V1             ( 10): 0
+       ...some output omitted...
+       OP_APPEND                      ( 47): 1
+       OP_SET_QUOTA_BY_STORAGETYPE    ( 48): 1
+       OP_INVALID                     ( -1): 0
+
+The output is formatted as a colon separated two column table: OpCode and OpCodeCount. Each OpCode corresponding to the specific operation(s) in NameNode.
+
+Options
+-------
+
+| Flag | Description |
 |:---- |:---- |
 | [`-i` ; `--inputFile`] *input file* | Specify the input edits log file to process. Xml (case insensitive) extension means XML format otherwise binary format is assumed. Required. |
 | [`-o` ; `--outputFile`] *output file* | Specify the output filename, if the specified output processor generates one. If the specified file already exists, it is silently overwritten. Required. |
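
The stats processor output documented above is a two-column, colon-separated table of opcode name (with its numeric code) and count. Purely as an illustration of consuming that format - this is not part of the oev tool - a small parser might look like:

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Illustrative parser for lines such as
    //   "OP_ADD                         (  0): 8"
    // from the stats output shown above. Not part of Hadoop; just a sketch.
    public class EditsStatsParser {
      private static final Pattern LINE =
          Pattern.compile("^(\\S+)\\s+\\(\\s*(-?\\d+)\\):\\s+(\\d+)$");

      public static Map<String, Long> parse(Iterable<String> lines) {
        Map<String, Long> counts = new LinkedHashMap<>();
        for (String line : lines) {
          Matcher m = LINE.matcher(line.trim());
          if (m.matches()) {
            counts.put(m.group(1), Long.parseLong(m.group(3)));
          }
        }
        return counts;
      }

      public static void main(String[] args) {
        System.out.println(parse(Arrays.asList(
            "OP_ADD                         (  0): 8",
            "OP_INVALID                     ( -1): 0")));
        // prints {OP_ADD=8, OP_INVALID=0}
      }
    }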


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[02/31] hadoop git commit: MAPREDUCE-6825. YARNRunner#createApplicationSubmissionContext method is longer than 150 lines (Contributed by Gergely Novák via Daniel Templeton)

Posted by st...@apache.org.
MAPREDUCE-6825. YARNRunner#createApplicationSubmissionContext method is longer than 150 lines (Contributed by Gergely Novák via Daniel Templeton)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/732ee6f0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/732ee6f0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/732ee6f0

Branch: refs/heads/HADOOP-13345
Commit: 732ee6f0b58a12500198c0d934cc570c7490b520
Parents: d150f06
Author: Daniel Templeton <te...@apache.org>
Authored: Wed Feb 22 15:38:11 2017 -0800
Committer: Daniel Templeton <te...@apache.org>
Committed: Wed Feb 22 15:38:11 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/mapred/YARNRunner.java    | 141 +++++++++++--------
 1 file changed, 86 insertions(+), 55 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/732ee6f0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
index 98fe553..228c6af 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
@@ -291,8 +291,7 @@ public class YARNRunner implements ClientProtocol {
   throws IOException, InterruptedException {
     
     addHistoryToken(ts);
-    
-    // Construct necessary information to start the MR AM
+
     ApplicationSubmissionContext appContext =
       createApplicationSubmissionContext(conf, jobSubmitDir, ts);
 
@@ -331,34 +330,15 @@ public class YARNRunner implements ClientProtocol {
     return rsrc;
   }
 
-  public ApplicationSubmissionContext createApplicationSubmissionContext(
-      Configuration jobConf,
-      String jobSubmitDir, Credentials ts) throws IOException {
-    ApplicationId applicationId = resMgrDelegate.getApplicationId();
-
-    // Setup resource requirements
-    Resource capability = recordFactory.newRecordInstance(Resource.class);
-    capability.setMemorySize(
-        conf.getInt(
-            MRJobConfig.MR_AM_VMEM_MB, MRJobConfig.DEFAULT_MR_AM_VMEM_MB
-            )
-        );
-    capability.setVirtualCores(
-        conf.getInt(
-            MRJobConfig.MR_AM_CPU_VCORES, MRJobConfig.DEFAULT_MR_AM_CPU_VCORES
-            )
-        );
-    LOG.debug("AppMaster capability = " + capability);
-
-    // Setup LocalResources
-    Map<String, LocalResource> localResources =
-        new HashMap<String, LocalResource>();
+  private Map<String, LocalResource> setupLocalResources(Configuration jobConf,
+      String jobSubmitDir) throws IOException {
+    Map<String, LocalResource> localResources = new HashMap<>();
 
     Path jobConfPath = new Path(jobSubmitDir, MRJobConfig.JOB_CONF_FILE);
 
-    URL yarnUrlForJobSubmitDir = URL.fromPath(defaultFileContext.getDefaultFileSystem()
-            .resolvePath(
-                defaultFileContext.makeQualified(new Path(jobSubmitDir))));
+    URL yarnUrlForJobSubmitDir = URL.fromPath(defaultFileContext
+        .getDefaultFileSystem().resolvePath(
+            defaultFileContext.makeQualified(new Path(jobSubmitDir))));
     LOG.debug("Creating setup context, jobSubmitDir url is "
         + yarnUrlForJobSubmitDir);
 
@@ -371,7 +351,7 @@ public class YARNRunner implements ClientProtocol {
           FileContext.getFileContext(jobJarPath.toUri(), jobConf),
           jobJarPath,
           LocalResourceType.PATTERN);
-      String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN, 
+      String pattern = conf.getPattern(JobContext.JAR_UNPACK_PATTERN,
           JobConf.UNPACK_JAR_PATTERN_DEFAULT).pattern();
       rc.setPattern(pattern);
       localResources.put(MRJobConfig.JOB_JAR, rc);
@@ -392,13 +372,11 @@ public class YARNRunner implements ClientProtocol {
               new Path(jobSubmitDir, s), LocalResourceType.FILE));
     }
 
-    // Setup security tokens
-    DataOutputBuffer dob = new DataOutputBuffer();
-    ts.writeTokenStorageToStream(dob);
-    ByteBuffer securityTokens  = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+    return localResources;
+  }
 
-    // Setup the command to run the AM
-    List<String> vargs = new ArrayList<String>(8);
+  private List<String> setupAMCommand(Configuration jobConf) {
+    List<String> vargs = new ArrayList<>(8);
     vargs.add(MRApps.crossPlatformifyMREnv(jobConf, Environment.JAVA_HOME)
         + "/bin/java");
 
@@ -409,27 +387,35 @@ public class YARNRunner implements ClientProtocol {
     MRApps.addLog4jSystemProperties(null, vargs, conf);
 
     // Check for Java Lib Path usage in MAP and REDUCE configs
-    warnForJavaLibPath(conf.get(MRJobConfig.MAP_JAVA_OPTS,""), "map", 
-        MRJobConfig.MAP_JAVA_OPTS, MRJobConfig.MAP_ENV);
-    warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,""), "map", 
-        MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS, MRJobConfig.MAPRED_ADMIN_USER_ENV);
-    warnForJavaLibPath(conf.get(MRJobConfig.REDUCE_JAVA_OPTS,""), "reduce", 
-        MRJobConfig.REDUCE_JAVA_OPTS, MRJobConfig.REDUCE_ENV);
-    warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,""), "reduce", 
-        MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS, MRJobConfig.MAPRED_ADMIN_USER_ENV);
+    warnForJavaLibPath(conf.get(MRJobConfig.MAP_JAVA_OPTS, ""),
+        "map",
+        MRJobConfig.MAP_JAVA_OPTS,
+        MRJobConfig.MAP_ENV);
+    warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS, ""),
+        "map",
+        MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,
+        MRJobConfig.MAPRED_ADMIN_USER_ENV);
+    warnForJavaLibPath(conf.get(MRJobConfig.REDUCE_JAVA_OPTS, ""),
+        "reduce",
+        MRJobConfig.REDUCE_JAVA_OPTS,
+        MRJobConfig.REDUCE_ENV);
+    warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS, ""),
+        "reduce",
+        MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,
+        MRJobConfig.MAPRED_ADMIN_USER_ENV);
 
     // Add AM admin command opts before user command opts
     // so that it can be overridden by user
     String mrAppMasterAdminOptions = conf.get(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS,
         MRJobConfig.DEFAULT_MR_AM_ADMIN_COMMAND_OPTS);
-    warnForJavaLibPath(mrAppMasterAdminOptions, "app master", 
+    warnForJavaLibPath(mrAppMasterAdminOptions, "app master",
         MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, MRJobConfig.MR_AM_ADMIN_USER_ENV);
     vargs.add(mrAppMasterAdminOptions);
-    
+
     // Add AM user command opts
     String mrAppMasterUserOptions = conf.get(MRJobConfig.MR_AM_COMMAND_OPTS,
         MRJobConfig.DEFAULT_MR_AM_COMMAND_OPTS);
-    warnForJavaLibPath(mrAppMasterUserOptions, "app master", 
+    warnForJavaLibPath(mrAppMasterUserOptions, "app master",
         MRJobConfig.MR_AM_COMMAND_OPTS, MRJobConfig.MR_AM_ENV);
     vargs.add(mrAppMasterUserOptions);
 
@@ -449,9 +435,14 @@ public class YARNRunner implements ClientProtocol {
         Path.SEPARATOR + ApplicationConstants.STDOUT);
     vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR +
         Path.SEPARATOR + ApplicationConstants.STDERR);
+    return vargs;
+  }
 
+  private ContainerLaunchContext setupContainerLaunchContextForAM(
+      Configuration jobConf, Map<String, LocalResource> localResources,
+      ByteBuffer securityTokens, List<String> vargs) throws IOException {
 
-    Vector<String> vargsFinal = new Vector<String>(8);
+    Vector<String> vargsFinal = new Vector<>(8);
     // Final command
     StringBuilder mergedCommand = new StringBuilder();
     for (CharSequence str : vargs) {
@@ -464,7 +455,7 @@ public class YARNRunner implements ClientProtocol {
 
     // Setup the CLASSPATH in environment
     // i.e. add { Hadoop jars, job jar, CWD } to classpath.
-    Map<String, String> environment = new HashMap<String, String>();
+    Map<String, String> environment = new HashMap<>();
     MRApps.setClasspath(environment, conf);
 
     // Shell
@@ -477,28 +468,68 @@ public class YARNRunner implements ClientProtocol {
         MRApps.crossPlatformifyMREnv(conf, Environment.PWD), conf);
 
     // Setup the environment variables for Admin first
-    MRApps.setEnvFromInputString(environment, 
+    MRApps.setEnvFromInputString(environment,
         conf.get(MRJobConfig.MR_AM_ADMIN_USER_ENV,
             MRJobConfig.DEFAULT_MR_AM_ADMIN_USER_ENV), conf);
     // Setup the environment variables (LD_LIBRARY_PATH, etc)
-    MRApps.setEnvFromInputString(environment, 
+    MRApps.setEnvFromInputString(environment,
         conf.get(MRJobConfig.MR_AM_ENV), conf);
 
     // Parse distributed cache
     MRApps.setupDistributedCache(jobConf, localResources);
 
-    Map<ApplicationAccessType, String> acls
-        = new HashMap<ApplicationAccessType, String>(2);
+    Map<ApplicationAccessType, String> acls = new HashMap<>(2);
     acls.put(ApplicationAccessType.VIEW_APP, jobConf.get(
         MRJobConfig.JOB_ACL_VIEW_JOB, MRJobConfig.DEFAULT_JOB_ACL_VIEW_JOB));
     acls.put(ApplicationAccessType.MODIFY_APP, jobConf.get(
         MRJobConfig.JOB_ACL_MODIFY_JOB,
         MRJobConfig.DEFAULT_JOB_ACL_MODIFY_JOB));
 
+    return ContainerLaunchContext.newInstance(localResources, environment,
+        vargsFinal, null, securityTokens, acls);
+  }
+
+  /**
+   * Constructs all the necessary information to start the MR AM.
+   * @param jobConf the configuration for the MR job
+   * @param jobSubmitDir the directory path for the job
+   * @param ts the security credentials for the job
+   * @return ApplicationSubmissionContext
+   * @throws IOException on IO error (e.g. path resolution)
+   */
+  public ApplicationSubmissionContext createApplicationSubmissionContext(
+      Configuration jobConf, String jobSubmitDir, Credentials ts)
+      throws IOException {
+    ApplicationId applicationId = resMgrDelegate.getApplicationId();
+
+    // Setup resource requirements
+    Resource capability = recordFactory.newRecordInstance(Resource.class);
+    capability.setMemorySize(
+        conf.getInt(
+            MRJobConfig.MR_AM_VMEM_MB, MRJobConfig.DEFAULT_MR_AM_VMEM_MB
+        )
+    );
+    capability.setVirtualCores(
+        conf.getInt(
+            MRJobConfig.MR_AM_CPU_VCORES, MRJobConfig.DEFAULT_MR_AM_CPU_VCORES
+        )
+    );
+    LOG.debug("AppMaster capability = " + capability);
+
+    // Setup LocalResources
+    Map<String, LocalResource> localResources =
+        setupLocalResources(jobConf, jobSubmitDir);
+
+    // Setup security tokens
+    DataOutputBuffer dob = new DataOutputBuffer();
+    ts.writeTokenStorageToStream(dob);
+    ByteBuffer securityTokens =
+        ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+
     // Setup ContainerLaunchContext for AM container
-    ContainerLaunchContext amContainer =
-        ContainerLaunchContext.newInstance(localResources, environment,
-          vargsFinal, null, securityTokens, acls);
+    List<String> vargs = setupAMCommand(jobConf);
+    ContainerLaunchContext amContainer = setupContainerLaunchContextForAM(
+        jobConf, localResources, securityTokens, vargs);
 
     String regex = conf.get(MRJobConfig.MR_JOB_SEND_TOKEN_CONF);
     if (regex != null && !regex.isEmpty()) {
@@ -566,7 +597,7 @@ public class YARNRunner implements ClientProtocol {
 
     appContext.setApplicationType(MRJobConfig.MR_APPLICATION_TYPE);
     if (tagsFromConf != null && !tagsFromConf.isEmpty()) {
-      appContext.setApplicationTags(new HashSet<String>(tagsFromConf));
+      appContext.setApplicationTags(new HashSet<>(tagsFromConf));
     }
 
     String jobPriority = jobConf.get(MRJobConfig.PRIORITY);


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[08/31] hadoop git commit: HADOOP-14091. AbstractFileSystem implementation for 'wasbs' scheme. Contributed by Varada Hemeswari.

Posted by st...@apache.org.
HADOOP-14091. AbstractFileSystem implementation for 'wasbs' scheme. Contributed by Varada Hemeswari.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/82607fce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/82607fce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/82607fce

Branch: refs/heads/HADOOP-13345
Commit: 82607fce39151fc6ba5bced738088e2bc176dc77
Parents: a4d4a23
Author: Mingliang Liu <li...@apache.org>
Authored: Thu Feb 23 13:48:44 2017 -0800
Committer: Mingliang Liu <li...@apache.org>
Committed: Thu Feb 23 13:48:44 2017 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/azure/Wasbs.java  | 47 ++++++++++++++++
 .../fs/azure/TestWasbUriAndConfiguration.java   | 57 ++++++++++++++++++++
 2 files changed, 104 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/82607fce/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/Wasbs.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/Wasbs.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/Wasbs.java
new file mode 100644
index 0000000..0b4a782
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/Wasbs.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.azure;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DelegateToFileSystem;
+
+/**
+ * WASB implementation of AbstractFileSystem for wasbs scheme.
+ * This impl delegates to the old FileSystem
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class Wasbs extends DelegateToFileSystem {
+
+  Wasbs(final URI theUri, final Configuration conf) throws IOException,
+      URISyntaxException {
+    super(theUri, new NativeAzureFileSystem(), conf, "wasbs", false);
+  }
+
+  @Override
+  public int getUriDefaultPort() {
+    return -1;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/82607fce/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
index 9d2770e..194a831 100644
--- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
+++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestWasbUriAndConfiguration.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.azure.AzureBlobStorageTestAccount.CreateOptions;
 import org.junit.After;
 import org.junit.Assert;
@@ -471,6 +472,62 @@ public class TestWasbUriAndConfiguration {
       assertTrue(afs instanceof Wasb);
       assertEquals(-1, afs.getUri().getPort());
     } finally {
+      testAccount.cleanup();
+      FileSystem.closeAll();
+    }
+  }
+
+   /**
+   * Tests the cases when the scheme specified is 'wasbs'.
+   */
+  @Test
+  public void testAbstractFileSystemImplementationForWasbsScheme() throws Exception {
+    try {
+      testAccount = AzureBlobStorageTestAccount.createMock();
+      Configuration conf = testAccount.getFileSystem().getConf();
+      String authority = testAccount.getFileSystem().getUri().getAuthority();
+      URI defaultUri = new URI("wasbs", authority, null, null, null);
+      conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+      conf.set("fs.AbstractFileSystem.wasbs.impl", "org.apache.hadoop.fs.azure.Wasbs");
+      conf.addResource("azure-test.xml");
+
+      FileSystem fs = FileSystem.get(conf);
+      assertTrue(fs instanceof NativeAzureFileSystem);
+      assertEquals("wasbs", fs.getScheme());
+
+      AbstractFileSystem afs = FileContext.getFileContext(conf)
+          .getDefaultFileSystem();
+      assertTrue(afs instanceof Wasbs);
+      assertEquals(-1, afs.getUri().getPort());
+      assertEquals("wasbs", afs.getUri().getScheme());
+    } finally {
+      testAccount.cleanup();
+      FileSystem.closeAll();
+    }
+  }
+
+  @Test
+  public void testNoAbstractFileSystemImplementationSpecifiedForWasbsScheme() throws Exception {
+    try {
+      testAccount = AzureBlobStorageTestAccount.createMock();
+      Configuration conf = testAccount.getFileSystem().getConf();
+      String authority = testAccount.getFileSystem().getUri().getAuthority();
+      URI defaultUri = new URI("wasbs", authority, null, null, null);
+      conf.set(FS_DEFAULT_NAME_KEY, defaultUri.toString());
+
+      FileSystem fs = FileSystem.get(conf);
+      assertTrue(fs instanceof NativeAzureFileSystem);
+      assertEquals("wasbs", fs.getScheme());
+
+      // should throw if 'fs.AbstractFileSystem.wasbs.impl'' is not specified
+      try{
+        FileContext.getFileContext(conf).getDefaultFileSystem();
+        fail("Should've thrown.");
+      }catch(UnsupportedFileSystemException e){
+      }
+
+    } finally {
+      testAccount.cleanup();
       FileSystem.closeAll();
     }
   }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[20/31] hadoop git commit: HDFS-11295. Check storage remaining instead of node remaining in BlockPlacementPolicyDefault.chooseReplicaToDelete(). Contributed by Marton Elek.

Posted by st...@apache.org.
HDFS-11295. Check storage remaining instead of node remaining in BlockPlacementPolicyDefault.chooseReplicaToDelete(). Contributed by Marton Elek.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2b3ba9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2b3ba9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2b3ba9b

Branch: refs/heads/HADOOP-13345
Commit: d2b3ba9b8fb76753fa1b51661dacbde74aa5c6df
Parents: 289bc50
Author: Arpit Agarwal <ar...@apache.org>
Authored: Fri Feb 24 15:44:11 2017 -0800
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Fri Feb 24 15:44:11 2017 -0800

----------------------------------------------------------------------
 .../BlockPlacementPolicyDefault.java            |  2 +-
 .../blockmanagement/DatanodeStorageInfo.java    |  5 +++
 .../blockmanagement/TestReplicationPolicy.java  | 35 ++++++++++++++------
 .../TestReplicationPolicyWithNodeGroup.java     | 23 ++++++++++---
 4 files changed, 49 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b3ba9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index eb54667..7676334 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -968,7 +968,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       }
 
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
-      long free = node.getRemaining();
+      long free = storage.getRemaining();
       long lastHeartbeat = node.getLastUpdateMonotonic();
       if (lastHeartbeat < oldestHeartbeat) {
         oldestHeartbeat = lastHeartbeat;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b3ba9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index b4c8aaa..ab666b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -388,6 +388,11 @@ public class DatanodeStorageInfo {
     return null;
   }
 
+  @VisibleForTesting
+  void setRemainingForTests(int remaining) {
+    this.remaining = remaining;
+  }
+
   static enum AddBlockResult {
     ADDED, REPLACED, ALREADY_EXIST
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b3ba9b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 1af013d..27dcbf1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -950,24 +950,31 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     List<DatanodeStorageInfo> replicaList = new ArrayList<>();
     final Map<String, List<DatanodeStorageInfo>> rackMap
         = new HashMap<String, List<DatanodeStorageInfo>>();
-    
-    dataNodes[0].setRemaining(4*1024*1024);
+
+    storages[0].setRemainingForTests(4*1024*1024);
+    dataNodes[0].setRemaining(calculateRemaining(dataNodes[0]));
     replicaList.add(storages[0]);
-    
-    dataNodes[1].setRemaining(3*1024*1024);
+
+    storages[1].setRemainingForTests(3*1024*1024);
+    dataNodes[1].setRemaining(calculateRemaining(dataNodes[1]));
     replicaList.add(storages[1]);
-    
-    dataNodes[2].setRemaining(2*1024*1024);
+
+    storages[2].setRemainingForTests(2*1024*1024);
+    dataNodes[2].setRemaining(calculateRemaining(dataNodes[2]));
     replicaList.add(storages[2]);
-    
-    dataNodes[5].setRemaining(1*1024*1024);
+
+    //Even if this node has the most space, because the storage[5] has
+    //the lowest it should be chosen in case of block delete.
+    storages[4].setRemainingForTests(100 * 1024 * 1024);
+    storages[5].setRemainingForTests(512 * 1024);
+    dataNodes[5].setRemaining(calculateRemaining(dataNodes[5]));
     replicaList.add(storages[5]);
-    
+
     // Refresh the last update time for all the datanodes
     for (int i = 0; i < dataNodes.length; i++) {
       DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
     }
-    
+
     List<DatanodeStorageInfo> first = new ArrayList<>();
     List<DatanodeStorageInfo> second = new ArrayList<>();
     replicator.splitNodesWithRack(replicaList, replicaList, rackMap, first,
@@ -999,6 +1006,14 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     assertEquals(chosen, storages[1]);
   }
 
+  private long calculateRemaining(DatanodeDescriptor dataNode) {
+    long sum = 0;
+    for (DatanodeStorageInfo storageInfo: dataNode.getStorageInfos()){
+      sum += storageInfo.getRemaining();
+    }
+    return sum;
+  }
+
   @Test
   public void testChooseReplicasToDelete() throws Exception {
     Collection<DatanodeStorageInfo> nonExcess = new ArrayList<>();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2b3ba9b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index 2f184bb..ebd4b81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -625,16 +625,21 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
   public void testChooseReplicaToDelete() throws Exception {
     List<DatanodeStorageInfo> replicaList = new ArrayList<>();
     final Map<String, List<DatanodeStorageInfo>> rackMap = new HashMap<>();
-    dataNodes[0].setRemaining(4*1024*1024);
+    storages[0].setRemainingForTests(4*1024*1024);
+    dataNodes[0].setRemaining(calculateRemaining(dataNodes[0]));
     replicaList.add(storages[0]);
 
-    dataNodes[1].setRemaining(3*1024*1024);
+    storages[1].setRemainingForTests(3*1024*1024);
+    dataNodes[1].setRemaining(calculateRemaining(dataNodes[1]));
     replicaList.add(storages[1]);
 
-    dataNodes[2].setRemaining(2*1024*1024);
+    storages[2].setRemainingForTests(2*1024*1024);
+    dataNodes[2].setRemaining(calculateRemaining(dataNodes[2]));
     replicaList.add(storages[2]);
 
-    dataNodes[5].setRemaining(1*1024*1024);
+    storages[4].setRemainingForTests(100 * 1024 * 1024);
+    storages[5].setRemainingForTests(512 * 1024);
+    dataNodes[5].setRemaining(calculateRemaining(dataNodes[5]));
     replicaList.add(storages[5]);
 
     List<DatanodeStorageInfo> first = new ArrayList<>();
@@ -671,7 +676,15 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
         first, second, excessTypes, rackMap);
     assertEquals(chosen, storages[5]);
   }
-  
+
+  private long calculateRemaining(DatanodeDescriptor dataNode) {
+    long sum = 0;
+    for (DatanodeStorageInfo storageInfo: dataNode.getStorageInfos()){
+      sum += storageInfo.getRemaining();
+    }
+    return sum;
+  }
+
   /**
    * Test replica placement policy in case of boundary topology.
    * Rack 2 has only 1 node group & can't be placed with two replicas
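
The one-line change to BlockPlacementPolicyDefault above means the excess-replica chooser now compares the free space of the individual storage holding the replica, not the aggregate remaining space of the whole datanode, which is what the updated tests (storages[4] large, storages[5] tiny on the same node) exercise. A simplified sketch of just that selection rule, ignoring the heartbeat-staleness criterion the real policy also applies (Replica is a made-up stand-in, not DatanodeStorageInfo):

    import java.util.Arrays;
    import java.util.List;

    // Simplified illustration of the fixed rule: among candidate replicas,
    // prefer deleting the one whose particular storage has the least space
    // remaining, regardless of how much the node has overall.
    public class LeastRemainingStorageSketch {
      static class Replica {
        final String storageId;
        final long storageRemaining;   // bytes free on this specific storage
        Replica(String storageId, long storageRemaining) {
          this.storageId = storageId;
          this.storageRemaining = storageRemaining;
        }
      }

      static Replica chooseReplicaToDelete(List<Replica> candidates) {
        Replica chosen = null;
        for (Replica r : candidates) {
          if (chosen == null || r.storageRemaining < chosen.storageRemaining) {
            chosen = r;
          }
        }
        return chosen;
      }

      public static void main(String[] args) {
        Replica chosen = chooseReplicaToDelete(Arrays.asList(
            new Replica("storage-0", 4L * 1024 * 1024),
            new Replica("storage-1", 3L * 1024 * 1024),
            new Replica("storage-5", 512L * 1024)));
        System.out.println(chosen.storageId);   // storage-5
      }
    }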


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[16/31] hadoop git commit: YARN-4779. Fix AM container allocation logic in SLS. Contributed by Wangda Tan.

Posted by st...@apache.org.
YARN-4779. Fix AM container allocation logic in SLS. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b32ffa27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b32ffa27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b32ffa27

Branch: refs/heads/HADOOP-13345
Commit: b32ffa2753e83615b980721b6067fcc35ce54372
Parents: e8694de
Author: Sunil G <su...@apache.org>
Authored: Fri Feb 24 21:39:25 2017 +0530
Committer: Sunil G <su...@apache.org>
Committed: Fri Feb 24 21:39:25 2017 +0530

----------------------------------------------------------------------
 .../org/apache/hadoop/yarn/sls/SLSRunner.java   |  20 +-
 .../hadoop/yarn/sls/appmaster/AMSimulator.java  |  89 +++++---
 .../yarn/sls/appmaster/MRAMSimulator.java       | 218 ++++++++-----------
 .../sls/resourcemanager/MockAMLauncher.java     | 115 ++++++++++
 .../sls/scheduler/SLSCapacityScheduler.java     |  24 ++
 5 files changed, 305 insertions(+), 161 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b32ffa27/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
index 61738fb..61b7f36 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
@@ -32,6 +32,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
 
 import com.fasterxml.jackson.core.JsonFactory;
 import com.fasterxml.jackson.databind.ObjectMapper;
@@ -55,12 +56,14 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.apache.hadoop.yarn.sls.appmaster.AMSimulator;
 import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
 import org.apache.hadoop.yarn.sls.nodemanager.NMSimulator;
+import org.apache.hadoop.yarn.sls.resourcemanager.MockAMLauncher;
 import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
 import org.apache.hadoop.yarn.sls.scheduler.ResourceSchedulerWrapper;
 import org.apache.hadoop.yarn.sls.scheduler.SLSCapacityScheduler;
@@ -119,10 +122,10 @@ public class SLSRunner {
     this.printSimulation = printsimulation;
     metricsOutputDir = outputDir;
     
-    nmMap = new HashMap<NodeId, NMSimulator>();
-    queueAppNumMap = new HashMap<String, Integer>();
-    amMap = new HashMap<String, AMSimulator>();
-    amClassMap = new HashMap<String, Class>();
+    nmMap = new HashMap<>();
+    queueAppNumMap = new HashMap<>();
+    amMap = new ConcurrentHashMap<>();
+    amClassMap = new HashMap<>();
     
     // runner configuration
     conf = new Configuration(false);
@@ -179,7 +182,14 @@ public class SLSRunner {
     }
 
     rmConf.set(SLSConfiguration.METRICS_OUTPUT_DIR, metricsOutputDir);
-    rm = new ResourceManager();
+
+    final SLSRunner se = this;
+    rm = new ResourceManager() {
+      @Override
+      protected ApplicationMasterLauncher createAMLauncher() {
+        return new MockAMLauncher(se, this.rmContext, amMap);
+      }
+    };
     rm.init(rmConf);
     rm.start();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b32ffa27/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
index d61bf02..5b03d51 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
@@ -49,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
+import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
@@ -66,6 +67,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.util.resource.Resources;
 import org.apache.log4j.Logger;
 
 import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
@@ -107,11 +109,19 @@ public abstract class AMSimulator extends TaskRunner.Task {
   // progress
   protected int totalContainers;
   protected int finishedContainers;
+
+  // waiting for AM container
+  volatile boolean isAMContainerRunning = false;
+  volatile Container amContainer;
   
   protected final Logger LOG = Logger.getLogger(AMSimulator.class);
-  
+
+  // resource for AM container
+  private final static int MR_AM_CONTAINER_RESOURCE_MEMORY_MB = 1024;
+  private final static int MR_AM_CONTAINER_RESOURCE_VCORES = 1;
+
   public AMSimulator() {
-    this.responseQueue = new LinkedBlockingQueue<AllocateResponse>();
+    this.responseQueue = new LinkedBlockingQueue<>();
   }
 
   public void init(int id, int heartbeatInterval, 
@@ -142,23 +152,30 @@ public abstract class AMSimulator extends TaskRunner.Task {
     // submit application, waiting until ACCEPTED
     submitApp();
 
-    // register application master
-    registerAM();
-
     // track app metrics
     trackApp();
   }
 
+  public synchronized void notifyAMContainerLaunched(Container masterContainer)
+      throws Exception {
+    this.amContainer = masterContainer;
+    this.appAttemptId = masterContainer.getId().getApplicationAttemptId();
+    registerAM();
+    isAMContainerRunning = true;
+  }
+
   @Override
   public void middleStep() throws Exception {
-    // process responses in the queue
-    processResponseQueue();
-    
-    // send out request
-    sendContainerRequest();
-    
-    // check whether finish
-    checkStop();
+    if (isAMContainerRunning) {
+      // process responses in the queue
+      processResponseQueue();
+
+      // send out request
+      sendContainerRequest();
+
+      // check whether finish
+      checkStop();
+    }
   }
 
   @Override
@@ -168,6 +185,22 @@ public abstract class AMSimulator extends TaskRunner.Task {
     if (isTracked) {
       untrackApp();
     }
+
+    // Finish AM container
+    if (amContainer != null) {
+      LOG.info("AM container = " + amContainer.getId() + " reported to finish");
+      se.getNmMap().get(amContainer.getNodeId()).cleanupContainer(
+          amContainer.getId());
+    } else {
+      LOG.info("AM container is null");
+    }
+
+    if (null == appAttemptId) {
+      // If appAttemptId == null, AM is not launched from RM's perspective, so
+      // it's unnecessary to finish am as well
+      return;
+    }
+
     // unregister application master
     final FinishApplicationMasterRequest finishAMRequest = recordFactory
                   .newRecordInstance(FinishApplicationMasterRequest.class);
@@ -256,7 +289,9 @@ public abstract class AMSimulator extends TaskRunner.Task {
     conLauContext.setLocalResources(new HashMap<String, LocalResource>());
     conLauContext.setServiceData(new HashMap<String, ByteBuffer>());
     appSubContext.setAMContainerSpec(conLauContext);
-    appSubContext.setUnmanagedAM(true);
+    appSubContext.setResource(Resources
+        .createResource(MR_AM_CONTAINER_RESOURCE_MEMORY_MB,
+            MR_AM_CONTAINER_RESOURCE_VCORES));
     subAppRequest.setApplicationSubmissionContext(appSubContext);
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
     ugi.doAs(new PrivilegedExceptionAction<Object>() {
@@ -267,22 +302,6 @@ public abstract class AMSimulator extends TaskRunner.Task {
       }
     });
     LOG.info(MessageFormat.format("Submit a new application {0}", appId));
-    
-    // waiting until application ACCEPTED
-    RMApp app = rm.getRMContext().getRMApps().get(appId);
-    while(app.getState() != RMAppState.ACCEPTED) {
-      Thread.sleep(10);
-    }
-
-    // Waiting until application attempt reach LAUNCHED
-    // "Unmanaged AM must register after AM attempt reaches LAUNCHED state"
-    this.appAttemptId = rm.getRMContext().getRMApps().get(appId)
-        .getCurrentAppAttempt().getAppAttemptId();
-    RMAppAttempt rmAppAttempt = rm.getRMContext().getRMApps().get(appId)
-        .getCurrentAppAttempt();
-    while (rmAppAttempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED) {
-      Thread.sleep(10);
-    }
   }
 
   private void registerAM()
@@ -335,7 +354,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
     for (ContainerSimulator cs : csList) {
       String rackHostNames[] = SLSUtils.getRackHostName(cs.getHostname());
       // check rack local
-      String rackname = rackHostNames[0];
+      String rackname = "/" + rackHostNames[0];
       if (rackLocalRequestMap.containsKey(rackname)) {
         rackLocalRequestMap.get(rackname).setNumContainers(
             rackLocalRequestMap.get(rackname).getNumContainers() + 1);
@@ -383,4 +402,12 @@ public abstract class AMSimulator extends TaskRunner.Task {
   public int getNumTasks() {
     return totalContainers;
   }
+
+  public ApplicationId getApplicationId() {
+    return appId;
+  }
+
+  public ApplicationAttemptId getApplicationAttemptId() {
+    return appAttemptId;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b32ffa27/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
index da267a1..e726b09 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
@@ -27,6 +27,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.avro.Protocol;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -63,10 +64,10 @@ public class MRAMSimulator extends AMSimulator {
   
   private static final int PRIORITY_REDUCE = 10;
   private static final int PRIORITY_MAP = 20;
-  
+
   // pending maps
   private LinkedList<ContainerSimulator> pendingMaps =
-          new LinkedList<ContainerSimulator>();
+          new LinkedList<>();
   
   // pending failed maps
   private LinkedList<ContainerSimulator> pendingFailedMaps =
@@ -107,14 +108,9 @@ public class MRAMSimulator extends AMSimulator {
   private int mapTotal = 0;
   private int reduceFinished = 0;
   private int reduceTotal = 0;
-  // waiting for AM container 
-  private boolean isAMContainerRunning = false;
-  private Container amContainer;
+
   // finished
   private boolean isFinished = false;
-  // resource for AM container
-  private final static int MR_AM_CONTAINER_RESOURCE_MEMORY_MB = 1024;
-  private final static int MR_AM_CONTAINER_RESOURCE_VCORES = 1;
 
   public final Logger LOG = Logger.getLogger(MRAMSimulator.class);
 
@@ -131,83 +127,34 @@ public class MRAMSimulator extends AMSimulator {
     for (ContainerSimulator cs : containerList) {
       if (cs.getType().equals("map")) {
         cs.setPriority(PRIORITY_MAP);
-        pendingMaps.add(cs);
+        allMaps.add(cs);
       } else if (cs.getType().equals("reduce")) {
         cs.setPriority(PRIORITY_REDUCE);
-        pendingReduces.add(cs);
+        allReduces.add(cs);
       }
     }
-    allMaps.addAll(pendingMaps);
-    allReduces.addAll(pendingReduces);
-    mapTotal = pendingMaps.size();
-    reduceTotal = pendingReduces.size();
+
+    LOG.info(MessageFormat
+        .format("Added new job with {0} mapper and {1} reducers",
+            allMaps.size(), allReduces.size()));
+
+    mapTotal = allMaps.size();
+    reduceTotal = allReduces.size();
     totalContainers = mapTotal + reduceTotal;
   }
 
   @Override
-  public void firstStep() throws Exception {
-    super.firstStep();
-    
-    requestAMContainer();
-  }
-
-  /**
-   * send out request for AM container
-   */
-  protected void requestAMContainer()
-          throws YarnException, IOException, InterruptedException {
-    List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
-    ResourceRequest amRequest = createResourceRequest(
-            BuilderUtils.newResource(MR_AM_CONTAINER_RESOURCE_MEMORY_MB,
-                    MR_AM_CONTAINER_RESOURCE_VCORES),
-            ResourceRequest.ANY, 1, 1);
-    ask.add(amRequest);
-    LOG.debug(MessageFormat.format("Application {0} sends out allocate " +
-            "request for its AM", appId));
-    final AllocateRequest request = this.createAllocateRequest(ask);
-
-    UserGroupInformation ugi =
-            UserGroupInformation.createRemoteUser(appAttemptId.toString());
-    Token<AMRMTokenIdentifier> token = rm.getRMContext().getRMApps()
-            .get(appAttemptId.getApplicationId())
-            .getRMAppAttempt(appAttemptId).getAMRMToken();
-    ugi.addTokenIdentifier(token.decodeIdentifier());
-    AllocateResponse response = ugi.doAs(
-            new PrivilegedExceptionAction<AllocateResponse>() {
-      @Override
-      public AllocateResponse run() throws Exception {
-        return rm.getApplicationMasterService().allocate(request);
-      }
-    });
-    if (response != null) {
-      responseQueue.put(response);
+  public synchronized void notifyAMContainerLaunched(Container masterContainer)
+      throws Exception {
+    if (null != masterContainer) {
+      restart();
+      super.notifyAMContainerLaunched(masterContainer);
     }
   }
 
   @Override
   @SuppressWarnings("unchecked")
-  protected void processResponseQueue()
-          throws InterruptedException, YarnException, IOException {
-    // Check whether receive the am container
-    if (!isAMContainerRunning) {
-      if (!responseQueue.isEmpty()) {
-        AllocateResponse response = responseQueue.take();
-        if (response != null
-            && !response.getAllocatedContainers().isEmpty()) {
-          // Get AM container
-          Container container = response.getAllocatedContainers().get(0);
-          se.getNmMap().get(container.getNodeId())
-              .addNewContainer(container, -1L);
-          // Start AM container
-          amContainer = container;
-          LOG.debug(MessageFormat.format("Application {0} starts its " +
-              "AM container ({1}).", appId, amContainer.getId()));
-          isAMContainerRunning = true;
-        }
-      }
-      return;
-    }
-
+  protected void processResponseQueue() throws Exception {
     while (! responseQueue.isEmpty()) {
       AllocateResponse response = responseQueue.take();
 
@@ -228,12 +175,16 @@ public class MRAMSimulator extends AMSimulator {
               assignedReduces.remove(containerId);
               reduceFinished ++;
               finishedContainers ++;
-            } else {
+            } else if (amContainer.getId().equals(containerId)){
               // am container released event
               isFinished = true;
               LOG.info(MessageFormat.format("Application {0} goes to " +
                       "finish.", appId));
             }
+
+            if (mapFinished >= mapTotal && reduceFinished >= reduceTotal) {
+              lastStep();
+            }
           } else {
             // container to be killed
             if (assignedMaps.containsKey(containerId)) {
@@ -244,10 +195,9 @@ public class MRAMSimulator extends AMSimulator {
               LOG.debug(MessageFormat.format("Application {0} has one " +
                       "reducer killed ({1}).", appId, containerId));
               pendingFailedReduces.add(assignedReduces.remove(containerId));
-            } else {
+            } else if (amContainer.getId().equals(containerId)){
               LOG.info(MessageFormat.format("Application {0}'s AM is " +
-                      "going to be killed. Restarting...", appId));
-              restart();
+                      "going to be killed. Waiting for rescheduling...", appId));
             }
           }
         }
@@ -255,11 +205,8 @@ public class MRAMSimulator extends AMSimulator {
       
       // check finished
       if (isAMContainerRunning &&
-              (mapFinished == mapTotal) &&
-              (reduceFinished == reduceTotal)) {
-        // to release the AM container
-        se.getNmMap().get(amContainer.getNodeId())
-                .cleanupContainer(amContainer.getId());
+              (mapFinished >= mapTotal) &&
+              (reduceFinished >= reduceTotal)) {
         isAMContainerRunning = false;
         LOG.debug(MessageFormat.format("Application {0} sends out event " +
                 "to clean up its AM container.", appId));
@@ -293,21 +240,38 @@ public class MRAMSimulator extends AMSimulator {
    */
   private void restart()
           throws YarnException, IOException, InterruptedException {
-    // clear 
-    finishedContainers = 0;
+    // clear
     isFinished = false;
-    mapFinished = 0;
-    reduceFinished = 0;
     pendingFailedMaps.clear();
     pendingMaps.clear();
     pendingReduces.clear();
     pendingFailedReduces.clear();
-    pendingMaps.addAll(allMaps);
-    pendingReduces.addAll(pendingReduces);
-    isAMContainerRunning = false;
+
+    // Only add totalMaps - finishedMaps
+    int added = 0;
+    for (ContainerSimulator cs : allMaps) {
+      if (added >= mapTotal - mapFinished) {
+        break;
+      }
+      pendingMaps.add(cs);
+    }
+
+    // And same, only add totalReduces - finishedReduces
+    added = 0;
+    for (ContainerSimulator cs : allReduces) {
+      if (added >= reduceTotal - reduceFinished) {
+        break;
+      }
+      pendingReduces.add(cs);
+    }
     amContainer = null;
-    // resent am container request
-    requestAMContainer();
+  }
+
+  private List<ContainerSimulator> mergeLists(List<ContainerSimulator> left, List<ContainerSimulator> right) {
+    List<ContainerSimulator> list = new ArrayList<>();
+    list.addAll(left);
+    list.addAll(right);
+    return list;
   }
 
   @Override
@@ -319,44 +283,48 @@ public class MRAMSimulator extends AMSimulator {
 
     // send out request
     List<ResourceRequest> ask = null;
-    if (isAMContainerRunning) {
-      if (mapFinished != mapTotal) {
-        // map phase
-        if (! pendingMaps.isEmpty()) {
-          ask = packageRequests(pendingMaps, PRIORITY_MAP);
-          LOG.debug(MessageFormat.format("Application {0} sends out " +
-                  "request for {1} mappers.", appId, pendingMaps.size()));
-          scheduledMaps.addAll(pendingMaps);
-          pendingMaps.clear();
-        } else if (! pendingFailedMaps.isEmpty() && scheduledMaps.isEmpty()) {
-          ask = packageRequests(pendingFailedMaps, PRIORITY_MAP);
-          LOG.debug(MessageFormat.format("Application {0} sends out " +
-                  "requests for {1} failed mappers.", appId,
-                  pendingFailedMaps.size()));
-          scheduledMaps.addAll(pendingFailedMaps);
-          pendingFailedMaps.clear();
-        }
-      } else if (reduceFinished != reduceTotal) {
-        // reduce phase
-        if (! pendingReduces.isEmpty()) {
-          ask = packageRequests(pendingReduces, PRIORITY_REDUCE);
-          LOG.debug(MessageFormat.format("Application {0} sends out " +
-                  "requests for {1} reducers.", appId, pendingReduces.size()));
-          scheduledReduces.addAll(pendingReduces);
-          pendingReduces.clear();
-        } else if (! pendingFailedReduces.isEmpty()
-                && scheduledReduces.isEmpty()) {
-          ask = packageRequests(pendingFailedReduces, PRIORITY_REDUCE);
-          LOG.debug(MessageFormat.format("Application {0} sends out " +
-                  "request for {1} failed reducers.", appId,
-                  pendingFailedReduces.size()));
-          scheduledReduces.addAll(pendingFailedReduces);
-          pendingFailedReduces.clear();
-        }
+    if (mapFinished != mapTotal) {
+      // map phase
+      if (!pendingMaps.isEmpty()) {
+        ask = packageRequests(mergeLists(pendingMaps, scheduledMaps),
+            PRIORITY_MAP);
+        LOG.debug(MessageFormat
+            .format("Application {0} sends out " + "request for {1} mappers.",
+                appId, pendingMaps.size()));
+        scheduledMaps.addAll(pendingMaps);
+        pendingMaps.clear();
+      } else if (!pendingFailedMaps.isEmpty()) {
+        ask = packageRequests(mergeLists(pendingFailedMaps, scheduledMaps),
+            PRIORITY_MAP);
+        LOG.debug(MessageFormat.format(
+            "Application {0} sends out " + "requests for {1} failed mappers.",
+            appId, pendingFailedMaps.size()));
+        scheduledMaps.addAll(pendingFailedMaps);
+        pendingFailedMaps.clear();
+      }
+    } else if (reduceFinished != reduceTotal) {
+      // reduce phase
+      if (!pendingReduces.isEmpty()) {
+        ask = packageRequests(mergeLists(pendingReduces, scheduledReduces),
+            PRIORITY_REDUCE);
+        LOG.debug(MessageFormat
+            .format("Application {0} sends out " + "requests for {1} reducers.",
+                appId, pendingReduces.size()));
+        scheduledReduces.addAll(pendingReduces);
+        pendingReduces.clear();
+      } else if (!pendingFailedReduces.isEmpty()) {
+        ask = packageRequests(mergeLists(pendingFailedReduces, scheduledReduces),
+            PRIORITY_REDUCE);
+        LOG.debug(MessageFormat.format(
+            "Application {0} sends out " + "request for {1} failed reducers.",
+            appId, pendingFailedReduces.size()));
+        scheduledReduces.addAll(pendingFailedReduces);
+        pendingFailedReduces.clear();
       }
     }
+
     if (ask == null) {
-      ask = new ArrayList<ResourceRequest>();
+      ask = new ArrayList<>();
     }
     
     final AllocateRequest request = createAllocateRequest(ask);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b32ffa27/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/resourcemanager/MockAMLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/resourcemanager/MockAMLauncher.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/resourcemanager/MockAMLauncher.java
new file mode 100644
index 0000000..20cf3e5
--- /dev/null
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/resourcemanager/MockAMLauncher.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.sls.resourcemanager;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl;
+import org.apache.hadoop.yarn.sls.SLSRunner;
+import org.apache.hadoop.yarn.sls.appmaster.AMSimulator;
+
+import java.util.Map;
+
+public class MockAMLauncher extends ApplicationMasterLauncher
+    implements EventHandler<AMLauncherEvent> {
+  private static final Log LOG = LogFactory.getLog(
+      MockAMLauncher.class);
+
+  Map<String, AMSimulator> amMap;
+  SLSRunner se;
+
+  public MockAMLauncher(SLSRunner se, RMContext rmContext,
+      Map<String, AMSimulator> amMap) {
+    super(rmContext);
+    this.amMap = amMap;
+    this.se = se;
+  }
+
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    // Do nothing
+  }
+
+  @Override
+  protected void serviceStart() throws Exception {
+    // Do nothing
+  }
+
+  private void setupAMRMToken(RMAppAttempt appAttempt) {
+    // Setup AMRMToken
+    Token<AMRMTokenIdentifier> amrmToken =
+        super.context.getAMRMTokenSecretManager().createAndGetAMRMToken(
+            appAttempt.getAppAttemptId());
+    ((RMAppAttemptImpl) appAttempt).setAMRMToken(amrmToken);
+  }
+
+  @Override
+  @SuppressWarnings("unchecked")
+  public void handle(AMLauncherEvent event) {
+    if (AMLauncherEventType.LAUNCH == event.getType()) {
+      ApplicationId appId =
+          event.getAppAttempt().getAppAttemptId().getApplicationId();
+
+      // find AMSimulator
+      for (AMSimulator ams : amMap.values()) {
+        if (ams.getApplicationId() != null && ams.getApplicationId().equals(
+            appId)) {
+          try {
+            Container amContainer = event.getAppAttempt().getMasterContainer();
+
+            setupAMRMToken(event.getAppAttempt());
+
+            // Notify RMAppAttempt to change state
+            super.context.getDispatcher().getEventHandler().handle(
+                new RMAppAttemptEvent(event.getAppAttempt().getAppAttemptId(),
+                    RMAppAttemptEventType.LAUNCHED));
+
+            ams.notifyAMContainerLaunched(
+                event.getAppAttempt().getMasterContainer());
+            LOG.info("Notify AM launcher launched:" + amContainer.getId());
+
+            se.getNmMap().get(amContainer.getNodeId())
+                .addNewContainer(amContainer, 100000000L);
+
+            return;
+          } catch (Exception e) {
+            throw new YarnRuntimeException(e);
+          }
+        }
+      }
+
+      throw new YarnRuntimeException(
+          "Didn't find any AMSimulator for applicationId=" + appId);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b32ffa27/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
index 8388273..cd4377e 100644
--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
+++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
@@ -556,6 +556,30 @@ public class SLSCapacityScheduler extends CapacityScheduler implements
         }
       }
     );
+    metrics.register("variable.cluster.reserved.memory",
+        new Gauge<Long>() {
+          @Override
+          public Long getValue() {
+            if(getRootQueueMetrics() == null) {
+              return 0L;
+            } else {
+              return getRootQueueMetrics().getReservedMB();
+            }
+          }
+        }
+    );
+    metrics.register("variable.cluster.reserved.vcores",
+        new Gauge<Integer>() {
+          @Override
+          public Integer getValue() {
+            if(getRootQueueMetrics() == null) {
+              return 0;
+            } else {
+              return getRootQueueMetrics().getReservedVirtualCores();
+            }
+          }
+        }
+    );
   }
 
   private void registerContainerAppNumMetrics() {
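The two new metrics follow the stock Dropwizard (Codahale) Gauge pattern already used elsewhere in SLSCapacityScheduler. Below is a self-contained sketch of that pattern with an illustrative registry and value source rather than the real SLS wiring; the metric name is copied from the diff, everything else is assumed for the example.

import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;

public class ReservedMemoryGaugeExample {
  public static void main(String[] args) {
    MetricRegistry metrics = new MetricRegistry();
    // Hypothetical value source; in SLS the value comes from the root QueueMetrics.
    final long[] reservedMB = {0L};

    metrics.register("variable.cluster.reserved.memory", new Gauge<Long>() {
      @Override
      public Long getValue() {
        // Gauges are read lazily by whichever reporter polls the registry.
        return reservedMB[0];
      }
    });

    reservedMB[0] = 2048L;
    System.out.println(
        metrics.getGauges().get("variable.cluster.reserved.memory").getValue());
  }
}

Because gauges are evaluated only when a reporter polls them, the diff guards against a null getRootQueueMetrics() at read time rather than at registration time.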




[19/31] hadoop git commit: HADOOP-14116: FailoverOnNetworkExceptionRetry does not wait when failing over on certain exceptions. Contributed by Jian He

Posted by st...@apache.org.
HADOOP-14116: FailoverOnNetworkExceptionRetry does not wait when failing over
on certain exceptions. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/289bc50e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/289bc50e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/289bc50e

Branch: refs/heads/HADOOP-13345
Commit: 289bc50e663b882956878eeaefe0eaa1ef4ed39e
Parents: 53d372a
Author: Xuan <xg...@apache.org>
Authored: Fri Feb 24 11:42:23 2017 -0800
Committer: Xuan <xg...@apache.org>
Committed: Fri Feb 24 11:42:23 2017 -0800

----------------------------------------------------------------------
 .../src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/289bc50e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 0c523a5..d6f3e04 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -683,7 +683,8 @@ public class RetryPolicies {
       } else if (e instanceof SocketException
           || (e instanceof IOException && !(e instanceof RemoteException))) {
         if (isIdempotentOrAtMostOnce) {
-          return RetryAction.FAILOVER_AND_RETRY;
+          return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
+              getFailoverOrRetrySleepTime(retries));
         } else {
           return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
               "the invoked method is not idempotent, and unable to determine "




[29/31] hadoop git commit: MAPREDUCE-6841. Fix dead link in MapReduce tutorial document. Contributed by Victor Nee.

Posted by st...@apache.org.
MAPREDUCE-6841. Fix dead link in MapReduce tutorial document. Contributed by Victor Nee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d336838
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d336838
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d336838

Branch: refs/heads/HADOOP-13345
Commit: 4d33683882cc41939c512db221fd8ee59ecd52a8
Parents: 9db2e0c
Author: Akira Ajisaka <aa...@apache.org>
Authored: Mon Feb 27 19:45:26 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Mon Feb 27 19:46:37 2017 +0900

----------------------------------------------------------------------
 .../src/site/markdown/MapReduceTutorial.md                         | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d336838/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
index 6747adc..574c404 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/MapReduceTutorial.md
@@ -417,7 +417,7 @@ indicates the set of input files
 [FileInputFormat.addInputPath(Job, Path)](../../api/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.html)) and
 ([FileInputFormat.setInputPaths(Job, String...)](../../api/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.html)/
 [FileInputFormat.addInputPaths(Job, String))](../../api/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.html) and where the output files should be written
-([FileOutputFormat.setOutputPath(Path)](../../api/org/apache/hadoop/mapreduce/lib/input/FileOutputFormat.html)).
+([FileOutputFormat.setOutputPath(Path)](../../api/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.html)).
 
 Optionally, `Job` is used to specify other advanced facets of the job such as the `Comparator` to be used, files to be put in the `DistributedCache`, whether intermediate and/or job outputs are to be compressed (and how), whether job tasks can be executed in a *speculative* manner
 ([setMapSpeculativeExecution(boolean)](../../api/org/apache/hadoop/mapreduce/Job.html))/
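The corrected link points at the output-side class under org.apache.hadoop.mapreduce.lib.output. For reference, a minimal self-contained driver sketch using both ends of that API; the input and output paths come from args and the job name is arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class DriverSketch {
  public static void main(String[] args) throws Exception {
    Job job = Job.getInstance(new Configuration(), "driver sketch");
    job.setJarByClass(DriverSketch.class);
    // The input side lives in ...lib.input, the output side in ...lib.output,
    // which is the package the corrected tutorial link now points to.
    FileInputFormat.addInputPath(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1]));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}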




[25/31] hadoop git commit: HDFS-10506. OIV's ReverseXML processor cannot reconstruct some snapshot details. Contributed by Akira Ajisaka.

Posted by st...@apache.org.
HDFS-10506. OIV's ReverseXML processor cannot reconstruct some snapshot details. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05391c18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05391c18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05391c18

Branch: refs/heads/HADOOP-13345
Commit: 05391c1845639d4f01da8e5df966e2dc2682f2ca
Parents: dab00da
Author: Wei-Chiu Chuang <we...@apache.org>
Authored: Sat Feb 25 14:38:23 2017 -0800
Committer: Wei-Chiu Chuang <we...@apache.org>
Committed: Sat Feb 25 14:38:50 2017 -0800

----------------------------------------------------------------------
 .../OfflineImageReconstructor.java              | 85 ++++++++++++++------
 .../offlineImageViewer/PBImageXmlWriter.java    | 30 ++++++-
 .../TestOfflineImageViewer.java                 | 15 +++-
 3 files changed, 101 insertions(+), 29 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05391c18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
index a2fa315..ed348d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
@@ -567,6 +567,13 @@ class OfflineImageReconstructor {
   private void processFileXml(Node node, INodeSection.INode.Builder inodeBld)
       throws IOException {
     inodeBld.setType(INodeSection.INode.Type.FILE);
+    INodeSection.INodeFile.Builder bld = createINodeFileBuilder(node);
+    inodeBld.setFile(bld);
+    // Will check remaining keys and serialize in processINodeXml
+  }
+
+  private INodeSection.INodeFile.Builder createINodeFileBuilder(Node node)
+      throws IOException {
     INodeSection.INodeFile.Builder bld = INodeSection.INodeFile.newBuilder();
     Integer ival = node.removeChildInt(SECTION_REPLICATION);
     if (ival != null) {
@@ -595,24 +602,7 @@ class OfflineImageReconstructor {
         if (block == null) {
           break;
         }
-        HdfsProtos.BlockProto.Builder blockBld =
-            HdfsProtos.BlockProto.newBuilder();
-        Long id = block.removeChildLong(SECTION_ID);
-        if (id == null) {
-          throw new IOException("<block> found without <id>");
-        }
-        blockBld.setBlockId(id);
-        Long genstamp = block.removeChildLong(INODE_SECTION_GEMSTAMP);
-        if (genstamp == null) {
-          throw new IOException("<block> found without <genstamp>");
-        }
-        blockBld.setGenStamp(genstamp);
-        Long numBytes = block.removeChildLong(INODE_SECTION_NUM_BYTES);
-        if (numBytes == null) {
-          throw new IOException("<block> found without <numBytes>");
-        }
-        blockBld.setNumBytes(numBytes);
-        bld.addBlocks(blockBld);
+        bld.addBlocks(createBlockBuilder(block));
       }
     }
     Node fileUnderConstruction =
@@ -663,14 +653,43 @@ class OfflineImageReconstructor {
             blockType);
       }
     }
-    inodeBld.setFile(bld);
-    // Will check remaining keys and serialize in processINodeXml
+    return bld;
+  }
+
+  private HdfsProtos.BlockProto.Builder createBlockBuilder(Node block)
+      throws IOException {
+    HdfsProtos.BlockProto.Builder blockBld =
+        HdfsProtos.BlockProto.newBuilder();
+    Long id = block.removeChildLong(SECTION_ID);
+    if (id == null) {
+      throw new IOException("<block> found without <id>");
+    }
+    blockBld.setBlockId(id);
+    Long genstamp = block.removeChildLong(INODE_SECTION_GEMSTAMP);
+    if (genstamp == null) {
+      throw new IOException("<block> found without <genstamp>");
+    }
+    blockBld.setGenStamp(genstamp);
+    Long numBytes = block.removeChildLong(INODE_SECTION_NUM_BYTES);
+    if (numBytes == null) {
+      throw new IOException("<block> found without <numBytes>");
+    }
+    blockBld.setNumBytes(numBytes);
+    return blockBld;
   }
 
   private void processDirectoryXml(Node node,
           INodeSection.INode.Builder inodeBld) throws IOException {
     inodeBld.setType(INodeSection.INode.Type.DIRECTORY);
     INodeSection.INodeDirectory.Builder bld =
+        createINodeDirectoryBuilder(node);
+    inodeBld.setDirectory(bld);
+    // Will check remaining keys and serialize in processINodeXml
+  }
+
+  private INodeSection.INodeDirectory.Builder
+      createINodeDirectoryBuilder(Node node) throws IOException {
+    INodeSection.INodeDirectory.Builder bld =
         INodeSection.INodeDirectory.newBuilder();
     Long lval = node.removeChildLong(INODE_SECTION_MTIME);
     if (lval != null) {
@@ -723,8 +742,7 @@ class OfflineImageReconstructor {
       qf.addQuotas(qbld);
     }
     bld.setTypeQuotas(qf);
-    inodeBld.setDirectory(bld);
-    // Will check remaining keys and serialize in processINodeXml
+    return bld;
   }
 
   private void processSymlinkXml(Node node,
@@ -1368,7 +1386,11 @@ class OfflineImageReconstructor {
         if (name != null) {
           bld.setName(ByteString.copyFrom(name, "UTF8"));
         }
-        // TODO: add missing snapshotCopy field to XML
+        Node snapshotCopy = dirDiff.removeChild(
+            SNAPSHOT_DIFF_SECTION_SNAPSHOT_COPY);
+        if (snapshotCopy != null) {
+          bld.setSnapshotCopy(createINodeDirectoryBuilder(snapshotCopy));
+        }
         Integer expectedCreatedListSize = dirDiff.removeChildInt(
             SNAPSHOT_DIFF_SECTION_CREATED_LIST_SIZE);
         if (expectedCreatedListSize == null) {
@@ -1467,8 +1489,21 @@ class OfflineImageReconstructor {
         if (name != null) {
           bld.setName(ByteString.copyFrom(name, "UTF8"));
         }
-        // TODO: missing snapshotCopy
-        // TODO: missing blocks
+        Node snapshotCopy = fileDiff.removeChild(
+            SNAPSHOT_DIFF_SECTION_SNAPSHOT_COPY);
+        if (snapshotCopy != null) {
+          bld.setSnapshotCopy(createINodeFileBuilder(snapshotCopy));
+        }
+        Node blocks = fileDiff.removeChild(INODE_SECTION_BLOCKS);
+        if (blocks != null) {
+          while (true) {
+            Node block = blocks.removeChild(INODE_SECTION_BLOCK);
+            if (block == null) {
+              break;
+            }
+            bld.addBlocks(createBlockBuilder(block));
+          }
+        }
         fileDiff.verifyNoRemainingKeys("fileDiff");
         bld.build().writeDelimitedTo(out);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05391c18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index 8df55bd..7f0bf38 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -190,6 +190,8 @@ public final class PBImageXmlWriter {
       "childrenSize";
   public static final String SNAPSHOT_DIFF_SECTION_IS_SNAPSHOT_ROOT =
       "isSnapshotRoot";
+  public static final String SNAPSHOT_DIFF_SECTION_SNAPSHOT_COPY =
+      "snapshotCopy";
   public static final String SNAPSHOT_DIFF_SECTION_CREATED_LIST_SIZE =
       "createdListSize";
   public static final String SNAPSHOT_DIFF_SECTION_DELETED_INODE =
@@ -667,6 +669,23 @@ public final class PBImageXmlWriter {
           o(SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID, f.getSnapshotId())
               .o(SNAPSHOT_DIFF_SECTION_SIZE, f.getFileSize())
               .o(SECTION_NAME, f.getName().toStringUtf8());
+          INodeSection.INodeFile snapshotCopy = f.getSnapshotCopy();
+          if (snapshotCopy != null) {
+            out.print("<" + SNAPSHOT_DIFF_SECTION_SNAPSHOT_COPY + ">");
+            dumpINodeFile(snapshotCopy);
+            out.print("</" + SNAPSHOT_DIFF_SECTION_SNAPSHOT_COPY + ">\n");
+          }
+          if (f.getBlocksCount() > 0) {
+            out.print("<" + INODE_SECTION_BLOCKS + ">");
+            for (BlockProto b : f.getBlocksList()) {
+              out.print("<" + INODE_SECTION_BLOCK + ">");
+              o(SECTION_ID, b.getBlockId())
+                  .o(INODE_SECTION_GEMSTAMP, b.getGenStamp())
+                  .o(INODE_SECTION_NUM_BYTES, b.getNumBytes());
+              out.print("</" + INODE_SECTION_BLOCK + ">\n");
+            }
+            out.print("</" + INODE_SECTION_BLOCKS + ">\n");
+          }
           out.print("</" + SNAPSHOT_DIFF_SECTION_FILE_DIFF + ">\n");
         }
       }
@@ -679,9 +698,14 @@ public final class PBImageXmlWriter {
           o(SNAPSHOT_DIFF_SECTION_SNAPSHOT_ID, d.getSnapshotId())
               .o(SNAPSHOT_DIFF_SECTION_CHILDREN_SIZE, d.getChildrenSize())
               .o(SNAPSHOT_DIFF_SECTION_IS_SNAPSHOT_ROOT, d.getIsSnapshotRoot())
-              .o(SECTION_NAME, d.getName().toStringUtf8())
-              .o(SNAPSHOT_DIFF_SECTION_CREATED_LIST_SIZE,
-                  d.getCreatedListSize());
+              .o(SECTION_NAME, d.getName().toStringUtf8());
+          INodeDirectory snapshotCopy = d.getSnapshotCopy();
+          if (snapshotCopy != null) {
+            out.print("<" + SNAPSHOT_DIFF_SECTION_SNAPSHOT_COPY + ">");
+            dumpINodeDirectory(snapshotCopy);
+            out.print("</" + SNAPSHOT_DIFF_SECTION_SNAPSHOT_COPY + ">\n");
+          }
+          o(SNAPSHOT_DIFF_SECTION_CREATED_LIST_SIZE, d.getCreatedListSize());
           for (long did : d.getDeletedINodeList()) {
             o(SNAPSHOT_DIFF_SECTION_DELETED_INODE, did);
           }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05391c18/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
index dacbb85..f77911d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
@@ -181,14 +181,27 @@ public class TestOfflineImageViewer {
       hdfs.mkdirs(src);
       dirCount++;
       writtenFiles.put(src.toString(), hdfs.getFileStatus(src));
+
+      // Create snapshot and snapshotDiff.
       final Path orig = new Path("/src/orig");
       hdfs.mkdirs(orig);
+      final Path file1 = new Path("/src/file");
+      FSDataOutputStream o = hdfs.create(file1);
+      o.write(23);
+      o.write(45);
+      o.close();
       hdfs.allowSnapshot(src);
       hdfs.createSnapshot(src, "snapshot");
       final Path dst = new Path("/dst");
+      // Rename a directory in the snapshot directory to add snapshotCopy
+      // field to the dirDiff entry.
       hdfs.rename(orig, dst);
       dirCount++;
       writtenFiles.put(dst.toString(), hdfs.getFileStatus(dst));
+      // Truncate a file in the snapshot directory to add snapshotCopy and
+      // blocks fields to the fileDiff entry.
+      hdfs.truncate(file1, 1);
+      writtenFiles.put(file1.toString(), hdfs.getFileStatus(file1));
 
       // Set XAttrs so the fsimage contains XAttr ops
       final Path xattr = new Path("/xattr");
@@ -279,7 +292,7 @@ public class TestOfflineImageViewer {
     Matcher matcher = p.matcher(outputString);
     assertTrue(matcher.find() && matcher.groupCount() == 1);
     int totalFiles = Integer.parseInt(matcher.group(1));
-    assertEquals(NUM_DIRS * FILES_PER_DIR, totalFiles);
+    assertEquals(NUM_DIRS * FILES_PER_DIR + 1, totalFiles);
 
     p = Pattern.compile("totalDirectories = (\\d+)\n");
     matcher = p.matcher(outputString);
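With snapshotCopy and blocks now emitted by the XML processor and consumed by ReverseXML, an fsimage containing snapshot diffs should survive a round trip. A usage sketch of that round trip (the fsimage file name below is a placeholder):

hdfs oiv -p XML -i fsimage_0000000000000000042 -o fsimage.xml
hdfs oiv -p ReverseXML -i fsimage.xml -o fsimage_rebuilt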




[21/31] hadoop git commit: HDFS-11421. Make WebHDFS' ACLs RegEx configurable. Contributed by Harsh J.

Posted by st...@apache.org.
HDFS-11421. Make WebHDFS' ACLs RegEx configurable. Contributed by Harsh J.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e24ed47d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e24ed47d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e24ed47d

Branch: refs/heads/HADOOP-13345
Commit: e24ed47d9a19f34a4dd8d4bad9b5c78ca3dd1c2e
Parents: d2b3ba9
Author: Xiao Chen <xi...@apache.org>
Authored: Fri Feb 24 16:49:16 2017 -0800
Committer: Xiao Chen <xi...@apache.org>
Committed: Fri Feb 24 16:49:46 2017 -0800

----------------------------------------------------------------------
 .../hdfs/client/HdfsClientConfigKeys.java       |  2 ++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      |  6 +++-
 .../hdfs/web/resources/AclPermissionParam.java  | 17 +++++++++-
 .../datanode/web/webhdfs/WebHdfsHandler.java    | 10 ++++--
 .../server/namenode/NameNodeHttpServer.java     |  4 +++
 .../src/main/resources/hdfs-default.xml         |  8 +++++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 29 ++++++++++++++++-
 .../hadoop/hdfs/web/resources/TestParam.java    | 34 ++++++++++++++++++++
 8 files changed, 104 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e24ed47d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 7ad79e0..6f8c661 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -35,6 +35,8 @@ public interface HdfsClientConfigKeys {
   String  DFS_WEBHDFS_USER_PATTERN_KEY =
       "dfs.webhdfs.user.provider.user.pattern";
   String  DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
+  String  DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY =
+      "dfs.webhdfs.acl.provider.permission.pattern";
   String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
       "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e24ed47d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 135eef7..a9bc795 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -183,10 +183,14 @@ public class WebHdfsFileSystem extends FileSystem
   ) throws IOException {
     super.initialize(uri, conf);
     setConf(conf);
-    /** set user pattern based on configuration file */
+
+    // set user and acl patterns based on configuration file
     UserParam.setUserPattern(conf.get(
         HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
         HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+    AclPermissionParam.setAclPermissionPattern(conf.get(
+        HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY,
+        HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
 
     boolean isOAuth = conf.getBoolean(
         HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e24ed47d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
index 130c8fd..9ab3ad5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
@@ -24,6 +24,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.regex.Pattern;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.permission.AclEntry;
 
 /** AclPermission parameter. */
@@ -33,7 +34,7 @@ public class AclPermissionParam extends StringParam {
   /** Default parameter value. */
   public static final String DEFAULT = "";
 
-  private static final Domain DOMAIN = new Domain(NAME,
+  private static Domain DOMAIN = new Domain(NAME,
       Pattern.compile(DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
 
   /**
@@ -49,6 +50,20 @@ public class AclPermissionParam extends StringParam {
     super(DOMAIN,parseAclSpec(acl).equals(DEFAULT) ? null : parseAclSpec(acl));
   }
 
+  @VisibleForTesting
+  public static Domain getAclPermissionPattern() {
+    return DOMAIN;
+  }
+
+  @VisibleForTesting
+  public static void setAclPermissionPattern(Domain dm) {
+    DOMAIN = dm;
+  }
+
+  public static void setAclPermissionPattern(String pattern) {
+    DOMAIN = new Domain(NAME, Pattern.compile(pattern));
+  }
+
   @Override
   public String getName() {
     return NAME;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e24ed47d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
index f8c15fc..d2b2ec2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java
@@ -48,11 +48,12 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.web.JsonUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
 import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
@@ -112,8 +113,11 @@ public class WebHdfsHandler extends SimpleChannelInboundHandler<HttpRequest> {
     this.confForCreate = confForCreate;
     /** set user pattern based on configuration file */
     UserParam.setUserPattern(
-            conf.get(DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
-                    DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+        conf.get(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+            HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+    AclPermissionParam.setAclPermissionPattern(
+        conf.get(HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY,
+            HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e24ed47d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index a1959e4..e7e5f51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.web.AuthFilter;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;
 import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.http.HttpConfig;
@@ -80,6 +81,9 @@ public class NameNodeHttpServer {
     UserParam.setUserPattern(conf.get(
         HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
         HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+    AclPermissionParam.setAclPermissionPattern(conf.get(
+        HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY,
+        HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
 
     // add authentication filter for webhdfs
     final String className = conf.get(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e24ed47d/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 652b216..d23b967 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2478,6 +2478,14 @@
 </property>
 
 <property>
+  <name>dfs.webhdfs.acl.provider.permission.pattern</name>
+  <value>^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$</value>
+  <description>
+    Valid pattern for user and group names in webhdfs ACL operations; it must be a valid Java regex.
+  </description>
+</property>
+
+<property>
   <name>dfs.webhdfs.socket.connect-timeout</name>
   <value>60s</value>
   <description>

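For reference, a minimal, hypothetical sketch (not part of this commit; the class name AclPatternExample is illustrative) of how a deployment could override the new dfs.webhdfs.acl.provider.permission.pattern key and apply it, mirroring the initialization code above and the tests below:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
    import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;

    public class AclPatternExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Relax the default pattern so ACL specs may use numeric names and '@'.
        conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY,
            "^(default:)?(user|group|mask|other):" +
                "[[0-9A-Za-z_][@A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?" +
                "(user|group|mask|other):[[0-9A-Za-z_][@A-Za-z0-9._-]]*:([rwx-]{3})?)*$");

        // Same call the WebHdfsHandler/NameNodeHttpServer constructors now make.
        AclPermissionParam.setAclPermissionPattern(
            conf.get(HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY,
                HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));

        // ACL specs like "user:110201:rwx,group:foo@bar:rwx" now pass validation.
        System.out.println(
            new AclPermissionParam("user:110201:rwx,group:foo@bar:rwx").getValue());
      }
    }
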
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e24ed47d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 1ff04de..7cfaa99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -49,6 +49,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.core.MediaType;
 
+import com.google.common.collect.ImmutableList;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -64,6 +65,9 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -379,10 +383,17 @@ public class TestWebHDFS {
   }
 
   @Test(timeout=300000)
-  public void testNumericalUserName() throws Exception {
+  public void testCustomizedUserAndGroupNames() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    // Modify username pattern to allow numeric usernames
     conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY, "^[A-Za-z0-9_][A-Za-z0-9" +
         "._-]*[$]?$");
+    // Modify the ACL pattern to allow numeric and "@" characters in user/group names in the ACL spec
+    conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY,
+        "^(default:)?(user|group|mask|other):" +
+            "[[0-9A-Za-z_][@A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?" +
+            "(user|group|mask|other):[[0-9A-Za-z_][@A-Za-z0-9._-]]*:([rwx-]{3})?)*$");
     final MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
     try {
@@ -391,6 +402,7 @@ public class TestWebHDFS {
           .setPermission(new Path("/"),
               new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
 
+      // Test a numeric username
       UserGroupInformation.createUserForTesting("123", new String[]{"my-group"})
         .doAs(new PrivilegedExceptionAction<Void>() {
           @Override
@@ -399,6 +411,21 @@ public class TestWebHDFS {
                 WebHdfsConstants.WEBHDFS_SCHEME);
             Path d = new Path("/my-dir");
             Assert.assertTrue(fs.mkdirs(d));
+            // Also test specifying a default ACL entry with a numeric username
+            // and another entry with a group name containing '@'
+            fs.modifyAclEntries(d, ImmutableList.of(
+                new AclEntry.Builder()
+                    .setPermission(FsAction.READ)
+                    .setScope(AclEntryScope.DEFAULT)
+                    .setType(AclEntryType.USER)
+                    .setName("11010")
+                    .build(),
+                new AclEntry.Builder()
+                    .setPermission(FsAction.READ_WRITE)
+                    .setType(AclEntryType.GROUP)
+                    .setName("foo@bar")
+                    .build()
+            ));
             return null;
           }
         });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e24ed47d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
index 6449bf7..d444cb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
@@ -351,6 +351,40 @@ public class TestParam {
       LOG.info("EXPECTED: " + e);
     }
   }
+
+  @Test
+  public void testUserGroupOkAfterAlteringAclPattern() {
+    // Preserve default pattern value
+    AclPermissionParam.Domain oldDomain =
+        AclPermissionParam.getAclPermissionPattern();
+
+    // Override the pattern with one that accepts '@' and numbers
+    // in the first character of usernames/groupnames
+    String newPattern =
+        "^(default:)?(user|group|mask|other):" +
+            "[[0-9A-Za-z_][@A-Za-z0-9._-]]*:([rwx-]{3})?" +
+            "(,(default:)?(user|group|mask|other):" +
+            "[[0-9A-Za-z_][@A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
+
+    try {
+      AclPermissionParam.setAclPermissionPattern(newPattern);
+
+      String numericUserSpec = "user:110201:rwx";
+      AclPermissionParam aclNumericUserParam =
+          new AclPermissionParam(numericUserSpec);
+      Assert.assertEquals(numericUserSpec, aclNumericUserParam.getValue());
+
+      String oddGroupSpec = "group:foo@bar:rwx";
+      AclPermissionParam aclGroupWithDomainParam =
+          new AclPermissionParam(oddGroupSpec);
+      Assert.assertEquals(oddGroupSpec, aclGroupWithDomainParam.getValue());
+
+    } finally {
+      // Revert back to the default rules for remainder of tests
+      AclPermissionParam.setAclPermissionPattern(oldDomain);
+    }
+
+  }
  
   @Test
   public void testXAttrNameParam() {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[17/31] hadoop git commit: HDFS-11427. Rename rs-default to rs.

Posted by st...@apache.org.
HDFS-11427. Rename rs-default to rs.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1a52b04
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1a52b04
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1a52b04

Branch: refs/heads/HADOOP-13345
Commit: c1a52b04d0cc5ad5c86ae93043655f313386f7f9
Parents: b32ffa2
Author: Andrew Wang <wa...@apache.org>
Authored: Fri Feb 24 10:58:45 2017 -0800
Committer: Andrew Wang <wa...@apache.org>
Committed: Fri Feb 24 10:58:45 2017 -0800

----------------------------------------------------------------------
 .../apache/hadoop/io/erasurecode/CodecUtil.java | 28 +++++++--------
 .../io/erasurecode/ErasureCodeConstants.java    |  8 ++---
 .../erasurecode/coder/HHXORErasureDecoder.java  |  2 +-
 .../erasurecode/coder/HHXORErasureEncoder.java  |  2 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  |  2 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |  2 +-
 .../src/main/resources/core-default.xml         |  4 +--
 .../erasurecode/TestCodecRawCoderMapping.java   | 10 +++---
 .../coder/TestHHXORErasureCoder.java            |  2 +-
 .../erasurecode/coder/TestRSErasureCoder.java   |  2 +-
 .../src/site/markdown/HDFSErasureCoding.md      |  6 ++--
 .../TestDFSRSDefault10x4StripedInputStream.java |  2 +-
 ...TestDFSRSDefault10x4StripedOutputStream.java |  2 +-
 ...fault10x4StripedOutputStreamWithFailure.java |  4 +--
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |  2 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  2 +-
 .../TestDFSStripedOutputStreamWithFailure.java  |  2 +-
 .../hadoop/hdfs/TestReconstructStripedFile.java |  2 +-
 .../TestUnsetAndChangeDirectoryEcPolicy.java    |  2 +-
 .../test/resources/testErasureCodingConf.xml    | 36 ++++++++++----------
 20 files changed, 61 insertions(+), 61 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
index 977bacb..861451a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java
@@ -55,9 +55,9 @@ public final class CodecUtil {
   public static final String IO_ERASURECODE_CODEC_XOR =
       XORErasureCodec.class.getCanonicalName();
   /** Erasure coder Reed-Solomon codec. */
-  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_KEY =
+  public static final String IO_ERASURECODE_CODEC_RS_KEY =
       "io.erasurecode.codec.rs";
-  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT =
+  public static final String IO_ERASURECODE_CODEC_RS =
       RSErasureCodec.class.getCanonicalName();
   /** Erasure coder hitch hiker XOR codec. */
   public static final String IO_ERASURECODE_CODEC_HHXOR_KEY =
@@ -67,10 +67,10 @@ public final class CodecUtil {
 
   /** Supported erasure codec classes. */
 
-  /** Raw coder factory for the RS default codec. */
-  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY =
-      "io.erasurecode.codec.rs-default.rawcoder";
-  public static final String IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_DEFAULT =
+  /** Raw coder factory for the RS codec. */
+  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
+      "io.erasurecode.codec.rs.rawcoder";
+  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT =
       RSRawErasureCoderFactory.class.getCanonicalName();
 
   /** Raw coder factory for the RS legacy codec. */
@@ -183,10 +183,10 @@ public final class CodecUtil {
   private static String getRawCoderFactNameFromCodec(Configuration conf,
                                                      String codec) {
     switch (codec) {
-    case ErasureCodeConstants.RS_DEFAULT_CODEC_NAME:
+    case ErasureCodeConstants.RS_CODEC_NAME:
       return conf.get(
-          IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
-          IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_DEFAULT);
+          IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
+          IO_ERASURECODE_CODEC_RS_RAWCODER_DEFAULT);
     case ErasureCodeConstants.RS_LEGACY_CODEC_NAME:
       return conf.get(
           IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY,
@@ -233,15 +233,15 @@ public final class CodecUtil {
 
   private static String getCodecClassName(Configuration conf, String codec) {
     switch (codec) {
-    case ErasureCodeConstants.RS_DEFAULT_CODEC_NAME:
+    case ErasureCodeConstants.RS_CODEC_NAME:
       return conf.get(
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_KEY,
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT);
+          CodecUtil.IO_ERASURECODE_CODEC_RS_KEY,
+          CodecUtil.IO_ERASURECODE_CODEC_RS);
     case ErasureCodeConstants.RS_LEGACY_CODEC_NAME:
       //TODO:rs-legacy should be handled differently.
       return conf.get(
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_KEY,
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT);
+          CodecUtil.IO_ERASURECODE_CODEC_RS_KEY,
+          CodecUtil.IO_ERASURECODE_CODEC_RS);
     case ErasureCodeConstants.XOR_CODEC_NAME:
       return conf.get(
           CodecUtil.IO_ERASURECODE_CODEC_XOR_KEY,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
index e168909..c830bb2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
@@ -25,16 +25,16 @@ public final class ErasureCodeConstants {
   private ErasureCodeConstants() {
   }
 
-  public static final String RS_DEFAULT_CODEC_NAME = "rs-default";
+  public static final String RS_CODEC_NAME = "rs";
   public static final String RS_LEGACY_CODEC_NAME = "rs-legacy";
   public static final String XOR_CODEC_NAME = "xor";
   public static final String HHXOR_CODEC_NAME = "hhxor";
 
   public static final ECSchema RS_6_3_SCHEMA = new ECSchema(
-      RS_DEFAULT_CODEC_NAME, 6, 3);
+      RS_CODEC_NAME, 6, 3);
 
   public static final ECSchema RS_3_2_SCHEMA = new ECSchema(
-      RS_DEFAULT_CODEC_NAME, 3, 2);
+      RS_CODEC_NAME, 3, 2);
 
   public static final ECSchema RS_6_3_LEGACY_SCHEMA = new ECSchema(
       RS_LEGACY_CODEC_NAME, 6, 3);
@@ -43,5 +43,5 @@ public final class ErasureCodeConstants {
       XOR_CODEC_NAME, 2, 1);
 
   public static final ECSchema RS_10_4_SCHEMA = new ECSchema(
-      RS_DEFAULT_CODEC_NAME, 10, 4);
+      RS_CODEC_NAME, 10, 4);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureDecoder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureDecoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureDecoder.java
index 05e9384..4747e49 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureDecoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureDecoder.java
@@ -67,7 +67,7 @@ public class HHXORErasureDecoder extends ErasureDecoder {
   private RawErasureDecoder checkCreateRSRawDecoder() {
     if (rsRawDecoder == null) {
       rsRawDecoder = CodecUtil.createRawDecoder(getConf(),
-              ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
+              ErasureCodeConstants.RS_CODEC_NAME, getOptions());
     }
     return rsRawDecoder;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureEncoder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureEncoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureEncoder.java
index 7a15a05..6b858b6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureEncoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/HHXORErasureEncoder.java
@@ -61,7 +61,7 @@ public class HHXORErasureEncoder extends ErasureEncoder {
   private RawErasureEncoder checkCreateRSRawEncoder() {
     if (rsRawEncoder == null) {
       rsRawEncoder = CodecUtil.createRawEncoder(getConf(),
-          ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
+          ErasureCodeConstants.RS_CODEC_NAME, getOptions());
     }
     return rsRawEncoder;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
index 6e679c3..ba7f773 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
@@ -52,7 +52,7 @@ public class RSErasureDecoder extends ErasureDecoder {
   private RawErasureDecoder checkCreateRSRawDecoder() {
     if (rsRawDecoder == null) {
       rsRawDecoder = CodecUtil.createRawDecoder(getConf(),
-          ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
+          ErasureCodeConstants.RS_CODEC_NAME, getOptions());
     }
     return rsRawDecoder;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
index 7a09b92..193309d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
@@ -53,7 +53,7 @@ public class RSErasureEncoder extends ErasureEncoder {
     if (rawEncoder == null) {
       // TODO: we should create the raw coder according to codec.
       rawEncoder = CodecUtil.createRawEncoder(getConf(),
-          ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, getOptions());
+          ErasureCodeConstants.RS_CODEC_NAME, getOptions());
     }
     return rawEncoder;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index d8136ee..08ca05b 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -657,10 +657,10 @@
 </property>
 
 <property>
-  <name>io.erasurecode.codec.rs-default.rawcoder</name>
+  <name>io.erasurecode.codec.rs.rawcoder</name>
   <value>org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory</value>
   <description>
-    Raw coder implementation for the rs-default codec. The default value is a
+    Raw coder implementation for the rs codec. The default value is a
     pure Java implementation. There is also a native implementation. Its value
     is org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory.
   </description>

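For reference, a minimal, hypothetical sketch (the class name RsRawCoderExample is illustrative, not part of this commit) of wiring the renamed io.erasurecode.codec.rs.rawcoder key to a raw coder factory and creating an encoder for the "rs" codec, as the tests below do; the native NativeRSRawErasureCoderFactory could be configured instead of the pure-Java factory:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.erasurecode.CodecUtil;
    import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
    import org.apache.hadoop.io.erasurecode.ErasureCoderOptions;
    import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
    import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;

    public class RsRawCoderExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Select the pure-Java RS raw coder via the renamed configuration key.
        conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
            RSRawErasureCoderFactory.class.getCanonicalName());

        // 6 data units + 3 parity units, matching the RS-6-3 schema.
        ErasureCoderOptions options = new ErasureCoderOptions(6, 3);
        RawErasureEncoder encoder = CodecUtil.createRawEncoder(
            conf, ErasureCodeConstants.RS_CODEC_NAME, options);
        System.out.println(encoder.getClass().getName());
      }
    }
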
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRawCoderMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRawCoderMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRawCoderMapping.java
index 0db001a..7f7fcf3 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRawCoderMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRawCoderMapping.java
@@ -47,12 +47,12 @@ public class TestCodecRawCoderMapping {
   public void testRSDefaultRawCoder() {
     ErasureCoderOptions coderOptions = new ErasureCoderOptions(
         numDataUnit, numParityUnit);
-    // should return default raw coder of rs-default codec
+    // should return default raw coder of rs codec
     RawErasureEncoder encoder = CodecUtil.createRawEncoder(
-        conf, ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
+        conf, ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
     Assert.assertTrue(encoder instanceof RSRawEncoder);
     RawErasureDecoder decoder = CodecUtil.createRawDecoder(
-        conf, ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
+        conf, ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
     Assert.assertTrue(decoder instanceof RSRawDecoder);
 
     // should return default raw coder of rs-legacy codec
@@ -71,11 +71,11 @@ public class TestCodecRawCoderMapping {
 
     String dummyFactName = "DummyNoneExistingFactory";
     // set the dummy factory to rs-legacy and create a raw coder
-    // with rs-default, which is OK as the raw coder key is not used
+    // with rs, which is OK as the raw coder key is not used
     conf.set(CodecUtil.
         IO_ERASURECODE_CODEC_RS_LEGACY_RAWCODER_KEY, dummyFactName);
     RawErasureEncoder encoder = CodecUtil.createRawEncoder(conf,
-        ErasureCodeConstants.RS_DEFAULT_CODEC_NAME, coderOptions);
+        ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
     Assert.assertTrue(encoder instanceof RSRawEncoder);
     // now create the raw coder with rs-legacy, which should throw exception
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestHHXORErasureCoder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestHHXORErasureCoder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestHHXORErasureCoder.java
index 1eca1e1..91c4ba8 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestHHXORErasureCoder.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestHHXORErasureCoder.java
@@ -50,7 +50,7 @@ public class TestHHXORErasureCoder extends TestHHErasureCoderBase {
      * This tests if the configuration items work or not.
      */
     Configuration conf = new Configuration();
-    conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
+    conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
         RSRawErasureCoderFactory.class.getCanonicalName());
     prepare(conf, 10, 4, new int[]{0}, new int[0]);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestRSErasureCoder.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestRSErasureCoder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestRSErasureCoder.java
index 5f36c99..3b18347 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestRSErasureCoder.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestRSErasureCoder.java
@@ -57,7 +57,7 @@ public class TestRSErasureCoder extends TestErasureCoderBase {
      * This tests if the configuration items work or not.
      */
     Configuration conf = new Configuration();
-    conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
+    conf.set(CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
         RSRawErasureCoderFactory.class.getCanonicalName());
     prepare(conf, 10, 4, new int[]{0}, new int[0]);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
index 0283e2b..36fb61d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
@@ -64,7 +64,7 @@ Architecture
 
       2. _The size of a striping cell._ This determines the granularity of striped reads and writes, including buffer sizes and encoding work.
 
-    There are four policies currently being supported: RS-DEFAULT-3-2-64k, RS-DEFAULT-6-3-64k, RS-DEFAULT-10-4-64k and RS-LEGACY-6-3-64k. All with default cell size of 64KB. The system default policy is RS-DEFAULT-6-3-64k which use the default schema RS_6_3_SCHEMA with a cell size of 64KB.
+    Five policies are currently supported: RS-3-2-64k, RS-6-3-64k, RS-10-4-64k, RS-LEGACY-6-3-64k, and XOR-2-1-64k, all with a default cell size of 64KB. The system default policy is RS-6-3-64k, which uses the default schema RS_6_3_SCHEMA with a cell size of 64KB.
 
  *  **Intel ISA-L**
     Intel ISA-L stands for Intel Intelligent Storage Acceleration Library. ISA-L is a collection of optimized low-level functions used primarily in storage applications. It includes a fast block Reed-Solomon type erasure codes optimized for Intel AVX and AVX2 instruction sets.
@@ -91,7 +91,7 @@ Deployment
 ### Configuration keys
 
   The codec implementation for Reed-Solomon and XOR can be configured with the following client and DataNode configuration keys:
-  `io.erasurecode.codec.rs-default.rawcoder` for the default RS codec,
+  `io.erasurecode.codec.rs.rawcoder` for the default RS codec,
   `io.erasurecode.codec.rs-legacy.rawcoder` for the legacy RS codec,
   `io.erasurecode.codec.xor.rawcoder` for the XOR codec.
   The default implementations for all of these codecs are pure Java. For the default RS codec, there is also a native implementation which leverages the Intel ISA-L library to improve the performance of the codec. For the XOR codec, a native implementation which leverages the Intel ISA-L library is also supported. Please refer to the section "Enable Intel ISA-L" for more detailed information.
@@ -108,7 +108,7 @@ Deployment
   The HDFS native implementation of the default RS codec leverages the Intel ISA-L library to improve encoding and decoding calculations. To enable and use Intel ISA-L, there are three steps.
   1. Build the ISA-L library. Please refer to the official site "https://github.com/01org/isa-l/" for detailed information.
   2. Build Hadoop with ISA-L support. Please refer to the "Intel ISA-L build options" section in the "Build instructions for Hadoop" (BUILDING.txt) document. Use -Dbundle.isal to copy the contents of the isal.lib directory into the final tar file. Deploy Hadoop with the tar file. Make sure the ISA-L library is available on both the HDFS client and the DataNodes.
-  3. Configure the `io.erasurecode.codec.rs-default.rawcoder` key with value `org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory` on HDFS client and DataNodes.
+  3. Configure the `io.erasurecode.codec.rs.rawcoder` key with value `org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory` on HDFS client and DataNodes.
 
   To check whether the ISA-L library is enabled, run the "hadoop checknative" command. It will tell you whether the ISA-L library is enabled or not.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java
index fc0ee37..3e6d1e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedInputStream.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 
 /**
- * This tests read operation of DFS striped file with RS-DEFAULT-10-4-64k
+ * This tests read operation of DFS striped file with RS-10-4-64k
  *  erasure code policy.
  */
 public class TestDFSRSDefault10x4StripedInputStream extends

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStream.java
index 37821c1..1ea839a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStream.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 
 /**
- * This tests write operation of DFS striped file with RS-DEFAULT-10-4-64k
+ * This tests write operation of DFS striped file with RS-10-4-64k
  *  erasure code policy.
  */
 public class TestDFSRSDefault10x4StripedOutputStream

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStreamWithFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStreamWithFailure.java
index 1b2ec42..340fec5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRSDefault10x4StripedOutputStreamWithFailure.java
@@ -22,7 +22,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
 
 /**
- * This tests write operation of DFS striped file with RS-DEFAULT-10-4-64k
+ * This tests write operation of DFS striped file with RS-10-4-64k
  *  erasure code policy under Datanode failure conditions.
  */
 public class TestDFSRSDefault10x4StripedOutputStreamWithFailure
@@ -33,4 +33,4 @@ public class TestDFSRSDefault10x4StripedOutputStreamWithFailure
     return ErasureCodingPolicyManager.getPolicyByPolicyID(
         HdfsConstants.RS_10_4_POLICY_ID);
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
index 121b9a4..9b69904 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
@@ -96,7 +96,7 @@ public class TestDFSStripedInputStream {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     if (ErasureCodeNative.isNativeCodeLoaded()) {
       conf.set(
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
+          CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
           NativeRSRawErasureCoderFactory.class.getCanonicalName());
     }
     SimulatedFSDataset.setFactory(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
index 5bde16e..8834e14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStream.java
@@ -85,7 +85,7 @@ public class TestDFSStripedOutputStream {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     if (ErasureCodeNative.isNativeCodeLoaded()) {
       conf.set(
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
+          CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
           NativeRSRawErasureCoderFactory.class.getCanonicalName());
     }
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index 0baf9cc..fabd417 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -214,7 +214,7 @@ public class TestDFSStripedOutputStreamWithFailure {
     final int numDNs = dataBlocks + parityBlocks;
     if (ErasureCodeNative.isNativeCodeLoaded()) {
       conf.set(
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
+          CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
           NativeRSRawErasureCoderFactory.class.getCanonicalName());
     }
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDNs).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
index 4960c58..08b53c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReconstructStripedFile.java
@@ -100,7 +100,7 @@ public class TestReconstructStripedFile {
         false);
     if (ErasureCodeNative.isNativeCodeLoaded()) {
       conf.set(
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
+          CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
           NativeRSRawErasureCoderFactory.class.getCanonicalName());
     }
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dnNum).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
index 1a4086e..f26aa06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java
@@ -69,7 +69,7 @@ public class TestUnsetAndChangeDirectoryEcPolicy {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
     if (ErasureCodeNative.isNativeCodeLoaded()) {
       conf.set(
-          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
+          CodecUtil.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
           NativeRSRawErasureCoderFactory.class.getCanonicalName());
     }
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1a52b04/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
index 82b71def..9b9003a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
@@ -133,7 +133,7 @@
       <description>setPolicy : set erasure coding policy on a directory to encode files</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -141,7 +141,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Set erasure coding policy RS-DEFAULT-6-3-64k on /ecdir</expected-output>
+          <expected-output>Set erasure coding policy RS-6-3-64k on /ecdir</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -150,8 +150,8 @@
       <description>setPolicy : set a policy twice</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -159,7 +159,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>Set erasure coding policy RS-DEFAULT-6-3-64k on /ecdir</expected-output>
+          <expected-output>Set erasure coding policy RS-6-3-64k on /ecdir</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -168,7 +168,7 @@
       <description>unsetPolicy : unset policy and get</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -unsetPolicy -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
@@ -187,8 +187,8 @@
       <description>setPolicy : change different policy and get</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-3-2-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-3-2-64k -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
@@ -197,7 +197,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-DEFAULT-3-2-64k</expected-output>
+          <expected-output>RS-3-2-64k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -207,7 +207,7 @@
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
         <command>-fs NAMENODE -mkdir /ecdir/child</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -unsetPolicy -path /ecdir/child</ec-admin-command>
         <command>-fs NAMENODE -touchz /ecdir/child/ecfile</command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir/child/ecfile</ec-admin-command>
@@ -220,7 +220,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-DEFAULT-6-3-64k</expected-output>
+          <expected-output>RS-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -246,7 +246,7 @@
       <description>getPolicy : get EC policy information at specified path, which doesn't have an EC policy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir</ec-admin-command>
       </test-commands>
       <cleanup-commands>
@@ -255,7 +255,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-DEFAULT-6-3-64k</expected-output>
+          <expected-output>RS-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -264,7 +264,7 @@
       <description>getPolicy : get EC policy information at specified path, which doesn't have an EC policy</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-DEFAULT-6-3-64k -path /ecdir</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -policy RS-6-3-64k -path /ecdir</ec-admin-command>
         <command>-fs NAMENODE -touchz /ecdir/ecfile</command>
         <ec-admin-command>-fs NAMENODE -getPolicy -path /ecdir/ecfile</ec-admin-command>
       </test-commands>
@@ -275,7 +275,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-DEFAULT-6-3-64k</expected-output>
+          <expected-output>RS-6-3-64k</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -290,7 +290,7 @@
       <comparators>
         <comparator>
           <type>SubstringComparator</type>
-          <expected-output>RS-DEFAULT-6-3</expected-output>
+          <expected-output>RS-6-3</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -334,7 +334,7 @@
       <description>setPolicy : illegal parameters - too many arguments</description>
       <test-commands>
         <command>-fs NAMENODE -mkdir /ecdir</command>
-        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir1 -policy RS-DEFAULT-3-2-64k /ecdir2</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir1 -policy RS-3-2-64k /ecdir2</ec-admin-command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rmdir /ecdir</command>
@@ -367,7 +367,7 @@
     <test>
       <description>setPolicy : illegal parameters - no such file</description>
       <test-commands>
-        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-DEFAULT-3-2-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-3-2-64k</ec-admin-command>
       </test-commands>
       <cleanup-commands>
       </cleanup-commands>


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[04/31] hadoop git commit: YARN-6194. Cluster capacity in SchedulingPolicy is updated only on allocation file reload. (Yufei Gu via kasha)

Posted by st...@apache.org.
YARN-6194. Cluster capacity in SchedulingPolicy is updated only on allocation file reload. (Yufei Gu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b10e9622
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b10e9622
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b10e9622

Branch: refs/heads/HADOOP-13345
Commit: b10e962224a8ae1c6031a05322b0cc5e564bd078
Parents: 718ad9f
Author: Karthik Kambatla <ka...@cloudera.com>
Authored: Wed Feb 22 15:58:49 2017 -0800
Committer: Karthik Kambatla <ka...@cloudera.com>
Committed: Wed Feb 22 15:58:49 2017 -0800

----------------------------------------------------------------------
 .../scheduler/fair/FSContext.java               | 21 ++++++++++++----
 .../resourcemanager/scheduler/fair/FSQueue.java |  2 +-
 .../scheduler/fair/FairScheduler.java           |  6 ++---
 .../scheduler/fair/SchedulingPolicy.java        | 19 ++++++++++++++-
 .../DominantResourceFairnessPolicy.java         | 16 +++++++------
 .../scheduler/fair/TestFairScheduler.java       |  8 +++----
 .../TestDominantResourceFairnessPolicy.java     | 25 +++++++++++++++++++-
 7 files changed, 74 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b10e9622/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
index 56bc99c..a4aa8f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSContext.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
+import org.apache.hadoop.yarn.api.records.Resource;
+
 /**
  * Helper class that holds basic information to be passed around
  * FairScheduler classes. Think of this as a glorified map that holds key
@@ -27,28 +29,37 @@ public class FSContext {
   private boolean preemptionEnabled = false;
   private float preemptionUtilizationThreshold;
   private FSStarvedApps starvedApps;
+  private FairScheduler scheduler;
+
+  FSContext(FairScheduler scheduler) {
+    this.scheduler = scheduler;
+  }
 
-  public boolean isPreemptionEnabled() {
+  boolean isPreemptionEnabled() {
     return preemptionEnabled;
   }
 
-  public void setPreemptionEnabled() {
+  void setPreemptionEnabled() {
     this.preemptionEnabled = true;
     if (starvedApps == null) {
       starvedApps = new FSStarvedApps();
     }
   }
 
-  public FSStarvedApps getStarvedApps() {
+  FSStarvedApps getStarvedApps() {
     return starvedApps;
   }
 
-  public float getPreemptionUtilizationThreshold() {
+  float getPreemptionUtilizationThreshold() {
     return preemptionUtilizationThreshold;
   }
 
-  public void setPreemptionUtilizationThreshold(
+  void setPreemptionUtilizationThreshold(
       float preemptionUtilizationThreshold) {
     this.preemptionUtilizationThreshold = preemptionUtilizationThreshold;
   }
+
+  public Resource getClusterResource() {
+    return scheduler.getClusterResource();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b10e9622/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
index 7e8b858..b5592c5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java
@@ -135,7 +135,7 @@ public abstract class FSQueue implements Queue, Schedulable {
   }
 
   public void setPolicy(SchedulingPolicy policy) {
-    policy.initialize(scheduler.getClusterResource());
+    policy.initialize(scheduler.getContext());
     this.policy = policy;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b10e9622/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index a15e6b5..c946bfb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -205,13 +205,12 @@ public class FairScheduler extends
 
   public FairScheduler() {
     super(FairScheduler.class.getName());
-    context = new FSContext();
+    context = new FSContext(this);
     allocsLoader = new AllocationFileLoaderService();
     queueMgr = new QueueManager(this);
     maxRunningEnforcer = new MaxRunningAppsEnforcer(this);
   }
 
-  @VisibleForTesting
   public FSContext getContext() {
     return context;
   }
@@ -1452,8 +1451,7 @@ public class FairScheduler extends
         } else {
           allocConf = queueInfo;
           setQueueAcls(allocConf.getQueueAcls());
-          allocConf.getDefaultSchedulingPolicy().initialize(
-              getClusterResource());
+          allocConf.getDefaultSchedulingPolicy().initialize(getContext());
           queueMgr.updateAllocationConfiguration(allocConf);
           applyChildDefaults();
           maxRunningEnforcer.updateRunnabilityOnReload();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b10e9622/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java
index 3fe36f3..9a9be8c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/SchedulingPolicy.java
@@ -91,10 +91,27 @@ public abstract class SchedulingPolicy {
     }
     return getInstance(clazz);
   }
-  
+
+  /**
+   * Initialize the scheduling policy with cluster resources.
+   * @deprecated  Since it doesn't track cluster resource changes, replaced by
+   * {@link #initialize(FSContext)}.
+   *
+   * @param clusterCapacity cluster resources
+   */
+  @Deprecated
   public void initialize(Resource clusterCapacity) {}
 
   /**
+   * Initialize the scheduling policy with a {@link FSContext} object, which has
+   * a pointer to the cluster resources among other information.
+   *
+   * @param fsContext a {@link FSContext} object which has a pointer to the
+   *                  cluster resources
+   */
+  public void initialize(FSContext fsContext) {}
+
+  /**
    * The {@link ResourceCalculator} returned by this method should be used
    * for any calculations involving resources.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b10e9622/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
index 369b8a1..193ed4d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.SchedulingPolicy;
@@ -104,17 +105,17 @@ public class DominantResourceFairnessPolicy extends SchedulingPolicy {
   }
 
   @Override
-  public void initialize(Resource clusterCapacity) {
-    COMPARATOR.setClusterCapacity(clusterCapacity);
+  public void initialize(FSContext fsContext) {
+    COMPARATOR.setFSContext(fsContext);
   }
 
   public static class DominantResourceFairnessComparator implements Comparator<Schedulable> {
     private static final int NUM_RESOURCES = ResourceType.values().length;
-    
-    private Resource clusterCapacity;
 
-    public void setClusterCapacity(Resource clusterCapacity) {
-      this.clusterCapacity = clusterCapacity;
+    private FSContext fsContext;
+
+    public void setFSContext(FSContext fsContext) {
+      this.fsContext = fsContext;
     }
 
     @Override
@@ -125,7 +126,8 @@ public class DominantResourceFairnessPolicy extends SchedulingPolicy {
       ResourceWeights sharesOfMinShare2 = new ResourceWeights();
       ResourceType[] resourceOrder1 = new ResourceType[NUM_RESOURCES];
       ResourceType[] resourceOrder2 = new ResourceType[NUM_RESOURCES];
-      
+      Resource clusterCapacity = fsContext.getClusterResource();
+
       // Calculate shares of the cluster for each resource both schedulables.
       calculateShares(s1.getResourceUsage(),
           clusterCapacity, sharesOfCluster1, resourceOrder1, s1.getWeights());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b10e9622/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 4def53f..62430bf 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -3293,7 +3293,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     FSAppAttempt app2 = scheduler.getSchedulerApp(appAttId2);
 
     DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy();
-    drfPolicy.initialize(scheduler.getClusterResource());
+    drfPolicy.initialize(scheduler.getContext());
     scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
     scheduler.update();
 
@@ -3339,7 +3339,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     FSAppAttempt app3 = scheduler.getSchedulerApp(appAttId3);
     
     DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy();
-    drfPolicy.initialize(scheduler.getClusterResource());
+    drfPolicy.initialize(scheduler.getContext());
     scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy);
     scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
     scheduler.update();
@@ -3354,7 +3354,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     scheduler.handle(updateEvent);
     Assert.assertEquals(1, app2.getLiveContainers().size());
   }
-  
+
   @Test
   public void testDRFHierarchicalQueues() throws Exception {
     scheduler.init(conf);
@@ -3384,7 +3384,7 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     FSAppAttempt app4 = scheduler.getSchedulerApp(appAttId4);
     
     DominantResourceFairnessPolicy drfPolicy = new DominantResourceFairnessPolicy();
-    drfPolicy.initialize(scheduler.getClusterResource());
+    drfPolicy.initialize(scheduler.getContext());
     scheduler.getQueueManager().getQueue("root").setPolicy(drfPolicy);
     scheduler.getQueueManager().getQueue("queue1").setPolicy(drfPolicy);
     scheduler.getQueueManager().getQueue("queue1.subqueue1").setPolicy(drfPolicy);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b10e9622/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
index a5c20c1..3719e2a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java
@@ -19,12 +19,15 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.util.Comparator;
 
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceType;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.ResourceWeights;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSContext;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FakeSchedulable;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.Schedulable;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
@@ -40,7 +43,10 @@ public class TestDominantResourceFairnessPolicy {
   private Comparator<Schedulable> createComparator(int clusterMem,
       int clusterCpu) {
     DominantResourceFairnessPolicy policy = new DominantResourceFairnessPolicy();
-    policy.initialize(BuilderUtils.newResource(clusterMem, clusterCpu));
+    FSContext fsContext = mock(FSContext.class);
+    when(fsContext.getClusterResource()).
+        thenReturn(Resources.createResource(clusterMem, clusterCpu));
+    policy.initialize(fsContext);
     return policy.getComparator();
   }
   
@@ -160,4 +166,21 @@ public class TestDominantResourceFairnessPolicy {
     assertEquals(ResourceType.CPU, resourceOrder[0]);
     assertEquals(ResourceType.MEMORY, resourceOrder[1]);
   }
+
+  @Test
+  public void testCompareSchedulablesWithClusterResourceChanges(){
+    Schedulable schedulable1 = createSchedulable(2000, 1);
+    Schedulable schedulable2 = createSchedulable(1000, 2);
+
+    // schedulable1 has share weights [1/2, 1/5], schedulable2 has share
+    // weights [1/4, 2/5], schedulable1 > schedulable2 since 1/2 > 2/5
+    assertTrue(createComparator(4000, 5)
+        .compare(schedulable1, schedulable2) > 0);
+
+    // share weights have changed because of the cluster resource change.
+    // schedulable1 has share weights [1/4, 1/6], schedulable2 has share
+    // weights [1/8, 1/3], schedulable1 < schedulable2 since 1/4 < 1/3
+    assertTrue(createComparator(8000, 6)
+        .compare(schedulable1, schedulable2) < 0);
+  }
 }

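For reference, the dominant shares behind the new test work out as follows: with a 4000 MB / 5 vcore cluster, schedulable1 (2000 MB, 1 vcore) has shares (1/2, 1/5) and schedulable2 (1000 MB, 2 vcores) has (1/4, 2/5), so the dominant shares compare as 1/2 > 2/5; after the cluster grows to 8000 MB / 6 vcores the shares become (1/4, 1/6) and (1/8, 1/3), and the ordering flips to 1/4 < 1/3. A comparator bound to a stale clusterCapacity snapshot could not observe this flip, which is what the move to FSContext addresses.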

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[13/31] hadoop git commit: HADOOP-14113. Review ADL Docs. Contributed by Steve Loughran

Posted by st...@apache.org.
HADOOP-14113. Review ADL Docs. Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e60c6543
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e60c6543
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e60c6543

Branch: refs/heads/HADOOP-13345
Commit: e60c6543d57611039b0438d5dcb4cb19ee239bb6
Parents: 9c22a91
Author: Steve Loughran <st...@apache.org>
Authored: Fri Feb 24 13:24:59 2017 +0000
Committer: Steve Loughran <st...@apache.org>
Committed: Fri Feb 24 13:24:59 2017 +0000

----------------------------------------------------------------------
 .../src/site/markdown/index.md                  | 237 ++++++++++---------
 1 file changed, 124 insertions(+), 113 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e60c6543/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
index 6d9e173..9355241 100644
--- a/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
+++ b/hadoop-tools/hadoop-azure-datalake/src/site/markdown/index.md
@@ -20,20 +20,20 @@
 * [Usage](#Usage)
     * [Concepts](#Concepts)
         * [OAuth2 Support](#OAuth2_Support)
-    * [Configuring Credentials & FileSystem](#Configuring_Credentials)
+    * [Configuring Credentials and FileSystem](#Configuring_Credentials)
         * [Using Refresh Token](#Refresh_Token)
         * [Using Client Keys](#Client_Credential_Token)
         * [Protecting the Credentials with Credential Providers](#Credential_Provider)
     * [Enabling ADL Filesystem](#Enabling_ADL)
-    * [Accessing adl URLs](#Accessing_adl_URLs)
+    * [Accessing `adl` URLs](#Accessing_adl_URLs)
     * [User/Group Representation](#OIDtoUPNConfiguration)
-* [Testing the hadoop-azure Module](#Testing_the_hadoop-azure_Module)
+* [Testing the `hadoop-azure` Module](#Testing_the_hadoop-azure_Module)
 
 ## <a name="Introduction" />Introduction
 
-The hadoop-azure-datalake module provides support for integration with
-[Azure Data Lake Store]( https://azure.microsoft.com/en-in/documentation/services/data-lake-store/).
-The jar file is named azure-datalake-store.jar.
+The `hadoop-azure-datalake` module provides support for integration with the
+[Azure Data Lake Store](https://azure.microsoft.com/en-in/documentation/services/data-lake-store/).
+This support comes via the JAR file `azure-datalake-store.jar`.
 
 ## <a name="Features" />Features
 
@@ -43,13 +43,14 @@ The jar file is named azure-datalake-store.jar.
 * Can act as a source of data in a MapReduce job, or a sink.
 * Tested on both Linux and Windows.
 * Tested for scale.
-* API setOwner/setAcl/removeAclEntries/modifyAclEntries accepts UPN or OID
-  (Object ID) as user and group name.
+* API `setOwner()`, `setAcl()`, `removeAclEntries()`, `modifyAclEntries()` accept UPN or OID
+  (Object ID) as user and group names.
 
 ## <a name="Limitations" />Limitations
+
 Partial or no support for the following operations :
 
-* Operation on Symbolic Link
+* Operation on Symbolic Links
 * Proxy Users
 * File Truncate
 * File Checksum
@@ -58,55 +59,71 @@ Partial or no support for the following operations :
 * Extended Attributes(XAttrs) Operations
 * Snapshot Operations
 * Delegation Token Operations
-* User and group information returned as ListStatus and GetFileStatus is in form of GUID associated in Azure Active Directory.
+* User and group information returned by `listStatus()` and `getFileStatus()` is
+in the form of the GUID associated with the user or group in Azure Active Directory.
 
 ## <a name="Usage" />Usage
 
 ### <a name="Concepts" />Concepts
-Azure Data Lake Storage access path syntax is
+Azure Data Lake Storage access path syntax is:
+
+```
+adl://<Account Name>.azuredatalakestore.net/
+```
 
-    adl://<Account Name>.azuredatalakestore.net/
+For details on using the store, see
+[**Get started with Azure Data Lake Store using the Azure Portal**](https://azure.microsoft.com/en-in/documentation/articles/data-lake-store-get-started-portal/)
 
-Get started with azure data lake account with [https://azure.microsoft.com/en-in/documentation/articles/data-lake-store-get-started-portal/](https://azure.microsoft.com/en-in/documentation/articles/data-lake-store-get-started-portal/)
+### <a name="#OAuth2_Support" />OAuth2 Support
 
-#### <a name="#OAuth2_Support" />OAuth2 Support
-Usage of Azure Data Lake Storage requires OAuth2 bearer token to be present as part of the HTTPS header as per OAuth2 specification. Valid OAuth2 bearer token should be obtained from Azure Active Directory for valid users who have  access to Azure Data Lake Storage Account.
+Usage of Azure Data Lake Storage requires an OAuth2 bearer token to be present as
+part of the HTTPS header as per the OAuth2 specification.
+A valid OAuth2 bearer token must be obtained from the Azure Active Directory service
+for users who have access to the Azure Data Lake Storage account.
 
-Azure Active Directory (Azure AD) is Microsoft's multi-tenant cloud based directory and identity management service. See [https://azure.microsoft.com/en-in/documentation/articles/active-directory-whatis/](https://azure.microsoft.com/en-in/documentation/articles/active-directory-whatis/)
+Azure Active Directory (Azure AD) is Microsoft's multi-tenant cloud based directory
+and identity management service. See [*What is Active Directory*](https://azure.microsoft.com/en-in/documentation/articles/active-directory-whatis/).
 
-Following sections describes on OAuth2 configuration in core-site.xml.
+The following sections describe the OAuth2 configuration in `core-site.xml`.
 
-## <a name="Configuring_Credentials" />Configuring Credentials & FileSystem
-Credentials can be configured using either a refresh token (associated with a user) or a client credential (analogous to a service principal).
+#### <a name="Configuring_Credentials" />Configuring Credentials & FileSystem
+Credentials can be configured using either a refresh token (associated with a user),
+or a client credential (analogous to a service principal).
 
-### <a name="Refresh_Token" />Using Refresh Token
+#### <a name="Refresh_Token" />Using Refresh Tokens
 
-Add the following properties to your core-site.xml
+Add the following properties to the cluster's `core-site.xml`
 
-        <property>
-            <name>dfs.adls.oauth2.access.token.provider.type</name>
-            <value>RefreshToken</value>
-        </property>
+```xml
+<property>
+  <name>dfs.adls.oauth2.access.token.provider.type</name>
+  <value>RefreshToken</value>
+</property>
+```
 
-Application require to set Client id and OAuth2 refresh token from Azure Active Directory associated with client id. See [https://github.com/AzureAD/azure-activedirectory-library-for-java](https://github.com/AzureAD/azure-activedirectory-library-for-java).
+Applications must set the client id and the OAuth2 refresh token obtained from the Azure Active
+Directory service for that client id. See [*Active Directory Library For Java*](https://github.com/AzureAD/azure-activedirectory-library-for-java).
 
 **Do not share client id and refresh token, it must be kept secret.**
 
-        <property>
-            <name>dfs.adls.oauth2.client.id</name>
-            <value></value>
-        </property>
+```xml
+<property>
+  <name>dfs.adls.oauth2.client.id</name>
+  <value></value>
+</property>
 
-        <property>
-            <name>dfs.adls.oauth2.refresh.token</name>
-            <value></value>
-        </property>
+<property>
+  <name>dfs.adls.oauth2.refresh.token</name>
+  <value></value>
+</property>
+```
 
 
 ### <a name="Client_Credential_Token" />Using Client Keys
 
 #### Generating the Service Principal
-1.  Go to the portal (https://portal.azure.com)
+
+1.  Go to [the portal](https://portal.azure.com)
 2.  Under "Browse", look for Active Directory and click on it.
 3.  Create "Web Application". Remember the name you create here - that is what you will add to your ADL account as authorized user.
 4.  Go through the wizard
@@ -124,31 +141,31 @@ Application require to set Client id and OAuth2 refresh token from Azure Active
 3.  Add your user name you created in Step 6 above (note that it does not show up in the list, but will be found if you searched for the name)
 4.  Add "Owner" role
 
-#### Configure core-site.xml
-Add the following properties to your core-site.xml
-
-    <property>
-      <name>dfs.adls.oauth2.refresh.url</name>
-      <value>TOKEN ENDPOINT FROM STEP 7 ABOVE</value>
-    </property>
+### Configure core-site.xml
+Add the following properties to your `core-site.xml`
 
-    <property>
-      <name>dfs.adls.oauth2.client.id</name>
-      <value>CLIENT ID FROM STEP 7 ABOVE</value>
-    </property>
+```xml
+<property>
+  <name>dfs.adls.oauth2.refresh.url</name>
+  <value>TOKEN ENDPOINT FROM STEP 7 ABOVE</value>
+</property>
 
-    <property>
-      <name>dfs.adls.oauth2.credential</name>
-      <value>PASSWORD FROM STEP 7 ABOVE</value>
-    </property>
+<property>
+  <name>dfs.adls.oauth2.client.id</name>
+  <value>CLIENT ID FROM STEP 7 ABOVE</value>
+</property>
 
+<property>
+  <name>dfs.adls.oauth2.credential</name>
+  <value>PASSWORD FROM STEP 7 ABOVE</value>
+</property>
+```
 
 ### <a name="Credential_Provider" />Protecting the Credentials with Credential Providers
 
-In many Hadoop clusters, the core-site.xml file is world-readable. To protect
-these credentials from prying eyes, it is recommended that you use the
-credential provider framework to securely store them and access them through
-configuration.
+In many Hadoop clusters, the `core-site.xml` file is world-readable. To protect
+these credentials, it is recommended that you use the
+credential provider framework to securely store them and access them.
 
 All ADLS credential properties can be protected by credential providers.
 For additional reading on the credential provider API, see
@@ -156,16 +173,16 @@ For additional reading on the credential provider API, see
 
 #### Provisioning
 
-```
-% hadoop credential create dfs.adls.oauth2.refresh.token -value 123
+```bash
+hadoop credential create dfs.adls.oauth2.refresh.token -value 123
     -provider localjceks://file/home/foo/adls.jceks
-% hadoop credential create dfs.adls.oauth2.credential -value 123
+hadoop credential create dfs.adls.oauth2.credential -value 123
     -provider localjceks://file/home/foo/adls.jceks
 ```
 
 #### Configuring core-site.xml or command line property
 
-```
+```xml
 <property>
   <name>hadoop.security.credential.provider.path</name>
   <value>localjceks://file/home/foo/adls.jceks</value>
@@ -175,42 +192,28 @@ For additional reading on the credential provider API, see
 
 #### Running DistCp
 
-```
-% hadoop distcp
+```bash
+hadoop distcp
     [-D hadoop.security.credential.provider.path=localjceks://file/home/user/adls.jceks]
     hdfs://<NameNode Hostname>:9001/user/foo/007020615
     adl://<Account Name>.azuredatalakestore.net/testDir/
 ```
 
-NOTE: You may optionally add the provider path property to the distcp command
-line instead of added job specific configuration to a generic core-site.xml.
-The square brackets above illustrate this capability.
-
-
-## <a name="Enabling_ADL" />Enabling ADL Filesystem
-
-For ADL FileSystem to take effect. Update core-site.xml with
-
-        <property>
-            <name>fs.adl.impl</name>
-            <value>org.apache.hadoop.fs.adl.AdlFileSystem</value>
-        </property>
-
-        <property>
-            <name>fs.AbstractFileSystem.adl.impl</name>
-            <value>org.apache.hadoop.fs.adl.Adl</value>
-        </property>
-
+NOTE: You may optionally add the provider path property to the `distcp` command
+line instead of adding job-specific configuration to a generic `core-site.xml`.
+The square brackets above illustrate this capability.
 
 ### <a name="Accessing_adl_URLs" />Accessing adl URLs
 
-After credentials are configured in core-site.xml, any Hadoop component may
+After credentials are configured in `core-site.xml`, any Hadoop component may
 reference files in that Azure Data Lake Storage account by using URLs of the following
 format:
 
-    adl://<Account Name>.azuredatalakestore.net/<path>
+```
+adl://<Account Name>.azuredatalakestore.net/<path>
+```
 
-The schemes `adl` identify a URL on a file system backed by Azure
+The scheme `adl` identifies a URL on a Hadoop-compatible file system backed by Azure
 Data Lake Storage.  `adl` utilizes encrypted HTTPS access for all interaction with
 the Azure Data Lake Storage API.
 
@@ -218,48 +221,56 @@ For example, the following
 [FileSystem Shell](../hadoop-project-dist/hadoop-common/FileSystemShell.html)
 commands demonstrate access to a storage account named `youraccount`.
 
-    > hadoop fs -mkdir adl://yourcontainer.azuredatalakestore.net/testDir
 
-    > hadoop fs -put testFile adl://yourcontainer.azuredatalakestore.net/testDir/testFile
+```bash
+hadoop fs -mkdir adl://yourcontainer.azuredatalakestore.net/testDir
 
-    > hadoop fs -cat adl://yourcontainer.azuredatalakestore.net/testDir/testFile
-    test file content
+hadoop fs -put testFile adl://yourcontainer.azuredatalakestore.net/testDir/testFile
 
+hadoop fs -cat adl://yourcontainer.azuredatalakestore.net/testDir/testFile
+test file content
+```
 ### <a name="OIDtoUPNConfiguration" />User/Group Representation
-The hadoop-azure-datalake module provides support for configuring how
-User/Group information is represented during
-getFileStatus/listStatus/getAclStatus.
 
-Add the following properties to your core-site.xml
-
-        <property>
-          <name>adl.feature.ownerandgroup.enableupn</name>
-          <value>true</value>
-          <description>
-            When true : User and Group in FileStatus/AclStatus response is
-            represented as user friendly name as per Azure AD profile.
-
-            When false (default) : User and Group in FileStatus/AclStatus
-            response is represented by the unique identifier from Azure AD
-            profile (Object ID as GUID).
+The `hadoop-azure-datalake` module provides support for configuring how
+User/Group information is represented during
+`getFileStatus()`, `listStatus()`, and `getAclStatus()` calls.
 
-            For performance optimization, Recommended default value.
-          </description>
-        </property>
+Add the following properties to `core-site.xml`
 
+```xml
+<property>
+  <name>adl.feature.ownerandgroup.enableupn</name>
+  <value>true</value>
+  <description>
+    When true : User and Group in FileStatus/AclStatus response is
+    represented as user friendly name as per Azure AD profile.
+
+    When false (default) : User and Group in FileStatus/AclStatus
+    response is represented by the unique identifier from Azure AD
+    profile (Object ID as GUID).
+
+    For performance optimization, the default value (false) is recommended.
+  </description>
+</property>
+```
 ## <a name="Testing_the_hadoop-azure_Module" />Testing the azure-datalake-store Module
-The hadoop-azure module includes a full suite of unit tests. Most of the tests will run without additional configuration by running mvn test. This includes tests against mocked storage, which is an in-memory emulation of Azure Data Lake Storage.
+The `hadoop-azure-datalake` module includes a full suite of unit tests.
+Most of the tests will run without additional configuration by running `mvn test`.
+This includes tests against mocked storage, which is an in-memory emulation of Azure Data Lake Storage.
 
 A selection of tests can run against the Azure Data Lake Storage. To run these
 tests, please create `src/test/resources/auth-keys.xml` with Adl account
 information mentioned in the above sections and the following properties.
 
-        <property>
-            <name>dfs.adl.test.contract.enable</name>
-            <value>true</value>
-        </property>
+```xml
+<property>
+    <name>dfs.adl.test.contract.enable</name>
+    <value>true</value>
+</property>
 
-        <property>
-            <name>test.fs.adl.name</name>
-            <value>adl://yourcontainer.azuredatalakestore.net</value>
-        </property>
+<property>
+    <name>test.fs.adl.name</name>
+    <value>adl://yourcontainer.azuredatalakestore.net</value>
+</property>
+```

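Beyond the shell commands above, the same adl:// URLs work through the Java FileSystem API once the credentials are configured. A rough sketch, in which the account name youraccount and the path are placeholders:

    // List a directory on an ADL-backed FileSystem; assumes the OAuth2
    // credentials described above are present in the effective Configuration.
    Configuration conf = new Configuration();
    Path dir = new Path("adl://youraccount.azuredatalakestore.net/testDir");
    FileSystem fs = dir.getFileSystem(conf);
    for (FileStatus status : fs.listStatus(dir)) {
      System.out.println(status.getPath());
    }
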

---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[07/31] hadoop git commit: HADOOP-13321. Deprecate FileSystem APIs that promote inefficient call patterns. Contributed by Chris Nauroth and Mingliang Liu

Posted by st...@apache.org.
HADOOP-13321. Deprecate FileSystem APIs that promote inefficient call patterns. Contributed by Chris Nauroth and Mingliang Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4d4a237
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4d4a237
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4d4a237

Branch: refs/heads/HADOOP-13345
Commit: a4d4a23785356e6a19d0db3a2dec8ae8cf861273
Parents: a207aa9
Author: Mingliang Liu <li...@apache.org>
Authored: Thu Feb 16 16:25:51 2017 -0800
Committer: Mingliang Liu <li...@apache.org>
Committed: Thu Feb 23 12:55:40 2017 -0800

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/ChecksumFileSystem.java    |  2 ++
 .../src/main/java/org/apache/hadoop/fs/FileSystem.java   | 11 +++++++++++
 .../java/org/apache/hadoop/fs/ftp/FTPFileSystem.java     |  1 +
 .../java/org/apache/hadoop/fs/s3a/S3AFileSystem.java     |  2 ++
 .../hadoop/fs/swift/snative/SwiftNativeFileSystem.java   |  2 ++
 5 files changed, 18 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d4a237/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index e0ce327..14c1905 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -605,6 +605,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * Rename files/dirs
    */
   @Override
+  @SuppressWarnings("deprecation")
   public boolean rename(Path src, Path dst) throws IOException {
     if (fs.isDirectory(src)) {
       return fs.rename(src, dst);
@@ -721,6 +722,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * If src and dst are directories, the copyCrc parameter
    * determines whether to copy CRC files.
    */
+  @SuppressWarnings("deprecation")
   public void copyToLocalFile(Path src, Path dst, boolean copyCrc)
     throws IOException {
     if (!fs.isDirectory(src)) { // source is a file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d4a237/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 55cd97e..ededfa9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1624,6 +1624,11 @@ public abstract class FileSystem extends Configured implements Closeable {
   }
 
   /** Check if a path exists.
+   *
+   * It is highly discouraged to call this method back to back with
+   * {@link #getFileStatus(Path)} calls, as this will involve multiple redundant
+   * RPC calls in HDFS.
+   *
    * @param f source path
    * @return true if the path exists
    * @throws IOException IO failure
@@ -1639,9 +1644,12 @@ public abstract class FileSystem extends Configured implements Closeable {
   /** True iff the named path is a directory.
    * Note: Avoid using this method. Instead reuse the FileStatus
    * returned by getFileStatus() or listStatus() methods.
+   *
    * @param f path to check
    * @throws IOException IO failure
+   * @deprecated Use {@link #getFileStatus(Path)} instead
    */
+  @Deprecated
   public boolean isDirectory(Path f) throws IOException {
     try {
       return getFileStatus(f).isDirectory();
@@ -1653,9 +1661,12 @@ public abstract class FileSystem extends Configured implements Closeable {
   /** True iff the named path is a regular file.
    * Note: Avoid using this method. Instead reuse the FileStatus
    * returned by {@link #getFileStatus(Path)} or listStatus() methods.
+   *
    * @param f path to check
    * @throws IOException IO failure
+   * @deprecated Use {@link #getFileStatus(Path)} instead
    */
+  @Deprecated
   public boolean isFile(Path f) throws IOException {
     try {
       return getFileStatus(f).isFile();

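The deprecation steers callers toward a single metadata lookup rather than chains of exists(), isDirectory() and isFile(). A sketch of the pattern the javadoc recommends, where the fs and path variables are placeholders:

    // One getFileStatus() RPC replaces separate exists()/isDirectory()/isFile() calls.
    try {
      FileStatus status = fs.getFileStatus(path);
      if (status.isDirectory()) {
        // handle the directory case
      } else {
        // handle the regular-file case
      }
    } catch (FileNotFoundException e) {
      // the path does not exist
    }
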
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d4a237/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
index 25fec31..6ce39c1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
@@ -644,6 +644,7 @@ public class FTPFileSystem extends FileSystem {
    * @return
    * @throws IOException
    */
+  @SuppressWarnings("deprecation")
   private boolean rename(FTPClient client, Path src, Path dst)
       throws IOException {
     Path workDir = new Path(client.printWorkingDirectory());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d4a237/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 8b1a6d0..bc47918 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -2142,6 +2142,7 @@ public class S3AFileSystem extends FileSystem {
    * {@inheritDoc}
    */
   @Override
+  @SuppressWarnings("deprecation")
   public boolean isDirectory(Path f) throws IOException {
     incrementStatistic(INVOCATION_IS_DIRECTORY);
     return super.isDirectory(f);
@@ -2152,6 +2153,7 @@ public class S3AFileSystem extends FileSystem {
    * {@inheritDoc}
    */
   @Override
+  @SuppressWarnings("deprecation")
   public boolean isFile(Path f) throws IOException {
     incrementStatistic(INVOCATION_IS_FILE);
     return super.isFile(f);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4d4a237/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
index 7f93c38..9217532 100644
--- a/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
+++ b/hadoop-tools/hadoop-openstack/src/main/java/org/apache/hadoop/fs/swift/snative/SwiftNativeFileSystem.java
@@ -201,6 +201,7 @@ public class SwiftNativeFileSystem extends FileSystem {
   }
 
   @Override
+  @SuppressWarnings("deprecation")
   public boolean isFile(Path f) throws IOException {
     try {
       FileStatus fileStatus = getFileStatus(f);
@@ -210,6 +211,7 @@ public class SwiftNativeFileSystem extends FileSystem {
     }
   }
 
+  @SuppressWarnings("deprecation")
   @Override
   public boolean isDirectory(Path f) throws IOException {
 


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[14/31] hadoop git commit: HADOOP-14097. Remove Java6 specific code from GzipCodec.java. Contributed by Elek, Marton.

Posted by st...@apache.org.
HADOOP-14097. Remove Java6 specific code from GzipCodec.java. Contributed by Elek, Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50decd36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50decd36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50decd36

Branch: refs/heads/HADOOP-13345
Commit: 50decd36130945e184734dcd55b8912be6f4550a
Parents: e60c654
Author: Akira Ajisaka <aa...@apache.org>
Authored: Sat Feb 25 00:28:31 2017 +0900
Committer: Akira Ajisaka <aa...@apache.org>
Committed: Sat Feb 25 00:28:31 2017 +0900

----------------------------------------------------------------------
 .../apache/hadoop/io/compress/GzipCodec.java    | 59 --------------------
 1 file changed, 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50decd36/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
index 01b6434..d079412 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/GzipCodec.java
@@ -45,10 +45,6 @@ public class GzipCodec extends DefaultCodec {
   protected static class GzipOutputStream extends CompressorStream {
 
     private static class ResetableGZIPOutputStream extends GZIPOutputStream {
-      private static final int TRAILER_SIZE = 8;
-      public static final String JVMVersion= System.getProperty("java.version");
-      private static final boolean HAS_BROKEN_FINISH =
-          (IBM_JAVA && JVMVersion.contains("1.6.0"));
 
       public ResetableGZIPOutputStream(OutputStream out) throws IOException {
         super(out);
@@ -57,61 +53,6 @@ public class GzipCodec extends DefaultCodec {
       public void resetState() throws IOException {
         def.reset();
       }
-
-      /**
-       * Override this method for HADOOP-8419.
-       * Override because IBM implementation calls def.end() which
-       * causes problem when reseting the stream for reuse.
-       *
-       */
-      @Override
-      public void finish() throws IOException {
-        if (HAS_BROKEN_FINISH) {
-          if (!def.finished()) {
-            def.finish();
-            while (!def.finished()) {
-              int i = def.deflate(this.buf, 0, this.buf.length);
-              if ((def.finished()) && (i <= this.buf.length - TRAILER_SIZE)) {
-                writeTrailer(this.buf, i);
-                i += TRAILER_SIZE;
-                out.write(this.buf, 0, i);
-
-                return;
-              }
-              if (i > 0) {
-                out.write(this.buf, 0, i);
-              }
-            }
-
-            byte[] arrayOfByte = new byte[TRAILER_SIZE];
-            writeTrailer(arrayOfByte, 0);
-            out.write(arrayOfByte);
-          }
-        } else {
-          super.finish();
-        }
-      }
-
-      /** re-implement for HADOOP-8419 because the relative method in jdk is invisible */
-      private void writeTrailer(byte[] paramArrayOfByte, int paramInt)
-        throws IOException {
-        writeInt((int)this.crc.getValue(), paramArrayOfByte, paramInt);
-        writeInt(this.def.getTotalIn(), paramArrayOfByte, paramInt + 4);
-      }
-
-      /** re-implement for HADOOP-8419 because the relative method in jdk is invisible */
-      private void writeInt(int paramInt1, byte[] paramArrayOfByte, int paramInt2)
-        throws IOException {
-        writeShort(paramInt1 & 0xFFFF, paramArrayOfByte, paramInt2);
-        writeShort(paramInt1 >> 16 & 0xFFFF, paramArrayOfByte, paramInt2 + 2);
-      }
-
-      /** re-implement for HADOOP-8419 because the relative method in jdk is invisible */
-      private void writeShort(int paramInt1, byte[] paramArrayOfByte, int paramInt2)
-        throws IOException {
-        paramArrayOfByte[paramInt2] = (byte)(paramInt1 & 0xFF);
-        paramArrayOfByte[(paramInt2 + 1)] = (byte)(paramInt1 >> 8 & 0xFF);
-      }
     }
 
     public GzipOutputStream(OutputStream out) throws IOException {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org


[05/31] hadoop git commit: HDFS-4025. QJM: Synchronize past log segments to JNs that missed them. Contributed by Hanisha Koneru.

Posted by st...@apache.org.
HDFS-4025. QJM: Synchronize past log segments to JNs that missed them. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13d4bcfe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13d4bcfe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13d4bcfe

Branch: refs/heads/HADOOP-13345
Commit: 13d4bcfe3535a2df79c2a56e7578716d15497ff4
Parents: b10e962
Author: Jing Zhao <ji...@apache.org>
Authored: Wed Feb 22 16:33:38 2017 -0800
Committer: Jing Zhao <ji...@apache.org>
Committed: Wed Feb 22 16:33:38 2017 -0800

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  16 +
 .../qjournal/client/QuorumJournalManager.java   |  38 +-
 .../hadoop/hdfs/qjournal/server/JNStorage.java  |   9 +-
 .../hadoop/hdfs/qjournal/server/Journal.java    |  19 +
 .../hdfs/qjournal/server/JournalNode.java       |  23 +-
 .../hdfs/qjournal/server/JournalNodeSyncer.java | 413 +++++++++++++++++++
 .../hadoop/hdfs/server/common/Storage.java      |   9 +
 .../apache/hadoop/hdfs/server/common/Util.java  |  46 ++-
 .../hadoop/hdfs/server/namenode/NNStorage.java  |   5 +-
 .../hdfs/server/namenode/TransferFsImage.java   |   3 +-
 .../src/main/resources/hdfs-default.xml         |  41 ++
 .../hdfs/qjournal/MiniJournalCluster.java       |   8 +
 .../hadoop/hdfs/qjournal/MiniQJMHACluster.java  |   1 +
 .../hdfs/qjournal/TestJournalNodeSync.java      | 264 ++++++++++++
 14 files changed, 853 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index cf1d21a..cfd16aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -707,6 +707,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_IMAGE_TRANSFER_CHUNKSIZE_KEY = "dfs.image.transfer.chunksize";
   public static final int DFS_IMAGE_TRANSFER_CHUNKSIZE_DEFAULT = 64 * 1024;
 
+  // Edit Log segment transfer timeout
+  public static final String DFS_EDIT_LOG_TRANSFER_TIMEOUT_KEY =
+      "dfs.edit.log.transfer.timeout";
+  public static final int DFS_EDIT_LOG_TRANSFER_TIMEOUT_DEFAULT = 30 * 1000;
+
+  // Throttling Edit Log Segment transfer for Journal Sync
+  public static final String DFS_EDIT_LOG_TRANSFER_RATE_KEY =
+      "dfs.edit.log.transfer.bandwidthPerSec";
+  public static final long DFS_EDIT_LOG_TRANSFER_RATE_DEFAULT = 0; //no throttling
+
   // Datanode File IO Stats
   public static final String DFS_DATANODE_ENABLE_FILEIO_PROFILING_KEY =
       "dfs.datanode.enable.fileio.profiling";
@@ -891,6 +901,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_JOURNALNODE_KEYTAB_FILE_KEY = "dfs.journalnode.keytab.file";
   public static final String  DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY = "dfs.journalnode.kerberos.principal";
   public static final String  DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY = "dfs.journalnode.kerberos.internal.spnego.principal";
+  public static final String DFS_JOURNALNODE_ENABLE_SYNC_KEY =
+      "dfs.journalnode.enable.sync";
+  public static final boolean DFS_JOURNALNODE_ENABLE_SYNC_DEFAULT = false;
+  public static final String DFS_JOURNALNODE_SYNC_INTERVAL_KEY =
+      "dfs.journalnode.sync.interval";
+  public static final long DFS_JOURNALNODE_SYNC_INTERVAL_DEFAULT = 2*60*1000L;
 
   // Journal-node related configs for the client side.
   public static final String  DFS_QJOURNAL_QUEUE_SIZE_LIMIT_KEY = "dfs.qjournal.queued-edits.limit.mb";

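The new keys map to the dfs.journalnode.enable.sync and dfs.journalnode.sync.interval properties. A small sketch of enabling the feature programmatically, e.g. in a test configuration (the 60-second interval is an arbitrary example value):

    // Turn on JournalNode syncing and shorten the gossip interval to 60s.
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_KEY, true);
    conf.setLong(DFSConfigKeys.DFS_JOURNALNODE_SYNC_INTERVAL_KEY, 60 * 1000L);
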
http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
index ae3358b..97c0050 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URL;
-import java.net.UnknownHostException;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
@@ -42,6 +41,7 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRe
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
 import org.apache.hadoop.hdfs.server.namenode.EditLogOutputStream;
@@ -51,8 +51,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
@@ -362,41 +360,17 @@ public class QuorumJournalManager implements JournalManager {
       URI uri, NamespaceInfo nsInfo, AsyncLogger.Factory factory)
           throws IOException {
     List<AsyncLogger> ret = Lists.newArrayList();
-    List<InetSocketAddress> addrs = getLoggerAddresses(uri);
+    List<InetSocketAddress> addrs = Util.getAddressesList(uri);
+    if (addrs.size() % 2 == 0) {
+      LOG.warn("Quorum journal URI '" + uri + "' has an even number " +
+          "of Journal Nodes specified. This is not recommended!");
+    }
     String jid = parseJournalId(uri);
     for (InetSocketAddress addr : addrs) {
       ret.add(factory.createLogger(conf, nsInfo, jid, addr));
     }
     return ret;
   }
- 
-  private static List<InetSocketAddress> getLoggerAddresses(URI uri)
-      throws IOException {
-    String authority = uri.getAuthority();
-    Preconditions.checkArgument(authority != null && !authority.isEmpty(),
-        "URI has no authority: " + uri);
-    
-    String[] parts = StringUtils.split(authority, ';');
-    for (int i = 0; i < parts.length; i++) {
-      parts[i] = parts[i].trim();
-    }
-
-    if (parts.length % 2 == 0) {
-      LOG.warn("Quorum journal URI '" + uri + "' has an even number " +
-          "of Journal Nodes specified. This is not recommended!");
-    }
-    
-    List<InetSocketAddress> addrs = Lists.newArrayList();
-    for (String addr : parts) {
-      InetSocketAddress isa = NetUtils.createSocketAddr(
-          addr, DFSConfigKeys.DFS_JOURNALNODE_RPC_PORT_DEFAULT);
-      if (isa.isUnresolved()) {
-        throw new UnknownHostException(addr);
-      }
-      addrs.add(isa);
-    }
-    return addrs;
-  }
   
   @Override
   public EditLogOutputStream startLogSegment(long txId, int layoutVersion)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
index 07c9286..8f40f6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
@@ -49,7 +49,6 @@ class JNStorage extends Storage {
   private final FileJournalManager fjm;
   private final StorageDirectory sd;
   private StorageState state;
-  
 
   private static final List<Pattern> CURRENT_DIR_PURGE_REGEXES =
       ImmutableList.of(
@@ -121,6 +120,14 @@ class JNStorage extends Storage {
     return new File(sd.getCurrentDir(), name);
   }
 
+  File getTemporaryEditsFile(long startTxId, long endTxId, long timestamp) {
+    return NNStorage.getTemporaryEditsFile(sd, startTxId, endTxId, timestamp);
+  }
+
+  File getFinalizedEditsFile(long startTxId, long endTxId) {
+    return NNStorage.getFinalizedEditsFile(sd, startTxId, endTxId);
+  }
+
   /**
    * @return the path for the file which contains persisted data for the
    * paxos-like recovery process for the given log segment.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 3760641..ca21373 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -1092,6 +1092,25 @@ public class Journal implements Closeable {
     committedTxnId.set(startTxId - 1);
   }
 
+  synchronized boolean renameTmpSegment(File tmpFile, File finalFile,
+      long endTxId) throws IOException {
+    final boolean success;
+    if (endTxId <= committedTxnId.get()) {
+      success = tmpFile.renameTo(finalFile);
+      if (!success) {
+        LOG.warn("Unable to rename edits file from " + tmpFile + " to " +
+            finalFile);
+      }
+    } else {
+      success = false;
+      LOG.error("The endTxId of the temporary file is not less than the " +
+          "last committed transaction id. Aborting renaming to final file" +
+          finalFile);
+    }
+
+    return success;
+  }
+
   public Long getJournalCTime() throws IOException {
     return storage.getJournalManager().getJournalCTime();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index cde0112..42e9be7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -68,6 +68,8 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   private JournalNodeRpcServer rpcServer;
   private JournalNodeHttpServer httpServer;
   private final Map<String, Journal> journalsById = Maps.newHashMap();
+  private final Map<String, JournalNodeSyncer> journalSyncersById = Maps
+      .newHashMap();
   private ObjectName journalNodeInfoBeanName;
   private String httpServerURI;
   private File localDir;
@@ -92,11 +94,24 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
       LOG.info("Initializing journal in directory " + logDir);      
       journal = new Journal(conf, logDir, jid, startOpt, new ErrorReporter());
       journalsById.put(jid, journal);
+
+      // Start SyncJournal thread, if JournalNode Sync is enabled
+      if (conf.getBoolean(
+          DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_KEY,
+          DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_DEFAULT)) {
+        startSyncer(journal, jid);
+      }
     }
-    
+
     return journal;
   }
 
+  private void startSyncer(Journal journal, String jid) {
+    JournalNodeSyncer jSyncer = new JournalNodeSyncer(this, journal, jid, conf);
+    journalSyncersById.put(jid, jSyncer);
+    jSyncer.start();
+  }
+
   @VisibleForTesting
   public Journal getOrCreateJournal(String jid) throws IOException {
     return getOrCreateJournal(jid, StartupOption.REGULAR);
@@ -190,7 +205,11 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
    */
   public void stop(int rc) {
     this.resultCode = rc;
-    
+
+    for (JournalNodeSyncer jSyncer : journalSyncersById.values()) {
+      jSyncer.stopSync();
+    }
+
     if (rpcServer != null) { 
       rpcServer.stop();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
new file mode 100644
index 0000000..f195c00
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
@@ -0,0 +1,413 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
+import com.google.protobuf.ServiceException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos
+  .JournalIdProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos
+  .GetEditLogManifestRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos
+  .GetEditLogManifestResponseProto;
+import org.apache.hadoop.hdfs.qjournal.protocolPB.QJournalProtocolPB;
+import org.apache.hadoop.hdfs.server.common.Util;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
+import org.apache.hadoop.hdfs.util.DataTransferThrottler;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.MalformedURLException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.List;
+
+/**
+ * A Journal Sync thread runs through the lifetime of the JN. It periodically
+ * gossips with other journal nodes to compare edit log manifests and, if it
+ * detects any missing log segment, downloads it from the other journal node.
+ */
+@InterfaceAudience.Private
+public class JournalNodeSyncer {
+  public static final Logger LOG = LoggerFactory.getLogger(
+      JournalNodeSyncer.class);
+  private final JournalNode jn;
+  private final Journal journal;
+  private final String jid;
+  private final JournalIdProto jidProto;
+  private final JNStorage jnStorage;
+  private final Configuration conf;
+  private volatile Daemon syncJournalDaemon;
+  private volatile boolean shouldSync = true;
+
+  private List<JournalNodeProxy> otherJNProxies = Lists.newArrayList();
+  private int numOtherJNs;
+  private int journalNodeIndexForSync = 0;
+  private final long journalSyncInterval;
+  private final int logSegmentTransferTimeout;
+  private final DataTransferThrottler throttler;
+
+  JournalNodeSyncer(JournalNode journalNode, Journal journal, String jid,
+      Configuration conf) {
+    this.jn = journalNode;
+    this.journal = journal;
+    this.jid = jid;
+    this.jidProto = convertJournalId(this.jid);
+    this.jnStorage = journal.getStorage();
+    this.conf = conf;
+    journalSyncInterval = conf.getLong(
+        DFSConfigKeys.DFS_JOURNALNODE_SYNC_INTERVAL_KEY,
+        DFSConfigKeys.DFS_JOURNALNODE_SYNC_INTERVAL_DEFAULT);
+    logSegmentTransferTimeout = conf.getInt(
+        DFSConfigKeys.DFS_EDIT_LOG_TRANSFER_TIMEOUT_KEY,
+        DFSConfigKeys.DFS_EDIT_LOG_TRANSFER_TIMEOUT_DEFAULT);
+    throttler = getThrottler(conf);
+  }
+
+  void stopSync() {
+    shouldSync = false;
+    if (syncJournalDaemon != null) {
+      syncJournalDaemon.interrupt();
+    }
+  }
+
+  public void start() {
+    LOG.info("Starting SyncJournal daemon for journal " + jid);
+    if (getOtherJournalNodeProxies()) {
+      startSyncJournalsDaemon();
+    } else {
+      LOG.warn("Failed to start SyncJournal daemon for journal " + jid);
+    }
+  }
+
+  private boolean getOtherJournalNodeProxies() {
+    List<InetSocketAddress> otherJournalNodes = getOtherJournalNodeAddrs();
+    if (otherJournalNodes == null || otherJournalNodes.isEmpty()) {
+      LOG.warn("Other JournalNode addresses not available. Journal Syncing " +
+          "cannot be done");
+      return false;
+    }
+    for (InetSocketAddress addr : otherJournalNodes) {
+      try {
+        otherJNProxies.add(new JournalNodeProxy(addr));
+      } catch (IOException e) {
+        LOG.warn("Could not add proxy for Journal at addresss " + addr, e);
+      }
+    }
+    if (otherJNProxies.isEmpty()) {
+      LOG.error("Cannot sync as there is no other JN available for sync.");
+      return false;
+    }
+    numOtherJNs = otherJNProxies.size();
+    return true;
+  }
+
+  private void startSyncJournalsDaemon() {
+    syncJournalDaemon = new Daemon(new Runnable() {
+      @Override
+      public void run() {
+        while(shouldSync) {
+          try {
+            if (!journal.isFormatted()) {
+              LOG.warn("Journal not formatted. Cannot sync.");
+            } else {
+              syncJournals();
+            }
+            Thread.sleep(journalSyncInterval);
+          } catch (Throwable t) {
+            if (!shouldSync) {
+              if (t instanceof InterruptedException) {
+                LOG.info("Stopping JournalNode Sync.");
+              } else {
+                LOG.warn("JournalNodeSyncer received an exception while " +
+                    "shutting down.", t);
+              }
+              break;
+            } else {
+              if (t instanceof InterruptedException) {
+                LOG.warn("JournalNodeSyncer interrupted", t);
+                break;
+              }
+            }
+            LOG.error(
+                "JournalNodeSyncer daemon received Runtime exception. ", t);
+          }
+        }
+      }
+    });
+    syncJournalDaemon.start();
+  }
+
+  private void syncJournals() {
+    syncWithJournalAtIndex(journalNodeIndexForSync);
+    journalNodeIndexForSync = (journalNodeIndexForSync + 1) % numOtherJNs;
+  }
+
+  private void syncWithJournalAtIndex(int index) {
+    LOG.info("Syncing Journal " + jn.getBoundIpcAddress().getAddress() + ":"
+        + jn.getBoundIpcAddress().getPort() + " with "
+        + otherJNProxies.get(index) + ", journal id: " + jid);
+    final QJournalProtocolPB jnProxy = otherJNProxies.get(index).jnProxy;
+    if (jnProxy == null) {
+      LOG.error("JournalNode Proxy not found.");
+      return;
+    }
+
+    List<RemoteEditLog> thisJournalEditLogs;
+    try {
+      thisJournalEditLogs = journal.getEditLogManifest(0, false).getLogs();
+    } catch (IOException e) {
+      LOG.error("Exception in getting local edit log manifest", e);
+      return;
+    }
+
+    GetEditLogManifestResponseProto editLogManifest;
+    try {
+      editLogManifest = jnProxy.getEditLogManifest(null,
+          GetEditLogManifestRequestProto.newBuilder().setJid(jidProto)
+              .setSinceTxId(0)
+              .setInProgressOk(false).build());
+    } catch (ServiceException e) {
+      LOG.error("Could not sync with Journal at " +
+          otherJNProxies.get(journalNodeIndexForSync), e);
+      return;
+    }
+
+    getMissingLogSegments(thisJournalEditLogs, editLogManifest,
+        otherJNProxies.get(index));
+  }
+
+  private List<InetSocketAddress> getOtherJournalNodeAddrs() {
+    URI uri = null;
+    try {
+      String uriStr = conf.get(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
+      if (uriStr == null || uriStr.isEmpty()) {
+        LOG.warn("Could not construct Shared Edits Uri");
+        return null;
+      }
+      uri = new URI(uriStr);
+      return Util.getLoggerAddresses(uri,
+          Sets.newHashSet(jn.getBoundIpcAddress()));
+    } catch (URISyntaxException e) {
+      LOG.error("The conf property " + DFSConfigKeys
+          .DFS_NAMENODE_SHARED_EDITS_DIR_KEY + " is not set properly.");
+    } catch (IOException e) {
+      LOG.error("Could not parse JournalNode addresses: " + uri);
+    }
+    return null;
+  }
+
+  private JournalIdProto convertJournalId(String journalId) {
+    return QJournalProtocolProtos.JournalIdProto.newBuilder()
+      .setIdentifier(journalId)
+      .build();
+  }
+
+  private void getMissingLogSegments(List<RemoteEditLog> thisJournalEditLogs,
+      GetEditLogManifestResponseProto response,
+      JournalNodeProxy remoteJNproxy) {
+
+    List<RemoteEditLog> otherJournalEditLogs = PBHelper.convert(
+        response.getManifest()).getLogs();
+    if (otherJournalEditLogs == null || otherJournalEditLogs.isEmpty()) {
+      LOG.warn("Journal at " + remoteJNproxy.jnAddr + " has no edit logs");
+      return;
+    }
+    List<RemoteEditLog> missingLogs = getMissingLogList(thisJournalEditLogs,
+        otherJournalEditLogs);
+
+    if (!missingLogs.isEmpty()) {
+      NamespaceInfo nsInfo = jnStorage.getNamespaceInfo();
+
+      for (RemoteEditLog missingLog : missingLogs) {
+        URL url = null;
+        boolean success = false;
+        try {
+          if (remoteJNproxy.httpServerUrl == null) {
+            if (response.hasFromURL()) {
+              URI uri = URI.create(response.getFromURL());
+              remoteJNproxy.httpServerUrl = getHttpServerURI(uri.getScheme(),
+                  uri.getHost(), uri.getPort());
+            } else {
+              remoteJNproxy.httpServerUrl = getHttpServerURI("http",
+                  remoteJNproxy.jnAddr.getHostName(), response.getHttpPort());
+            }
+          }
+
+          String urlPath = GetJournalEditServlet.buildPath(jid, missingLog
+              .getStartTxId(), nsInfo);
+          url = new URL(remoteJNproxy.httpServerUrl, urlPath);
+          success = downloadMissingLogSegment(url, missingLog);
+        } catch (MalformedURLException e) {
+          LOG.error("MalformedURL when download missing log segment", e);
+        } catch (Exception e) {
+          LOG.error("Exception in downloading missing log segment from url " +
+              url, e);
+        }
+        if (!success) {
+          LOG.error("Aborting current sync attempt.");
+          break;
+        }
+      }
+    }
+  }
+
+  /**
+   *  Returns the logs present in otherJournalEditLogs and missing from
+   *  thisJournalEditLogs.
+   */
+  private List<RemoteEditLog> getMissingLogList(
+      List<RemoteEditLog> thisJournalEditLogs,
+      List<RemoteEditLog> otherJournalEditLogs) {
+    if (thisJournalEditLogs.isEmpty()) {
+      return otherJournalEditLogs;
+    }
+
+    List<RemoteEditLog> missingEditLogs = Lists.newArrayList();
+
+    int thisJnIndex = 0, otherJnIndex = 0;
+    int thisJnNumLogs = thisJournalEditLogs.size();
+    int otherJnNumLogs = otherJournalEditLogs.size();
+
+    while (thisJnIndex < thisJnNumLogs && otherJnIndex < otherJnNumLogs) {
+      long localJNstartTxId = thisJournalEditLogs.get(thisJnIndex)
+          .getStartTxId();
+      long remoteJNstartTxId = otherJournalEditLogs.get(otherJnIndex)
+          .getStartTxId();
+
+      if (localJNstartTxId == remoteJNstartTxId) {
+        thisJnIndex++;
+        otherJnIndex++;
+      } else if (localJNstartTxId > remoteJNstartTxId) {
+        missingEditLogs.add(otherJournalEditLogs.get(otherJnIndex));
+        otherJnIndex++;
+      } else {
+        thisJnIndex++;
+      }
+    }
+
+    if (otherJnIndex < otherJnNumLogs) {
+      for (; otherJnIndex < otherJnNumLogs; otherJnIndex++) {
+        missingEditLogs.add(otherJournalEditLogs.get(otherJnIndex));
+      }
+    }
+
+    return missingEditLogs;
+  }
+
+  private URL getHttpServerURI(String scheme, String hostname, int port)
+    throws MalformedURLException {
+    return new URL(scheme, hostname, port, "");
+  }
+
+  /**
+   * Transfer an edit log from one journal node to another for sync-up.
+   */
+  private boolean downloadMissingLogSegment(URL url, RemoteEditLog log) throws
+      IOException {
+    LOG.info("Downloading missing Edit Log from " + url + " to " + jnStorage
+        .getRoot());
+
+    assert log.getStartTxId() > 0 && log.getEndTxId() > 0 : "bad log: " + log;
+    File finalEditsFile = jnStorage.getFinalizedEditsFile(log.getStartTxId(),
+        log.getEndTxId());
+
+    if (finalEditsFile.exists() && FileUtil.canRead(finalEditsFile)) {
+      LOG.info("Skipping download of remote edit log " + log + " since it's" +
+          " already stored locally at " + finalEditsFile);
+      return true;
+    }
+
+    final long milliTime = Time.monotonicNow();
+    File tmpEditsFile = jnStorage.getTemporaryEditsFile(log.getStartTxId(), log
+        .getEndTxId(), milliTime);
+    try {
+      Util.doGetUrl(url, ImmutableList.of(tmpEditsFile), jnStorage, false,
+          logSegmentTransferTimeout, throttler);
+    } catch (IOException e) {
+      LOG.error("Download of Edit Log file for Syncing failed. Deleting temp " +
+          "file: " + tmpEditsFile);
+      if (!tmpEditsFile.delete()) {
+        LOG.warn("Deleting " + tmpEditsFile + " has failed");
+      }
+      return false;
+    }
+    LOG.info("Downloaded file " + tmpEditsFile.getName() + " of size " +
+        tmpEditsFile.length() + " bytes.");
+
+    LOG.debug("Renaming " + tmpEditsFile.getName() + " to "
+        + finalEditsFile.getName());
+    boolean renameSuccess = journal.renameTmpSegment(tmpEditsFile,
+        finalEditsFile, log.getEndTxId());
+    if (!renameSuccess) {
+      //If rename is not successful, delete the tmpFile
+      LOG.debug("Renaming unsuccessful. Deleting temporary file: "
+          + tmpEditsFile);
+      if (!tmpEditsFile.delete()) {
+        LOG.warn("Deleting " + tmpEditsFile + " has failed");
+      }
+      return false;
+    }
+    return true;
+  }
+
+  private static DataTransferThrottler getThrottler(Configuration conf) {
+    long transferBandwidth =
+        conf.getLong(DFSConfigKeys.DFS_EDIT_LOG_TRANSFER_RATE_KEY,
+            DFSConfigKeys.DFS_EDIT_LOG_TRANSFER_RATE_DEFAULT);
+    DataTransferThrottler throttler = null;
+    if (transferBandwidth > 0) {
+      throttler = new DataTransferThrottler(transferBandwidth);
+    }
+    return throttler;
+  }
+
+  private class JournalNodeProxy {
+    private final InetSocketAddress jnAddr;
+    private final QJournalProtocolPB jnProxy;
+    private URL httpServerUrl;
+
+    JournalNodeProxy(InetSocketAddress jnAddr) throws IOException {
+      this.jnAddr = jnAddr;
+      this.jnProxy = RPC.getProxy(QJournalProtocolPB.class,
+          RPC.getProtocolVersion(QJournalProtocolPB.class), jnAddr, conf);
+    }
+
+    @Override
+    public String toString() {
+      return jnAddr.toString();
+    }
+  }
+}
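
The two-pointer walk in getMissingLogList() above can be illustrated with a
small standalone sketch. The class name and the sample start transaction ids
below are made up for illustration only and stand in for RemoteEditLog entries.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class MissingLogListSketch {
  public static void main(String[] args) {
    // startTxIds of the finalized segments on the local and the remote JN,
    // both sorted in ascending order (as the edit log manifests are).
    List<Long> local  = Arrays.asList(1L, 101L, 301L);
    List<Long> remote = Arrays.asList(1L, 101L, 201L, 301L);

    List<Long> missing = new ArrayList<>();
    int i = 0, j = 0;
    while (i < local.size() && j < remote.size()) {
      long l = local.get(i), r = remote.get(j);
      if (l == r) {          // segment present on both sides, advance both
        i++; j++;
      } else if (l > r) {    // remote has a segment the local JN lacks
        missing.add(r);
        j++;
      } else {               // local-only segment, nothing to download
        i++;
      }
    }
    while (j < remote.size()) {  // trailing remote segments are also missing
      missing.add(remote.get(j++));
    }
    System.out.println(missing); // prints [201]
  }
}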

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 1af7877..4493772 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIOException;
 import org.apache.hadoop.util.ToolRunner;
@@ -1010,6 +1011,14 @@ public abstract class Storage extends StorageInfo {
     return false;
   }
 
+  public NamespaceInfo getNamespaceInfo() {
+    return new NamespaceInfo(
+        getNamespaceID(),
+        getClusterID(),
+        null,
+        getCTime());
+  }
+
   /**
    * Return true if the layout of the given storage directory is from a version
    * of Hadoop prior to the introduction of the "current" and "previous"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
index f08c3fa..9c67f0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java
@@ -22,9 +22,11 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
+import java.net.UnknownHostException;
 import java.security.DigestInputStream;
 import java.security.MessageDigest;
 import java.util.ArrayList;
@@ -32,18 +34,23 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.server.namenode.ImageServlet;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 
@@ -143,7 +150,8 @@ public final class Util {
    * storage.
    */
   public static MD5Hash doGetUrl(URL url, List<File> localPaths,
-      Storage dstStorage, boolean getChecksum, int timeout) throws IOException {
+      Storage dstStorage, boolean getChecksum, int timeout,
+      DataTransferThrottler throttler) throws IOException {
     HttpURLConnection connection;
     try {
       connection = (HttpURLConnection)
@@ -176,7 +184,7 @@ public final class Util {
 
     return receiveFile(url.toExternalForm(), localPaths, dstStorage,
         getChecksum, advertisedSize, advertisedDigest, fsImageName, stream,
-        null);
+        throttler);
   }
 
   /**
@@ -268,7 +276,7 @@ public final class Util {
       long xferKb = received / 1024;
       xferCombined += xferSec;
       xferStats.append(
-          String.format(" The fsimage download took %.2fs at %.2f KB/s.",
+          String.format(" The file download took %.2fs at %.2f KB/s.",
               xferSec, xferKb / xferSec));
     } finally {
       stream.close();
@@ -301,7 +309,7 @@ public final class Util {
             advertisedSize);
       }
     }
-    xferStats.insert(0, String.format("Combined time for fsimage download and" +
+    xferStats.insert(0, String.format("Combined time for file download and" +
         " fsync to all disks took %.2fs.", xferCombined));
     LOG.info(xferStats.toString());
 
@@ -350,4 +358,34 @@ public final class Util {
     String header = connection.getHeaderField(MD5_HEADER);
     return (header != null) ? new MD5Hash(header) : null;
   }
+
+  public static List<InetSocketAddress> getAddressesList(URI uri)
+      throws IOException {
+    String authority = uri.getAuthority();
+    Preconditions.checkArgument(authority != null && !authority.isEmpty(),
+        "URI has no authority: " + uri);
+
+    String[] parts = StringUtils.split(authority, ';');
+    for (int i = 0; i < parts.length; i++) {
+      parts[i] = parts[i].trim();
+    }
+
+    List<InetSocketAddress> addrs = Lists.newArrayList();
+    for (String addr : parts) {
+      InetSocketAddress isa = NetUtils.createSocketAddr(
+          addr, DFSConfigKeys.DFS_JOURNALNODE_RPC_PORT_DEFAULT);
+      if (isa.isUnresolved()) {
+        throw new UnknownHostException(addr);
+      }
+      addrs.add(isa);
+    }
+    return addrs;
+  }
+
+  public static List<InetSocketAddress> getLoggerAddresses(URI uri,
+      Set<InetSocketAddress> addrsToExclude) throws IOException {
+    List<InetSocketAddress> addrsList = getAddressesList(uri);
+    addrsList.removeAll(addrsToExclude);
+    return addrsList;
+  }
 }
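
The new getLoggerAddresses() helper splits the authority of the shared-edits
URI on ';' and drops this node's own address. A minimal usage sketch, assuming
a quorum of three local JournalNodes on made-up ports:

import com.google.common.collect.Sets;
import org.apache.hadoop.hdfs.server.common.Util;

import java.net.InetSocketAddress;
import java.net.URI;
import java.util.List;

public class LoggerAddressesSketch {
  public static void main(String[] args) throws Exception {
    // Same format as dfs.namenode.shared.edits.dir for QJM; the hosts and
    // ports are hypothetical.
    URI sharedEditsUri =
        new URI("qjournal://localhost:8485;localhost:8486;localhost:8487/ns1");
    // Exclude this JournalNode's own bound address, as JournalNodeSyncer does.
    List<InetSocketAddress> others = Util.getLoggerAddresses(sharedEditsUri,
        Sets.newHashSet(new InetSocketAddress("localhost", 8485)));
    System.out.println(others);  // the two other JournalNode addresses
  }
}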

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index c79ba4a..63d1a28 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -763,13 +763,13 @@ public class NNStorage extends Storage implements Closeable,
     return new File(sd.getCurrentDir(), getInProgressEditsFileName(startTxId));
   }
   
-  static File getFinalizedEditsFile(StorageDirectory sd,
+  public static File getFinalizedEditsFile(StorageDirectory sd,
       long startTxId, long endTxId) {
     return new File(sd.getCurrentDir(),
         getFinalizedEditsFileName(startTxId, endTxId));
   }
 
-  static File getTemporaryEditsFile(StorageDirectory sd,
+  public static File getTemporaryEditsFile(StorageDirectory sd,
       long startTxId, long endTxId, long timestamp) {
     return new File(sd.getCurrentDir(),
         getTemporaryEditsFileName(startTxId, endTxId, timestamp));
@@ -1106,6 +1106,7 @@ public class NNStorage extends Storage implements Closeable,
     return inspector;
   }
 
+  @Override
   public NamespaceInfo getNamespaceInfo() {
     return new NamespaceInfo(
         getNamespaceID(),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index 5821353..7316414 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -401,7 +401,8 @@ public class TransferFsImage {
   
   public static MD5Hash doGetUrl(URL url, List<File> localPaths,
       Storage dstStorage, boolean getChecksum) throws IOException {
-    return Util.doGetUrl(url, localPaths, dstStorage, getChecksum, timeout);
+    return Util.doGetUrl(url, localPaths, dstStorage, getChecksum, timeout,
+        null);
   }
 
   private static MD5Hash parseMD5Header(HttpServletRequest request) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 03f1a08..652b216 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1279,6 +1279,26 @@
 </property>
 
 <property>
+  <name>dfs.edit.log.transfer.timeout</name>
+  <value>30000</value>
+  <description>
+    Socket timeout for edit log transfer in milliseconds. This timeout
+    should be configured such that normal edit log transfer for journal
+    node syncing can complete successfully.
+  </description>
+</property>
+
+<property>
+  <name>dfs.edit.log.transfer.bandwidthPerSec</name>
+  <value>0</value>
+  <description>
+    Maximum bandwidth used for transferring edit logs between journal nodes
+    for syncing, in bytes per second.
+    A default value of 0 indicates that throttling is disabled.
+  </description>
+</property>
+
+<property>
   <name>dfs.namenode.support.allow.format</name>
   <value>true</value>
   <description>Does HDFS namenode allow itself to be formatted?
@@ -3785,6 +3805,27 @@
 </property>
 
 <property>
+  <name>dfs.journalnode.enable.sync</name>
+  <value>true</value>
+  <description>
+    If true, the journal nodes will sync with each other. The journal nodes
+    will periodically gossip with other journal nodes to compare edit log
+    manifests and, if they detect any missing log segment, will download
+    it from the other journal nodes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.journalnode.sync.interval</name>
+  <value>120000</value>
+  <description>
+    Time interval, in milliseconds, between two Journal Node syncs.
+    This configuration takes effect only if the journalnode sync is enabled
+    by setting the configuration parameter dfs.journalnode.enable.sync to true.
+  </description>
+</property>
+
+<property>
   <name>dfs.journalnode.kerberos.internal.spnego.principal</name>
   <value></value>
   <description>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
index 7b974c3..2314e22 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java
@@ -255,4 +255,12 @@ public class MiniJournalCluster {
       }
     }
   }
+
+  public void setNamenodeSharedEditsConf(String jid) {
+    URI quorumJournalURI = getQuorumJournalURI(jid);
+    for (int i = 0; i < nodes.length; i++) {
+      nodes[i].node.getConf().set(DFSConfigKeys
+          .DFS_NAMENODE_SHARED_EDITS_DIR_KEY, quorumJournalURI.toString());
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
index 0764f12..c163894 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniQJMHACluster.java
@@ -101,6 +101,7 @@ public class MiniQJMHACluster {
         journalCluster = new MiniJournalCluster.Builder(conf).format(true)
             .build();
         journalCluster.waitActive();
+        journalCluster.setNamenodeSharedEditsConf(NAMESERVICE);
         URI journalURI = journalCluster.getQuorumJournalURI(NAMESERVICE);
 
         // start cluster with specified NameNodes

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d4bcfe/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestJournalNodeSync.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestJournalNodeSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestJournalNodeSync.java
new file mode 100644
index 0000000..5375b02
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestJournalNodeSync.java
@@ -0,0 +1,264 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal;
+
+import com.google.common.base.Supplier;
+import com.google.common.collect.Lists;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
+import static org.apache.hadoop.hdfs.server.namenode.FileJournalManager
+    .getLogFile;
+
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.Random;
+
+/**
+ * Unit test for JournalNode syncing of missing edit log segments.
+ */
+public class TestJournalNodeSync {
+  private MiniQJMHACluster qjmhaCluster;
+  private MiniDFSCluster dfsCluster;
+  private MiniJournalCluster jCluster;
+  private FileSystem fs;
+  private FSNamesystem namesystem;
+  private int editsPerformed = 0;
+  private final String jid = "ns1";
+
+  @Before
+  public void setUpMiniCluster() throws IOException {
+    final Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_JOURNALNODE_SYNC_INTERVAL_KEY, 1000L);
+    qjmhaCluster = new MiniQJMHACluster.Builder(conf).setNumNameNodes(2)
+      .build();
+    dfsCluster = qjmhaCluster.getDfsCluster();
+    jCluster = qjmhaCluster.getJournalCluster();
+
+    dfsCluster.transitionToActive(0);
+    fs = dfsCluster.getFileSystem(0);
+    namesystem = dfsCluster.getNamesystem(0);
+  }
+
+  @After
+  public void shutDownMiniCluster() throws IOException {
+    if (qjmhaCluster != null) {
+      qjmhaCluster.shutdown();
+    }
+  }
+
+  @Test(timeout=30000)
+  public void testJournalNodeSync() throws Exception {
+    File firstJournalDir = jCluster.getJournalDir(0, jid);
+    File firstJournalCurrentDir = new StorageDirectory(firstJournalDir)
+        .getCurrentDir();
+
+    // Generate some edit logs and delete one.
+    long firstTxId = generateEditLog();
+    generateEditLog();
+
+    File missingLog = deleteEditLog(firstJournalCurrentDir, firstTxId);
+
+    GenericTestUtils.waitFor(editLogExists(Lists.newArrayList(missingLog)),
+        500, 10000);
+  }
+
+  @Test(timeout=30000)
+  public void testSyncForMultipleMissingLogs() throws Exception {
+    File firstJournalDir = jCluster.getJournalDir(0, jid);
+    File firstJournalCurrentDir = new StorageDirectory(firstJournalDir)
+        .getCurrentDir();
+
+    // Generate some edit logs and delete two.
+    long firstTxId = generateEditLog();
+    long nextTxId = generateEditLog();
+
+    List<File> missingLogs = Lists.newArrayList();
+    missingLogs.add(deleteEditLog(firstJournalCurrentDir, firstTxId));
+    missingLogs.add(deleteEditLog(firstJournalCurrentDir, nextTxId));
+
+    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 10000);
+  }
+
+  @Test(timeout=30000)
+  public void testSyncForDiscontinuousMissingLogs() throws Exception {
+    File firstJournalDir = jCluster.getJournalDir(0, jid);
+    File firstJournalCurrentDir = new StorageDirectory(firstJournalDir)
+        .getCurrentDir();
+
+    // Generate some edit logs and delete two discontinuous logs.
+    long firstTxId = generateEditLog();
+    generateEditLog();
+    long nextTxId = generateEditLog();
+
+    List<File> missingLogs = Lists.newArrayList();
+    missingLogs.add(deleteEditLog(firstJournalCurrentDir, firstTxId));
+    missingLogs.add(deleteEditLog(firstJournalCurrentDir, nextTxId));
+
+    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 10000);
+  }
+
+  @Test(timeout=30000)
+  public void testMultipleJournalsMissingLogs() throws Exception {
+    File firstJournalDir = jCluster.getJournalDir(0, jid);
+    File firstJournalCurrentDir = new StorageDirectory(firstJournalDir)
+        .getCurrentDir();
+
+    File secondJournalDir = jCluster.getJournalDir(1, jid);
+    StorageDirectory sd = new StorageDirectory(secondJournalDir);
+    File secondJournalCurrentDir = sd.getCurrentDir();
+
+    // Generate some edit logs and delete one log from two journals.
+    long firstTxId = generateEditLog();
+    generateEditLog();
+
+    List<File> missingLogs = Lists.newArrayList();
+    missingLogs.add(deleteEditLog(firstJournalCurrentDir, firstTxId));
+    missingLogs.add(deleteEditLog(secondJournalCurrentDir, firstTxId));
+
+    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 10000);
+  }
+
+  @Test(timeout=60000)
+  public void testMultipleJournalsMultipleMissingLogs() throws Exception {
+    File firstJournalDir = jCluster.getJournalDir(0, jid);
+    File firstJournalCurrentDir = new StorageDirectory(firstJournalDir)
+        .getCurrentDir();
+
+    File secondJournalDir = jCluster.getJournalDir(1, jid);
+    File secondJournalCurrentDir = new StorageDirectory(secondJournalDir)
+        .getCurrentDir();
+
+    File thirdJournalDir = jCluster.getJournalDir(2, jid);
+    File thirdJournalCurrentDir = new StorageDirectory(thirdJournalDir)
+        .getCurrentDir();
+
+    // Generate some edit logs and delete multiple logs in multiple journals.
+    long firstTxId = generateEditLog();
+    long secondTxId = generateEditLog();
+    long thirdTxId = generateEditLog();
+
+    List<File> missingLogs = Lists.newArrayList();
+    missingLogs.add(deleteEditLog(firstJournalCurrentDir, firstTxId));
+    missingLogs.add(deleteEditLog(secondJournalCurrentDir, firstTxId));
+    missingLogs.add(deleteEditLog(secondJournalCurrentDir, secondTxId));
+    missingLogs.add(deleteEditLog(thirdJournalCurrentDir, thirdTxId));
+
+    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
+  }
+
+  // Test JournalNode Sync by randomly deleting edit logs from one or two of
+  // the journals.
+  @Test(timeout=60000)
+  public void testRandomJournalMissingLogs() throws Exception {
+    Random randomJournal = new Random();
+
+    List<File> journalCurrentDirs = Lists.newArrayList();
+
+    for (int i = 0; i < 3; i++) {
+      journalCurrentDirs.add(new StorageDirectory(jCluster.getJournalDir(i,
+          jid)).getCurrentDir());
+    }
+
+    int count = 0;
+    long lastStartTxId;
+    int journalIndex;
+    List<File> missingLogs = Lists.newArrayList();
+    while (count < 5) {
+      lastStartTxId = generateEditLog();
+
+      // Delete the last edit log segment from randomly selected journal node
+      journalIndex = randomJournal.nextInt(3);
+      missingLogs.add(deleteEditLog(journalCurrentDirs.get(journalIndex),
+          lastStartTxId));
+
+      // Delete the last edit log segment from two journals for some logs
+      if (count % 2 == 0) {
+        journalIndex = (journalIndex + 1) % 3;
+        missingLogs.add(deleteEditLog(journalCurrentDirs.get(journalIndex),
+            lastStartTxId));
+      }
+
+      count++;
+    }
+
+    GenericTestUtils.waitFor(editLogExists(missingLogs), 500, 30000);
+  }
+
+  private File deleteEditLog(File currentDir, long startTxId)
+      throws IOException {
+    EditLogFile logFile = getLogFile(currentDir, startTxId);
+    while (logFile.isInProgress()) {
+      dfsCluster.getNameNode(0).getRpcServer().rollEditLog();
+      logFile = getLogFile(currentDir, startTxId);
+    }
+    File deleteFile = logFile.getFile();
+    Assert.assertTrue("Couldn't delete edit log file", deleteFile.delete());
+
+    return deleteFile;
+  }
+
+  /**
+   * Do a mutative metadata operation on the file system.
+   *
+   * @return true if the operation was successful, false otherwise.
+   */
+  private boolean doAnEdit() throws IOException {
+    return fs.mkdirs(new Path("/tmp", Integer.toString(editsPerformed++)));
+  }
+
+  /**
+   * Does an edit and rolls the Edit Log.
+   *
+   * @return the startTxId of the next segment after rolling edits.
+   */
+  private long generateEditLog() throws IOException {
+    long startTxId = namesystem.getFSImage().getEditLog().getLastWrittenTxId();
+    Assert.assertTrue("Failed to do an edit", doAnEdit());
+    dfsCluster.getNameNode(0).getRpcServer().rollEditLog();
+    return startTxId;
+  }
+
+  private Supplier<Boolean> editLogExists(List<File> editLogs) {
+    Supplier<Boolean> supplier = new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        for (File editLog : editLogs) {
+          if (!editLog.exists()) {
+            return false;
+          }
+        }
+        return true;
+      }
+    };
+    return supplier;
+  }
+}
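
setUpMiniCluster() above shortens dfs.journalnode.sync.interval through
DFSConfigKeys; the other keys introduced in hdfs-default.xml can be tuned the
same way. A minimal sketch, with arbitrary values chosen only for illustration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class JournalSyncConfSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Key name as listed in hdfs-default.xml; syncing is on by default.
    conf.setBoolean("dfs.journalnode.enable.sync", true);
    // Sync every 30 seconds instead of the 120-second default.
    conf.setLong(DFSConfigKeys.DFS_JOURNALNODE_SYNC_INTERVAL_KEY, 30000L);
    // Allow up to 60 seconds for a single edit log segment transfer.
    conf.setInt(DFSConfigKeys.DFS_EDIT_LOG_TRANSFER_TIMEOUT_KEY, 60000);
    // Cap transfer bandwidth at ~1 MB/s; the default of 0 disables throttling.
    conf.setLong(DFSConfigKeys.DFS_EDIT_LOG_TRANSFER_RATE_KEY, 1024L * 1024L);
    System.out.println(conf.get(DFSConfigKeys.DFS_JOURNALNODE_SYNC_INTERVAL_KEY));
  }
}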


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org