Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/04/10 06:24:57 UTC

[01/47] hadoop git commit: YARN-2429. TestAMRMTokens.testTokenExpiry fails intermittently with error message: Invalid AMRMToken (zxu via rkanter)

Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 68c6232f8 -> 1581b37a5


YARN-2429. TestAMRMTokens.testTokenExpiry fails intermittently with error message: Invalid AMRMToken (zxu via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64e5adc5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64e5adc5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64e5adc5

Branch: refs/heads/YARN-2928
Commit: 64e5adc59982413084b42e1326f96f0b8c3a5a3b
Parents: 68c6232
Author: Robert Kanter <rk...@apache.org>
Authored: Mon Apr 6 14:11:20 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:55 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                | 3 +++
 .../yarn/server/resourcemanager/security/TestAMRMTokens.java   | 6 ++++++
 2 files changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64e5adc5/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 59a9165..c559591 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -207,6 +207,9 @@ Release 2.8.0 - UNRELEASED
     YARN-2666. TestFairScheduler.testContinuousScheduling fails Intermittently.
     (Zhihai Xu via ozawa)
 
+    YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with
+    error message:Invalid AMRMToken (zxu via rkanter)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64e5adc5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
index 0be72e3..5dfd092 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
@@ -107,6 +107,12 @@ public class TestAMRMTokens {
   @SuppressWarnings("unchecked")
   @Test
   public void testTokenExpiry() throws Exception {
+    conf.setLong(
+        YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
+        YarnConfiguration.
+            DEFAULT_RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS);
+    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,
+        YarnConfiguration.DEFAULT_RM_AM_EXPIRY_INTERVAL_MS);
 
     MyContainerManager containerManager = new MyContainerManager();
     final MockRMWithAMS rm =
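
For context, the fix pins both timing knobs back to their defaults before the test runs; presumably a shortened rolling interval left in the shared Configuration by an earlier test could otherwise roll the AMRMToken master key mid-test and trigger the "Invalid AMRMToken" failure. A minimal sketch of the same guard, using the real YarnConfiguration constants but hypothetical test scaffolding:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class TokenExpirySetupSketch {
  // Hedged sketch: reset both intervals to their defaults so the master
  // key rolls far outside the test's lifetime.
  static Configuration pinTokenIntervals(Configuration conf) {
    conf.setLong(
        YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
        YarnConfiguration.DEFAULT_RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS);
    conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,
        YarnConfiguration.DEFAULT_RM_AM_EXPIRY_INTERVAL_MS);
    return conf;
  }
}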


[14/47] hadoop git commit: HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it can be easily overridden. (Contributed by Walter Su)

Posted by zj...@apache.org.
HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it can be easily overridden. (Contributed by Walter Su)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/49373365
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/49373365
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/49373365

Branch: refs/heads/YARN-2928
Commit: 493733658ed1be073638afa13b9a67c5b4306d10
Parents: 41b7a26
Author: Vinayakumar B <vi...@apache.org>
Authored: Wed Apr 8 09:56:37 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:58 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../BlockPlacementPolicyDefault.java            | 87 ++++++++++++--------
 2 files changed, 54 insertions(+), 36 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/49373365/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f194bd7..ac508cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -385,6 +385,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to
     HdfsClientConfigKeys.Retry.  (szetszwo)
 
+    HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it
+    can be easily overrided. (Walter Su via vinayakumarb)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/49373365/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 3262772..09db986 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -333,41 +333,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
             + " unavailableStorages=" + unavailableStorages
             + ", storagePolicy=" + storagePolicy);
       }
-
-      if (numOfResults == 0) {
-        writer = chooseLocalStorage(writer, excludedNodes, blocksize,
-            maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
-                .getDatanodeDescriptor();
-        if (--numOfReplicas == 0) {
-          return writer;
-        }
-      }
-      final DatanodeDescriptor dn0 = results.get(0).getDatanodeDescriptor();
-      if (numOfResults <= 1) {
-        chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
-            results, avoidStaleNodes, storageTypes);
-        if (--numOfReplicas == 0) {
-          return writer;
-        }
-      }
-      if (numOfResults <= 2) {
-        final DatanodeDescriptor dn1 = results.get(1).getDatanodeDescriptor();
-        if (clusterMap.isOnSameRack(dn0, dn1)) {
-          chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
-              results, avoidStaleNodes, storageTypes);
-        } else if (newBlock){
-          chooseLocalRack(dn1, excludedNodes, blocksize, maxNodesPerRack,
-              results, avoidStaleNodes, storageTypes);
-        } else {
-          chooseLocalRack(writer, excludedNodes, blocksize, maxNodesPerRack,
-              results, avoidStaleNodes, storageTypes);
-        }
-        if (--numOfReplicas == 0) {
-          return writer;
-        }
-      }
-      chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
-          maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+      writer = chooseTargetInOrder(numOfReplicas, writer, excludedNodes, blocksize,
+          maxNodesPerRack, results, avoidStaleNodes, newBlock, storageTypes);
     } catch (NotEnoughReplicasException e) {
       final String message = "Failed to place enough replicas, still in need of "
           + (totalReplicasExpected - results.size()) + " to reach "
@@ -422,7 +389,55 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     }
     return writer;
   }
-    
+
+  protected Node chooseTargetInOrder(int numOfReplicas, 
+                                 Node writer,
+                                 final Set<Node> excludedNodes,
+                                 final long blocksize,
+                                 final int maxNodesPerRack,
+                                 final List<DatanodeStorageInfo> results,
+                                 final boolean avoidStaleNodes,
+                                 final boolean newBlock,
+                                 EnumMap<StorageType, Integer> storageTypes)
+                                 throws NotEnoughReplicasException {
+    final int numOfResults = results.size();
+    if (numOfResults == 0) {
+      writer = chooseLocalStorage(writer, excludedNodes, blocksize,
+          maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
+          .getDatanodeDescriptor();
+      if (--numOfReplicas == 0) {
+        return writer;
+      }
+    }
+    final DatanodeDescriptor dn0 = results.get(0).getDatanodeDescriptor();
+    if (numOfResults <= 1) {
+      chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
+          results, avoidStaleNodes, storageTypes);
+      if (--numOfReplicas == 0) {
+        return writer;
+      }
+    }
+    if (numOfResults <= 2) {
+      final DatanodeDescriptor dn1 = results.get(1).getDatanodeDescriptor();
+      if (clusterMap.isOnSameRack(dn0, dn1)) {
+        chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
+            results, avoidStaleNodes, storageTypes);
+      } else if (newBlock){
+        chooseLocalRack(dn1, excludedNodes, blocksize, maxNodesPerRack,
+            results, avoidStaleNodes, storageTypes);
+      } else {
+        chooseLocalRack(writer, excludedNodes, blocksize, maxNodesPerRack,
+            results, avoidStaleNodes, storageTypes);
+      }
+      if (--numOfReplicas == 0) {
+        return writer;
+      }
+    }
+    chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
+        maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+    return writer;
+  }
+  
   /**
    * Choose <i>localMachine</i> as the target.
    * if <i>localMachine</i> is not available, 


[05/47] hadoop git commit: Move YARN-3273 from 2.8 to 2.7.

Posted by zj...@apache.org.
Move YARN-3273 from 2.8 to 2.7.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ed6b71a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ed6b71a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ed6b71a

Branch: refs/heads/YARN-2928
Commit: 5ed6b71aa6e26034cd6eed8c6469a76dc5369a51
Parents: 64e5adc
Author: Zhijie Shen <zj...@apache.org>
Authored: Mon Apr 6 12:28:31 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:56 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ed6b71a/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c559591..1142baf 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -127,9 +127,6 @@ Release 2.8.0 - UNRELEASED
     YARN-3243. CapacityScheduler should pass headroom from parent to children
     to make sure ParentQueue obey its capacity limits. (Wangda Tan via jianhe)
 
-    YARN-3273. Improve scheduler UI to facilitate scheduling analysis and
-    debugging. (Rohith Sharmaks via jianhe)
-
     YARN-3357. Move TestFifoScheduler to FIFO package. (Rohith Sharmaks 
     via devaraj)
 
@@ -541,6 +538,9 @@ Release 2.7.0 - UNRELEASED
     YARN-2777. Mark the end of individual log in aggregated log.
     (Varun Saxena via xgong)
 
+    YARN-3273. Improve scheduler UI to facilitate scheduling analysis and
+    debugging. (Rohith Sharmaks via jianhe)
+
   OPTIMIZATIONS
 
     YARN-2990. FairScheduler's delay-scheduling always waits for node-local and 


[18/47] hadoop git commit: HDFS-8046. Allow better control of getContentSummary. Contributed by Kihwal Lee.

Posted by zj...@apache.org.
HDFS-8046. Allow better control of getContentSummary. Contributed by Kihwal Lee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c7c6081
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c7c6081
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c7c6081

Branch: refs/heads/YARN-2928
Commit: 1c7c60817bc2041dfed56cb3ddb56e93689f0e49
Parents: 79e0de5
Author: Kihwal Lee <ki...@apache.org>
Authored: Wed Apr 8 15:38:29 2015 -0500
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:59 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt               |  2 ++
 .../main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 +++-
 .../server/namenode/ContentSummaryComputationContext.java | 10 +++++++---
 .../hdfs/server/namenode/FSDirStatAndListingOp.java       |  2 +-
 .../apache/hadoop/hdfs/server/namenode/FSDirectory.java   |  8 ++++++++
 5 files changed, 21 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c7c6081/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c983849..35e9d54 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -394,6 +394,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8085. Move CorruptFileBlockIterator to a new hdfs.client.impl package.
     (szetszwo)
 
+    HDFS-8046. Allow better control of getContentSummary (kihwal)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c7c6081/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 6be61f6..869df32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -199,7 +199,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int     DFS_LIST_LIMIT_DEFAULT = 1000;
   public static final String  DFS_CONTENT_SUMMARY_LIMIT_KEY = "dfs.content-summary.limit";
-  public static final int     DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 0;
+  public static final int     DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 5000;
+  public static final String  DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_KEY = "dfs.content-summary.sleep-microsec";
+  public static final long    DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT = 500;
   public static final String  DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY = "dfs.datanode.failed.volumes.tolerated";
   public static final int     DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT = 0;
   public static final String  DFS_DATANODE_SYNCONCLOSE_KEY = "dfs.datanode.synconclose";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c7c6081/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 31f34b9..5739835 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -32,6 +32,8 @@ public class ContentSummaryComputationContext {
   private long nextCountLimit = 0;
   private long limitPerRun = 0;
   private long yieldCount = 0;
+  private long sleepMilliSec = 0;
+  private int sleepNanoSec = 0;
 
   /**
    * Constructor
@@ -43,17 +45,19 @@ public class ContentSummaryComputationContext {
    *        no limit (i.e. no yielding)
    */
   public ContentSummaryComputationContext(FSDirectory dir,
-      FSNamesystem fsn, long limitPerRun) {
+      FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
     this.dir = dir;
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
     this.nextCountLimit = limitPerRun;
     this.counts = new ContentCounts.Builder().build();
+    this.sleepMilliSec = sleepMicroSec/1000;
+    this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
   }
 
   /** Constructor for blocking computation. */
   public ContentSummaryComputationContext(BlockStoragePolicySuite bsps) {
-    this(null, null, 0);
+    this(null, null, 0, 1000);
     this.bsps = bsps;
   }
 
@@ -105,7 +109,7 @@ public class ContentSummaryComputationContext {
     fsn.readUnlock();
 
     try {
-      Thread.sleep(1);
+      Thread.sleep(sleepMilliSec, sleepNanoSec);
     } catch (InterruptedException ie) {
     } finally {
       // reacquire

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c7c6081/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 43c2de3..850b3bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -496,7 +496,7 @@ class FSDirStatAndListingOp {
         // processed. 0 means disabled. I.e. blocking for the entire duration.
         ContentSummaryComputationContext cscc =
             new ContentSummaryComputationContext(fsd, fsd.getFSNamesystem(),
-                fsd.getContentCountLimit());
+                fsd.getContentCountLimit(), fsd.getContentSleepMicroSec());
         ContentSummary cs = targetNode.computeAndConvertContentSummary(cscc);
         fsd.addYieldCount(cscc.getYieldCount());
         return cs;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c7c6081/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 7eea343..966cf3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -136,6 +136,7 @@ public class FSDirectory implements Closeable {
   private final int maxDirItems;
   private final int lsLimit;  // max list limit
   private final int contentCountLimit; // max content summary counts per run
+  private final long contentSleepMicroSec;
   private final INodeMap inodeMap; // Synchronized by dirLock
   private long yieldCount = 0; // keep track of lock yield count.
 
@@ -264,6 +265,9 @@ public class FSDirectory implements Closeable {
     this.contentCountLimit = conf.getInt(
         DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,
         DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_DEFAULT);
+    this.contentSleepMicroSec = conf.getLong(
+        DFSConfigKeys.DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_KEY,
+        DFSConfigKeys.DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT);
     
     // filesystem limits
     this.maxComponentLength = conf.getInt(
@@ -345,6 +349,10 @@ public class FSDirectory implements Closeable {
     return contentCountLimit;
   }
 
+  long getContentSleepMicroSec() {
+    return contentSleepMicroSec;
+  }
+
   int getInodeXAttrsLimit() {
     return inodeXAttrsLimit;
   }
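
Java has no microsecond sleep, so the new dfs.content-summary.sleep-microsec value is split into the (milliseconds, nanoseconds) pair accepted by Thread.sleep(long, int), exactly as the ContentSummaryComputationContext constructor above does. A small self-contained illustration (the class name is ours, not Hadoop's):

/** Hedged sketch of the microsecond split used by the patch above. */
public class MicroSleepSketch {
  public static void main(String[] args) throws InterruptedException {
    long sleepMicroSec = 2500;                  // e.g. 2.5 ms between batches
    long sleepMilliSec = sleepMicroSec / 1000;  // -> 2 ms
    int sleepNanoSec = (int) ((sleepMicroSec % 1000) * 1000);  // -> 500000 ns
    // The default of 500 microseconds becomes sleep(0, 500000); note the
    // JVM may round sub-millisecond sleeps up to a full millisecond.
    Thread.sleep(sleepMilliSec, sleepNanoSec);
  }
}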


[22/47] hadoop git commit: YARN-2901 addendum: Fixed findbugs warning caused by the previous patch

Posted by zj...@apache.org.
YARN-2901 addendum: Fixed findbugs warning caused by the previous patch


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8812777
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8812777
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8812777

Branch: refs/heads/YARN-2928
Commit: c88127771dc0ed112838e042c496b507355d6e45
Parents: e8098c1
Author: Wangda Tan <wa...@apache.org>
Authored: Wed Apr 8 11:02:06 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:59 2015 -0700

----------------------------------------------------------------------
 .../hadoop-yarn/dev-support/findbugs-exclude.xml         | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8812777/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
index 943ecb0..375d19c 100644
--- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml
@@ -461,5 +461,14 @@
     <Method name="recoverContainersOnNode" />
     <Bug pattern="RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE" />
   </Match>
-
+  
+  <!-- Following fields are used in ErrorsAndWarningsBlock, which is not a part of analysis of findbugs -->
+  <Match>
+    <Class name="org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender$Element" />
+    <Or>
+      <Field name="count" />
+      <Field name="timestampSeconds" />
+    </Or>
+    <Bug pattern="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD" />
+  </Match>
 </FindBugsFilter>
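
The excluded URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD warning fires when a public field is written but never read from Java code. A hedged sketch of the shape being excluded (the real Element is nested inside Log4jWarningErrorMetricsAppender; this standalone stand-in and its field types are assumed for illustration):

// Fields like these look unread to findbugs because they are consumed
// outside its analysis (here, by the ErrorsAndWarningsBlock web UI),
// so the filter above suppresses the warning instead of deleting them.
public class Element {
  public long count;
  public long timestampSeconds;
}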


[44/47] hadoop git commit: Fix CHANGES.txt for HDFS-8091

Posted by zj...@apache.org.
Fix CHANGES.txt for HDFS-8091


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed83be68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed83be68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed83be68

Branch: refs/heads/YARN-2928
Commit: ed83be685581581aaf031b3d24936a2b1cd2ac33
Parents: 1767116
Author: Arun Suresh <as...@apache.org>
Authored: Thu Apr 9 13:51:00 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:56 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed83be68/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index df6d90a..979534e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -153,9 +153,6 @@ Trunk (Unreleased)
 
   BUG FIXES
  
-    HDFS-8091: ACLStatus and XAttributes should be presented to INodeAttributesProvider
-               before returning to client (asuresh)
-
     HADOOP-9635 Fix potential Stack Overflow in DomainSocket.c (V. Karthik Kumar
                 via cmccabe)
 
@@ -463,6 +460,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and
     even for failed transfers (vinayakumarb)
 
+    HDFS-8091: ACLStatus and XAttributes should be presented to
+    INodeAttributesProvider before returning to client (asuresh)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[20/47] hadoop git commit: HADOOP-11781. fix race conditions and add URL support to smart-apply-patch.sh (Raymie Stata via aw)

Posted by zj...@apache.org.
HADOOP-11781. fix race conditions and add URL support to smart-apply-patch.sh (Raymie Stata via aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6434b15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6434b15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6434b15

Branch: refs/heads/YARN-2928
Commit: b6434b15f92ad0d250d60e3580807bd8dc37e55b
Parents: 0a4a296
Author: Allen Wittenauer <aw...@apache.org>
Authored: Wed Apr 8 10:05:25 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:59 2015 -0700

----------------------------------------------------------------------
 dev-support/smart-apply-patch.sh                | 45 ++++++++++++++++----
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 2 files changed, 40 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6434b15/dev-support/smart-apply-patch.sh
----------------------------------------------------------------------
diff --git a/dev-support/smart-apply-patch.sh b/dev-support/smart-apply-patch.sh
index 03bc4f8..449fc22 100755
--- a/dev-support/smart-apply-patch.sh
+++ b/dev-support/smart-apply-patch.sh
@@ -11,8 +11,6 @@
 #   See the License for the specific language governing permissions and
 #   limitations under the License.
 
-set -e
-
 #
 # Determine if the patch file is a git diff file with prefixes.
 # These files are generated via "git diff" *without* the --no-prefix option.
@@ -54,6 +52,7 @@ if [ -z "$PATCH_FILE" ]; then
   exit 1
 fi
 
+TMPDIR=${TMPDIR:-/tmp}
 PATCH=${PATCH:-patch} # allow overriding patch binary
 
 # Cleanup handler for temporary files
@@ -66,11 +65,41 @@ trap "cleanup 1" HUP INT QUIT TERM
 
 # Allow passing "-" for stdin patches
 if [ "$PATCH_FILE" == "-" ]; then
-  PATCH_FILE=/tmp/tmp.in.$$
+  PATCH_FILE="$TMPDIR/smart-apply.in.$RANDOM"
   cat /dev/fd/0 > $PATCH_FILE
   TOCLEAN="$TOCLEAN $PATCH_FILE"
 fi
 
+ISSUE_RE='^(HADOOP|YARN|MAPREDUCE|HDFS)-[0-9]+$'
+if [[ ${PATCH_FILE} =~ ^http || ${PATCH_FILE} =~ ${ISSUE_RE} ]]; then
+  # Allow downloading of patches
+  PFILE="$TMPDIR/smart-apply.in.$RANDOM"
+  TOCLEAN="$TOCLEAN $PFILE"
+  if [[ ${PATCH_FILE} =~ ^http ]]; then
+    patchURL="${PATCH_FILE}"
+  else # Get URL of patch from JIRA
+    wget -q -O "${PFILE}" "http://issues.apache.org/jira/browse/${PATCH_FILE}"
+    if [[ $? != 0 ]]; then
+      echo "Unable to determine what ${PATCH_FILE} may reference." 1>&2
+      cleanup 1
+    elif [[ $(grep -c 'Patch Available' "${PFILE}") == 0 ]]; then
+      echo "${PATCH_FILE} is not \"Patch Available\".  Exiting." 1>&2
+      cleanup 1
+    fi
+    relativePatchURL=$(grep -o '"/jira/secure/attachment/[0-9]*/[^"]*' "${PFILE}" | grep -v -e 'htm[l]*$' | sort | tail -1 | grep -o '/jira/secure/attachment/[0-9]*/[^"]*')
+    patchURL="http://issues.apache.org${relativePatchURL}"
+  fi
+  if [[ -n $DRY_RUN ]]; then
+    echo "Downloading ${patchURL}"
+  fi
+  wget -q -O "${PFILE}" "${patchURL}"
+  if [[ $? != 0 ]]; then
+    echo "${PATCH_FILE} could not be downloaded." 1>&2
+    cleanup 1
+  fi
+  PATCH_FILE="${PFILE}"
+fi
+
 # Special case for git-diff patches without --no-prefix
 if is_git_diff_with_prefix "$PATCH_FILE"; then
   GIT_FLAGS="--binary -p1 -v"
@@ -85,7 +114,7 @@ if is_git_diff_with_prefix "$PATCH_FILE"; then
 fi
 
 # Come up with a list of changed files into $TMP
-TMP=/tmp/tmp.paths.$$
+TMP="$TMPDIR/smart-apply.paths.$RANDOM"
 TOCLEAN="$TOCLEAN $TMP"
 
 if $PATCH -p0 -E --dry-run < $PATCH_FILE 2>&1 > $TMP; then
@@ -94,10 +123,10 @@ if $PATCH -p0 -E --dry-run < $PATCH_FILE 2>&1 > $TMP; then
   # is adding new files and they would apply anywhere. So try to guess the
   # correct place to put those files.
 
-  TMP2=/tmp/tmp.paths.2.$$
+  TMP2="$TMPDIR/smart-apply.paths.2.$RANDOM"
   TOCLEAN="$TOCLEAN $TMP2"
 
-  egrep '^patching file |^checking file ' $TMP | awk '{print $3}' | grep -v /dev/null | sort | uniq > $TMP2
+  egrep '^patching file |^checking file ' $TMP | awk '{print $3}' | grep -v /dev/null | sort -u > $TMP2
 
   if [ ! -s $TMP2 ]; then
     echo "Error: Patch dryrun couldn't detect changes the patch would make. Exiting."
@@ -125,8 +154,8 @@ if $PATCH -p0 -E --dry-run < $PATCH_FILE 2>&1 > $TMP; then
       sed -i -e 's,^[ab]/,,' $TMP2
     fi
 
-    PREFIX_DIRS_AND_FILES=$(cut -d '/' -f 1 | sort | uniq)
-
+    PREFIX_DIRS_AND_FILES=$(cut -d '/' -f 1 $TMP2 | sort -u)
+ 
     # if we are at the project root then nothing more to do
     if [[ -d hadoop-common-project ]]; then
       echo Looks like this is being run at project root

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6434b15/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 412bad7..ce292b2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -194,6 +194,9 @@ Trunk (Unreleased)
     HADOOP-11524. hadoop_do_classpath_subcommand throws a shellcheck warning.
     (cnauroth)
 
+    HADOOP-11781. fix race conditions and add URL support to
+    smart-apply-patch.sh (Raymie Stata via aw)
+
   BUG FIXES
 
     HADOOP-11473. test-patch says "-1 overall" even when all checks are +1


[39/47] hadoop git commit: YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers and node-label columns. (Jason Lowe via wangda)

Posted by zj...@apache.org.
YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers and node-label columns. (Jason Lowe via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90f14aea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90f14aea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90f14aea

Branch: refs/heads/YARN-2928
Commit: 90f14aea3d35be25b623c14e8f163cbe4657f80b
Parents: aa80ddd
Author: Wangda Tan <wa...@apache.org>
Authored: Thu Apr 9 10:35:12 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:55 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                   |  3 +++
 .../server/resourcemanager/webapp/NodesPage.java  | 18 +++++++++++-------
 2 files changed, 14 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90f14aea/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b339a7e..5e77b20 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -968,6 +968,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3430. Made headroom data available on app attempt page of RM WebUI.
     (Xuan Gong via zjshen)
 
+    YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers 
+    and node-label column (Jason Lowe via wangda)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90f14aea/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index 13e0835..a2bab0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -65,14 +65,18 @@ class NodesPage extends RmView {
       String type = $(NODE_STATE);
       String labelFilter = $(NODE_LABEL, CommonNodeLabelsManager.ANY).trim();
       TBODY<TABLE<Hamlet>> tbody =
-          html.table("#nodes").thead().tr().th(".nodelabels", "Node Labels")
-              .th(".rack", "Rack").th(".state", "Node State")
+          html.table("#nodes").thead().tr()
+              .th(".nodelabels", "Node Labels")
+              .th(".rack", "Rack")
+              .th(".state", "Node State")
               .th(".nodeaddress", "Node Address")
               .th(".nodehttpaddress", "Node HTTP Address")
               .th(".lastHealthUpdate", "Last health-update")
               .th(".healthReport", "Health-report")
-              .th(".containers", "Containers").th(".mem", "Mem Used")
-              .th(".mem", "Mem Avail").th(".vcores", "VCores Used")
+              .th(".containers", "Containers")
+              .th(".mem", "Mem Used")
+              .th(".mem", "Mem Avail")
+              .th(".vcores", "VCores Used")
               .th(".vcores", "VCores Avail")
               .th(".nodeManagerVersion", "Version")._()._().tbody();
       NodeState stateFilter = null;
@@ -168,10 +172,10 @@ class NodesPage extends RmView {
 
   private String nodesTableInit() {
     StringBuilder b = tableInit().append(", aoColumnDefs: [");
-    b.append("{'bSearchable': false, 'aTargets': [ 6 ]}");
+    b.append("{'bSearchable': false, 'aTargets': [ 7 ]}");
     b.append(", {'sType': 'title-numeric', 'bSearchable': false, "
-        + "'aTargets': [ 7, 8 ] }");
-    b.append(", {'sType': 'title-numeric', 'aTargets': [ 4 ]}");
+        + "'aTargets': [ 8, 9 ] }");
+    b.append(", {'sType': 'title-numeric', 'aTargets': [ 5 ]}");
     b.append("]}");
     return b.toString();
   }


[45/47] hadoop git commit: MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL (rchiang via rkanter)

Posted by zj...@apache.org.
MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL (rchiang via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1767116b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1767116b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1767116b

Branch: refs/heads/YARN-2928
Commit: 1767116b4c51408d32ed45ae559a9a7827bff35f
Parents: 87bd06a
Author: Robert Kanter <rk...@apache.org>
Authored: Thu Apr 9 13:48:14 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:56 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                             | 3 +++
 .../java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java     | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1767116b/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index c1eb6c3..b9a75e3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -324,6 +324,9 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-4844. Counters / AbstractCounters have constant references not
     declared final. (Brahma Reddy Battula via gera)
 
+    MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL
+    (rchiang via rkanter)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1767116b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
index 6df8261..0f1f391 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java
@@ -157,10 +157,10 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job
     String historyUrl = "N/A";
     try {
       historyUrl =
-          MRWebAppUtil.getApplicationWebURLOnJHSWithoutScheme(conf,
+          MRWebAppUtil.getApplicationWebURLOnJHSWithScheme(conf,
               jobId.getAppId());
     } catch (UnknownHostException e) {
-      //Ignore.
+        LOG.error("Problem determining local host: " + e.getMessage());
     }
     report.setTrackingUrl(historyUrl);
     report.setAMInfos(getAMInfos());
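
The practical difference: a scheme-less tracking address is not parseable as a java.net.URL, which is what callers of Job#getTrackingURL reasonably attempt. A quick illustration with a made-up history-server host:

import java.net.MalformedURLException;
import java.net.URL;

public class TrackingUrlSketch {
  public static void main(String[] args) throws MalformedURLException {
    // Scheme-less form: "jhshost" is taken as the protocol and rejected.
    try {
      new URL("jhshost:19888/jobhistory/job/job_1428000000000_0001");
    } catch (MalformedURLException e) {
      System.out.println("rejected: " + e.getMessage()); // unknown protocol
    }
    // With the scheme, the same address parses cleanly.
    URL ok = new URL("http://jhshost:19888/jobhistory/job/job_1428000000000_0001");
    System.out.println(ok.getHost() + ":" + ok.getPort()); // jhshost:19888
  }
}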


[10/47] hadoop git commit: HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows. Contributed by Xiaoyu Yao.

Posted by zj...@apache.org.
HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5135143e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5135143e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5135143e

Branch: refs/heads/YARN-2928
Commit: 5135143e462f731e89f6d014d8a5a40347fec037
Parents: cd66d1b
Author: cnauroth <cn...@apache.org>
Authored: Tue Apr 7 14:47:21 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:57 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../java/org/apache/hadoop/security/TestShellBasedIdMapping.java  | 1 +
 2 files changed, 4 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5135143e/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5a8cda4..67050e7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1210,6 +1210,9 @@ Release 2.7.0 - UNRELEASED
     Ozawa via vinodkv)
 
     HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. (Li Lu via vinodkv)
+
+    HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows.
+    (Xiaoyu Yao via cnauroth)
     
 Release 2.6.1 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5135143e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
index e6e1d73..3b533d2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
@@ -129,6 +129,7 @@ public class TestShellBasedIdMapping {
   // Test staticMap refreshing
   @Test
   public void testStaticMapUpdate() throws IOException {
+    assumeTrue(!Shell.WINDOWS);
     File tempStaticMapFile = File.createTempFile("nfs-", ".map");
     tempStaticMapFile.delete();
     Configuration conf = new Configuration();
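
assumeTrue here is JUnit's org.junit.Assume.assumeTrue: a false condition raises AssumptionViolatedException, so the runner reports the test as skipped rather than failed, which is the right outcome for a platform-specific test. A minimal sketch of the pattern (the test class is hypothetical; Shell.WINDOWS is the real Hadoop flag):

import static org.junit.Assume.assumeTrue;

import org.apache.hadoop.util.Shell;
import org.junit.Test;

public class SkipOnWindowsSketch {
  @Test
  public void posixOnlyBehavior() {
    // On Windows this short-circuits the test as "skipped"; elsewhere
    // execution continues into the platform-specific assertions.
    assumeTrue(!Shell.WINDOWS);
    // ... POSIX-specific checks would go here ...
  }
}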


[28/47] hadoop git commit: HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. Contributed by Haohui Mai.

Posted by zj...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
new file mode 100644
index 0000000..dede6a5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/** Http PUT operation parameter. */
+public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
+  /** Put operations. */
+  public static enum Op implements HttpOpParam.Op {
+    CREATE(true, HttpURLConnection.HTTP_CREATED),
+
+    MKDIRS(false, HttpURLConnection.HTTP_OK),
+    CREATESYMLINK(false, HttpURLConnection.HTTP_OK),
+    RENAME(false, HttpURLConnection.HTTP_OK),
+    SETREPLICATION(false, HttpURLConnection.HTTP_OK),
+
+    SETOWNER(false, HttpURLConnection.HTTP_OK),
+    SETPERMISSION(false, HttpURLConnection.HTTP_OK),
+    SETTIMES(false, HttpURLConnection.HTTP_OK),
+
+    RENEWDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
+    CANCELDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
+
+    MODIFYACLENTRIES(false, HttpURLConnection.HTTP_OK),
+    REMOVEACLENTRIES(false, HttpURLConnection.HTTP_OK),
+    REMOVEDEFAULTACL(false, HttpURLConnection.HTTP_OK),
+    REMOVEACL(false, HttpURLConnection.HTTP_OK),
+    SETACL(false, HttpURLConnection.HTTP_OK),
+
+    SETXATTR(false, HttpURLConnection.HTTP_OK),
+    REMOVEXATTR(false, HttpURLConnection.HTTP_OK),
+
+    CREATESNAPSHOT(false, HttpURLConnection.HTTP_OK),
+    RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK),
+
+    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+
+    final boolean doOutputAndRedirect;
+    final int expectedHttpResponseCode;
+    final boolean requireAuth;
+
+    Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode) {
+      this(doOutputAndRedirect, expectedHttpResponseCode, false);
+    }
+
+    Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode,
+       final boolean requireAuth) {
+      this.doOutputAndRedirect = doOutputAndRedirect;
+      this.expectedHttpResponseCode = expectedHttpResponseCode;
+      this.requireAuth = requireAuth;
+    }
+
+    @Override
+    public HttpOpParam.Type getType() {
+      return HttpOpParam.Type.PUT;
+    }
+
+    @Override
+    public boolean getRequireAuth() {
+      return requireAuth;
+    }
+
+    @Override
+    public boolean getDoOutput() {
+      return doOutputAndRedirect;
+    }
+
+    @Override
+    public boolean getRedirect() {
+      return doOutputAndRedirect;
+    }
+
+    @Override
+    public int getExpectedHttpResponseCode() {
+      return expectedHttpResponseCode;
+    }
+
+    @Override
+    public String toQueryString() {
+      return NAME + "=" + this;
+    }
+  }
+
+  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class);
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public PutOpParam(final String str) {
+    super(DOMAIN, DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java
new file mode 100644
index 0000000..4890a61
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Recursive parameter. */
+public class RecursiveParam extends BooleanParam {
+  /** Parameter name. */
+  public static final String NAME = "recursive";
+  /** Default parameter value. */
+  public static final String DEFAULT = FALSE;
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public RecursiveParam(final Boolean value) {
+    super(DOMAIN, value);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public RecursiveParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
new file mode 100644
index 0000000..d7c157d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.fs.Options;
+
+/** Rename option set parameter. */
+public class RenameOptionSetParam extends EnumSetParam<Options.Rename> {
+  /** Parameter name. */
+  public static final String NAME = "renameoptions";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain<Options.Rename> DOMAIN = new Domain<Options.Rename>(
+      NAME, Options.Rename.class);
+
+  /**
+   * Constructor.
+   * @param options rename options.
+   */
+  public RenameOptionSetParam(final Options.Rename... options) {
+    super(DOMAIN, toEnumSet(Options.Rename.class, options));
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public RenameOptionSetParam(final String str) {
+    super(DOMAIN, DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java
new file mode 100644
index 0000000..750e8bc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Renewer parameter. */
+public class RenewerParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "renewer";
+  /** Default parameter value. */
+  public static final String DEFAULT = NULL;
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public RenewerParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java
new file mode 100644
index 0000000..af2ca23
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_REPLICATION_KEY;
+
+import org.apache.hadoop.conf.Configuration;
+
+/** Replication parameter. */
+public class ReplicationParam extends ShortParam {
+  /** Parameter name. */
+  public static final String NAME = "replication";
+  /** Default parameter value. */
+  public static final String DEFAULT = NULL;
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public ReplicationParam(final Short value) {
+    super(DOMAIN, value, (short)1, null);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public ReplicationParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  /** @return the value or, if it is null, return the default from conf. */
+  public short getValue(final Configuration conf) {
+    return getValue() != null? getValue()
+        : (short)conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java
new file mode 100644
index 0000000..3f9af85
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Short parameter. */
+abstract class ShortParam extends Param<Short, ShortParam.Domain> {
+  ShortParam(final Domain domain, final Short value,
+      final Short min, final Short max) {
+    super(domain, value);
+    checkRange(min, max);
+  }
+
+  private void checkRange(final Short min, final Short max) {
+    if (value == null) {
+      return;
+    }
+    if (min != null && value < min) {
+      throw new IllegalArgumentException("Invalid parameter range: " + getName()
+          + " = " + domain.toString(value) + " < " + domain.toString(min));
+    }
+    if (max != null && value > max) {
+      throw new IllegalArgumentException("Invalid parameter range: " + getName()
+          + " = " + domain.toString(value) + " > " + domain.toString(max));
+    }
+  }
+
+  @Override
+  public String toString() {
+    return getName() + "=" + domain.toString(getValue());
+  }
+
+  /** @return the parameter value as a string */
+  @Override
+  public final String getValueString() {
+    return domain.toString(getValue());
+  }
+
+  /** The domain of the parameter. */
+  static final class Domain extends Param.Domain<Short> {
+    /** The radix of the number. */
+    final int radix;
+
+    Domain(final String paramName) {
+      this(paramName, 10);
+    }
+
+    Domain(final String paramName, final int radix) {
+      super(paramName);
+      this.radix = radix;
+    }
+
+    @Override
+    public String getDomain() {
+      return "<" + NULL + " | short in radix " + radix + ">";
+    }
+
+    @Override
+    Short parse(final String str) {
+      try {
+        return str == null || NULL.equals(str) ? null
+            : Short.parseShort(str, radix);
+      } catch(NumberFormatException e) {
+        throw new IllegalArgumentException("Failed to parse \"" + str
+            + "\" as a radix-" + radix + " short integer.", e);
+      }
+    }
+
+    /** Convert a Short to a String. */
+    String toString(final Short n) {
+      return n == null? NULL: Integer.toString(n, radix);
+    }
+  }
+}
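
Since ShortParam is abstract, its parsing and range checking are easiest to
see through a concrete subclass. A minimal sketch using ReplicationParam
(radix 10, minimum 1); the demo class name is hypothetical.

    import org.apache.hadoop.hdfs.web.resources.ReplicationParam;

    public class ShortParamDemo {
      public static void main(String[] args) {
        // The NULL sentinel ("null") parses to a null value.
        System.out.println(new ReplicationParam("null").getValue()); // null

        // A radix-10 string parses to a Short; toString is "name=value".
        System.out.println(new ReplicationParam("7")); // replication=7

        // Values below the minimum are rejected by checkRange.
        try {
          new ReplicationParam((short) 0);
        } catch (IllegalArgumentException e) {
          System.out.println(e.getMessage()); // Invalid parameter range: ...
        }
      }
    }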

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/SnapshotNameParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/SnapshotNameParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/SnapshotNameParam.java
new file mode 100644
index 0000000..ad3bcd6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/SnapshotNameParam.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/**
+ * The snapshot name parameter for the createSnapshot and deleteSnapshot
+ * operations, and the new snapshot name for the renameSnapshot operation.
+ */
+public class SnapshotNameParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "snapshotname";
+
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  public SnapshotNameParam(final String str) {
+    super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
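
As with the other string parameters, an absent or empty snapshotname
normalizes to a null value. A quick sketch (demo class name hypothetical):

    import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam;

    public class SnapshotNameParamDemo {
      public static void main(String[] args) {
        // The empty DEFAULT collapses to null ...
        System.out.println(new SnapshotNameParam("").getValue());   // null
        // ... while a real name passes through (the domain has no pattern).
        System.out.println(new SnapshotNameParam("s1").getValue()); // s1
      }
    }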

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java
new file mode 100644
index 0000000..f063120
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.util.regex.Pattern;
+
+/** String parameter. */
+abstract class StringParam extends Param<String, StringParam.Domain> {
+  StringParam(final Domain domain, String str) {
+    super(domain, domain.parse(str));
+  }
+
+  /** @return the parameter value as a string */
+  @Override
+  public String getValueString() {
+    return value;
+  }
+
+  /** The domain of the parameter. */
+  static final class Domain extends Param.Domain<String> {
+    /** The pattern defining the domain; a null pattern accepts any string. */
+    private final Pattern pattern;
+
+    Domain(final String paramName, final Pattern pattern) {
+      super(paramName);
+      this.pattern = pattern;
+    }
+
+    @Override
+    public final String getDomain() {
+      return pattern == null ? "<String>" : pattern.pattern();
+    }
+
+    @Override
+    final String parse(final String str) {
+      if (str != null && pattern != null) {
+        if (!pattern.matcher(str).matches()) {
+          throw new IllegalArgumentException("Invalid value: \"" + str
+              + "\" does not belong to the domain " + getDomain());
+        }
+      }
+      return str;
+    }
+  }
+}
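
StringParam.Domain only validates when a pattern is present; a null pattern
accepts any string. A sketch contrasting a pattern-checked parameter
(UserParam, below) with an unchecked one (TokenArgumentParam); the demo
class name is hypothetical.

    import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
    import org.apache.hadoop.hdfs.web.resources.UserParam;

    public class StringParamDemo {
      public static void main(String[] args) {
        // Null-pattern domain: any string passes through parse() unchanged.
        System.out.println(new TokenArgumentParam("opaque-token").getValue());

        // Pattern-backed domain: parse() rejects strings outside the domain.
        try {
          new UserParam("9starts-with-digit");
        } catch (IllegalArgumentException e) {
          System.out.println(e.getMessage()); // Invalid value: ...
        }
      }
    }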

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenArgumentParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenArgumentParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenArgumentParam.java
new file mode 100644
index 0000000..53b38ac
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenArgumentParam.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/**
+ * Represents a delegation token passed as a method argument. This is
+ * different from {@link DelegationParam}.
+ */
+public class TokenArgumentParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "token";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  /**
+   * Constructor.
+   * @param str A string representation of the parameter value.
+   */
+  public TokenArgumentParam(final String str) {
+    super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
new file mode 100644
index 0000000..46402fd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;
+import org.apache.hadoop.security.UserGroupInformation;
+import com.google.common.annotations.VisibleForTesting;
+
+import java.text.MessageFormat;
+import java.util.regex.Pattern;
+
+/** User parameter. */
+public class UserParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "user.name";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static Domain domain = new Domain(NAME, Pattern.compile(DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+
+  @VisibleForTesting
+  public static Domain getUserPatternDomain() {
+    return domain;
+  }
+
+  @VisibleForTesting
+  public static void setUserPatternDomain(Domain dm) {
+    domain = dm;
+  }
+
+  public static void setUserPattern(String pattern) {
+    domain = new Domain(NAME, Pattern.compile(pattern));
+  }
+
+  private static String validateLength(String str) {
+    if (str == null) {
+      throw new IllegalArgumentException(
+        MessageFormat.format("Parameter [{0}], cannot be NULL", NAME));
+    }
+    int len = str.length();
+    if (len < 1) {
+      throw new IllegalArgumentException(MessageFormat.format(
+        "Parameter [{0}], it's length must be at least 1", NAME));
+    }
+    return str;
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public UserParam(final String str) {
+    super(domain, str == null || str.equals(DEFAULT)? null : validateLength(str));
+  }
+
+  /**
+   * Construct an object from a UGI.
+   */
+  public UserParam(final UserGroupInformation ugi) {
+    this(ugi.getShortUserName());
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
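
The user-name pattern is process-wide state and can be swapped at runtime
via setUserPattern. A sketch, assuming the default pattern (which rejects a
leading digit) is in effect; the demo class name is hypothetical.

    import org.apache.hadoop.hdfs.web.resources.UserParam;

    public class UserParamDemo {
      public static void main(String[] args) {
        // Accepted by the default pattern ^[A-Za-z_][A-Za-z0-9._-]*[$]?$
        System.out.println(new UserParam("hdfs").getValue()); // hdfs

        // Relax the pattern so names may start with a digit.
        UserParam.setUserPattern("^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
        System.out.println(new UserParam("0user").getValue()); // 0user
      }
    }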

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java
new file mode 100644
index 0000000..dd6eda1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.fs.XAttrCodec;
+
+public class XAttrEncodingParam extends EnumParam<XAttrCodec> {
+  /** Parameter name. */
+  public static final String NAME = "encoding";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain<XAttrCodec> DOMAIN =
+      new Domain<XAttrCodec>(NAME, XAttrCodec.class);
+
+  public XAttrEncodingParam(final XAttrCodec encoding) {
+    super(DOMAIN, encoding);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public XAttrEncodingParam(final String str) {
+    super(DOMAIN, str != null && !str.isEmpty() ? DOMAIN.parse(str) : null);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  @Override
+  public String getValueString() {
+    return value == null ? null : value.toString();
+  }
+
+  public XAttrCodec getEncoding() {
+    return getValue();
+  }
+}
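
EnumParam upper-cases the raw string before Enum.valueOf, so encodings may
arrive in any case. A sketch, assuming XAttrCodec's constants TEXT, HEX and
BASE64; the demo class name is hypothetical.

    import org.apache.hadoop.fs.XAttrCodec;
    import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam;

    public class XAttrEncodingParamDemo {
      public static void main(String[] args) {
        // Lower-case input still resolves to the HEX constant.
        XAttrEncodingParam p = new XAttrEncodingParam("hex");
        System.out.println(p.getEncoding() == XAttrCodec.HEX); // true

        // An empty string means "no encoding requested".
        System.out.println(new XAttrEncodingParam("").getValue()); // null
      }
    }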

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
new file mode 100644
index 0000000..b64666a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.util.regex.Pattern;
+
+public class XAttrNameParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "xattr.name";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME,
+      Pattern.compile(".*"));
+
+  public XAttrNameParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  public String getXAttrName() {
+    final String v = getValue();
+    return v;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java
new file mode 100644
index 0000000..7cd5419
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.util.EnumSet;
+
+import org.apache.hadoop.fs.XAttrSetFlag;
+
+public class XAttrSetFlagParam extends EnumSetParam<XAttrSetFlag> {
+  /** Parameter name. */
+  public static final String NAME = "flag";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain<XAttrSetFlag> DOMAIN = new Domain<XAttrSetFlag>(
+      NAME, XAttrSetFlag.class);
+
+  public XAttrSetFlagParam(final EnumSet<XAttrSetFlag> flag) {
+    super(DOMAIN, flag);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public XAttrSetFlagParam(final String str) {
+    super(DOMAIN, DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  public EnumSet<XAttrSetFlag> getFlag() {
+    return getValue();
+  }
+}
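
Because this is an EnumSetParam, a comma-separated list expands into an
EnumSet of flags. A sketch using XAttrSetFlag's CREATE and REPLACE
constants; the demo class name is hypothetical.

    import java.util.EnumSet;

    import org.apache.hadoop.fs.XAttrSetFlag;
    import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;

    public class XAttrSetFlagParamDemo {
      public static void main(String[] args) {
        // Comma-separated, case-insensitive tokens parse into an EnumSet.
        EnumSet<XAttrSetFlag> flags =
            new XAttrSetFlagParam("create,replace").getFlag();
        System.out.println(flags.contains(XAttrSetFlag.CREATE));  // true
        System.out.println(flags.contains(XAttrSetFlag.REPLACE)); // true
      }
    }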

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java
new file mode 100644
index 0000000..b08f1b3
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.io.IOException;
+
+import org.apache.hadoop.fs.XAttrCodec;
+
+public class XAttrValueParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "xattr.value";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  public XAttrValueParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  public byte[] getXAttrValue() throws IOException {
+    final String v = getValue();
+    return XAttrCodec.decodeValue(v);
+  }
+}
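
getXAttrValue defers decoding to XAttrCodec.decodeValue, which (as far as
the codec goes) recognizes hex values by their "0x" prefix. A sketch with an
illustrative literal; the demo class name is hypothetical.

    import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;

    public class XAttrValueParamDemo {
      public static void main(String[] args) throws Exception {
        // "0x313233" is the hex encoding of the ASCII bytes '1', '2', '3'.
        byte[] value = new XAttrValueParam("0x313233").getXAttrValue();
        System.out.println(new String(value, "UTF-8")); // 123
      }
    }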

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1fdf6aa..dacdb3f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -402,6 +402,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can
     complete without blocks being replicated. (Ming Ma via wang)
 
+    HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 869df32..3bb2ae6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -34,10 +34,15 @@ import org.apache.hadoop.http.HttpConfig;
  */
 @InterfaceAudience.Private
 public class DFSConfigKeys extends CommonConfigurationKeys {
-  public static final String  DFS_BLOCK_SIZE_KEY = "dfs.blocksize";
-  public static final long    DFS_BLOCK_SIZE_DEFAULT = 128*1024*1024;
-  public static final String  DFS_REPLICATION_KEY = "dfs.replication";
-  public static final short   DFS_REPLICATION_DEFAULT = 3;
+  public static final String  DFS_BLOCK_SIZE_KEY =
+      HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY;
+  public static final long    DFS_BLOCK_SIZE_DEFAULT =
+      HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+  public static final String  DFS_REPLICATION_KEY =
+      HdfsClientConfigKeys.DFS_REPLICATION_KEY;
+  public static final short   DFS_REPLICATION_DEFAULT =
+      HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT;
+
   public static final String  DFS_STREAM_BUFFER_SIZE_KEY = "dfs.stream-buffer-size";
   public static final int     DFS_STREAM_BUFFER_SIZE_DEFAULT = 4096;
   public static final String  DFS_BYTES_PER_CHECKSUM_KEY = "dfs.bytes-per-checksum";
@@ -49,7 +54,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_HDFS_BLOCKS_METADATA_ENABLED = "dfs.datanode.hdfs-blocks-metadata.enabled";
   public static final boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;
   public static final String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
-      "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
+      HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
 
   // HA related configuration
   public static final String  DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY = "dfs.datanode.restart.replica.expiration";
@@ -154,7 +159,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = "dfs.web.authentication.filter";
   public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT = AuthFilter.class.getName();
   public static final String  DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
-  public static final String  DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
+  public static final String  DFS_WEBHDFS_USER_PATTERN_DEFAULT =
+      HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;
   public static final String  DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";
   public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true;
   public static final String  DFS_PERMISSIONS_SUPERUSERGROUP_KEY = "dfs.permissions.superusergroup";
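
After this change the server-side constants are plain aliases of
HdfsClientConfigKeys, so code on either side of the split resolves to the
same key strings. A quick sketch of the equivalence (demo class name
hypothetical):

    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class ConfigKeysDemo {
      public static void main(String[] args) {
        // Both constants now reference the same "dfs.replication" string.
        System.out.println(DFSConfigKeys.DFS_REPLICATION_KEY
            .equals(HdfsClientConfigKeys.DFS_REPLICATION_KEY)); // true
      }
    }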

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
deleted file mode 100644
index 9bc938d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Access time parameter. */
-public class AccessTimeParam extends LongParam {
-  /** Parameter name. */
-  public static final String NAME = "accesstime";
-  /** Default parameter value. */
-  public static final String DEFAULT = "-1";
-
-  private static final Domain DOMAIN = new Domain(NAME);
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public AccessTimeParam(final Long value) {
-    super(DOMAIN, value, -1L, null);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public AccessTimeParam(final String str) {
-    this(DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
deleted file mode 100644
index 9ed0a30..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
-
-import java.util.List;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.commons.lang.StringUtils;
-
-/** AclPermission parameter. */
-public class AclPermissionParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "aclspec";
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static final Domain DOMAIN = new Domain(NAME,
-      Pattern.compile(DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
-
-  /**
-   * Constructor.
-   * 
-   * @param str a string representation of the parameter value.
-   */
-  public AclPermissionParam(final String str) {
-    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
-  }
-
-  public AclPermissionParam(List<AclEntry> acl) {
-    super(DOMAIN,parseAclSpec(acl).equals(DEFAULT) ? null : parseAclSpec(acl));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-
-  public List<AclEntry> getAclPermission(boolean includePermission) {
-    final String v = getValue();
-    return (v != null ? AclEntry.parseAclSpec(v, includePermission) : AclEntry
-        .parseAclSpec(DEFAULT, includePermission));
-  }
-
-  /**
-   * @return parse {@code aclEntry} and return aclspec
-   */
-  private static String parseAclSpec(List<AclEntry> aclEntry) {
-    return StringUtils.join(aclEntry, ",");
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
deleted file mode 100644
index b6d82c2..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-
-import org.apache.hadoop.conf.Configuration;
-
-/** Block size parameter. */
-public class BlockSizeParam extends LongParam {
-  /** Parameter name. */
-  public static final String NAME = "blocksize";
-  /** Default parameter value. */
-  public static final String DEFAULT = NULL;
-
-  private static final Domain DOMAIN = new Domain(NAME);
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public BlockSizeParam(final Long value) {
-    super(DOMAIN, value, 1L, null);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public BlockSizeParam(final String str) {
-    this(DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-
-  /** @return the value or, if it is null, return the default from conf. */
-  public long getValue(final Configuration conf) {
-    return getValue() != null? getValue()
-        : conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BooleanParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BooleanParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BooleanParam.java
deleted file mode 100644
index 3437a0c..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BooleanParam.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Boolean parameter. */
-abstract class BooleanParam extends Param<Boolean, BooleanParam.Domain> {
-  static final String TRUE = "true";
-  static final String FALSE = "false";
-
-  /** @return the parameter value as a string */
-  @Override
-  public String getValueString() {
-    return value.toString();
-  }
-
-  BooleanParam(final Domain domain, final Boolean value) {
-    super(domain, value);
-  }
-
-  /** The domain of the parameter. */
-  static final class Domain extends Param.Domain<Boolean> {
-    Domain(final String paramName) {
-      super(paramName);
-    }
-
-    @Override
-    public String getDomain() {
-      return "<" + NULL + " | boolean>";
-    }
-
-    @Override
-    Boolean parse(final String str) {
-      if (TRUE.equalsIgnoreCase(str)) {
-        return true;
-      } else if (FALSE.equalsIgnoreCase(str)) {
-        return false;
-      }
-      throw new IllegalArgumentException("Failed to parse \"" + str
-          + "\" to Boolean.");
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java
deleted file mode 100644
index b68c5f5..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.web.resources;
-
-import org.apache.hadoop.fs.Path;
-
-/** The concat source paths parameter. */
-public class ConcatSourcesParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "sources";
-
-  public static final String DEFAULT = "";
-
-  private static final Domain DOMAIN = new Domain(NAME, null);
-
-  private static String paths2String(Path[] paths) {
-    if (paths == null || paths.length == 0) {
-      return "";
-    }
-    final StringBuilder b = new StringBuilder(paths[0].toUri().getPath());
-    for(int i = 1; i < paths.length; i++) {
-      b.append(',').append(paths[i].toUri().getPath());
-    }
-    return b.toString();
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public ConcatSourcesParam(String str) {
-    super(DOMAIN, str);
-  }
-
-  public ConcatSourcesParam(Path[] paths) {
-    this(paths2String(paths));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-
-  /** @return the absolute path. */
-  public final String[] getAbsolutePaths() {
-    final String[] paths = getValue().split(",");
-    return paths;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java
deleted file mode 100644
index 8152515..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Create Parent parameter. */
-public class CreateParentParam extends BooleanParam {
-  /** Parameter name. */
-  public static final String NAME = "createparent";
-  /** Default parameter value. */
-  public static final String DEFAULT = FALSE;
-
-  private static final Domain DOMAIN = new Domain(NAME);
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public CreateParentParam(final Boolean value) {
-    super(DOMAIN, value);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public CreateParentParam(final String str) {
-    this(DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
deleted file mode 100644
index 57be43e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import org.apache.hadoop.security.UserGroupInformation;
-
-/** Represents delegation token used for authentication. */
-public class DelegationParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "delegation";
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static final Domain DOMAIN = new Domain(NAME, null);
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public DelegationParam(final String str) {
-    super(DOMAIN, UserGroupInformation.isSecurityEnabled()
-        && str != null && !str.equals(DEFAULT)? str: null);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
deleted file mode 100644
index 65275e0..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.net.HttpURLConnection;
-
-/** Http DELETE operation parameter. */
-public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
-  /** Delete operations. */
-  public static enum Op implements HttpOpParam.Op {
-    DELETE(HttpURLConnection.HTTP_OK),
-    DELETESNAPSHOT(HttpURLConnection.HTTP_OK),
-
-    NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
-
-    final int expectedHttpResponseCode;
-
-    Op(final int expectedHttpResponseCode) {
-      this.expectedHttpResponseCode = expectedHttpResponseCode;
-    }
-
-    @Override
-    public HttpOpParam.Type getType() {
-      return HttpOpParam.Type.DELETE;
-    }
-
-    @Override
-    public boolean getRequireAuth() {
-      return false;
-    }
-
-    @Override
-    public boolean getDoOutput() {
-      return false;
-    }
-
-    @Override
-    public boolean getRedirect() {
-      return false;
-    }
-
-    @Override
-    public int getExpectedHttpResponseCode() {
-      return expectedHttpResponseCode;
-    }
-
-    @Override
-    public String toQueryString() {
-      return NAME + "=" + this;
-    }
-  }
-
-  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class);
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public DeleteOpParam(final String str) {
-    super(DOMAIN, DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java
deleted file mode 100644
index 6759738..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import org.apache.hadoop.fs.Path;
-
-/** Destination path parameter. */
-public class DestinationParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "destination";
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static final Domain DOMAIN = new Domain(NAME, null);
-
-  private static String validate(final String str) {
-    if (str == null || str.equals(DEFAULT)) {
-      return null;
-    }
-    if (!str.startsWith(Path.SEPARATOR)) {
-      throw new IllegalArgumentException("Invalid parameter value: " + NAME
-          + " = \"" + str + "\" is not an absolute path.");
-    }
-    return new Path(str).toUri().getPath();
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public DestinationParam(final String str) {
-    super(DOMAIN, validate(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java
deleted file mode 100644
index 13d188c..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** DoAs parameter for proxy user. */
-public class DoAsParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "doas";
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static final Domain DOMAIN = new Domain(NAME, null);
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public DoAsParam(final String str) {
-    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java
deleted file mode 100644
index 60d201b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.util.Arrays;
-import org.apache.hadoop.util.StringUtils;
-
-abstract class EnumParam<E extends Enum<E>> extends Param<E, EnumParam.Domain<E>> {
-  EnumParam(final Domain<E> domain, final E value) {
-    super(domain, value);
-  }
-
-  /** The domain of the parameter. */
-  static final class Domain<E extends Enum<E>> extends Param.Domain<E> {
-    private final Class<E> enumClass;
-
-    Domain(String name, final Class<E> enumClass) {
-      super(name);
-      this.enumClass = enumClass;
-    }
-
-    @Override
-    public final String getDomain() {
-      return Arrays.asList(enumClass.getEnumConstants()).toString();
-    }
-
-    @Override
-    final E parse(final String str) {
-      return Enum.valueOf(enumClass, StringUtils.toUpperCase(str));
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java
deleted file mode 100644
index c2dfadf..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.Iterator;
-import org.apache.hadoop.util.StringUtils;
-
-abstract class EnumSetParam<E extends Enum<E>> extends Param<EnumSet<E>, EnumSetParam.Domain<E>> {
-  /** Convert an EnumSet to a string of comma separated values. */
-  static <E extends Enum<E>> String toString(EnumSet<E> set) {
-    if (set == null || set.isEmpty()) {
-      return "";
-    } else {
-      final StringBuilder b = new StringBuilder();
-      final Iterator<E> i = set.iterator();
-      b.append(i.next());
-      for(; i.hasNext(); ) {
-        b.append(',').append(i.next());
-      }
-      return b.toString();
-    }
-  }
-
-  static <E extends Enum<E>> EnumSet<E> toEnumSet(final Class<E> clazz,
-      final E... values) {
-    final EnumSet<E> set = EnumSet.noneOf(clazz);
-    set.addAll(Arrays.asList(values));
-    return set;
-  }
-
-  EnumSetParam(final Domain<E> domain, final EnumSet<E> value) {
-    super(domain, value);
-  }
-
-  @Override
-  public String toString() {
-    return getName() + "=" + toString(value);
-  }
-
-  /** @return the parameter value as a string */
-  @Override
-  public String getValueString() {
-    return toString(value);
-  }
-  
-  /** The domain of the parameter. */
-  static final class Domain<E extends Enum<E>> extends Param.Domain<EnumSet<E>> {
-    private final Class<E> enumClass;
-
-    Domain(String name, final Class<E> enumClass) {
-      super(name);
-      this.enumClass = enumClass;
-    }
-
-    @Override
-    public final String getDomain() {
-      return Arrays.asList(enumClass.getEnumConstants()).toString();
-    }
-
-    /** The string contains comma-separated values. */
-    @Override
-    final EnumSet<E> parse(final String str) {
-      final EnumSet<E> set = EnumSet.noneOf(enumClass);
-      if (!str.isEmpty()) {
-        for(int i, j = 0; j >= 0; ) {
-          i = j > 0 ? j + 1 : 0;
-          j = str.indexOf(',', i);
-          final String sub = j >= 0? str.substring(i, j): str.substring(i);
-          set.add(Enum.valueOf(enumClass, StringUtils.toUpperCase(sub.trim())));
-        }
-      }
-      return set;
-    }
-  }
-}
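
The index walk in parse() above is compact but easy to misread. Below is a
standalone sketch, mirroring the deleted loop with a hypothetical enum and
plain java.lang.String.toUpperCase in place of Hadoop's StringUtils, that
shows how it tokenizes a comma-separated value list:

  import java.util.EnumSet;
  import java.util.Locale;

  public class EnumSetParseSketch {
    enum Color { RED, GREEN, BLUE }

    static EnumSet<Color> parse(final String str) {
      final EnumSet<Color> set = EnumSet.noneOf(Color.class);
      if (!str.isEmpty()) {
        // j holds the index of the comma that ended the previous token
        // (0 initially, -1 once no comma remains); i is the start of the
        // current token.
        for (int i, j = 0; j >= 0; ) {
          i = j > 0 ? j + 1 : 0;
          j = str.indexOf(',', i);
          final String sub = j >= 0 ? str.substring(i, j) : str.substring(i);
          set.add(Enum.valueOf(Color.class, sub.trim().toUpperCase(Locale.ROOT)));
        }
      }
      return set;
    }

    public static void main(String[] args) {
      System.out.println(parse("red, green ,BLUE")); // prints [RED, GREEN, BLUE]
    }
  }

Note that the last token is consumed inside the iteration in which indexOf
returns -1; the loop condition only stops the walk afterwards.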

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
deleted file mode 100644
index 3f44fae..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
+++ /dev/null
@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-
-/** Exclude datanodes param */
-public class ExcludeDatanodesParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "excludedatanodes";
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static final Domain DOMAIN = new Domain(NAME, null);
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public ExcludeDatanodesParam(final String str) {
-    super(DOMAIN, str == null || str.equals(DEFAULT)? null: DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
deleted file mode 100644
index c840196..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import org.apache.hadoop.fs.permission.FsAction;
-
-import java.util.regex.Pattern;
-
-/** {@link FsAction} Parameter */
-public class FsActionParam extends StringParam {
-
-  /** Parameter name. */
-  public static final String NAME = "fsaction";
-
-  /** Default parameter value. */
-  public static final String DEFAULT = NULL;
-
-  private static String FS_ACTION_PATTERN = "[rwx-]{3}";
-
-  private static final Domain DOMAIN = new Domain(NAME,
-      Pattern.compile(FS_ACTION_PATTERN));
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public FsActionParam(final String str) {
-    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
-  }
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public FsActionParam(final FsAction value) {
-    super(DOMAIN, value == null? null: value.SYMBOL);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
deleted file mode 100644
index f63ed44..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ /dev/null
@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.net.HttpURLConnection;
-
-/** Http GET operation parameter. */
-public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
-  /** Get operations. */
-  public static enum Op implements HttpOpParam.Op {
-    OPEN(true, HttpURLConnection.HTTP_OK),
-
-    GETFILESTATUS(false, HttpURLConnection.HTTP_OK),
-    LISTSTATUS(false, HttpURLConnection.HTTP_OK),
-    GETCONTENTSUMMARY(false, HttpURLConnection.HTTP_OK),
-    GETFILECHECKSUM(true, HttpURLConnection.HTTP_OK),
-
-    GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
-    GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
-
-    /** GET_BLOCK_LOCATIONS is a private unstable op. */
-    GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
-    GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
-    GETXATTRS(false, HttpURLConnection.HTTP_OK),
-    LISTXATTRS(false, HttpURLConnection.HTTP_OK),
-
-    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED),
-
-    CHECKACCESS(false, HttpURLConnection.HTTP_OK);
-
-    final boolean redirect;
-    final int expectedHttpResponseCode;
-    final boolean requireAuth;
-
-    Op(final boolean redirect, final int expectedHttpResponseCode) {
-      this(redirect, expectedHttpResponseCode, false);
-    }
-    
-    Op(final boolean redirect, final int expectedHttpResponseCode,
-       final boolean requireAuth) {
-      this.redirect = redirect;
-      this.expectedHttpResponseCode = expectedHttpResponseCode;
-      this.requireAuth = requireAuth;
-    }
-
-    @Override
-    public HttpOpParam.Type getType() {
-      return HttpOpParam.Type.GET;
-    }
-    
-    @Override
-    public boolean getRequireAuth() {
-      return requireAuth;
-    }
-
-    @Override
-    public boolean getDoOutput() {
-      return false;
-    }
-
-    @Override
-    public boolean getRedirect() {
-      return redirect;
-    }
-
-    @Override
-    public int getExpectedHttpResponseCode() {
-      return expectedHttpResponseCode;
-    }
-
-    @Override
-    public String toQueryString() {
-      return NAME + "=" + this;
-    }
-  }
-
-  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class);
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public GetOpParam(final String str) {
-    super(DOMAIN, DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java
deleted file mode 100644
index c0429cc..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Group parameter. */
-public class GroupParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "group";
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static final Domain DOMAIN = new Domain(NAME, null);
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public GroupParam(final String str) {
-    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
deleted file mode 100644
index f4c24ff..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-
-import javax.ws.rs.core.Response;
-
-
-/** Http operation parameter. */
-public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
-    extends EnumParam<E> {
-  /** Parameter name. */
-  public static final String NAME = "op";
-
-  /** Default parameter value. */
-  public static final String DEFAULT = NULL;
-
-  /** Http operation types */
-  public static enum Type {
-    GET, PUT, POST, DELETE;
-  }
-
-  /** Http operation interface. */
-  public static interface Op {
-    /** @return the Http operation type. */
-    public Type getType();
-
-    /** @return true if the operation cannot use a token */
-    public boolean getRequireAuth();
-    
-    /** @return true if the operation will do output. */
-    public boolean getDoOutput();
-
-    /** @return true if the operation will be redirected. */
-    public boolean getRedirect();
-
-    /** @return the expected http response code. */
-    public int getExpectedHttpResponseCode();
-
-    /** @return a URI query string. */
-    public String toQueryString();
-  }
-
-  /** Expects HTTP response 307 "Temporary Redirect". */
-  public static class TemporaryRedirectOp implements Op {
-    static final TemporaryRedirectOp CREATE = new TemporaryRedirectOp(
-        PutOpParam.Op.CREATE);
-    static final TemporaryRedirectOp APPEND = new TemporaryRedirectOp(
-        PostOpParam.Op.APPEND);
-    static final TemporaryRedirectOp OPEN = new TemporaryRedirectOp(
-        GetOpParam.Op.OPEN);
-    static final TemporaryRedirectOp GETFILECHECKSUM = new TemporaryRedirectOp(
-        GetOpParam.Op.GETFILECHECKSUM);
-    
-    static final List<TemporaryRedirectOp> values
-        = Collections.unmodifiableList(Arrays.asList(CREATE, APPEND, OPEN,
-                                       GETFILECHECKSUM));
-
-    /** Get an object for the given op. */
-    public static TemporaryRedirectOp valueOf(final Op op) {
-      for(TemporaryRedirectOp t : values) {
-        if (op == t.op) {
-          return t;
-        }
-      }
-      throw new IllegalArgumentException(op + " not found.");
-    }
-
-    private final Op op;
-
-    private TemporaryRedirectOp(final Op op) {
-      this.op = op;
-    }
-
-    @Override
-    public Type getType() {
-      return op.getType();
-    }
-
-    @Override
-    public boolean getRequireAuth() {
-      return op.getRequireAuth();
-    }
-
-    @Override
-    public boolean getDoOutput() {
-      return false;
-    }
-
-    @Override
-    public boolean getRedirect() {
-      return false;
-    }
-
-    /** Override the original expected response with "Temporary Redirect". */
-    @Override
-    public int getExpectedHttpResponseCode() {
-      return Response.Status.TEMPORARY_REDIRECT.getStatusCode();
-    }
-
-    @Override
-    public String toQueryString() {
-      return op.toQueryString();
-    }
-  }
-
-  /** @return the parameter value as a string */
-  @Override
-  public String getValueString() {
-    return value.toString();
-  }
-
-  HttpOpParam(final Domain<E> domain, final E value) {
-    super(domain, value);
-  }
-}
\ No newline at end of file


[26/47] hadoop git commit: HDFS-8025. Addendum fix for HDFS-3087 Decommissioning on NN restart can complete without blocks being replicated. Contributed by Ming Ma.

Posted by zj...@apache.org.
HDFS-8025. Addendum fix for HDFS-3087 Decommissioning on NN restart can complete without blocks being replicated. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d872470
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d872470
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d872470

Branch: refs/heads/YARN-2928
Commit: 5d8724706c653da28121262fe2dcf4c2df8b114c
Parents: 09afdc2
Author: Andrew Wang <wa...@apache.org>
Authored: Wed Apr 8 16:09:17 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:53 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../server/blockmanagement/BlockManager.java    |  5 +++
 .../apache/hadoop/hdfs/TestDecommission.java    | 32 ++++++++------------
 3 files changed, 20 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d872470/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b203770..1fdf6aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -399,6 +399,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8076. Code cleanup for DFSInputStream: use offset instead of
     LocatedBlock when possible. (Zhe Zhang via wang)
 
+    HDFS-8025. Addendum fix for HDFS-3087 Decomissioning on NN restart can
+    complete without blocks being replicated. (Ming Ma via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d872470/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 524afa0..9a6535e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3305,6 +3305,11 @@ public class BlockManager {
    * liveness. Dead nodes cannot always be safely decommissioned.
    */
   boolean isNodeHealthyForDecommission(DatanodeDescriptor node) {
+    if (!node.checkBlockReportReceived()) {
+      LOG.info("Node {} hasn't sent its first block report.", node);
+      return false;
+    }
+
     if (node.isAlive) {
       return true;
     }
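
The gist of the fix: after a NameNode restart the datanode's block map is
empty until its first block report arrives, so without this guard the
decommission monitor can see zero blocks needing replication and declare
decommission complete immediately. For readability, the method as it reads
after the patch (the dead-node handling that follows the isAlive test is
truncated by the hunk and elided here too):

  boolean isNodeHealthyForDecommission(DatanodeDescriptor node) {
    if (!node.checkBlockReportReceived()) {
      // No first block report yet: the NN does not know what the node
      // stores, so decommission progress cannot be trusted.
      LOG.info("Node {} hasn't sent its first block report.", node);
      return false;
    }
    if (node.isAlive) {
      return true;
    }
    // ... dead-node handling continues as before ...
  }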

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d872470/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
index 081e40f..1ab7427 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
@@ -882,9 +882,12 @@ public class TestDecommission {
     int numNamenodes = 1;
     int numDatanodes = 1;
     int replicas = 1;
-    
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
+        DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, 5);
+
     startCluster(numNamenodes, numDatanodes, conf);
-    Path file1 = new Path("testDecommission.dat");
+    Path file1 = new Path("testDecommissionWithNamenodeRestart.dat");
     FileSystem fileSys = cluster.getFileSystem();
     writeFile(fileSys, file1, replicas);
         
@@ -894,37 +897,26 @@ public class TestDecommission {
     String excludedDatanodeName = info[0].getXferAddr();
 
     writeConfigFile(excludeFile, new ArrayList<String>(Arrays.asList(excludedDatanodeName)));
-    
+
     //Add a new datanode to cluster
     cluster.startDataNodes(conf, 1, true, null, null, null, null);
     numDatanodes+=1;
-    
+
     assertEquals("Number of datanodes should be 2 ", 2, cluster.getDataNodes().size());
     //Restart the namenode
     cluster.restartNameNode();
     DatanodeInfo datanodeInfo = NameNodeAdapter.getDatanode(
         cluster.getNamesystem(), excludedDatanodeID);
     waitNodeState(datanodeInfo, AdminStates.DECOMMISSIONED);
-    
+
     // Ensure decommissioned datanode is not automatically shutdown
     assertEquals("All datanodes must be alive", numDatanodes, 
         client.datanodeReport(DatanodeReportType.LIVE).length);
-    // wait for the block to be replicated
-    int tries = 0;
-    while (tries++ < 20) {
-      try {
-        Thread.sleep(1000);
-        if (checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(),
-            numDatanodes) == null) {
-          break;
-        }
-      } catch (InterruptedException ie) {
-      }
-    }
-    assertTrue("Checked if block was replicated after decommission, tried "
-        + tries + " times.", tries < 20);
-    cleanupFile(fileSys, file1);
+    assertTrue("Checked if block was replicated after decommission.",
+        checkFile(fileSys, file1, replicas, datanodeInfo.getXferAddr(),
+        numDatanodes) == null);
 
+    cleanupFile(fileSys, file1);
     // Restart the cluster and ensure recommissioned datanodes
     // are allowed to register with the namenode
     cluster.shutdown();


[25/47] hadoop git commit: HDFS-8076. Code cleanup for DFSInputStream: use offset instead of LocatedBlock when possible. Contributed by Zhe Zhang.

Posted by zj...@apache.org.
HDFS-8076. Code cleanup for DFSInputStream: use offset instead of LocatedBlock when possible. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8711282
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8711282
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8711282

Branch: refs/heads/YARN-2928
Commit: c8711282269080468d079b1a569fa0fcde922f24
Parents: 8118c95
Author: Andrew Wang <wa...@apache.org>
Authored: Wed Apr 8 15:41:48 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:52 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../org/apache/hadoop/hdfs/DFSInputStream.java  | 40 ++++++++++----------
 2 files changed, 24 insertions(+), 19 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8711282/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 35e9d54..852006d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -396,6 +396,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8046. Allow better control of getContentSummary (kihwal)
 
+    HDFS-8076. Code cleanup for DFSInputStream: use offset instead of
+    LocatedBlock when possible. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8711282/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index cf8015f..a9f2746 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1045,16 +1045,16 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
     return errMsgr.toString();
   }
 
-  private void fetchBlockByteRange(LocatedBlock block, long start, long end,
+  private void fetchBlockByteRange(long blockStartOffset, long start, long end,
       byte[] buf, int offset,
       Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
       throws IOException {
-    block = getBlockAt(block.getStartOffset());
+    LocatedBlock block = getBlockAt(blockStartOffset);
     while (true) {
       DNAddrPair addressPair = chooseDataNode(block, null);
       try {
-        actualGetFromOneDataNode(addressPair, block, start, end, buf, offset,
-            corruptedBlockMap);
+        actualGetFromOneDataNode(addressPair, blockStartOffset, start, end,
+            buf, offset, corruptedBlockMap);
         return;
       } catch (IOException e) {
         // Ignore. Already processed inside the function.
@@ -1064,7 +1064,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   }
 
   private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
-      final LocatedBlock block, final long start, final long end,
+      final long blockStartOffset, final long start, final long end,
       final ByteBuffer bb,
       final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
       final int hedgedReadId) {
@@ -1077,8 +1077,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
         TraceScope scope =
             Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
         try {
-          actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
-              corruptedBlockMap);
+          actualGetFromOneDataNode(datanode, blockStartOffset, start, end, buf,
+              offset, corruptedBlockMap);
           return bb;
         } finally {
           scope.close();
@@ -1088,7 +1088,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   }
 
   private void actualGetFromOneDataNode(final DNAddrPair datanode,
-      LocatedBlock block, final long start, final long end, byte[] buf,
+      long blockStartOffset, final long start, final long end, byte[] buf,
       int offset, Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
       throws IOException {
     DFSClientFaultInjector.get().startFetchFromDatanode();
@@ -1101,7 +1101,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
       // start of the loop.
       CachingStrategy curCachingStrategy;
       boolean allowShortCircuitLocalReads;
-      block = getBlockAt(block.getStartOffset());
+      LocatedBlock block = getBlockAt(blockStartOffset);
       synchronized(infoLock) {
         curCachingStrategy = cachingStrategy;
         allowShortCircuitLocalReads = !shortCircuitForbidden();
@@ -1189,7 +1189,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
    * if the first read is taking longer than configured amount of
    * time.  We then wait on which ever read returns first.
    */
-  private void hedgedFetchBlockByteRange(LocatedBlock block, long start,
+  private void hedgedFetchBlockByteRange(long blockStartOffset, long start,
       long end, byte[] buf, int offset,
       Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
       throws IOException {
@@ -1201,7 +1201,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
     ByteBuffer bb = null;
     int len = (int) (end - start + 1);
     int hedgedReadId = 0;
-    block = getBlockAt(block.getStartOffset());
+    LocatedBlock block = getBlockAt(blockStartOffset);
     while (true) {
       // see HDFS-6591, this metric is used to verify/catch unnecessary loops
       hedgedReadOpsLoopNumForTesting++;
@@ -1213,8 +1213,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
         chosenNode = chooseDataNode(block, ignored);
         bb = ByteBuffer.wrap(buf, offset, len);
         Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(
-            chosenNode, block, start, end, bb, corruptedBlockMap,
-            hedgedReadId++);
+            chosenNode, block.getStartOffset(), start, end, bb,
+            corruptedBlockMap, hedgedReadId++);
         Future<ByteBuffer> firstRequest = hedgedService
             .submit(getFromDataNodeCallable);
         futures.add(firstRequest);
@@ -1251,8 +1251,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
           }
           bb = ByteBuffer.allocate(len);
           Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(
-              chosenNode, block, start, end, bb, corruptedBlockMap,
-              hedgedReadId++);
+              chosenNode, block.getStartOffset(), start, end, bb,
+              corruptedBlockMap, hedgedReadId++);
           Future<ByteBuffer> oneMoreRequest = hedgedService
               .submit(getFromDataNodeCallable);
           futures.add(oneMoreRequest);
@@ -1405,11 +1405,13 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
       long bytesToRead = Math.min(remaining, blk.getBlockSize() - targetStart);
       try {
         if (dfsClient.isHedgedReadsEnabled()) {
-          hedgedFetchBlockByteRange(blk, targetStart, targetStart + bytesToRead
-              - 1, buffer, offset, corruptedBlockMap);
+          hedgedFetchBlockByteRange(blk.getStartOffset(), targetStart,
+              targetStart + bytesToRead - 1, buffer, offset,
+              corruptedBlockMap);
         } else {
-          fetchBlockByteRange(blk, targetStart, targetStart + bytesToRead - 1,
-              buffer, offset, corruptedBlockMap);
+          fetchBlockByteRange(blk.getStartOffset(), targetStart,
+              targetStart + bytesToRead - 1, buffer, offset,
+              corruptedBlockMap);
         }
       } finally {
         // Check and report if any block replicas are corrupted.
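
The pattern behind this cleanup: fetchBlockByteRange and its helpers now
take the block's start offset, a stable key, and re-resolve the
LocatedBlock themselves via getBlockAt(), instead of trusting a
LocatedBlock snapshot the caller may have obtained before block locations
changed. A self-contained toy of the same idea (hypothetical names, nothing
HDFS-specific):

  import java.util.Map;
  import java.util.concurrent.ConcurrentHashMap;

  public class FreshLookupSketch {
    // offset -> current location; updated elsewhere when a replica moves.
    private final Map<Long, String> locationByOffset = new ConcurrentHashMap<>();

    void updateLocation(long blockStartOffset, String datanode) {
      locationByOffset.put(blockStartOffset, datanode);
    }

    // The callee re-resolves from the live map, so it never reads through
    // a snapshot that went stale between the caller's lookup and this call.
    void fetchRange(long blockStartOffset, long start, long end) {
      String datanode = locationByOffset.get(blockStartOffset);
      System.out.println("reading [" + start + "," + end + "] from " + datanode);
    }

    public static void main(String[] args) {
      FreshLookupSketch s = new FreshLookupSketch();
      s.updateLocation(0L, "dn1:50010");
      s.updateLocation(0L, "dn2:50010"); // replica moved
      s.fetchRange(0L, 0, 1023);         // reads from dn2, the fresh location
    }
  }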


[37/47] hadoop git commit: HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS classes at runtime. Contributed by Sean Busbey.

Posted by zj...@apache.org.
HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS classes at runtime. Contributed by Sean Busbey.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa80dddb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa80dddb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa80dddb

Branch: refs/heads/YARN-2928
Commit: aa80dddb53e4a293485eb4ebdcd87a3e2a0b7d7e
Parents: 1488457
Author: Aaron T. Myers <at...@apache.org>
Authored: Thu Apr 9 09:40:08 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:54 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  8 +++--
 .../apache/hadoop/hdfs/TestDFSConfigKeys.java   | 37 ++++++++++++++++++++
 3 files changed, 46 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa80dddb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 74ed624..727bec7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -406,6 +406,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-7979. Initialize block report IDs with a random number. (wang)
 
+    HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
+    classes at runtime. (Sean Busbey via atm)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa80dddb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3bb2ae6..d0ca125 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
-import org.apache.hadoop.hdfs.web.AuthFilter;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -157,7 +156,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY = "dfs.namenode.replication.max-streams-hard-limit";
   public static final int     DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
   public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = "dfs.web.authentication.filter";
-  public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT = AuthFilter.class.getName();
+  /* Phrased as below to avoid javac inlining as a constant, to match the behavior when
+     this was AuthFilter.class.getName(). Note that if you change the import for AuthFilter, you
+     need to update the literal here as well as TestDFSConfigKeys.
+   */
+  public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT =
+      "org.apache.hadoop.hdfs.web.AuthFilter".toString();
   public static final String  DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
   public static final String  DFS_WEBHDFS_USER_PATTERN_DEFAULT =
       HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;
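
The ".toString()" above leans on the JLS constant-expression rules: a
static final String initialized from a bare literal is a compile-time
constant, and javac copies its value into every class that references it,
while wrapping the literal in a method call keeps it an ordinary field that
is read at runtime. A minimal illustration with hypothetical names:

  public class ConstDemo {
    // Compile-time constant: inlined into referencing classes at javac time.
    static final String INLINED = "org.example.Filter";
    // Not a constant expression: referencing classes emit a getstatic,
    // which requires ConstDemo (but not org.example.Filter) at runtime.
    static final String NOT_INLINED = "org.example.Filter".toString();

    public static void main(String[] args) {
      System.out.println(INLINED.equals(NOT_INLINED)); // true
    }
  }

That is why the literal form keeps AuthFilter off the DFSClient runtime
classpath while TestDFSConfigKeys (below) can still assert that the two
strings agree.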

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa80dddb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
new file mode 100644
index 0000000..c7df891
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSConfigKeys.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.hdfs.web.AuthFilter;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestDFSConfigKeys {
+
+  /**
+   * Make sure we keep the String literal up to date with what we'd get by calling
+   * class.getName.
+   */
+  @Test
+  public void testStringLiteralDefaultWebFilter() {
+    Assert.assertEquals("The default webhdfs auth filter should make the FQCN of AuthFilter.",
+        AuthFilter.class.getName(), DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
+  }
+ 
+}


[29/47] hadoop git commit: HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. Contributed by Haohui Mai.

Posted by zj...@apache.org.
HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21655165
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21655165
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21655165

Branch: refs/heads/YARN-2928
Commit: 2165516595bef241ad13d8b2b3649b114f9a77a7
Parents: 5d87247
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Apr 8 16:30:08 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:53 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  |   9 ++
 .../hdfs/client/HdfsClientConfigKeys.java       |   9 +-
 .../hdfs/web/resources/AccessTimeParam.java     |  49 +++++++
 .../hdfs/web/resources/AclPermissionParam.java  |  69 ++++++++++
 .../hdfs/web/resources/BlockSizeParam.java      |  60 +++++++++
 .../hadoop/hdfs/web/resources/BooleanParam.java |  57 ++++++++
 .../hdfs/web/resources/ConcatSourcesParam.java  |  65 +++++++++
 .../hdfs/web/resources/CreateParentParam.java   |  49 +++++++
 .../hdfs/web/resources/DelegationParam.java     |  44 ++++++
 .../hdfs/web/resources/DeleteOpParam.java       |  82 ++++++++++++
 .../hdfs/web/resources/DestinationParam.java    |  54 ++++++++
 .../hadoop/hdfs/web/resources/DoAsParam.java    |  41 ++++++
 .../hadoop/hdfs/web/resources/EnumParam.java    |  47 +++++++
 .../hadoop/hdfs/web/resources/EnumSetParam.java |  92 +++++++++++++
 .../web/resources/ExcludeDatanodesParam.java    |  42 ++++++
 .../hdfs/web/resources/FsActionParam.java       |  58 ++++++++
 .../hadoop/hdfs/web/resources/GetOpParam.java   | 106 +++++++++++++++
 .../hadoop/hdfs/web/resources/GroupParam.java   |  41 ++++++
 .../hadoop/hdfs/web/resources/HttpOpParam.java  | 134 +++++++++++++++++++
 .../hadoop/hdfs/web/resources/IntegerParam.java |  88 ++++++++++++
 .../hadoop/hdfs/web/resources/LengthParam.java  |  54 ++++++++
 .../hadoop/hdfs/web/resources/LongParam.java    |  87 ++++++++++++
 .../web/resources/ModificationTimeParam.java    |  49 +++++++
 .../hdfs/web/resources/NewLengthParam.java      |  49 +++++++
 .../hadoop/hdfs/web/resources/OffsetParam.java  |  54 ++++++++
 .../web/resources/OldSnapshotNameParam.java     |  40 ++++++
 .../hdfs/web/resources/OverwriteParam.java      |  49 +++++++
 .../hadoop/hdfs/web/resources/OwnerParam.java   |  41 ++++++
 .../apache/hadoop/hdfs/web/resources/Param.java | 122 +++++++++++++++++
 .../hdfs/web/resources/PermissionParam.java     |  64 +++++++++
 .../hadoop/hdfs/web/resources/PostOpParam.java  |  88 ++++++++++++
 .../hadoop/hdfs/web/resources/PutOpParam.java   | 114 ++++++++++++++++
 .../hdfs/web/resources/RecursiveParam.java      |  49 +++++++
 .../web/resources/RenameOptionSetParam.java     |  52 +++++++
 .../hadoop/hdfs/web/resources/RenewerParam.java |  41 ++++++
 .../hdfs/web/resources/ReplicationParam.java    |  60 +++++++++
 .../hadoop/hdfs/web/resources/ShortParam.java   |  88 ++++++++++++
 .../hdfs/web/resources/SnapshotNameParam.java   |  41 ++++++
 .../hadoop/hdfs/web/resources/StringParam.java  |  60 +++++++++
 .../hdfs/web/resources/TokenArgumentParam.java  |  44 ++++++
 .../hadoop/hdfs/web/resources/UserParam.java    |  82 ++++++++++++
 .../hdfs/web/resources/XAttrEncodingParam.java  |  56 ++++++++
 .../hdfs/web/resources/XAttrNameParam.java      |  44 ++++++
 .../hdfs/web/resources/XAttrSetFlagParam.java   |  53 ++++++++
 .../hdfs/web/resources/XAttrValueParam.java     |  45 +++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  18 ++-
 .../hdfs/web/resources/AccessTimeParam.java     |  49 -------
 .../hdfs/web/resources/AclPermissionParam.java  |  68 ----------
 .../hdfs/web/resources/BlockSizeParam.java      |  60 ---------
 .../hadoop/hdfs/web/resources/BooleanParam.java |  57 --------
 .../hdfs/web/resources/ConcatSourcesParam.java  |  65 ---------
 .../hdfs/web/resources/CreateParentParam.java   |  49 -------
 .../hdfs/web/resources/DelegationParam.java     |  44 ------
 .../hdfs/web/resources/DeleteOpParam.java       |  82 ------------
 .../hdfs/web/resources/DestinationParam.java    |  54 --------
 .../hadoop/hdfs/web/resources/DoAsParam.java    |  41 ------
 .../hadoop/hdfs/web/resources/EnumParam.java    |  47 -------
 .../hadoop/hdfs/web/resources/EnumSetParam.java |  92 -------------
 .../web/resources/ExcludeDatanodesParam.java    |  42 ------
 .../hdfs/web/resources/FsActionParam.java       |  58 --------
 .../hadoop/hdfs/web/resources/GetOpParam.java   | 106 ---------------
 .../hadoop/hdfs/web/resources/GroupParam.java   |  41 ------
 .../hadoop/hdfs/web/resources/HttpOpParam.java  | 134 -------------------
 .../hadoop/hdfs/web/resources/IntegerParam.java |  88 ------------
 .../hadoop/hdfs/web/resources/LengthParam.java  |  54 --------
 .../hadoop/hdfs/web/resources/LongParam.java    |  87 ------------
 .../web/resources/ModificationTimeParam.java    |  49 -------
 .../hdfs/web/resources/NewLengthParam.java      |  49 -------
 .../hadoop/hdfs/web/resources/OffsetParam.java  |  54 --------
 .../web/resources/OldSnapshotNameParam.java     |  40 ------
 .../hdfs/web/resources/OverwriteParam.java      |  49 -------
 .../hadoop/hdfs/web/resources/OwnerParam.java   |  41 ------
 .../apache/hadoop/hdfs/web/resources/Param.java | 122 -----------------
 .../hdfs/web/resources/PermissionParam.java     |  64 ---------
 .../hadoop/hdfs/web/resources/PostOpParam.java  |  88 ------------
 .../hadoop/hdfs/web/resources/PutOpParam.java   | 114 ----------------
 .../hdfs/web/resources/RecursiveParam.java      |  49 -------
 .../web/resources/RenameOptionSetParam.java     |  52 -------
 .../hadoop/hdfs/web/resources/RenewerParam.java |  41 ------
 .../hdfs/web/resources/ReplicationParam.java    |  60 ---------
 .../hadoop/hdfs/web/resources/ShortParam.java   |  88 ------------
 .../hdfs/web/resources/SnapshotNameParam.java   |  41 ------
 .../hadoop/hdfs/web/resources/StringParam.java  |  60 ---------
 .../hdfs/web/resources/TokenArgumentParam.java  |  44 ------
 .../hadoop/hdfs/web/resources/UserParam.java    |  82 ------------
 .../hdfs/web/resources/XAttrEncodingParam.java  |  56 --------
 .../hdfs/web/resources/XAttrNameParam.java      |  44 ------
 .../hdfs/web/resources/XAttrSetFlagParam.java   |  53 --------
 .../hdfs/web/resources/XAttrValueParam.java     |  45 -------
 90 files changed, 2735 insertions(+), 2710 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
index 900f345..33c2ed9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -29,6 +29,15 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <description>Apache Hadoop HDFS Client</description>
   <name>Apache Hadoop HDFS Client</name>
   <packaging>jar</packaging>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
+    </dependency>
+  </dependencies>
+
   <build>
     <plugins>
       <plugin>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index cf2d50a..604d60e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -19,8 +19,15 @@ package org.apache.hadoop.hdfs.client;
 
 /** Client configuration properties */
 public interface HdfsClientConfigKeys {
-  static final String PREFIX = "dfs.client.";
+  String  DFS_BLOCK_SIZE_KEY = "dfs.blocksize";
+  long    DFS_BLOCK_SIZE_DEFAULT = 128*1024*1024;
+  String  DFS_REPLICATION_KEY = "dfs.replication";
+  short   DFS_REPLICATION_DEFAULT = 3;
+  String  DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
+  String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
+      "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
 
+  static final String PREFIX = "dfs.client.";
   /** Client retry configuration properties */
   public interface Retry {
     static final String PREFIX = HdfsClientConfigKeys.PREFIX + "retry.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
new file mode 100644
index 0000000..9bc938d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Access time parameter. */
+public class AccessTimeParam extends LongParam {
+  /** Parameter name. */
+  public static final String NAME = "accesstime";
+  /** Default parameter value. */
+  public static final String DEFAULT = "-1";
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public AccessTimeParam(final Long value) {
+    super(DOMAIN, value, -1L, null);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public AccessTimeParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
new file mode 100644
index 0000000..4c998b6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/AclPermissionParam.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys
+    .DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
+
+import java.util.List;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.commons.lang.StringUtils;
+
+/** AclPermission parameter. */
+public class AclPermissionParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "aclspec";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME,
+      Pattern.compile(DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
+
+  /**
+   * Constructor.
+   *
+   * @param str a string representation of the parameter value.
+   */
+  public AclPermissionParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
+  }
+
+  public AclPermissionParam(List<AclEntry> acl) {
+    super(DOMAIN,parseAclSpec(acl).equals(DEFAULT) ? null : parseAclSpec(acl));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  public List<AclEntry> getAclPermission(boolean includePermission) {
+    final String v = getValue();
+    return (v != null ? AclEntry.parseAclSpec(v, includePermission) : AclEntry
+        .parseAclSpec(DEFAULT, includePermission));
+  }
+
+  /**
+   * @return the comma-joined aclspec built from {@code aclEntry}
+   */
+  private static String parseAclSpec(List<AclEntry> aclEntry) {
+    return StringUtils.join(aclEntry, ",");
+  }
+}
\ No newline at end of file
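
A quick usage sketch for the class above, assuming hadoop-common and this
client jar are on the classpath (AclEntry.parseAclSpec and the List
constructor are the ones shown in the diff):

  import java.util.List;
  import org.apache.hadoop.fs.permission.AclEntry;
  import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;

  public class AclParamSketch {
    public static void main(String[] args) {
      List<AclEntry> acl = AclEntry.parseAclSpec("user:alice:rwx,group::r-x", true);
      AclPermissionParam p = new AclPermissionParam(acl);
      // getValue() returns the comma-joined spec built by parseAclSpec().
      System.out.println(p.getValue());
      // And the round trip back to entries:
      System.out.println(p.getAclPermission(true));
    }
  }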

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
new file mode 100644
index 0000000..3f53d7c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_BLOCK_SIZE_KEY;
+
+import org.apache.hadoop.conf.Configuration;
+
+/** Block size parameter. */
+public class BlockSizeParam extends LongParam {
+  /** Parameter name. */
+  public static final String NAME = "blocksize";
+  /** Default parameter value. */
+  public static final String DEFAULT = NULL;
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public BlockSizeParam(final Long value) {
+    super(DOMAIN, value, 1L, null);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public BlockSizeParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  /** @return the value or, if it is null, return the default from conf. */
+  public long getValue(final Configuration conf) {
+    return getValue() != null? getValue()
+        : conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BooleanParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BooleanParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BooleanParam.java
new file mode 100644
index 0000000..3437a0c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/BooleanParam.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Boolean parameter. */
+abstract class BooleanParam extends Param<Boolean, BooleanParam.Domain> {
+  static final String TRUE = "true";
+  static final String FALSE = "false";
+
+  /** @return the parameter value as a string */
+  @Override
+  public String getValueString() {
+    return value.toString();
+  }
+
+  BooleanParam(final Domain domain, final Boolean value) {
+    super(domain, value);
+  }
+
+  /** The domain of the parameter. */
+  static final class Domain extends Param.Domain<Boolean> {
+    Domain(final String paramName) {
+      super(paramName);
+    }
+
+    @Override
+    public String getDomain() {
+      return "<" + NULL + " | boolean>";
+    }
+
+    @Override
+    Boolean parse(final String str) {
+      if (TRUE.equalsIgnoreCase(str)) {
+        return true;
+      } else if (FALSE.equalsIgnoreCase(str)) {
+        return false;
+      }
+      throw new IllegalArgumentException("Failed to parse \"" + str
+          + "\" to Boolean.");
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java
new file mode 100644
index 0000000..b68c5f5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ConcatSourcesParam.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.fs.Path;
+
+/** The concat source paths parameter. */
+public class ConcatSourcesParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "sources";
+
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  private static String paths2String(Path[] paths) {
+    if (paths == null || paths.length == 0) {
+      return "";
+    }
+    final StringBuilder b = new StringBuilder(paths[0].toUri().getPath());
+    for(int i = 1; i < paths.length; i++) {
+      b.append(',').append(paths[i].toUri().getPath());
+    }
+    return b.toString();
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public ConcatSourcesParam(String str) {
+    super(DOMAIN, str);
+  }
+
+  public ConcatSourcesParam(Path[] paths) {
+    this(paths2String(paths));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  /** @return the absolute source paths. */
+  public final String[] getAbsolutePaths() {
+    final String[] paths = getValue().split(",");
+    return paths;
+  }
+}
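
A short round-trip sketch: paths2String joins the source paths with commas for the query string, and getAbsolutePaths splits them back out (ConcatDemo is a hypothetical driver):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;

public class ConcatDemo {
  public static void main(String[] args) {
    final Path[] srcs = { new Path("/user/a/part-0"), new Path("/user/a/part-1") };
    final ConcatSourcesParam param = new ConcatSourcesParam(srcs);
    System.out.println(param.getValue());   // /user/a/part-0,/user/a/part-1
    for (String p : param.getAbsolutePaths()) {
      System.out.println(p);                // each source path in turn
    }
  }
}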

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java
new file mode 100644
index 0000000..8152515
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/CreateParentParam.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Create Parent parameter. */
+public class CreateParentParam extends BooleanParam {
+  /** Parameter name. */
+  public static final String NAME = "createparent";
+  /** Default parameter value. */
+  public static final String DEFAULT = FALSE;
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public CreateParentParam(final Boolean value) {
+    super(DOMAIN, value);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public CreateParentParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file
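
CreateParentParam is the first concrete BooleanParam here, so a quick sketch of the inherited parsing: matching is case-insensitive and anything other than true/false is rejected (CreateParentDemo is a hypothetical driver):

import org.apache.hadoop.hdfs.web.resources.CreateParentParam;

public class CreateParentDemo {
  public static void main(String[] args) {
    System.out.println(new CreateParentParam("TRUE").getValue());   // true
    System.out.println(new CreateParentParam("false").getValue());  // false
    try {
      new CreateParentParam("yes");  // not a boolean literal
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());  // Failed to parse "yes" to Boolean.
    }
  }
}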

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
new file mode 100644
index 0000000..57be43e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.security.UserGroupInformation;
+
+/** Represents a delegation token used for authentication. */
+public class DelegationParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "delegation";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public DelegationParam(final String str) {
+    super(DOMAIN, UserGroupInformation.isSecurityEnabled()
+        && str != null && !str.equals(DEFAULT)? str: null);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
new file mode 100644
index 0000000..65275e0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DeleteOpParam.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/** Http DELETE operation parameter. */
+public class DeleteOpParam extends HttpOpParam<DeleteOpParam.Op> {
+  /** Delete operations. */
+  public static enum Op implements HttpOpParam.Op {
+    DELETE(HttpURLConnection.HTTP_OK),
+    DELETESNAPSHOT(HttpURLConnection.HTTP_OK),
+
+    NULL(HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+
+    final int expectedHttpResponseCode;
+
+    Op(final int expectedHttpResponseCode) {
+      this.expectedHttpResponseCode = expectedHttpResponseCode;
+    }
+
+    @Override
+    public HttpOpParam.Type getType() {
+      return HttpOpParam.Type.DELETE;
+    }
+
+    @Override
+    public boolean getRequireAuth() {
+      return false;
+    }
+
+    @Override
+    public boolean getDoOutput() {
+      return false;
+    }
+
+    @Override
+    public boolean getRedirect() {
+      return false;
+    }
+
+    @Override
+    public int getExpectedHttpResponseCode() {
+      return expectedHttpResponseCode;
+    }
+
+    @Override
+    public String toQueryString() {
+      return NAME + "=" + this;
+    }
+  }
+
+  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class);
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public DeleteOpParam(final String str) {
+    super(DOMAIN, DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file
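
A sketch of how an op parameter is parsed and rendered back into a query string; parsing is case-insensitive because EnumParam.Domain upper-cases its input (DeleteOpDemo is a hypothetical driver):

import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;

public class DeleteOpDemo {
  public static void main(String[] args) {
    final DeleteOpParam op = new DeleteOpParam("deletesnapshot");
    System.out.println(op.getValue().toQueryString());               // op=DELETESNAPSHOT
    System.out.println(op.getValue().getExpectedHttpResponseCode()); // 200
  }
}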

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java
new file mode 100644
index 0000000..6759738
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.fs.Path;
+
+/** Destination path parameter. */
+public class DestinationParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "destination";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  private static String validate(final String str) {
+    if (str == null || str.equals(DEFAULT)) {
+      return null;
+    }
+    if (!str.startsWith(Path.SEPARATOR)) {
+      throw new IllegalArgumentException("Invalid parameter value: " + NAME
+          + " = \"" + str + "\" is not an absolute path.");
+    }
+    return new Path(str).toUri().getPath();
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public DestinationParam(final String str) {
+    super(DOMAIN, validate(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
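
A sketch of the validate logic above: absolute paths are normalized through Path, relative ones are rejected up front (DestinationDemo is a hypothetical driver):

import org.apache.hadoop.hdfs.web.resources.DestinationParam;

public class DestinationDemo {
  public static void main(String[] args) {
    // Path normalization collapses the duplicate separator.
    System.out.println(new DestinationParam("/user/a//b").getValue()); // /user/a/b
    try {
      new DestinationParam("relative/path");  // no leading '/'
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}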

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java
new file mode 100644
index 0000000..13d188c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/DoAsParam.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** DoAs parameter for proxy user. */
+public class DoAsParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "doas";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public DoAsParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java
new file mode 100644
index 0000000..60d201b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumParam.java
@@ -0,0 +1,47 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.util.Arrays;
+import org.apache.hadoop.util.StringUtils;
+
+abstract class EnumParam<E extends Enum<E>> extends Param<E, EnumParam.Domain<E>> {
+  EnumParam(final Domain<E> domain, final E value) {
+    super(domain, value);
+  }
+
+  /** The domain of the parameter. */
+  static final class Domain<E extends Enum<E>> extends Param.Domain<E> {
+    private final Class<E> enumClass;
+
+    Domain(String name, final Class<E> enumClass) {
+      super(name);
+      this.enumClass = enumClass;
+    }
+
+    @Override
+    public final String getDomain() {
+      return Arrays.asList(enumClass.getEnumConstants()).toString();
+    }
+
+    @Override
+    final E parse(final String str) {
+      return Enum.valueOf(enumClass, StringUtils.toUpperCase(str));
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java
new file mode 100644
index 0000000..06b7bc6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/EnumSetParam.java
@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.Iterator;
+import org.apache.hadoop.util.StringUtils;
+
+abstract class EnumSetParam<E extends Enum<E>> extends Param<EnumSet<E>, EnumSetParam.Domain<E>> {
+  /** Convert an EnumSet to a string of comma separated values. */
+  static <E extends Enum<E>> String toString(EnumSet<E> set) {
+    if (set == null || set.isEmpty()) {
+      return "";
+    } else {
+      final StringBuilder b = new StringBuilder();
+      final Iterator<E> i = set.iterator();
+      b.append(i.next());
+      for(; i.hasNext(); ) {
+        b.append(',').append(i.next());
+      }
+      return b.toString();
+    }
+  }
+
+  static <E extends Enum<E>> EnumSet<E> toEnumSet(final Class<E> clazz,
+      final E... values) {
+    final EnumSet<E> set = EnumSet.noneOf(clazz);
+    set.addAll(Arrays.asList(values));
+    return set;
+  }
+
+  EnumSetParam(final Domain<E> domain, final EnumSet<E> value) {
+    super(domain, value);
+  }
+
+  @Override
+  public String toString() {
+    return getName() + "=" + toString(value);
+  }
+
+  /** @return the parameter value as a string */
+  @Override
+  public String getValueString() {
+    return toString(value);
+  }
+
+  /** The domain of the parameter. */
+  static final class Domain<E extends Enum<E>> extends Param.Domain<EnumSet<E>> {
+    private final Class<E> enumClass;
+
+    Domain(String name, final Class<E> enumClass) {
+      super(name);
+      this.enumClass = enumClass;
+    }
+
+    @Override
+    public final String getDomain() {
+      return Arrays.asList(enumClass.getEnumConstants()).toString();
+    }
+
+    /** Parse a string of comma-separated values. */
+    @Override
+    final EnumSet<E> parse(final String str) {
+      final EnumSet<E> set = EnumSet.noneOf(enumClass);
+      if (!str.isEmpty()) {
+        for(int i, j = 0; j >= 0; ) {
+          i = j > 0 ? j + 1 : 0;
+          j = str.indexOf(',', i);
+          final String sub = j >= 0? str.substring(i, j): str.substring(i);
+          set.add(Enum.valueOf(enumClass, StringUtils.toUpperCase(sub.trim())));
+        }
+      }
+      return set;
+    }
+  }
+}
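
EnumSetParam and its Domain are package-private, so a sketch of the comma-separated parse loop needs a (hypothetical) subclass in the same package; TimeUnit stands in for a real flag enum:

package org.apache.hadoop.hdfs.web.resources;

import java.util.concurrent.TimeUnit;

class TimeUnitSetParam extends EnumSetParam<TimeUnit> {
  private static final Domain<TimeUnit> DOMAIN =
      new Domain<TimeUnit>("timeunits", TimeUnit.class);

  TimeUnitSetParam(final String str) {
    super(DOMAIN, DOMAIN.parse(str));
  }

  @Override
  public String getName() {
    return "timeunits";
  }

  public static void main(String[] args) {
    // Items are trimmed and upper-cased before Enum.valueOf.
    final TimeUnitSetParam p = new TimeUnitSetParam("seconds, Minutes,HOURS");
    System.out.println(p.getValueString());  // SECONDS,MINUTES,HOURS
  }
}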

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
new file mode 100644
index 0000000..3f44fae
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ExcludeDatanodesParam.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+
+/** Exclude datanodes parameter. */
+public class ExcludeDatanodesParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "excludedatanodes";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public ExcludeDatanodesParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT)? null: DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
new file mode 100644
index 0000000..c840196
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.fs.permission.FsAction;
+
+import java.util.regex.Pattern;
+
+/** {@link FsAction} parameter. */
+public class FsActionParam extends StringParam {
+
+  /** Parameter name. */
+  public static final String NAME = "fsaction";
+
+  /** Default parameter value. */
+  public static final String DEFAULT = NULL;
+
+  private static final String FS_ACTION_PATTERN = "[rwx-]{3}";
+
+  private static final Domain DOMAIN = new Domain(NAME,
+      Pattern.compile(FS_ACTION_PATTERN));
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public FsActionParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
+  }
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public FsActionParam(final FsAction value) {
+    super(DOMAIN, value == null? null: value.SYMBOL);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
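
A sketch of the pattern check; this assumes StringParam validates its value against the domain pattern at construction time, matching how the existing server-side webhdfs classes behave (FsActionDemo is a hypothetical driver):

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.web.resources.FsActionParam;

public class FsActionDemo {
  public static void main(String[] args) {
    // Built from the enum, the value is its rwx symbol.
    System.out.println(new FsActionParam(FsAction.READ_WRITE).getValue()); // rw-
    try {
      new FsActionParam("rwxr");  // does not match [rwx-]{3}
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}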

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
new file mode 100644
index 0000000..c39032c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/** Http GET operation parameter. */
+public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
+  /** Get operations. */
+  public static enum Op implements HttpOpParam.Op {
+    OPEN(true, HttpURLConnection.HTTP_OK),
+
+    GETFILESTATUS(false, HttpURLConnection.HTTP_OK),
+    LISTSTATUS(false, HttpURLConnection.HTTP_OK),
+    GETCONTENTSUMMARY(false, HttpURLConnection.HTTP_OK),
+    GETFILECHECKSUM(true, HttpURLConnection.HTTP_OK),
+
+    GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),
+    GETDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
+
+    /** GET_BLOCK_LOCATIONS is a private unstable op. */
+    GET_BLOCK_LOCATIONS(false, HttpURLConnection.HTTP_OK),
+    GETACLSTATUS(false, HttpURLConnection.HTTP_OK),
+    GETXATTRS(false, HttpURLConnection.HTTP_OK),
+    LISTXATTRS(false, HttpURLConnection.HTTP_OK),
+
+    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED),
+
+    CHECKACCESS(false, HttpURLConnection.HTTP_OK);
+
+    final boolean redirect;
+    final int expectedHttpResponseCode;
+    final boolean requireAuth;
+
+    Op(final boolean redirect, final int expectedHttpResponseCode) {
+      this(redirect, expectedHttpResponseCode, false);
+    }
+
+    Op(final boolean redirect, final int expectedHttpResponseCode,
+       final boolean requireAuth) {
+      this.redirect = redirect;
+      this.expectedHttpResponseCode = expectedHttpResponseCode;
+      this.requireAuth = requireAuth;
+    }
+
+    @Override
+    public HttpOpParam.Type getType() {
+      return HttpOpParam.Type.GET;
+    }
+
+    @Override
+    public boolean getRequireAuth() {
+      return requireAuth;
+    }
+
+    @Override
+    public boolean getDoOutput() {
+      return false;
+    }
+
+    @Override
+    public boolean getRedirect() {
+      return redirect;
+    }
+
+    @Override
+    public int getExpectedHttpResponseCode() {
+      return expectedHttpResponseCode;
+    }
+
+    @Override
+    public String toQueryString() {
+      return NAME + "=" + this;
+    }
+  }
+
+  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class);
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public GetOpParam(final String str) {
+    super(DOMAIN, DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file
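
A sketch of reading the per-op flags defined above (GetOpDemo is a hypothetical driver):

import org.apache.hadoop.hdfs.web.resources.GetOpParam;

public class GetOpDemo {
  public static void main(String[] args) {
    final GetOpParam open = new GetOpParam("OPEN");
    System.out.println(open.getValue().getRedirect());     // true: served by a datanode
    System.out.println(open.getValue().getRequireAuth());  // false: a token is enough
    // Token-management ops must authenticate directly.
    System.out.println(GetOpParam.Op.GETDELEGATIONTOKEN.getRequireAuth()); // true
  }
}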

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java
new file mode 100644
index 0000000..c0429cc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GroupParam.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Group parameter. */
+public class GroupParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "group";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public GroupParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
new file mode 100644
index 0000000..67224ef
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/HttpOpParam.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import javax.ws.rs.core.Response;
+
+
+/** Http operation parameter. */
+public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
+    extends EnumParam<E> {
+  /** Parameter name. */
+  public static final String NAME = "op";
+
+  /** Default parameter value. */
+  public static final String DEFAULT = NULL;
+
+  /** Http operation types */
+  public static enum Type {
+    GET, PUT, POST, DELETE;
+  }
+
+  /** Http operation interface. */
+  public static interface Op {
+    /** @return the Http operation type. */
+    public Type getType();
+
+    /** @return true if the operation must authenticate directly and cannot use a delegation token. */
+    public boolean getRequireAuth();
+
+    /** @return true if the operation will do output. */
+    public boolean getDoOutput();
+
+    /** @return true if the operation will be redirected. */
+    public boolean getRedirect();
+
+    /** @return the expected http response code. */
+    public int getExpectedHttpResponseCode();
+
+    /** @return a URI query string. */
+    public String toQueryString();
+  }
+
+  /** Expects HTTP response 307 "Temporary Redirect". */
+  public static class TemporaryRedirectOp implements Op {
+    static final TemporaryRedirectOp CREATE = new TemporaryRedirectOp(
+        PutOpParam.Op.CREATE);
+    static final TemporaryRedirectOp APPEND = new TemporaryRedirectOp(
+        PostOpParam.Op.APPEND);
+    static final TemporaryRedirectOp OPEN = new TemporaryRedirectOp(
+        GetOpParam.Op.OPEN);
+    static final TemporaryRedirectOp GETFILECHECKSUM = new TemporaryRedirectOp(
+        GetOpParam.Op.GETFILECHECKSUM);
+
+    static final List<TemporaryRedirectOp> values
+        = Collections.unmodifiableList(Arrays.asList(CREATE, APPEND, OPEN,
+                                       GETFILECHECKSUM));
+
+    /** Get an object for the given op. */
+    public static TemporaryRedirectOp valueOf(final Op op) {
+      for(TemporaryRedirectOp t : values) {
+        if (op == t.op) {
+          return t;
+        }
+      }
+      throw new IllegalArgumentException(op + " not found.");
+    }
+
+    private final Op op;
+
+    private TemporaryRedirectOp(final Op op) {
+      this.op = op;
+    }
+
+    @Override
+    public Type getType() {
+      return op.getType();
+    }
+
+    @Override
+    public boolean getRequireAuth() {
+      return op.getRequireAuth();
+    }
+
+    @Override
+    public boolean getDoOutput() {
+      return false;
+    }
+
+    @Override
+    public boolean getRedirect() {
+      return false;
+    }
+
+    /** Override the original expected response with "Temporary Redirect". */
+    @Override
+    public int getExpectedHttpResponseCode() {
+      return Response.Status.TEMPORARY_REDIRECT.getStatusCode();
+    }
+
+    @Override
+    public String toQueryString() {
+      return op.toQueryString();
+    }
+  }
+
+  /** @return the parameter value as a string */
+  @Override
+  public String getValueString() {
+    return value.toString();
+  }
+
+  HttpOpParam(final Domain<E> domain, final E value) {
+    super(domain, value);
+  }
+}
\ No newline at end of file
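
A sketch of the redirect wrapper: it keeps the wrapped op's query string but swaps the expected status code for 307 (RedirectDemo is a hypothetical driver):

import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;

public class RedirectDemo {
  public static void main(String[] args) {
    final HttpOpParam.TemporaryRedirectOp op =
        HttpOpParam.TemporaryRedirectOp.valueOf(GetOpParam.Op.OPEN);
    System.out.println(op.toQueryString());               // op=OPEN
    System.out.println(op.getExpectedHttpResponseCode()); // 307
  }
}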

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
new file mode 100644
index 0000000..c860d90
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Integer parameter. */
+abstract class IntegerParam extends Param<Integer, IntegerParam.Domain> {
+  IntegerParam(final Domain domain, final Integer value,
+      final Integer min, final Integer max) {
+    super(domain, value);
+    checkRange(min, max);
+  }
+
+  private void checkRange(final Integer min, final Integer max) {
+    if (value == null) {
+      return;
+    }
+    if (min != null && value < min) {
+      throw new IllegalArgumentException("Invalid parameter range: " + getName()
+          + " = " + domain.toString(value) + " < " + domain.toString(min));
+    }
+    if (max != null && value > max) {
+      throw new IllegalArgumentException("Invalid parameter range: " + getName()
+          + " = " + domain.toString(value) + " > " + domain.toString(max));
+    }
+  }
+
+  @Override
+  public String toString() {
+    return getName() + "=" + domain.toString(getValue());
+  }
+
+  /** @return the parameter value as a string */
+  @Override
+  public String getValueString() {
+    return domain.toString(getValue());
+  }
+
+  /** The domain of the parameter. */
+  static final class Domain extends Param.Domain<Integer> {
+    /** The radix of the number. */
+    final int radix;
+
+    Domain(final String paramName) {
+      this(paramName, 10);
+    }
+
+    Domain(final String paramName, final int radix) {
+      super(paramName);
+      this.radix = radix;
+    }
+
+    @Override
+    public String getDomain() {
+      return "<" + NULL + " | int in radix " + radix + ">";
+    }
+
+    @Override
+    Integer parse(final String str) {
+      try {
+        return NULL.equals(str) || str == null ? null
+            : Integer.parseInt(str, radix);
+      } catch(NumberFormatException e) {
+        throw new IllegalArgumentException("Failed to parse \"" + str
+            + "\" as a radix-" + radix + " integer.", e);
+      }
+    }
+
+    /** Convert an Integer to a String. */
+    String toString(final Integer n) {
+      return n == null? NULL: Integer.toString(n, radix);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
new file mode 100644
index 0000000..5a609ee
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Length parameter. */
+public class LengthParam extends LongParam {
+  /** Parameter name. */
+  public static final String NAME = "length";
+  /** Default parameter value. */
+  public static final String DEFAULT = NULL;
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public LengthParam(final Long value) {
+    super(DOMAIN, value, 0L, null);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public LengthParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  public long getLength() {
+    Long v = getValue();
+    return v == null ? -1 : v;
+  }
+}
\ No newline at end of file
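
A sketch of the null handling and range check above (LengthDemo is a hypothetical driver):

import org.apache.hadoop.hdfs.web.resources.LengthParam;

public class LengthDemo {
  public static void main(String[] args) {
    // DEFAULT parses to a null value, which getLength() reports as -1.
    System.out.println(new LengthParam(LengthParam.DEFAULT).getLength()); // -1
    try {
      new LengthParam(-5L);  // below the minimum of 0
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());  // Invalid parameter range: length = -5 < 0
    }
  }
}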

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
new file mode 100644
index 0000000..12e0a94
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Long parameter. */
+abstract class LongParam extends Param<Long, LongParam.Domain> {
+  LongParam(final Domain domain, final Long value, final Long min, final Long max) {
+    super(domain, value);
+    checkRange(min, max);
+  }
+
+  private void checkRange(final Long min, final Long max) {
+    if (value == null) {
+      return;
+    }
+    if (min != null && value < min) {
+      throw new IllegalArgumentException("Invalid parameter range: " + getName()
+          + " = " + domain.toString(value) + " < " + domain.toString(min));
+    }
+    if (max != null && value > max) {
+      throw new IllegalArgumentException("Invalid parameter range: " + getName()
+          + " = " + domain.toString(value) + " > " + domain.toString(max));
+    }
+  }
+
+  @Override
+  public String toString() {
+    return getName() + "=" + domain.toString(getValue());
+  }
+
+  /** @return the parameter value as a string */
+  @Override
+  public String getValueString() {
+    return domain.toString(getValue());
+  }
+
+  /** The domain of the parameter. */
+  static final class Domain extends Param.Domain<Long> {
+    /** The radix of the number. */
+    final int radix;
+
+    Domain(final String paramName) {
+      this(paramName, 10);
+    }
+
+    Domain(final String paramName, final int radix) {
+      super(paramName);
+      this.radix = radix;
+    }
+
+    @Override
+    public String getDomain() {
+      return "<" + NULL + " | long in radix " + radix + ">";
+    }
+
+    @Override
+    Long parse(final String str) {
+      try {
+        return NULL.equals(str) || str == null ? null: Long.parseLong(str,
+          radix);
+      } catch(NumberFormatException e) {
+        throw new IllegalArgumentException("Failed to parse \"" + str
+            + "\" as a radix-" + radix + " long integer.", e);
+      }
+    }
+
+    /** Convert a Long to a String. */
+    String toString(final Long n) {
+      return n == null? NULL: Long.toString(n, radix);
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
new file mode 100644
index 0000000..59911d7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Modification time parameter. */
+public class ModificationTimeParam extends LongParam {
+  /** Parameter name. */
+  public static final String NAME = "modificationtime";
+  /** Default parameter value. */
+  public static final String DEFAULT = "-1";
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public ModificationTimeParam(final Long value) {
+    super(DOMAIN, value, -1L, null);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public ModificationTimeParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
new file mode 100644
index 0000000..83aba9e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** NewLength parameter. */
+public class NewLengthParam extends LongParam {
+  /** Parameter name. */
+  public static final String NAME = "newlength";
+  /** Default parameter value. */
+  public static final String DEFAULT = NULL;
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public NewLengthParam(final Long value) {
+    super(DOMAIN, value, 0L, null);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public NewLengthParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
new file mode 100644
index 0000000..6d88703
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Offset parameter. */
+public class OffsetParam extends LongParam {
+  /** Parameter name. */
+  public static final String NAME = "offset";
+  /** Default parameter value. */
+  public static final String DEFAULT = "0";
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public OffsetParam(final Long value) {
+    super(DOMAIN, value, 0L, null);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public OffsetParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  public Long getOffset() {
+    Long offset = getValue();
+    return (offset == null) ? Long.valueOf(0) : offset;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OldSnapshotNameParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OldSnapshotNameParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OldSnapshotNameParam.java
new file mode 100644
index 0000000..befade5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OldSnapshotNameParam.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/**
+ * The old snapshot name parameter for the renameSnapshot operation.
+ */
+public class OldSnapshotNameParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "oldsnapshotname";
+
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  public OldSnapshotNameParam(final String str) {
+    super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
new file mode 100644
index 0000000..f6945bb
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Overwrite parameter. */
+public class OverwriteParam extends BooleanParam {
+  /** Parameter name. */
+  public static final String NAME = "overwrite";
+  /** Default parameter value. */
+  public static final String DEFAULT = FALSE;
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public OverwriteParam(final Boolean value) {
+    super(DOMAIN, value);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public OverwriteParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java
new file mode 100644
index 0000000..a1c10aa
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Owner parameter. */
+public class OwnerParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "owner";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public OwnerParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/Param.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/Param.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/Param.java
new file mode 100644
index 0000000..279a50c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/Param.java
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.io.UnsupportedEncodingException;
+import java.net.URLEncoder;
+import java.util.Arrays;
+import java.util.Comparator;
+
+
+/** Base class of parameters. */
+public abstract class Param<T, D extends Param.Domain<T>> {
+  static final String NULL = "null";
+
+  static final Comparator<Param<?,?>> NAME_CMP = new Comparator<Param<?,?>>() {
+    @Override
+    public int compare(Param<?, ?> left, Param<?, ?> right) {
+      return left.getName().compareTo(right.getName());
+    }
+  };
+
+  /** Convert the parameters to a sorted String.
+   *
+   * @param separator URI parameter separator character
+   * @param parameters parameters to encode into a string
+   * @return the encoded URI string
+   */
+  public static String toSortedString(final String separator,
+      final Param<?, ?>... parameters) {
+    Arrays.sort(parameters, NAME_CMP);
+    final StringBuilder b = new StringBuilder();
+    try {
+      for(Param<?, ?> p : parameters) {
+        if (p.getValue() != null) {
+          b.append(separator).append(
+              URLEncoder.encode(p.getName(), "UTF-8")
+              + "="
+              + URLEncoder.encode(p.getValueString(), "UTF-8"));
+        }
+      }
+    } catch (UnsupportedEncodingException e) {
+      // Sane systems know about UTF-8, so this should never happen.
+      throw new RuntimeException(e);
+    }
+    return b.toString();
+  }
+
+  /** The domain of the parameter. */
+  final D domain;
+  /** The actual parameter value. */
+  final T value;
+
+  Param(final D domain, final T value) {
+    this.domain = domain;
+    this.value = value;
+  }
+
+  /** @return the parameter value. */
+  public final T getValue() {
+    return value;
+  }
+
+  /** @return the parameter value as a string */
+  public abstract String getValueString();
+
+  /** @return the parameter name. */
+  public abstract String getName();
+
+  @Override
+  public String toString() {
+    return getName() + "=" + value;
+  }
+
+  /** Base class of parameter domains. */
+  static abstract class Domain<T> {
+    /** Parameter name. */
+    final String paramName;
+
+    Domain(final String paramName) {
+      this.paramName = paramName;
+    }
+
+    /** @return the parameter name. */
+    public final String getParamName() {
+      return paramName;
+    }
+
+    /** @return a string description of the domain of the parameter. */
+    public abstract String getDomain();
+
+    /** @return the parameter value represented by the string. */
+    abstract T parse(String str);
+
+    /** Parse the given string.
+     * @return the parameter value represented by the string.
+     */
+    public final T parse(final String varName, final String str) {
+      try {
+        return str != null && str.trim().length() > 0 ? parse(str) : null;
+      } catch(Exception e) {
+        throw new IllegalArgumentException("Failed to parse \"" + str
+            + "\" for the parameter " + varName
+            + ".  The value must be in the domain " + getDomain(), e);
+      }
+    }
+  }
+}
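
A minimal usage sketch (illustrative, not part of the patch) of how toSortedString assembles a query string from the parameter classes above — names are sorted, values are URL-encoded, and null-valued parameters are skipped:

    // Hypothetical values for illustration.
    final Param<?, ?>[] params = {
        new OverwriteParam("true"),
        new OwnerParam("hdfs"),
        new OldSnapshotNameParam("s0")
    };
    // Yields "&oldsnapshotname=s0&overwrite=true&owner=hdfs"
    String query = Param.toSortedString("&", params);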

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
new file mode 100644
index 0000000..ac6f097
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.fs.permission.FsPermission;
+
+/** Permission parameter, using a Short to represent an FsPermission. */
+public class PermissionParam extends ShortParam {
+  /** Parameter name. */
+  public static final String NAME = "permission";
+  /** Default parameter value. */
+  public static final String DEFAULT = NULL;
+
+  private static final Domain DOMAIN = new Domain(NAME, 8);
+
+  private static final short DEFAULT_PERMISSION = 0755;
+
+  /** @return the default FsPermission. */
+  public static FsPermission getDefaultFsPermission() {
+    return new FsPermission(DEFAULT_PERMISSION);
+  }
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public PermissionParam(final FsPermission value) {
+    super(DOMAIN, value == null? null: value.toShort(), null, null);
+  }
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public PermissionParam(final String str) {
+    super(DOMAIN, DOMAIN.parse(str), (short)0, (short)01777);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+
+  /** @return the represented FsPermission. */
+  public FsPermission getFsPermission() {
+    final Short v = getValue();
+    return new FsPermission(v != null? v: DEFAULT_PERMISSION);
+  }
+}
\ No newline at end of file
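
A short hedged example of the octal round-trip performed above (values are illustrative): the string form is parsed base-8 within [0, 01777], and a missing value falls back to the 0755 default:

    PermissionParam p = new PermissionParam("644");
    FsPermission perm = p.getFsPermission();   // rw-r--r-- (0644)
    // Null value: getFsPermission() returns the 0755 default.
    FsPermission def =
        new PermissionParam((FsPermission) null).getFsPermission();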

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
new file mode 100644
index 0000000..e0178d5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import java.net.HttpURLConnection;
+
+/** Http POST operation parameter. */
+public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
+  /** Post operations. */
+  public static enum Op implements HttpOpParam.Op {
+    APPEND(true, HttpURLConnection.HTTP_OK),
+
+    CONCAT(false, HttpURLConnection.HTTP_OK),
+
+    TRUNCATE(false, HttpURLConnection.HTTP_OK),
+
+    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+
+    final boolean doOutputAndRedirect;
+    final int expectedHttpResponseCode;
+
+    Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode) {
+      this.doOutputAndRedirect = doOutputAndRedirect;
+      this.expectedHttpResponseCode = expectedHttpResponseCode;
+    }
+
+    @Override
+    public Type getType() {
+      return Type.POST;
+    }
+
+    @Override
+    public boolean getRequireAuth() {
+      return false;
+    }
+
+    @Override
+    public boolean getDoOutput() {
+      return doOutputAndRedirect;
+    }
+
+    @Override
+    public boolean getRedirect() {
+      return doOutputAndRedirect;
+    }
+
+    @Override
+    public int getExpectedHttpResponseCode() {
+      return expectedHttpResponseCode;
+    }
+
+    /** @return a URI query string. */
+    @Override
+    public String toQueryString() {
+      return NAME + "=" + this;
+    }
+  }
+
+  private static final Domain<Op> DOMAIN = new Domain<PostOpParam.Op>(NAME, Op.class);
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public PostOpParam(final String str) {
+    super(DOMAIN, DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
\ No newline at end of file
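
As a usage illustration (host, port, and path are placeholders), the APPEND op above corresponds to the documented WebHDFS POST call, which redirects the caller to a datanode for the actual write:

    curl -i -X POST "http://<namenode>:<port>/webhdfs/v1/<path>?op=APPEND"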


[23/47] hadoop git commit: YARN-3459. Fix failure of TestLog4jWarningErrorMetricsAppender. (Varun Vasudev via wangda)

Posted by zj...@apache.org.
YARN-3459. Fix failure of TestLog4jWarningErrorMetricsAppender. (Varun Vasudev via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8098c1c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8098c1c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8098c1c

Branch: refs/heads/YARN-2928
Commit: e8098c1c1d633727fda13f5d510f0cbec35117ed
Parents: b6434b1
Author: Wangda Tan <wa...@apache.org>
Authored: Wed Apr 8 10:57:48 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:59 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java    | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8098c1c/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0d9c5ce..aaab195 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -215,6 +215,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore called.
     (Bibin A Chundatt via ozawa)
 
+    YARN-3459. Fix failure of TestLog4jWarningErrorMetricsAppender.
+    (Varun Vasudev via wangda)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8098c1c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
index 61d4c4c..e788e80 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestLog4jWarningErrorMetricsAppender.java
@@ -84,7 +84,7 @@ public class TestLog4jWarningErrorMetricsAppender {
     Assert.assertEquals(1, appender.getErrorCounts(cutoff).get(0).longValue());
     Assert.assertEquals(1, appender.getErrorMessagesAndCounts(cutoff).get(0)
       .size());
-    Thread.sleep(2000);
+    Thread.sleep(3000);
     Assert.assertEquals(1, appender.getErrorCounts(cutoff).size());
     Assert.assertEquals(0, appender.getErrorCounts(cutoff).get(0).longValue());
     Assert.assertEquals(0, appender.getErrorMessagesAndCounts(cutoff).get(0)
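
The fix simply widens the fixed sleep past the appender's purge window. A more robust pattern for assertions like this — sketched here with Hadoop's GenericTestUtils as an alternative, not part of the actual patch — is to poll for the condition instead of sleeping a fixed interval:

    // Hedged sketch: poll every 100 ms, up to 10 s, for the count to drop.
    // Supplier is com.google.common.base.Supplier.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return appender.getErrorCounts(cutoff).get(0).longValue() == 0;
      }
    }, 100, 10000);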


[42/47] hadoop git commit: HDFS-8091: ACLStatus and XAttributes should be presented to INodeAttributesProvider before returning to client (asuresh)

Posted by zj...@apache.org.
HDFS-8091: ACLStatus and XAttributes should be presented to INodeAttributesProvider before returning to client (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e77547ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e77547ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e77547ec

Branch: refs/heads/YARN-2928
Commit: e77547ec236d0fcd10e361d5f59cacc230b73706
Parents: 9963096
Author: Arun Suresh <as...@apache.org>
Authored: Thu Apr 9 12:28:44 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:55 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hadoop/hdfs/server/namenode/AclStorage.java | 11 ++++++
 .../hadoop/hdfs/server/namenode/FSDirAclOp.java |  3 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java      |  3 +-
 .../hdfs/server/namenode/XAttrStorage.java      |  6 ++--
 .../namenode/TestINodeAttributeProvider.java    | 36 ++++++++++++++++----
 6 files changed, 50 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e77547ec/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4b22fa4..df6d90a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -153,6 +153,9 @@ Trunk (Unreleased)
 
   BUG FIXES
  
+    HDFS-8091: ACLStatus and XAttributes should be presented to INodeAttributesProvider
+               before returning to client (asuresh)
+
     HADOOP-9635 Fix potential Stack Overflow in DomainSocket.c (V. Karthik Kumar
                 via cmccabe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e77547ec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
index 4f6ce3a..abd3755 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclStorage.java
@@ -163,6 +163,17 @@ public final class AclStorage {
   }
 
   /**
+   * Reads the existing extended ACL entries of an INodeAttributes object.
+   *
+   * @param inodeAttr INodeAttributes to read
+   * @return List<AclEntry> containing extended inode ACL entries
+   */
+  public static List<AclEntry> readINodeAcl(INodeAttributes inodeAttr) {
+    AclFeature f = inodeAttr.getAclFeature();
+    return getEntriesFromAclFeature(f);
+  }
+
+  /**
    * Build list of AclEntries from the AclFeature
    * @param aclFeature AclFeature
    * @return List of entries

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e77547ec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
index dff1c2e..0c572b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java
@@ -172,7 +172,8 @@ class FSDirAclOp {
       }
       INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getPathSnapshotId();
-      List<AclEntry> acl = AclStorage.readINodeAcl(inode, snapshotId);
+      List<AclEntry> acl = AclStorage.readINodeAcl(fsd.getAttributes(src,
+              inode.getLocalNameBytes(), inode, snapshotId));
       FsPermission fsPermission = inode.getFsPermission(snapshotId);
       return new AclStatus.Builder()
           .owner(inode.getUserName()).group(inode.getGroupName())

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e77547ec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index d5c9124..53b9b41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -451,7 +451,8 @@ class FSDirXAttrOp {
       INodesInPath iip = fsd.getINodesInPath(srcs, true);
       INode inode = FSDirectory.resolveLastINode(iip);
       int snapshotId = iip.getPathSnapshotId();
-      return XAttrStorage.readINodeXAttrs(inode, snapshotId);
+      return XAttrStorage.readINodeXAttrs(fsd.getAttributes(src,
+              inode.getLocalNameBytes(), inode, snapshotId));
     } finally {
       fsd.readUnlock();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e77547ec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
index 7e843d2..e38648d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrStorage.java
@@ -57,11 +57,11 @@ public class XAttrStorage {
    * <p/>
    * Must be called while holding the FSDirectory read lock.
    *
-   * @param inode INode to read.
+   * @param inodeAttr INodeAttributes to read.
    * @return List<XAttr> <code>XAttr</code> list.
    */
-  public static List<XAttr> readINodeXAttrs(INode inode) {
-    XAttrFeature f = inode.getXAttrFeature();
+  public static List<XAttr> readINodeXAttrs(INodeAttributes inodeAttr) {
+    XAttrFeature f = inodeAttr.getXAttrFeature();
     return f == null ? ImmutableList.<XAttr> of() : f.getXAttrs();
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e77547ec/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
index 111c67c..c5f428b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -20,16 +20,16 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 
+import com.google.common.collect.ImmutableList;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.AclEntry;
-import org.apache.hadoop.fs.permission.AclEntryType;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.permission.*;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -131,7 +131,17 @@ public class TestINodeAttributeProvider {
 
         @Override
         public XAttrFeature getXAttrFeature() {
-          return (useDefault) ? inode.getXAttrFeature() : null;
+          XAttrFeature x;
+          if (useDefault) {
+            x = inode.getXAttrFeature();
+          } else {
+            x = new XAttrFeature(ImmutableList.copyOf(
+                    Lists.newArrayList(
+                            new XAttr.Builder().setName("test")
+                                    .setValue(new byte[] {1, 2})
+                                    .build())));
+          }
+          return x;
         }
 
         @Override
@@ -218,12 +228,24 @@ public class TestINodeAttributeProvider {
     FileStatus status = fs.getFileStatus(new Path("/user/xxx"));
     Assert.assertEquals(System.getProperty("user.name"), status.getOwner());
     Assert.assertEquals("supergroup", status.getGroup());
-    Assert.assertEquals(new FsPermission((short)0755), status.getPermission());
+    Assert.assertEquals(new FsPermission((short) 0755), status.getPermission());
     fs.mkdirs(new Path("/user/authz"));
-    status = fs.getFileStatus(new Path("/user/authz"));
+    Path p = new Path("/user/authz");
+    status = fs.getFileStatus(p);
     Assert.assertEquals("foo", status.getOwner());
     Assert.assertEquals("bar", status.getGroup());
     Assert.assertEquals(new FsPermission((short) 0770), status.getPermission());
+    AclStatus aclStatus = fs.getAclStatus(p);
+    Assert.assertEquals(1, aclStatus.getEntries().size());
+    Assert.assertEquals(AclEntryType.GROUP, aclStatus.getEntries().get(0)
+            .getType());
+    Assert.assertEquals("xxx", aclStatus.getEntries().get(0)
+            .getName());
+    Assert.assertEquals(FsAction.ALL, aclStatus.getEntries().get(0)
+            .getPermission());
+    Map<String, byte[]> xAttrs = fs.getXAttrs(p);
+    Assert.assertTrue(xAttrs.containsKey("user.test"));
+    Assert.assertEquals(2, xAttrs.get("user.test").length);
   }
 
 }
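
For context, a brief client-side sketch (illustrative; it assumes a custom INodeAttributeProvider like the test's is configured) of what the fix changes — ACL and xattr reads now reflect the provider's view of the inode rather than the raw stored attributes:

    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/user/authz");
    AclStatus acl = fs.getAclStatus(p);            // provider-supplied ACL entries
    Map<String, byte[]> xattrs = fs.getXAttrs(p);  // includes the provider's "user.test" xattr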


[12/47] hadoop git commit: HDFS-8049. Add @InterfaceAudience.Private annotation to hdfs client implementation. Contributed by Takuya Fukudome

Posted by zj...@apache.org.
HDFS-8049. Add @InterfaceAudience.Private annotation to hdfs client implementation. Contributed by Takuya Fukudome


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f55a1b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f55a1b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f55a1b0

Branch: refs/heads/YARN-2928
Commit: 6f55a1b041d3ed9f20846a8e7c483d81eb42e185
Parents: cfe3ba3
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Tue Apr 7 13:59:48 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:57 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/BlockReader.java         | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java    | 2 ++
 .../main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java  | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java     | 3 +++
 .../java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java     | 2 ++
 .../main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java    | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSPacket.java           | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java        | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java     | 2 ++
 .../hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java  | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java    | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java        | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java     | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java   | 2 ++
 15 files changed, 34 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7d20060..3edc80e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -379,6 +379,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7888. Change DFSOutputStream and DataStreamer for convenience of
     subclassing. (Li Bo via szetszwo)
 
+    HDFS-8049. Add @InterfaceAudience.Private annotation to hdfs client
+    implementation. (Takuya Fukudome via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
index 7cd2426..aa3e8ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 import java.util.EnumSet;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ByteBufferReadable;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
@@ -28,6 +29,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
  * A BlockReader is responsible for reading a single block
  * from a single datanode.
  */
+@InterfaceAudience.Private
 public interface BlockReader extends ByteBufferReadable {
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index 8073ea0..ab93441 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -24,6 +24,7 @@ import java.util.EnumSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSClient.Conf;
@@ -57,6 +58,7 @@ import com.google.common.base.Preconditions;
  * <li>The client reads the file descriptors.</li>
  * </ul>
  */
+@InterfaceAudience.Private
 class BlockReaderLocal implements BlockReader {
   static final Log LOG = LogFactory.getLog(BlockReaderLocal.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
index 8e190e7..0c9ec45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
@@ -31,6 +31,7 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.fs.StorageType;
@@ -73,6 +74,7 @@ import org.apache.htrace.TraceScope;
  * if security is enabled.</li>
  * </ul>
  */
+@InterfaceAudience.Private
 class BlockReaderLocalLegacy implements BlockReader {
   private static final Log LOG = LogFactory.getLog(BlockReaderLocalLegacy.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
index a9f5c85..dbc528e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java
@@ -17,11 +17,14 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 import java.io.IOException;
 
 /**
  * For sharing between the local and remote block reader implementations.
  */
+@InterfaceAudience.Private
 class BlockReaderUtil {
 
   /* See {@link BlockReader#readAll(byte[], int, int)} */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
index 9d6394c..1597b87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 import java.util.NoSuchElementException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
@@ -29,6 +30,7 @@ import org.apache.hadoop.fs.RemoteIterator;
  * Provides an iterator interface for listCorruptFileBlocks.
  * This class is used by DistributedFileSystem and Hdfs.
  */
+@InterfaceAudience.Private
 public class CorruptFileBlockIterator implements RemoteIterator<Path> {
   private final DFSClient dfs;
   private final String path;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java
index e7a5112..2a228e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 import java.util.concurrent.atomic.AtomicLong;
 
 /**
@@ -24,6 +26,7 @@ import java.util.concurrent.atomic.AtomicLong;
  * This class has a number of metrics variables that are publicly accessible,
  * we can grab them from client side, like HBase.
  */
+@InterfaceAudience.Private
 public class DFSHedgedReadMetrics {
   public final AtomicLong hedgedReadOps = new AtomicLong();
   public final AtomicLong hedgedReadOpsWin = new AtomicLong();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
index 7e7f780..22055c3 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
@@ -23,6 +23,7 @@ import java.nio.BufferOverflowException;
 import java.nio.channels.ClosedChannelException;
 import java.util.Arrays;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.util.ByteArrayManager;
@@ -34,6 +35,7 @@ import org.apache.htrace.Span;
  * to send them to datanodes.
  ****************************************************************/
 
+@InterfaceAudience.Private
 class DFSPacket {
   public static final long HEART_BEAT_SEQNO = -1L;
   private static long[] EMPTY = new long[0];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 6bcbfde..0c6b4a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -40,6 +40,8 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -109,6 +111,7 @@ import com.google.common.cache.RemovalNotification;
  *
  *********************************************************************/
 
+@InterfaceAudience.Private
 class DataStreamer extends Daemon {
   /**
    * Create a socket for a write pipeline

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
index 57eff39..7b9e8e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java
@@ -19,11 +19,13 @@ package org.apache.hadoop.hdfs;
 
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 
 /**
  * An immutable key which identifies a block.
  */
+@InterfaceAudience.Private
 final public class ExtendedBlockId {
   /**
    * The block ID for this block.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
index 0ee57c2..e1401c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
@@ -34,6 +34,7 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -56,6 +57,7 @@ import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
+@InterfaceAudience.Private
 public class HAUtil {
   
   private static final Log LOG = 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java
index 68ff554..c7da7af 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java
@@ -26,6 +26,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 
@@ -35,6 +36,7 @@ import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
 
+@InterfaceAudience.Private
 public class KeyProviderCache {
 
   public static final Log LOG = LogFactory.getLog(KeyProviderCache.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
index e767501..3e0abce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java
@@ -30,6 +30,7 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Daemon;
@@ -68,6 +69,7 @@ import com.google.common.annotations.VisibleForTesting;
  * </ul>
  * </p>
  */
+@InterfaceAudience.Private
 class LeaseRenewer {
   static final Log LOG = LogFactory.getLog(LeaseRenewer.class);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index fcc2f5f..98c8b41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -40,6 +40,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient.Conf;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
@@ -94,6 +95,7 @@ import com.google.common.base.Preconditions;
  * {@link NameNodeProxies#createProxy(Configuration, URI, Class)}, which will
  * create either an HA- or non-HA-enabled client proxy as appropriate.
  */
+@InterfaceAudience.Private
 public class NameNodeProxies {
   
   private static final Log LOG = LogFactory.getLog(NameNodeProxies.class);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f55a1b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java
index 5afff00..f03e179 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java
@@ -20,11 +20,13 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
 
+@InterfaceAudience.Private
 public interface RemotePeerFactory {
   /**
    * @param addr          The address to connect to.


[35/47] hadoop git commit: YARN-3465. Use LinkedHashMap to preserve order of resource requests. (Zhihai Xu via kasha)

Posted by zj...@apache.org.
YARN-3465. Use LinkedHashMap to preserve order of resource requests. (Zhihai Xu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1488457f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1488457f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1488457f

Branch: refs/heads/YARN-2928
Commit: 1488457f84f88a43072d164366ccb2106e70172a
Parents: 680c2ee
Author: Karthik Kambatla <ka...@apache.org>
Authored: Thu Apr 9 00:07:49 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:54 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../nodemanager/containermanager/container/ContainerImpl.java     | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1488457f/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f47be48..b339a7e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -221,6 +221,9 @@ Release 2.8.0 - UNRELEASED
     YARN-2890. MiniYarnCluster should turn on timeline service if
     configured to do so. (Mit Desai via hitesh)
 
+    YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
+    (Zhihai Xu via kasha)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1488457f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
index cf3d8e7..131d439 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java
@@ -25,6 +25,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
@@ -638,7 +639,7 @@ public class ContainerImpl implements Container {
           return ContainerState.LOCALIZATION_FAILED;
         }
         Map<LocalResourceVisibility, Collection<LocalResourceRequest>> req =
-            new HashMap<LocalResourceVisibility, 
+            new LinkedHashMap<LocalResourceVisibility,
                         Collection<LocalResourceRequest>>();
         if (!container.publicRsrcs.isEmpty()) {
           req.put(LocalResourceVisibility.PUBLIC, container.publicRsrcs);
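
The one-line switch matters because HashMap iteration order is unspecified, while LinkedHashMap iterates in insertion order, so the resource-request groups are handed to localization in the order they were added. A standalone illustration (not from the patch):

    Map<String, String> hashed = new HashMap<String, String>();
    Map<String, String> linked = new LinkedHashMap<String, String>();
    for (String k : new String[] {"PUBLIC", "PRIVATE", "APPLICATION"}) {
      hashed.put(k, k);
      linked.put(k, k);
    }
    // linked.keySet() is always [PUBLIC, PRIVATE, APPLICATION];
    // hashed.keySet() order is unspecified and may differ.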


[31/47] hadoop git commit: Revert HDFS-7808.

Posted by zj...@apache.org.
Revert HDFS-7808.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e10f2b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e10f2b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e10f2b5

Branch: refs/heads/YARN-2928
Commit: 6e10f2b5812583f9228af3424cd0b0e514f68799
Parents: 6f15dd9
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Apr 8 15:59:55 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:53 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ---
 .../apache/hadoop/hdfs/tools/DFSHAAdmin.java    | 20 ++++++++++++++++++++
 .../hadoop/hdfs/tools/TestDFSHAAdmin.java       | 20 ++++++++++++++++++++
 .../hdfs/tools/TestDFSHAAdminMiniCluster.java   |  3 +++
 4 files changed, 43 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e10f2b5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 95c6912..d4a8c0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1235,9 +1235,6 @@ Release 2.7.0 - UNRELEASED
     HDFS-6662. WebHDFS cannot open a file if its path contains "%".
     (Gerson Carlos via wheat9)
 
-    HDFS-7808. Remove obsolete -ns options in in DFSHAAdmin.java.
-    (Arshad Mohammad via wheat9)
-
     HDFS-7788. Post-2.6 namenode may not start up with an image containing
     inodes created with an old release. (Rushabh Shah via kihwal)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e10f2b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
index 6b6fb30..e9c611d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.PrintStream;
+import java.util.Arrays;
 import java.util.Collection;
 
 import org.apache.commons.logging.Log;
@@ -97,6 +98,25 @@ public class DFSHAAdmin extends HAAdmin {
       printUsage(errOut);
       return -1;
     }
+
+    int i = 0;
+    String cmd = argv[i++];
+
+    if ("-ns".equals(cmd)) {
+      if (i == argv.length) {
+        errOut.println("Missing nameservice ID");
+        printUsage(errOut);
+        return -1;
+      }
+      nameserviceId = argv[i++];
+      if (i >= argv.length) {
+        errOut.println("Missing command");
+        printUsage(errOut);
+        return -1;
+      }
+      argv = Arrays.copyOfRange(argv, i, argv.length);
+    }
+
     return super.runCmd(argv);
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e10f2b5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
index 8ecc71a..33da4d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
@@ -147,6 +147,17 @@ public class TestDFSHAAdmin {
   }
   
   @Test
+  public void testNameserviceOption() throws Exception {
+    assertEquals(-1, runTool("-ns"));
+    assertOutputContains("Missing nameservice ID");
+    assertEquals(-1, runTool("-ns", "ns1"));
+    assertOutputContains("Missing command");
+    // "ns1" isn't defined but we check this lazily and help doesn't use the ns
+    assertEquals(0, runTool("-ns", "ns1", "-help", "transitionToActive"));
+    assertOutputContains("Transitions the service into Active");
+  }
+
+  @Test
   public void testNamenodeResolution() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     assertEquals(0, runTool("-getServiceState", "nn1"));
@@ -268,6 +279,15 @@ public class TestDFSHAAdmin {
   }
 
   @Test
+  public void testFailoverWithFencerAndNameservice() throws Exception {
+    Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
+    HdfsConfiguration conf = getHAConf();
+    conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
+    tool.setConf(conf);
+    assertEquals(0, runTool("-ns", "ns1", "-failover", "nn1", "nn2"));
+  }
+
+  @Test
   public void testFailoverWithFencerConfiguredAndForce() throws Exception {
     Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
     HdfsConfiguration conf = getHAConf();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e10f2b5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
index 9047279..2910004 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
@@ -157,6 +157,9 @@ public class TestDFSHAAdminMiniCluster {
     assertEquals(0, runTool("-failover", "nn1", "nn2"));
     assertEquals(0, runTool("-failover", "nn2", "nn1"));
     
+    // Test failover with fencer and nameservice
+    assertEquals(0, runTool("-ns", "minidfs-ns", "-failover", "nn2", "nn1"));
+
     // Fencer has not run yet, since none of the above required fencing 
     assertEquals("", Files.toString(tmpFile, Charsets.UTF_8));
 


[30/47] hadoop git commit: HDFS-7725. Incorrect 'nodes in service' metrics caused all writes to fail. Contributed by Ming Ma.

Posted by zj...@apache.org.
HDFS-7725. Incorrect 'nodes in service' metrics caused all writes to fail. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f15dd9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f15dd9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f15dd9e

Branch: refs/heads/YARN-2928
Commit: 6f15dd9ef38cd6211cfcd267247b02f877e059b8
Parents: c871128
Author: Andrew Wang <wa...@apache.org>
Authored: Wed Apr 8 15:52:06 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:53 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../blockmanagement/DecommissionManager.java    | 28 +++++++++----------
 .../blockmanagement/HeartbeatManager.java       | 29 ++++++++++++++------
 .../namenode/TestNamenodeCapacityReport.java    |  5 ++++
 4 files changed, 41 insertions(+), 24 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f15dd9e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 852006d..95c6912 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -441,6 +441,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-5215. dfs.datanode.du.reserved is not considered while computing
     available space ( Brahma Reddy Battula via Yongjun Zhang)
 
+    HDFS-7725. Incorrect "nodes in service" metrics caused all writes to fail.
+    (Ming Ma via wang)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f15dd9e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 9355329..7f3d778 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -197,23 +197,21 @@ public class DecommissionManager {
    */
   @VisibleForTesting
   public void startDecommission(DatanodeDescriptor node) {
-    if (!node.isDecommissionInProgress()) {
-      if (!node.isAlive) {
-        LOG.info("Dead node {} is decommissioned immediately.", node);
-        node.setDecommissioned();
-      } else if (!node.isDecommissioned()) {
+    if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
+      // Update DN stats maintained by HeartbeatManager
+      hbManager.startDecommission(node);
+      // hbManager.startDecommission will mark a dead node as decommissioned.
+      if (node.isDecommissionInProgress()) {
         for (DatanodeStorageInfo storage : node.getStorageInfos()) {
-          LOG.info("Starting decommission of {} {} with {} blocks", 
+          LOG.info("Starting decommission of {} {} with {} blocks",
               node, storage, storage.numBlocks());
         }
-        // Update DN stats maintained by HeartbeatManager
-        hbManager.startDecommission(node);
         node.decommissioningStatus.setStartTime(monotonicNow());
         pendingNodes.add(node);
       }
     } else {
-      LOG.trace("startDecommission: Node {} is already decommission in "
-              + "progress, nothing to do.", node);
+      LOG.trace("startDecommission: Node {} in {}, nothing to do.",
+          node, node.getAdminState());
     }
   }
 
@@ -221,12 +219,12 @@ public class DecommissionManager {
    * Stop decommissioning the specified datanode. 
    * @param node
    */
-  void stopDecommission(DatanodeDescriptor node) {
+  @VisibleForTesting
+  public void stopDecommission(DatanodeDescriptor node) {
     if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-      LOG.info("Stopping decommissioning of node {}", node);
       // Update DN stats maintained by HeartbeatManager
       hbManager.stopDecommission(node);
-      // Over-replicated blocks will be detected and processed when 
+      // Over-replicated blocks will be detected and processed when
       // the dead node comes back and send in its full block report.
       if (node.isAlive) {
         blockManager.processOverReplicatedBlocksOnReCommission(node);
@@ -235,8 +233,8 @@ public class DecommissionManager {
       pendingNodes.remove(node);
       decomNodeBlocks.remove(node);
     } else {
-      LOG.trace("stopDecommission: Node {} is not decommission in progress " +
-          "or decommissioned, nothing to do.", node);
+      LOG.trace("stopDecommission: Node {} in {}, nothing to do.",
+          node, node.getAdminState());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f15dd9e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
index d2905a2..b0ab315 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HeartbeatManager.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -31,6 +29,8 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Manage the heartbeats received from datanodes.
@@ -38,7 +38,7 @@ import org.apache.hadoop.util.Time;
  * by the heartbeat manager lock.
  */
 class HeartbeatManager implements DatanodeStatistics {
-  static final Log LOG = LogFactory.getLog(HeartbeatManager.class);
+  static final Logger LOG = LoggerFactory.getLogger(HeartbeatManager.class);
 
   /**
    * Stores a subset of the datanodeMap in DatanodeManager,
@@ -227,15 +227,26 @@ class HeartbeatManager implements DatanodeStatistics {
   }
 
   synchronized void startDecommission(final DatanodeDescriptor node) {
-    stats.subtract(node);
-    node.startDecommission();
-    stats.add(node);
+    if (!node.isAlive) {
+      LOG.info("Dead node {} is decommissioned immediately.", node);
+      node.setDecommissioned();
+    } else {
+      stats.subtract(node);
+      node.startDecommission();
+      stats.add(node);
+    }
   }
 
   synchronized void stopDecommission(final DatanodeDescriptor node) {
-    stats.subtract(node);
-    node.stopDecommission();
-    stats.add(node);
+    LOG.info("Stopping decommissioning of {} node {}",
+        node.isAlive ? "live" : "dead", node);
+    if (!node.isAlive) {
+      node.stopDecommission();
+    } else {
+      stats.subtract(node);
+      node.stopDecommission();
+      stats.add(node);
+    }
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f15dd9e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
index fd611ce..6f54722 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
@@ -202,9 +202,14 @@ public class TestNamenodeCapacityReport {
         dn.shutdown();
         DFSTestUtil.setDatanodeDead(dnd);
         BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
+        //Verify decommission of dead node won't impact nodesInService metrics.
+        dnm.getDecomManager().startDecommission(dnd);
         expectedInServiceNodes--;
         assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
         assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
+        //Verify recommission of dead node won't impact nodesInService metrics.
+        dnm.getDecomManager().stopDecommission(dnd);
+        assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
       }
 
       // restart the nodes to verify that counts are correct after
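
The fix above follows a subtract/mutate/add discipline: a live node's
contribution is removed from the aggregate statistics, its admin state
is changed, and the contribution is re-added, while dead nodes bypass
the stats entirely. A minimal sketch of that discipline (hypothetical
Stats and Node types, not the HDFS classes):

    interface Node { boolean isAlive(); }

    interface Stats {
      void add(Node node);
      void subtract(Node node);
    }

    class StatsGuard {
      private final Stats stats;

      StatsGuard(Stats stats) { this.stats = stats; }

      // Bracket every admin-state change so the aggregate never drifts.
      synchronized void transition(Node node, Runnable stateChange) {
        if (!node.isAlive()) {
          stateChange.run();   // a dead node contributes nothing to stats
          return;
        }
        stats.subtract(node);  // remove contribution under the old state
        stateChange.run();     // e.g. node.startDecommission()
        stats.add(node);       // re-add contribution under the new state
      }
    }

Skipping the bracket for dead nodes is what keeps the "nodes in service"
count from being perturbed by nodes that were never counted, which the
new TestNamenodeCapacityReport assertions verify.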


[09/47] hadoop git commit: YARN-3429. Fix incorrect CHANGES.txt

Posted by zj...@apache.org.
YARN-3429. Fix incorrect CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a48dec60
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a48dec60
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a48dec60

Branch: refs/heads/YARN-2928
Commit: a48dec600232b0d209196ac6cb389e0501bf38e4
Parents: 5135143
Author: Robert Kanter <rk...@apache.org>
Authored: Tue Apr 7 16:15:42 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:57 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a48dec60/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index af86f02..3008d56 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -207,7 +207,7 @@ Release 2.8.0 - UNRELEASED
     YARN-2666. TestFairScheduler.testContinuousScheduling fails Intermittently.
     (Zhihai Xu via ozawa)
 
-    YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with
+    YARN-3429. TestAMRMTokens.testTokenExpiry fails Intermittently with
     error message:Invalid AMRMToken (zxu via rkanter)
 
     YARN-3110. Few issues in ApplicationHistory web ui. (Naganarasimha G R via xgong)


[21/47] hadoop git commit: HDFS-8072. Reserved RBW space is not released if client terminates while writing block. (Arpit Agarwal)

Posted by zj...@apache.org.
HDFS-8072. Reserved RBW space is not released if client terminates while writing block. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/455edc81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/455edc81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/455edc81

Branch: refs/heads/YARN-2928
Commit: 455edc8134c49a9674853cab35231618b75b6adb
Parents: c881277
Author: Arpit Agarwal <ar...@apache.org>
Authored: Wed Apr 8 11:38:21 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:59 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hdfs/server/datanode/BlockReceiver.java     |  1 +
 .../hdfs/server/datanode/ReplicaInPipeline.java |  6 ++
 .../datanode/ReplicaInPipelineInterface.java    |  5 ++
 .../server/datanode/SimulatedFSDataset.java     |  4 ++
 .../extdataset/ExternalReplicaInPipeline.java   |  4 ++
 .../fsdataset/impl/TestRbwSpaceReservation.java | 67 +++++++++++++++++---
 7 files changed, 81 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/455edc81/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 84e382a..91a16bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1403,6 +1403,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in
     platform-specific format. (Xiaoyu Yao via cnauroth)
 
+    HDFS-8072. Reserved RBW space is not released if client terminates while
+    writing block. (Arpit Agarwal)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/455edc81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 58cb8b1..c0be956 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -817,6 +817,7 @@ class BlockReceiver implements Closeable {
       }
 
     } catch (IOException ioe) {
+      replicaInfo.releaseAllBytesReserved();
       if (datanode.isRestarting()) {
         // Do not throw if shutting down for restart. Otherwise, it will cause
         // premature termination of responder.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/455edc81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
index 6a26640..cc55f85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipeline.java
@@ -148,6 +148,12 @@ public class ReplicaInPipeline extends ReplicaInfo
     return bytesReserved;
   }
   
+  @Override
+  public void releaseAllBytesReserved() {  // ReplicaInPipelineInterface
+    getVolume().releaseReservedSpace(bytesReserved);
+    bytesReserved = 0;
+  }
+
   @Override // ReplicaInPipelineInterface
   public synchronized void setLastChecksumAndDataLen(long dataLength, byte[] lastChecksum) {
     this.bytesOnDisk = dataLength;
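
releaseAllBytesReserved() undoes the space reservation made when the
replica entered the write pipeline: the volume's reserved-for-RBW
counter is decremented and the replica's own bookkeeping is zeroed so a
repeated call releases nothing twice. A minimal sketch of that
accounting (hypothetical Volume class, not FsVolumeImpl):

    import java.util.concurrent.atomic.AtomicLong;

    class Volume {
      private final AtomicLong reservedForRbw = new AtomicLong();

      void reserveSpaceForRbw(long bytes) {
        reservedForRbw.addAndGet(bytes);
      }

      // Callers zero their own reservation after releasing, as
      // ReplicaInPipeline does above, so a double release is benign.
      void releaseReservedSpace(long bytes) {
        reservedForRbw.addAndGet(-bytes);
      }

      long getReservedForRbw() {
        return reservedForRbw.get();
      }
    }

The one-line BlockReceiver change wires this into the IOException path,
so a client dying mid-write returns its reserved bytes; the new
testSpaceReleasedOnUnexpectedEof polls getReservedForRbw() on every
replica in the pipeline to confirm it.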

http://git-wip-us.apache.org/repos/asf/hadoop/blob/455edc81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
index 7f08b81..0263d0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInPipelineInterface.java
@@ -45,6 +45,11 @@ public interface ReplicaInPipelineInterface extends Replica {
   void setBytesAcked(long bytesAcked);
   
   /**
+   * Release any disk space reserved for this replica.
+   */
+  public void releaseAllBytesReserved();
+
+  /**
    * store the checksum for the last chunk along with the data length
    * @param dataLength number of bytes on disk
    * @param lastChecksum - checksum bytes for the last chunk

http://git-wip-us.apache.org/repos/asf/hadoop/blob/455edc81/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 160a86c..a358e22 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -290,6 +290,10 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
     }
 
     @Override
+    public void releaseAllBytesReserved() {
+    }
+
+    @Override
     synchronized public long getBytesOnDisk() {
       if (finalized) {
         return theBlock.getNumBytes();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/455edc81/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalReplicaInPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalReplicaInPipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalReplicaInPipeline.java
index c3c0197..ad44500 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalReplicaInPipeline.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/extdataset/ExternalReplicaInPipeline.java
@@ -41,6 +41,10 @@ public class ExternalReplicaInPipeline implements ReplicaInPipelineInterface {
   }
 
   @Override
+  public void releaseAllBytesReserved() {
+  }
+
+  @Override
   public void setLastChecksumAndDataLen(long dataLength, byte[] lastChecksum) {
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/455edc81/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java
index 487f3ab..ebf2f3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestRbwSpaceReservation.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -45,6 +46,7 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 /**
  * Ensure that the DN reserves disk space equivalent to a full block for
@@ -53,7 +55,6 @@ import java.util.Random;
 public class TestRbwSpaceReservation {
   static final Log LOG = LogFactory.getLog(TestRbwSpaceReservation.class);
 
-  private static final short REPL_FACTOR = 1;
   private static final int DU_REFRESH_INTERVAL_MSEC = 500;
   private static final int STORAGES_PER_DATANODE = 1;
   private static final int BLOCK_SIZE = 1024 * 1024;
@@ -83,25 +84,38 @@ public class TestRbwSpaceReservation {
     ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
   }
 
-  private void startCluster(int blockSize, long perVolumeCapacity) throws IOException {
+  /**
+   * Start a MiniDFSCluster for the test.
+   *
+   * @param blockSize the block size to configure for the cluster
+   * @param numDatanodes the number of DataNodes to start
+   * @param perVolumeCapacity limit the capacity of each volume to the given
+   *                          value. If negative, then don't limit.
+   * @throws IOException
+   */
+  private void startCluster(int blockSize, int numDatanodes, long perVolumeCapacity) throws IOException {
     initConfig(blockSize);
 
     cluster = new MiniDFSCluster
         .Builder(conf)
         .storagesPerDatanode(STORAGES_PER_DATANODE)
-        .numDataNodes(REPL_FACTOR)
+        .numDataNodes(numDatanodes)
         .build();
     fs = cluster.getFileSystem();
     client = fs.getClient();
     cluster.waitActive();
 
     if (perVolumeCapacity >= 0) {
+      for (DataNode dn : cluster.getDataNodes()) {
+        for (FsVolumeSpi volume : dn.getFSDataset().getVolumes()) {
+          ((FsVolumeImpl) volume).setCapacityForTesting(perVolumeCapacity);
+        }
+      }
+    }
+
+    if (numDatanodes == 1) {
       List<? extends FsVolumeSpi> volumes =
           cluster.getDataNodes().get(0).getFSDataset().getVolumes();
-
       assertThat(volumes.size(), is(1));
       singletonVolume = ((FsVolumeImpl) volumes.get(0));
-      singletonVolume.setCapacityForTesting(perVolumeCapacity);
     }
   }
 
@@ -128,7 +142,7 @@ public class TestRbwSpaceReservation {
       throws IOException, InterruptedException {
     // Enough for 1 block + meta files + some delta.
     final long configuredCapacity = fileBlockSize * 2 - 1;
-    startCluster(BLOCK_SIZE, configuredCapacity);
+    startCluster(BLOCK_SIZE, 1, configuredCapacity);
     FSDataOutputStream out = null;
     Path path = new Path("/" + fileNamePrefix + ".dat");
 
@@ -195,7 +209,7 @@ public class TestRbwSpaceReservation {
   @Test (timeout=300000)
   public void testWithLimitedSpace() throws IOException {
     // Cluster with just enough space for a full block + meta.
-    startCluster(BLOCK_SIZE, 2 * BLOCK_SIZE - 1);
+    startCluster(BLOCK_SIZE, 1, 2 * BLOCK_SIZE - 1);
     final String methodName = GenericTestUtils.getMethodName();
     Path file1 = new Path("/" + methodName + ".01.dat");
     Path file2 = new Path("/" + methodName + ".02.dat");
@@ -208,7 +222,6 @@ public class TestRbwSpaceReservation {
       os2 = fs.create(file2);
 
       // Write one byte to the first file.
-      LOG.info("arpit: writing first file");
       byte[] data = new byte[1];
       os1.write(data);
       os1.hsync();
@@ -228,6 +241,42 @@ public class TestRbwSpaceReservation {
   }
 
   /**
+   * Ensure that reserved space is released when the client goes away
+   * unexpectedly.
+   *
+   * The verification is done for each replica in the write pipeline.
+   *
+   * @throws IOException
+   */
+  @Test(timeout=300000)
+  public void testSpaceReleasedOnUnexpectedEof()
+      throws IOException, InterruptedException, TimeoutException {
+    final short replication = 3;
+    startCluster(BLOCK_SIZE, replication, -1);
+
+    final String methodName = GenericTestUtils.getMethodName();
+    final Path file = new Path("/" + methodName + ".01.dat");
+
+    // Write 1 byte to the file and kill the writer.
+    FSDataOutputStream os = fs.create(file, replication);
+    os.write(new byte[1]);
+    os.hsync();
+    DFSTestUtil.abortStream((DFSOutputStream) os.getWrappedStream());
+
+    // Ensure all space reserved for the replica was released on each
+    // DataNode.
+    for (DataNode dn : cluster.getDataNodes()) {
+      final FsVolumeImpl volume = (FsVolumeImpl) dn.getFSDataset().getVolumes().get(0);
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          return (volume.getReservedForRbw() == 0);
+        }
+      }, 500, Integer.MAX_VALUE); // Wait until the test times out.
+    }
+  }
+
+  /**
    * Stress test to ensure we are not leaking reserved space.
    * @throws IOException
    * @throws InterruptedException
@@ -235,7 +284,7 @@ public class TestRbwSpaceReservation {
   @Test (timeout=600000)
   public void stressTest() throws IOException, InterruptedException {
     final int numWriters = 5;
-    startCluster(SMALL_BLOCK_SIZE, SMALL_BLOCK_SIZE * numWriters * 10);
+    startCluster(SMALL_BLOCK_SIZE, 1, SMALL_BLOCK_SIZE * numWriters * 10);
     Writer[] writers = new Writer[numWriters];
 
     // Start a few writers and let them run for a while.


[19/47] hadoop git commit: HDFS-8085. Move CorruptFileBlockIterator to a new hdfs.client.impl package.

Posted by zj...@apache.org.
HDFS-8085. Move CorruptFileBlockIterator to a new hdfs.client.impl package.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79e0de5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79e0de5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79e0de5d

Branch: refs/heads/YARN-2928
Commit: 79e0de5d754eff8fa3bb84a71ae6c34943e47d7c
Parents: 455edc8
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Wed Apr 8 11:50:52 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:59 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../main/java/org/apache/hadoop/fs/Hdfs.java    |   2 +-
 .../hadoop/hdfs/CorruptFileBlockIterator.java   | 104 ------------------
 .../hadoop/hdfs/DistributedFileSystem.java      |   1 +
 .../client/impl/CorruptFileBlockIterator.java   | 105 +++++++++++++++++++
 .../namenode/TestListCorruptFileBlocks.java     |   2 +-
 6 files changed, 111 insertions(+), 106 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79e0de5d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 91a16bc..c983849 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -391,6 +391,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8080. Separate JSON related routines used by WebHdfsFileSystem to a
     package local class. (wheat9)
 
+    HDFS-8085. Move CorruptFileBlockIterator to a new hdfs.client.impl package.
+    (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79e0de5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index 8c09193..aaaff25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -35,13 +35,13 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
-import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79e0de5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
deleted file mode 100644
index 1597b87..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import java.io.IOException;
-import java.util.NoSuchElementException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-
-/**
- * Provides an iterator interface for listCorruptFileBlocks.
- * This class is used by DistributedFileSystem and Hdfs.
- */
-@InterfaceAudience.Private
-public class CorruptFileBlockIterator implements RemoteIterator<Path> {
-  private final DFSClient dfs;
-  private final String path;
-
-  private String[] files = null;
-  private int fileIdx = 0;
-  private String cookie = null;
-  private Path nextPath = null;
-
-  private int callsMade = 0;
-
-  public CorruptFileBlockIterator(DFSClient dfs, Path path) throws IOException {
-    this.dfs = dfs;
-    this.path = path2String(path);
-    loadNext();
-  }
-
-  /**
-   * @return the number of calls made to the DFSClient.
-   * This is for debugging and testing purposes.
-   */
-  public int getCallsMade() {
-    return callsMade;
-  }
-
-  private String path2String(Path path) {
-    return path.toUri().getPath();
-  }
-
-  private Path string2Path(String string) {
-    return new Path(string);
-  }
-
-  private void loadNext() throws IOException {
-    if (files == null || fileIdx >= files.length) {
-      CorruptFileBlocks cfb = dfs.listCorruptFileBlocks(path, cookie);
-      files = cfb.getFiles();
-      cookie = cfb.getCookie();
-      fileIdx = 0;
-      callsMade++;
-    }
-
-    if (fileIdx >= files.length) {
-      // received an empty response
-      // there are no more corrupt file blocks
-      nextPath = null;
-    } else {
-      nextPath = string2Path(files[fileIdx]);
-      fileIdx++;
-    }
-  }
-
-  
-  @Override
-  public boolean hasNext() {
-    return nextPath != null;
-  }
-
-  
-  @Override
-  public Path next() throws IOException {
-    if (!hasNext()) {
-      throw new NoSuchElementException("No more corrupt file blocks");
-    }
-
-    Path result = nextPath;
-    loadNext();
-
-    return result;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79e0de5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 432e4ef..090d884 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
+import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79e0de5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/CorruptFileBlockIterator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/CorruptFileBlockIterator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/CorruptFileBlockIterator.java
new file mode 100644
index 0000000..77bed1a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/impl/CorruptFileBlockIterator.java
@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.client.impl;
+
+import java.io.IOException;
+import java.util.NoSuchElementException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+
+/**
+ * Provides an iterator interface for listCorruptFileBlocks.
+ * This class is used by DistributedFileSystem and Hdfs.
+ */
+@InterfaceAudience.Private
+public class CorruptFileBlockIterator implements RemoteIterator<Path> {
+  private final DFSClient dfs;
+  private final String path;
+
+  private String[] files = null;
+  private int fileIdx = 0;
+  private String cookie = null;
+  private Path nextPath = null;
+
+  private int callsMade = 0;
+
+  public CorruptFileBlockIterator(DFSClient dfs, Path path) throws IOException {
+    this.dfs = dfs;
+    this.path = path2String(path);
+    loadNext();
+  }
+
+  /**
+   * @return the number of calls made to the DFSClient.
+   * This is for debugging and testing purposes.
+   */
+  public int getCallsMade() {
+    return callsMade;
+  }
+
+  private String path2String(Path path) {
+    return path.toUri().getPath();
+  }
+
+  private Path string2Path(String string) {
+    return new Path(string);
+  }
+
+  private void loadNext() throws IOException {
+    if (files == null || fileIdx >= files.length) {
+      CorruptFileBlocks cfb = dfs.listCorruptFileBlocks(path, cookie);
+      files = cfb.getFiles();
+      cookie = cfb.getCookie();
+      fileIdx = 0;
+      callsMade++;
+    }
+
+    if (fileIdx >= files.length) {
+      // received an empty response
+      // there are no more corrupt file blocks
+      nextPath = null;
+    } else {
+      nextPath = string2Path(files[fileIdx]);
+      fileIdx++;
+    }
+  }
+
+  
+  @Override
+  public boolean hasNext() {
+    return nextPath != null;
+  }
+
+  
+  @Override
+  public Path next() throws IOException {
+    if (!hasNext()) {
+      throw new NoSuchElementException("No more corrupt file blocks");
+    }
+
+    Path result = nextPath;
+    loadNext();
+
+    return result;
+  }
+}
\ No newline at end of file
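
Callers are unaffected by the move except for the import; the class is
still consumed through the RemoteIterator contract. An illustrative
consumer (assuming an initialized DFSClient):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;

    class CorruptFileLister {
      static void printCorruptFiles(DFSClient dfs) throws IOException {
        RemoteIterator<Path> it =
            new CorruptFileBlockIterator(dfs, new Path("/"));
        while (it.hasNext()) {
          // Each exhausted batch triggers another paged
          // listCorruptFileBlocks RPC, resuming from the cookie.
          System.out.println("file with corrupt blocks: " + it.next());
        }
      }
    }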

http://git-wip-us.apache.org/repos/asf/hadoop/blob/79e0de5d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index 7118b9e..3afdd0e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -33,13 +33,13 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.BlockMissingException;
-import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;


[15/47] hadoop git commit: YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore called. Contributed by Bibin A Chundatt.

Posted by zj...@apache.org.
YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore called. Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a4a296d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a4a296d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a4a296d

Branch: refs/heads/YARN-2928
Commit: 0a4a296dd6b7dc355d1c0aa1a56b53ba89cb3d17
Parents: fd77880
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Apr 8 15:56:18 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:58 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../yarn/server/nodemanager/NodeManager.java    | 22 +++++++++++---------
 2 files changed, 15 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a4a296d/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3008d56..0d9c5ce 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -212,6 +212,9 @@ Release 2.8.0 - UNRELEASED
 
     YARN-3110. Few issues in ApplicationHistory web ui. (Naganarasimha G R via xgong)
 
+    YARN-3457. NPE when NodeManager.serviceInit fails and stopRecoveryStore called.
+    (Bibin A Chundatt via ozawa)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a4a296d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
index d140d92..c5b8f3f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java
@@ -184,16 +184,18 @@ public class NodeManager extends CompositeService
 
   private void stopRecoveryStore() throws IOException {
     nmStore.stop();
-    if (context.getDecommissioned() && nmStore.canRecover()) {
-      LOG.info("Removing state store due to decommission");
-      Configuration conf = getConfig();
-      Path recoveryRoot = new Path(
-          conf.get(YarnConfiguration.NM_RECOVERY_DIR));
-      LOG.info("Removing state store at " + recoveryRoot
-          + " due to decommission");
-      FileSystem recoveryFs = FileSystem.getLocal(conf);
-      if (!recoveryFs.delete(recoveryRoot, true)) {
-        LOG.warn("Unable to delete " + recoveryRoot);
+    if (null != context) {
+      if (context.getDecommissioned() && nmStore.canRecover()) {
+        LOG.info("Removing state store due to decommission");
+        Configuration conf = getConfig();
+        Path recoveryRoot =
+            new Path(conf.get(YarnConfiguration.NM_RECOVERY_DIR));
+        LOG.info("Removing state store at " + recoveryRoot
+            + " due to decommission");
+        FileSystem recoveryFs = FileSystem.getLocal(conf);
+        if (!recoveryFs.delete(recoveryRoot, true)) {
+          LOG.warn("Unable to delete " + recoveryRoot);
+        }
       }
     }
   }


[34/47] hadoop git commit: HDFS-7979. Initialize block report IDs with a random number.

Posted by zj...@apache.org.
HDFS-7979. Initialize block report IDs with a random number.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1efbe9ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1efbe9ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1efbe9ce

Branch: refs/heads/YARN-2928
Commit: 1efbe9ce32c9ccaaf1f303025e54e8e9209e978b
Parents: 9c4c2dd
Author: Andrew Wang <an...@cloudera.com>
Authored: Wed Apr 8 21:43:42 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:54 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt         |  2 ++
 .../hadoop/hdfs/server/datanode/BPServiceActor.java | 16 +++++++++-------
 .../hdfs/server/protocol/BlockReportContext.java    |  3 +++
 3 files changed, 14 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1efbe9ce/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index dacdb3f..19f264a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -404,6 +404,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. (wheat9)
 
+    HDFS-7979. Initialize block report IDs with a random number. (wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1efbe9ce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index dd6f9ac..ba22225 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -118,6 +118,7 @@ class BPServiceActor implements Runnable {
   private volatile boolean shouldServiceRun = true;
   private final DataNode dn;
   private final DNConf dnConf;
+  private long prevBlockReportId;
 
   private DatanodeRegistration bpRegistration;
   final LinkedList<BPServiceActorAction> bpThreadQueue 
@@ -128,6 +129,7 @@ class BPServiceActor implements Runnable {
     this.dn = bpos.getDataNode();
     this.nnAddr = nnAddr;
     this.dnConf = dn.getDnConf();
+    prevBlockReportId = DFSUtil.getRandom().nextLong();
   }
 
   boolean isAlive() {
@@ -434,15 +436,15 @@ class BPServiceActor implements Runnable {
     return sendImmediateIBR;
   }
 
-  private long prevBlockReportId = 0;
-
   private long generateUniqueBlockReportId() {
-    long id = System.nanoTime();
-    if (id <= prevBlockReportId) {
-      id = prevBlockReportId + 1;
+    // The ID is seeded with a random value in the constructor and simply
+    // incremented here. Note that 0 is used on the NN to indicate
+    // "uninitialized", so we should not send a 0 value ourselves.
+    prevBlockReportId++;
+    while (prevBlockReportId == 0) {
+      prevBlockReportId = DFSUtil.getRandom().nextLong();
     }
-    prevBlockReportId = id;
-    return id;
+    return prevBlockReportId;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1efbe9ce/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
index a084a81..d0b0282 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.hdfs.server.protocol;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 /**
  * The context of the block report.
  *
@@ -27,6 +29,7 @@ package org.apache.hadoop.hdfs.server.protocol;
  * of RPCs which this block report is split into, and the index into that
  * total for the current RPC.
  */
+@InterfaceAudience.Private
 public class BlockReportContext {
   private final int totalRpcs;
   private final int curRpc;
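
The BPServiceActor change above replaces the nanoTime-based scheme with
a counter seeded from a random 64-bit value per actor, so a restarted
DataNode is unlikely to reuse an ID the NameNode has already seen, and
the zero sentinel is never emitted. A condensed sketch of the scheme
(java.util.Random standing in for DFSUtil.getRandom()):

    import java.util.Random;

    class BlockReportIds {
      private final Random rand = new Random();
      private long prev = rand.nextLong();  // per-actor random seed

      long next() {
        prev++;
        while (prev == 0) {  // 0 means "uninitialized" on the NN
          prev = rand.nextLong();
        }
        return prev;
      }
    }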


[16/47] hadoop git commit: HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to HdfsClientConfigKeys.Retry.

Posted by zj...@apache.org.
HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to HdfsClientConfigKeys.Retry.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41b7a26b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41b7a26b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41b7a26b

Branch: refs/heads/YARN-2928
Commit: 41b7a26b182951a2862d46a9602f31134f4202eb
Parents: 6529c50
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Tue Apr 7 19:48:57 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:58 2015 -0700

----------------------------------------------------------------------
 .../hdfs/client/HdfsClientConfigKeys.java       |  56 ++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  27 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 325 +++++++++++--------
 .../org/apache/hadoop/hdfs/NameNodeProxies.java |  28 +-
 .../apache/hadoop/hdfs/client/HdfsUtils.java    |   3 +-
 .../hadoop/hdfs/TestBlockMissingException.java  |   3 +-
 .../hadoop/hdfs/TestBlockReaderLocalLegacy.java |   3 +-
 .../hadoop/hdfs/TestClientReportBadBlock.java   |   3 +-
 .../apache/hadoop/hdfs/TestCrcCorruption.java   |  12 +-
 .../hadoop/hdfs/TestDFSClientRetries.java       |  11 +-
 .../org/apache/hadoop/hdfs/TestDFSShell.java    |   5 +-
 .../hadoop/hdfs/TestEncryptedTransfer.java      |   4 +-
 .../hadoop/hdfs/TestMissingBlocksAlert.java     |   4 +-
 .../java/org/apache/hadoop/hdfs/TestPread.java  |   5 +-
 .../datatransfer/sasl/TestSaslDataTransfer.java |  11 +-
 .../blockmanagement/TestBlockTokenWithDFS.java  |   3 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   9 +-
 .../namenode/TestListCorruptFileBlocks.java     |   5 +-
 .../ha/TestFailoverWithBlockTokensEnabled.java  |   3 +-
 20 files changed, 309 insertions(+), 214 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
new file mode 100644
index 0000000..cf2d50a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+/** Client configuration properties */
+public interface HdfsClientConfigKeys {
+  static final String PREFIX = "dfs.client.";
+
+  /** Client retry configuration properties */
+  public interface Retry {
+    static final String PREFIX = HdfsClientConfigKeys.PREFIX + "retry.";
+
+    public static final String  POLICY_ENABLED_KEY
+        = PREFIX + "policy.enabled";
+    public static final boolean POLICY_ENABLED_DEFAULT
+        = false; 
+    public static final String  POLICY_SPEC_KEY
+        = PREFIX + "policy.spec";
+    public static final String  POLICY_SPEC_DEFAULT
+        = "10000,6,60000,10"; //t1,n1,t2,n2,... 
+
+    public static final String  TIMES_GET_LAST_BLOCK_LENGTH_KEY
+        = PREFIX + "times.get-last-block-length";
+    public static final int     TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT
+        = 3;
+    public static final String  INTERVAL_GET_LAST_BLOCK_LENGTH_KEY
+        = PREFIX + "interval-ms.get-last-block-length";
+    public static final int     INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT
+        = 4000;
+
+    public static final String  MAX_ATTEMPTS_KEY
+        = PREFIX + "max.attempts";
+    public static final int     MAX_ATTEMPTS_DEFAULT
+        = 10;
+
+    public static final String  WINDOW_BASE_KEY
+        = PREFIX + "window.base";
+    public static final int     WINDOW_BASE_DEFAULT
+        = 3000;
+  }
+}
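
Client code reads the relocated keys through the new interface
constants, as the DFSClient hunk below shows. An illustrative override
of the retry behaviour (the values shown are the defaults listed above):

    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true);
    conf.set(HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
        "10000,6,60000,10");  // t1,n1,t2,n2,...
    conf.setInt(HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY, 10);

The key strings themselves ("dfs.client.retry.*") are unchanged by the
move, so existing hdfs-site.xml settings continue to work; only the
Java constant names moved.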

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 46d0217..f194bd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -382,6 +382,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8049. Add @InterfaceAudience.Private annotation to hdfs client
     implementation. (Takuya Fukudome via szetszwo)
 
+    HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to
+    HdfsClientConfigKeys.Retry.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 29bb604..c85c1ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -22,10 +22,10 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
@@ -46,9 +46,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
@@ -129,8 +126,8 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.XAttr;
@@ -139,6 +136,7 @@ import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.net.Peer;
@@ -187,7 +185,6 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
@@ -350,8 +347,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
           DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
           DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
       maxRetryAttempts = conf.getInt(
-          DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
-          DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
+          HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
+          HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
       failoverSleepBaseMillis = conf.getInt(
           DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
           DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
@@ -411,7 +408,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
           DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
       prefetchSize = conf.getLong(DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
           10 * defaultBlockSize);
-      timeWindow = conf.getInt(DFS_CLIENT_RETRY_WINDOW_BASE, 3000);
+      timeWindow = conf.getInt(
+          HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY,
+          HdfsClientConfigKeys.Retry.WINDOW_BASE_DEFAULT);
       nCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
           DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
       nBlockWriteRetry = conf.getInt(DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY,
@@ -435,11 +434,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
           DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS,
           DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT);
       retryTimesForGetLastBlockLength = conf.getInt(
-          DFSConfigKeys.DFS_CLIENT_RETRY_TIMES_GET_LAST_BLOCK_LENGTH,
-          DFSConfigKeys.DFS_CLIENT_RETRY_TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT);
+          HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY,
+          HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT);
       retryIntervalForGetLastBlockLength = conf.getInt(
-        DFSConfigKeys.DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH,
-        DFSConfigKeys.DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT);
+          HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_KEY,
+          HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT);
 
       useLegacyBlockReader = conf.getBoolean(
           DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER,

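The DFSClient.Conf lookups above now take both the key and its default from the nested Retry class. A standalone sketch of the same pattern, assuming only the constants from this patch (defaults per the patch: 10 attempts, 3000 ms window base):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class ReadRetryConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Key and default now come from the same nested class.
        int maxRetryAttempts = conf.getInt(
            HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
            HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
        int timeWindow = conf.getInt(
            HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY,
            HdfsClientConfigKeys.Retry.WINDOW_BASE_DEFAULT);
        System.out.println(maxRetryAttempts + " attempts, "
            + timeWindow + " ms window base");
      }
    }
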
http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 610932a..6be61f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -22,20 +22,18 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
 import org.apache.hadoop.hdfs.web.AuthFilter;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
- * This class contains constants for configuration keys used
- * in hdfs.
- *
+ * This class contains constants for configuration keys and default values
+ * used in hdfs.
  */
-
 @InterfaceAudience.Private
 public class DFSConfigKeys extends CommonConfigurationKeys {
-
   public static final String  DFS_BLOCK_SIZE_KEY = "dfs.blocksize";
   public static final long    DFS_BLOCK_SIZE_DEFAULT = 128*1024*1024;
   public static final String  DFS_REPLICATION_KEY = "dfs.replication";
@@ -46,82 +44,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_BYTES_PER_CHECKSUM_DEFAULT = 512;
   public static final String  DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
   public static final String  DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
-  public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY = "dfs.client.retry.policy.enabled";
-  public static final boolean DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = false; 
-  public static final String  DFS_CLIENT_RETRY_POLICY_SPEC_KEY = "dfs.client.retry.policy.spec";
-  public static final String  DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,... 
   public static final String  DFS_CHECKSUM_TYPE_KEY = "dfs.checksum.type";
   public static final String  DFS_CHECKSUM_TYPE_DEFAULT = "CRC32C";
-  public static final String  DFS_CLIENT_WRITE_MAX_PACKETS_IN_FLIGHT_KEY = "dfs.client.write.max-packets-in-flight";
-  public static final int     DFS_CLIENT_WRITE_MAX_PACKETS_IN_FLIGHT_DEFAULT = 80;
-  public static final String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
-  public static final int     DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
-  public static final String  DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_ENABLED_KEY
-      = "dfs.client.write.byte-array-manager.enabled";
-  public static final boolean DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_ENABLED_DEFAULT
-      = false;
-  public static final String  DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_THRESHOLD_KEY
-      = "dfs.client.write.byte-array-manager.count-threshold";
-  public static final int     DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_THRESHOLD_DEFAULT
-      = 128;
-  public static final String  DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_LIMIT_KEY
-      = "dfs.client.write.byte-array-manager.count-limit";
-  public static final int     DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_LIMIT_DEFAULT
-      = 2048;
-  public static final String  DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_RESET_TIME_PERIOD_MS_KEY
-      = "dfs.client.write.byte-array-manager.count-reset-time-period-ms";
-  public static final long    DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_RESET_TIME_PERIOD_MS_DEFAULT
-      = 10L * 1000;
-
-  public static final String  DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY = "dfs.client.block.write.replace-datanode-on-failure.enable";
-  public static final boolean DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_DEFAULT = true;
-  public static final String  DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY = "dfs.client.block.write.replace-datanode-on-failure.policy";
-  public static final String  DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT = "DEFAULT";
-  public static final String  DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_KEY = "dfs.client.block.write.replace-datanode-on-failure.best-effort";
-  public static final boolean DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_DEFAULT = false;
-  public static final String  DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY = "dfs.client.socketcache.capacity";
-  public static final int     DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = 16;
-  public static final String  DFS_CLIENT_USE_DN_HOSTNAME = "dfs.client.use.datanode.hostname";
-  public static final boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT = false;
-  public static final String  DFS_CLIENT_CACHE_DROP_BEHIND_WRITES = "dfs.client.cache.drop.behind.writes";
-  public static final String  DFS_CLIENT_CACHE_DROP_BEHIND_READS = "dfs.client.cache.drop.behind.reads";
-  public static final String  DFS_CLIENT_CACHE_READAHEAD = "dfs.client.cache.readahead";
-  public static final String  DFS_CLIENT_CONTEXT = "dfs.client.context";
-  public static final String  DFS_CLIENT_CONTEXT_DEFAULT = "default";
   public static final String  DFS_HDFS_BLOCKS_METADATA_ENABLED = "dfs.datanode.hdfs-blocks-metadata.enabled";
   public static final boolean DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT = false;
-  public static final String  DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS = "dfs.client.file-block-storage-locations.num-threads";
-  public static final int     DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT = 10;
-  public static final String  DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS = "dfs.client.file-block-storage-locations.timeout.millis";
-  public static final int     DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT = 1000;
-  public static final String  DFS_CLIENT_RETRY_TIMES_GET_LAST_BLOCK_LENGTH = "dfs.client.retry.times.get-last-block-length";
-  public static final int     DFS_CLIENT_RETRY_TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT = 3;
-  public static final String  DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH = "dfs.client.retry.interval-ms.get-last-block-length";
-  public static final int     DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT = 4000;
   public static final String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
       "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
 
   // HA related configuration
-  public static final String  DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX = "dfs.client.failover.proxy.provider";
-  public static final String  DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = "dfs.client.failover.max.attempts";
-  public static final int     DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = 15;
-  public static final String  DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY = "dfs.client.failover.sleep.base.millis";
-  public static final int     DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
-  public static final String  DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY = "dfs.client.failover.sleep.max.millis";
-  public static final int     DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT = 15000;
-  public static final String  DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_KEY = "dfs.client.failover.connection.retries";
-  public static final int     DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_DEFAULT = 0;
-  public static final String  DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY = "dfs.client.failover.connection.retries.on.timeouts";
-  public static final int     DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
-  public static final String  DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY = "dfs.client.retry.max.attempts";
-  public static final int     DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
-  
-  public static final String  DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY = "dfs.client.socketcache.expiryMsec";
-  public static final long    DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 3000;
-  public static final String  DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL = "dfs.client.write.exclude.nodes.cache.expiry.interval.millis";
-  public static final long    DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT = 10 * 60 * 1000; // 10 minutes, in ms
-  public static final String  DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY = "dfs.client.datanode-restart.timeout";
-  public static final long    DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT = 30;
   public static final String  DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY = "dfs.datanode.restart.replica.expiration";
   public static final long    DFS_DATANODE_RESTART_REPLICA_EXPIRY_DEFAULT = 50;
   public static final String  DFS_NAMENODE_BACKUP_ADDRESS_KEY = "dfs.namenode.backup.address";
@@ -207,12 +137,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT = 5*60*1000;
   public static final String  DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY = "dfs.namenode.tolerate.heartbeat.multiplier";
   public static final int     DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT = 4;
-  public static final String  DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";
-  public static final String  DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-client.xml";
-  public static final String  DFS_CLIENT_HTTPS_NEED_AUTH_KEY = "dfs.client.https.need-auth";
-  public static final boolean DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false;
-  public static final String  DFS_CLIENT_CACHED_CONN_RETRY_KEY = "dfs.client.cached.conn.retry";
-  public static final int     DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT = 3;
   public static final String  DFS_NAMENODE_ACCESSTIME_PRECISION_KEY = "dfs.namenode.accesstime.precision";
   public static final long    DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT = 3600000;
   public static final String  DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY = "dfs.namenode.replication.considerLoad";
@@ -376,19 +300,15 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_EDITS_PLUGIN_PREFIX = "dfs.namenode.edits.journal-plugin";
   public static final String  DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY = "dfs.namenode.edits.dir.required";
   public static final String  DFS_NAMENODE_EDITS_DIR_DEFAULT = "file:///tmp/hadoop/dfs/name";
-  public static final String  DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size"; 
-  public static final String  DFS_CLIENT_RETRY_WINDOW_BASE= "dfs.client.retry.window.base";
   public static final String  DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";
   public static final String  DFS_METRICS_PERCENTILES_INTERVALS_KEY = "dfs.metrics.percentiles.intervals";
   public static final String  DFS_DATANODE_HOST_NAME_KEY = "dfs.datanode.hostname";
   public static final String  DFS_NAMENODE_HOSTS_KEY = "dfs.namenode.hosts";
   public static final String  DFS_NAMENODE_HOSTS_EXCLUDE_KEY = "dfs.namenode.hosts.exclude";
-  public static final String  DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout";
   public static final String  DFS_NAMENODE_CHECKPOINT_DIR_KEY = "dfs.namenode.checkpoint.dir";
   public static final String  DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY = "dfs.namenode.checkpoint.edits.dir";
   public static final String  DFS_HOSTS = "dfs.hosts";
   public static final String  DFS_HOSTS_EXCLUDE = "dfs.hosts.exclude";
-  public static final String  DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
   public static final String  DFS_NAMENODE_AUDIT_LOGGERS_KEY = "dfs.namenode.audit.loggers";
   public static final String  DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME = "default";
   public static final String  DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY = "dfs.namenode.audit.log.token.tracking.id";
@@ -396,20 +316,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY = "dfs.namenode.audit.log.async";
   public static final boolean DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT = false;
 
-  // Much code in hdfs is not yet updated to use these keys.
-  public static final String  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";
-  public static final int     DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT = 5;
-  // the initial delay (unit is ms) for locateFollowingBlock, the delay time will increase exponentially(double) for each retry.
-  public static final String  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY = "dfs.client.block.write.locateFollowingBlock.initial.delay.ms";
-  public static final int     DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT = 400;
-  public static final String  DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY = "dfs.client.block.write.retries";
-  public static final int     DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT = 3;
-  public static final String  DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY = "dfs.client.max.block.acquire.failures";
-  public static final int     DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT = 3;
-  public static final String  DFS_CLIENT_USE_LEGACY_BLOCKREADER = "dfs.client.use.legacy.blockreader";
-  public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT = false;
-  public static final String  DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL = "dfs.client.use.legacy.blockreader.local";
-  public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT = false;
   public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = "dfs.balancer.movedWinWidth";
   public static final long    DFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;
   public static final String  DFS_BALANCER_MOVERTHREADS_KEY = "dfs.balancer.moverThreads";
@@ -508,28 +414,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   /* Maximum number of blocks to process for initializing replication queues */
   public static final String  DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT = "dfs.block.misreplication.processing.limit";
   public static final int     DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT = 10000;
-  public static final String DFS_CLIENT_READ_SHORTCIRCUIT_KEY = "dfs.client.read.shortcircuit";
-  public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT = false;
-  public static final String DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY = "dfs.client.read.shortcircuit.skip.checksum";
-  public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT = false;
-  public static final String DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY = "dfs.client.read.shortcircuit.buffer.size";
-  public static final int DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_DEFAULT = 1024 * 1024;
-  public static final String DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_KEY = "dfs.client.read.shortcircuit.streams.cache.size";
-  public static final int DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_DEFAULT = 256;
-  public static final String DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY = "dfs.client.read.shortcircuit.streams.cache.expiry.ms";
-  public static final long DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_DEFAULT = 5 * 60 * 1000;
-  public static final String DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC = "dfs.client.domain.socket.data.traffic";
-  public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = false;
-  public static final String DFS_CLIENT_MMAP_ENABLED= "dfs.client.mmap.enabled";
-  public static final boolean DFS_CLIENT_MMAP_ENABLED_DEFAULT = true;
-  public static final String DFS_CLIENT_MMAP_CACHE_SIZE = "dfs.client.mmap.cache.size";
-  public static final int DFS_CLIENT_MMAP_CACHE_SIZE_DEFAULT = 256;
-  public static final String DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS = "dfs.client.mmap.cache.timeout.ms";
-  public static final long DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS_DEFAULT  = 60 * 60 * 1000;
-  public static final String DFS_CLIENT_MMAP_RETRY_TIMEOUT_MS = "dfs.client.mmap.retry.timeout.ms";
-  public static final long DFS_CLIENT_MMAP_RETRY_TIMEOUT_MS_DEFAULT = 5 * 60 * 1000;
-  public static final String DFS_CLIENT_SHORT_CIRCUIT_REPLICA_STALE_THRESHOLD_MS = "dfs.client.short.circuit.replica.stale.threshold.ms";
-  public static final long DFS_CLIENT_SHORT_CIRCUIT_REPLICA_STALE_THRESHOLD_MS_DEFAULT = 30 * 60 * 1000;
 
   // property for fsimage compression
   public static final String DFS_IMAGE_COMPRESS_KEY = "dfs.image.compress";
@@ -699,12 +583,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_KEY = "dfs.namenode.retrycache.heap.percent";
   public static final float DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT = 0.03f;
   
-  // The number of NN response dropped by client proactively in each RPC call.
-  // For testing NN retry cache, we can set this property with positive value.
-  public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = "dfs.client.test.drop.namenode.response.number";
-  public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
-
-
   // Hidden configuration undocumented in hdfs-site. xml
   // Timeout to wait for block receiver and responder thread to stop
   public static final String DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_KEY = "dfs.datanode.xceiver.stop.timeout.millis";
@@ -730,20 +608,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_DEFAULT =
       false;
   
-  // hedged read properties
-  public static final String DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS =
-      "dfs.client.hedged.read.threshold.millis";
-  public static final long DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS =
-      500;
-
-  public static final String DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE =
-      "dfs.client.hedged.read.threadpool.size";
-  public static final int     DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE = 0;
-
   // Slow io warning log threshold settings for dfsclient and datanode.
-  public static final String DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
-    "dfs.client.slow.io.warning.threshold.ms";
-  public static final long DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000;
   public static final String DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY =
     "dfs.datanode.slow.io.warning.threshold.ms";
   public static final long DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 300;
@@ -779,13 +644,187 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_PIPELINE_ECN_ENABLED_DEFAULT = false;
 
   // Key Provider Cache Expiry
-  public static final String DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS =
-      "dfs.client.key.provider.cache.expiry";
-  // 10 days
-  public static final long DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT =
-      TimeUnit.DAYS.toMillis(10);
   public static final String DFS_DATANODE_BLOCK_PINNING_ENABLED = 
     "dfs.datanode.block-pinning.enabled";
   public static final boolean DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT =
     false;
+
+  // client retry confs are moved to HdfsClientConfigKeys.Retry
+  @Deprecated
+  public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
+      = HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY;
+  @Deprecated
+  public static final boolean DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT
+      = HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT; 
+  @Deprecated
+  public static final String  DFS_CLIENT_RETRY_POLICY_SPEC_KEY
+      = HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY;
+  @Deprecated
+  public static final String  DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT
+      = HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT;
+  @Deprecated
+  public static final String  DFS_CLIENT_RETRY_TIMES_GET_LAST_BLOCK_LENGTH
+      = HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_KEY;
+  @Deprecated
+  public static final int     DFS_CLIENT_RETRY_TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT
+      = HdfsClientConfigKeys.Retry.TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT;
+  @Deprecated
+  public static final String  DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH
+      = HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_KEY;
+  @Deprecated
+  public static final int     DFS_CLIENT_RETRY_INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT
+      = HdfsClientConfigKeys.Retry.INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT;
+  @Deprecated
+  public static final String  DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY
+      = HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY;
+  @Deprecated
+  public static final int     DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT
+      = HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT;
+  @Deprecated
+  public static final String  DFS_CLIENT_RETRY_WINDOW_BASE
+      = HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY;
+  @Deprecated
+  public static final int     DFS_CLIENT_RETRY_WINDOW_BASE_DEFAULT
+      = HdfsClientConfigKeys.Retry.WINDOW_BASE_DEFAULT;
+
+  public static final String  DFS_CLIENT_WRITE_MAX_PACKETS_IN_FLIGHT_KEY = "dfs.client.write.max-packets-in-flight";
+  public static final int     DFS_CLIENT_WRITE_MAX_PACKETS_IN_FLIGHT_DEFAULT = 80;
+  public static final String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
+  public static final int     DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT = 64*1024;
+  public static final String  DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_ENABLED_KEY
+      = "dfs.client.write.byte-array-manager.enabled";
+  public static final boolean DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_ENABLED_DEFAULT
+      = false;
+  public static final String  DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_THRESHOLD_KEY
+      = "dfs.client.write.byte-array-manager.count-threshold";
+  public static final int     DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_THRESHOLD_DEFAULT
+      = 128;
+  public static final String  DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_LIMIT_KEY
+      = "dfs.client.write.byte-array-manager.count-limit";
+  public static final int     DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_LIMIT_DEFAULT
+      = 2048;
+  public static final String  DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_RESET_TIME_PERIOD_MS_KEY
+      = "dfs.client.write.byte-array-manager.count-reset-time-period-ms";
+  public static final long    DFS_CLIENT_WRITE_BYTE_ARRAY_MANAGER_COUNT_RESET_TIME_PERIOD_MS_DEFAULT
+      = 10L * 1000;
+  public static final String  DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY = "dfs.client.block.write.replace-datanode-on-failure.enable";
+  public static final boolean DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_DEFAULT = true;
+  public static final String  DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY = "dfs.client.block.write.replace-datanode-on-failure.policy";
+  public static final String  DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT = "DEFAULT";
+  public static final String  DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_KEY = "dfs.client.block.write.replace-datanode-on-failure.best-effort";
+  public static final boolean DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_DEFAULT = false;
+  public static final String  DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL = "dfs.client.write.exclude.nodes.cache.expiry.interval.millis";
+  public static final long    DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT = 10 * 60 * 1000; // 10 minutes, in ms
+
+  public static final String  DFS_CLIENT_SOCKET_TIMEOUT_KEY = "dfs.client.socket-timeout";
+  public static final String  DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY = "dfs.client.socketcache.capacity";
+  public static final int     DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = 16;
+  public static final String  DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY = "dfs.client.socketcache.expiryMsec";
+  public static final long    DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 3000;
+
+  public static final String  DFS_CLIENT_USE_DN_HOSTNAME = "dfs.client.use.datanode.hostname";
+  public static final boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT = false;
+  public static final String  DFS_CLIENT_CACHE_DROP_BEHIND_WRITES = "dfs.client.cache.drop.behind.writes";
+  public static final String  DFS_CLIENT_CACHE_DROP_BEHIND_READS = "dfs.client.cache.drop.behind.reads";
+  public static final String  DFS_CLIENT_CACHE_READAHEAD = "dfs.client.cache.readahead";
+  public static final String  DFS_CLIENT_CACHED_CONN_RETRY_KEY = "dfs.client.cached.conn.retry";
+  public static final int     DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT = 3;
+
+  public static final String  DFS_CLIENT_CONTEXT = "dfs.client.context";
+  public static final String  DFS_CLIENT_CONTEXT_DEFAULT = "default";
+  public static final String  DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS = "dfs.client.file-block-storage-locations.num-threads";
+  public static final int     DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT = 10;
+  public static final String  DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS = "dfs.client.file-block-storage-locations.timeout.millis";
+  public static final int     DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_MS_DEFAULT = 1000;
+
+  public static final String  DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX = "dfs.client.failover.proxy.provider";
+  public static final String  DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = "dfs.client.failover.max.attempts";
+  public static final int     DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = 15;
+  public static final String  DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY = "dfs.client.failover.sleep.base.millis";
+  public static final int     DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
+  public static final String  DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY = "dfs.client.failover.sleep.max.millis";
+  public static final int     DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT = 15000;
+  public static final String  DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_KEY = "dfs.client.failover.connection.retries";
+  public static final int     DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_DEFAULT = 0;
+  public static final String  DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY = "dfs.client.failover.connection.retries.on.timeouts";
+  public static final int     DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
+  
+  public static final String  DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY = "dfs.client.datanode-restart.timeout";
+  public static final long    DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT = 30;
+
+  public static final String  DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";
+  public static final String  DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_DEFAULT = "ssl-client.xml";
+  public static final String  DFS_CLIENT_HTTPS_NEED_AUTH_KEY = "dfs.client.https.need-auth";
+  public static final boolean DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT = false;
+  // Much code in hdfs is not yet updated to use these keys.
+  public static final String  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries";
+  public static final int     DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT = 5;
+  // the initial delay (unit is ms) for locateFollowingBlock, the delay time will increase exponentially(double) for each retry.
+  public static final String  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY = "dfs.client.block.write.locateFollowingBlock.initial.delay.ms";
+  public static final int     DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_DEFAULT = 400;
+  public static final String  DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY = "dfs.client.block.write.retries";
+  public static final int     DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT = 3;
+  public static final String  DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY = "dfs.client.max.block.acquire.failures";
+  public static final int     DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT = 3;
+
+  public static final String  DFS_CLIENT_USE_LEGACY_BLOCKREADER = "dfs.client.use.legacy.blockreader";
+  public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT = false;
+  public static final String  DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL = "dfs.client.use.legacy.blockreader.local";
+  public static final boolean DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT = false;
+
+  public static final String  DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
+
+  public static final String  DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size"; 
+  public static final String  DFS_CLIENT_READ_SHORTCIRCUIT_KEY = "dfs.client.read.shortcircuit";
+  public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT = false;
+  public static final String  DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY = "dfs.client.read.shortcircuit.skip.checksum";
+  public static final boolean DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT = false;
+  public static final String  DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY = "dfs.client.read.shortcircuit.buffer.size";
+  public static final int     DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_DEFAULT = 1024 * 1024;
+  public static final String  DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_KEY = "dfs.client.read.shortcircuit.streams.cache.size";
+  public static final int     DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_DEFAULT = 256;
+  public static final String  DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY = "dfs.client.read.shortcircuit.streams.cache.expiry.ms";
+  public static final long    DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_DEFAULT = 5 * 60 * 1000;
+
+  public static final String  DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC = "dfs.client.domain.socket.data.traffic";
+  public static final boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = false;
+  public static final String  DFS_CLIENT_MMAP_ENABLED= "dfs.client.mmap.enabled";
+  public static final boolean DFS_CLIENT_MMAP_ENABLED_DEFAULT = true;
+  public static final String  DFS_CLIENT_MMAP_CACHE_SIZE = "dfs.client.mmap.cache.size";
+  public static final int     DFS_CLIENT_MMAP_CACHE_SIZE_DEFAULT = 256;
+  public static final String  DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS = "dfs.client.mmap.cache.timeout.ms";
+  public static final long    DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS_DEFAULT  = 60 * 60 * 1000;
+  public static final String  DFS_CLIENT_MMAP_RETRY_TIMEOUT_MS = "dfs.client.mmap.retry.timeout.ms";
+  public static final long    DFS_CLIENT_MMAP_RETRY_TIMEOUT_MS_DEFAULT = 5 * 60 * 1000;
+  public static final String  DFS_CLIENT_SHORT_CIRCUIT_REPLICA_STALE_THRESHOLD_MS = "dfs.client.short.circuit.replica.stale.threshold.ms";
+  public static final long    DFS_CLIENT_SHORT_CIRCUIT_REPLICA_STALE_THRESHOLD_MS_DEFAULT = 30 * 60 * 1000;
+
+  // The number of NN response dropped by client proactively in each RPC call.
+  // For testing NN retry cache, we can set this property with positive value.
+  public static final String  DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = "dfs.client.test.drop.namenode.response.number";
+  public static final int     DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
+  public static final String  DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_KEY =
+      "dfs.client.slow.io.warning.threshold.ms";
+  public static final long    DFS_CLIENT_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 30000;
+  public static final String  DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS =
+      "dfs.client.key.provider.cache.expiry";
+  public static final long    DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT =
+      TimeUnit.DAYS.toMillis(10); // 10 days
+
+  // hedged read properties
+  public static final String  DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS =
+      "dfs.client.hedged.read.threshold.millis";
+  public static final long    DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS =
+      500;
+
+  public static final String  DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE =
+      "dfs.client.hedged.read.threadpool.size";
+  public static final int     DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE = 0;
 }

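The @Deprecated aliases above are the usual compatibility pattern for this kind of move: the old constant survives but is defined in terms of the new one, so compile-time references and the key string both stay valid. A self-contained sketch of the pattern with illustrative names:

    class NewKeys {
      static final String PREFIX = "example.client.retry.";
      static final String MAX_ATTEMPTS_KEY = PREFIX + "max.attempts";
      static final int    MAX_ATTEMPTS_DEFAULT = 10;
    }

    class OldKeys {
      /** @deprecated use {@link NewKeys#MAX_ATTEMPTS_KEY}. */
      @Deprecated
      static final String OLD_MAX_ATTEMPTS_KEY = NewKeys.MAX_ATTEMPTS_KEY;
    }
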
http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
index 98c8b41..ec2223f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
@@ -24,8 +24,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
@@ -43,6 +41,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient.Conf;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -52,10 +51,10 @@ import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
-import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
-import org.apache.hadoop.hdfs.server.namenode.ha.WrappedFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
+import org.apache.hadoop.hdfs.server.namenode.ha.WrappedFailoverProxyProvider;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -69,7 +68,9 @@ import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.io.retry.RetryUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
+import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolPB;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.SecurityUtil;
@@ -79,9 +80,6 @@ import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolC
 import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolPB;
 import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
 import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolPB;
-import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
-import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolPB;
-import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolClientSideTranslatorPB;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
 import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB;
@@ -145,7 +143,6 @@ public class NameNodeProxies {
    *         delegation token service it corresponds to
    * @throws IOException if there is an error creating the proxy
    **/
-  @SuppressWarnings("unchecked")
   public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
       URI nameNodeUri, Class<T> xface) throws IOException {
     return createProxy(conf, nameNodeUri, xface, null);
@@ -242,8 +239,8 @@ public class NameNodeProxies {
           DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
           DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
       int maxRetryAttempts = config.getInt(
-          DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
-          DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
+          HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
+          HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
       InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>(
               numResponseToDrop, failoverProxyProvider,
               RetryPolicies.failoverOnNetworkException(
@@ -284,7 +281,6 @@ public class NameNodeProxies {
    *         delegation token service it corresponds to
    * @throws IOException
    */
-  @SuppressWarnings("unchecked")
   public static <T> ProxyAndInfo<T> createNonHAProxy(
       Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
       UserGroupInformation ugi, boolean withRetries) throws IOException {
@@ -412,10 +408,10 @@ public class NameNodeProxies {
     final RetryPolicy defaultPolicy = 
         RetryUtils.getDefaultRetryPolicy(
             conf, 
-            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, 
-            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT, 
-            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
-            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
+            HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, 
+            HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT, 
+            HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
+            HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
             SafeModeException.class);
     
     final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);

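The getDefaultRetryPolicy call above changes only where its keys live. A standalone sketch of the same call; per the old DFSConfigKeys comment, the spec default "10000,6,60000,10" reads as alternating sleep-time/retry-count pairs, and the policy is disabled by default:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
    import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
    import org.apache.hadoop.io.retry.RetryPolicy;
    import org.apache.hadoop.io.retry.RetryUtils;

    public class DefaultRetryPolicySketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(
            conf,
            HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY,
            HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
            HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
            HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
            SafeModeException.class); // exception to retry on, as above
        System.out.println(defaultPolicy);
      }
    }
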
http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
index 9133cf3..f87de97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsUtils.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
@@ -60,7 +59,7 @@ public class HdfsUtils {
     //disable FileSystem cache
     conf.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
     //disable client retry for rpc connection and rpc calls
-    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, false);
+    conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, false);
     conf.setInt(
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0);
 

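The HdfsUtils change above swaps only the constant, but the surrounding fail-fast setup is worth seeing whole. A sketch of the same configuration (the method name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class FastFailConfSketch {
      /** Build a conf whose client fails fast instead of retrying. */
      static Configuration fastFailConf(String scheme) {
        Configuration conf = new Configuration();
        // Bypass the FileSystem cache so this conf is actually used.
        conf.setBoolean(
            String.format("fs.%s.impl.disable.cache", scheme), true);
        // Disable the client retry policy and RPC connect retries.
        conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, false);
        conf.setInt(
            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
            0);
        return conf;
      }
    }
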
http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
index 7510cfb..a3104a0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.junit.Test;
@@ -52,7 +53,7 @@ public class TestBlockMissingException {
     int numBlocks = 4;
     conf = new HdfsConfiguration();
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     try {
       dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
       dfs.waitActive();

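This and the following test changes all apply one speedup: shrinking the retry window base from its 3000 ms default to 10 ms so reads against missing or corrupt blocks give up quickly. A standalone sketch of the pattern (node count is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class ShortRetryWindowSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Set short retry timeouts so tests run faster.
        conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
        MiniDFSCluster cluster =
            new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
        try {
          cluster.waitActive();
          // ... exercise client reads against the mini cluster ...
        } finally {
          cluster.shutdown();
        }
      }
    }
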
http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
index 1c4134f..6a8f9db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocalLegacy.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -70,7 +71,7 @@ public class TestBlockReaderLocalLegacy {
         UserGroupInformation.getCurrentUser().getShortUserName());
     conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, false);
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     return conf;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
index 3bc986f..0c9660e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -74,7 +75,7 @@ public class TestClientReportBadBlock {
     // disable block scanner
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); 
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
         .build();
     cluster.waitActive();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
index f0c4c42..2b11fbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
@@ -30,19 +30,17 @@ import java.nio.channels.FileChannel;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClientFaultInjector;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.io.IOUtils;
 import org.junit.Before;
 import org.junit.Test;
-
 import org.mockito.Mockito;
-import org.mockito.stubbing.Answer;
 
 /**
  * A JUnit test for corrupted file handling.
@@ -90,7 +88,7 @@ public class TestCrcCorruption {
   public void testCorruptionDuringWrt() throws Exception {
     Configuration conf = new HdfsConfiguration();
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     MiniDFSCluster cluster = null;
 
     try {
@@ -156,7 +154,7 @@ public class TestCrcCorruption {
     short replFactor = 2;
     Random random = new Random();
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
       cluster.waitActive();
@@ -339,7 +337,7 @@ public class TestCrcCorruption {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
 
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index c4258eb..abbcd4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -24,7 +24,6 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyBoolean;
-import static org.mockito.Matchers.anyList;
 import static org.mockito.Matchers.anyLong;
 import static org.mockito.Matchers.anyObject;
 import static org.mockito.Matchers.anyShort;
@@ -52,7 +51,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CreateFlag;
@@ -63,6 +61,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsUtils;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
@@ -93,8 +92,8 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.Assert;
-import org.junit.Test;
 import org.junit.Before;
+import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.internal.stubbing.answers.ThrowsException;
 import org.mockito.invocation.InvocationOnMock;
@@ -291,7 +290,7 @@ public class TestDFSClientRetries {
     Path file = new Path("/testFile");
 
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 2 * 1000);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 
@@ -592,7 +591,7 @@ public class TestDFSClientRetries {
       xcievers);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
       retries);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, timeWin);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, timeWin);
     // Disable keepalive
     conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 0);
 
@@ -878,7 +877,7 @@ public class TestDFSClientRetries {
     if (isWebHDFS) {
       conf.setBoolean(DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
     } else {
-      conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
+      conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true);
     }
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
     conf.setInt(MiniDFSCluster.DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 5000);

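Note the asymmetry kept above: the WebHDFS toggle stays in DFSConfigKeys, while the RPC client toggle now comes from HdfsClientConfigKeys.Retry. A small sketch (the method name is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class EnableRetrySketch {
      static void enableRetries(Configuration conf, boolean isWebHDFS) {
        if (isWebHDFS) {
          conf.setBoolean(
              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
        } else {
          conf.setBoolean(
              HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true);
        }
      }
    }
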
http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
index 0a88208..c1bf771 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -1553,7 +1554,7 @@ public class TestDFSShell {
     final Path remotef = new Path(root, fname);
     final Configuration conf = new HdfsConfiguration();
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     TestGetRunner runner = new TestGetRunner() {
     	private int count = 0;
     	private final FsShell shell = new FsShell(conf);
@@ -2584,8 +2585,6 @@ public class TestDFSShell {
   /* HDFS-6413 xattr names erroneously handled as case-insensitive */
   @Test (timeout = 30000)
   public void testSetXAttrCaseSensitivity() throws Exception {
-    UserGroupInformation user = UserGroupInformation.
-        createUserForTesting("user", new String[] {"mygroup"});
     MiniDFSCluster cluster = null;
     PrintStream bak = null;
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
index 30484d1..bf19c40 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptedTransfer.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -36,6 +35,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
@@ -305,7 +305,7 @@ public class TestEncryptedTransfer {
     try {
       Configuration conf = new Configuration();
       // Set short retry timeouts so this test runs faster
-      conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+      conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
       cluster = new MiniDFSCluster.Builder(conf).build();
       
       FileSystem fs = getFileSystem(conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
index 78ac19c..578b2cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
@@ -23,12 +23,14 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.junit.Assert;
 import org.junit.Test;
 
 import javax.management.*;
+
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 
@@ -58,7 +60,7 @@ public class TestMissingBlocksAlert {
       Configuration conf = new HdfsConfiguration();
       //minimize test delay
       conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 0);
-      conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+      conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
       int fileLen = 10*1024;
       conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, fileLen/2);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
index 00a3ebc..62f6c06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.io.IOUtils;
@@ -284,7 +285,7 @@ public class TestPread {
         numHedgedReadPoolThreads);
     conf.setLong(DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
         hedgedReadTimeoutMillis);
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 0);
     // Set up the InjectionHandler
     DFSClientFaultInjector.instance = Mockito
         .mock(DFSClientFaultInjector.class);
@@ -435,7 +436,7 @@ public class TestPread {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
     conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096);
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 0);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 0);
     if (simulatedStorage) {
       SimulatedFSDataset.setFactory(conf);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
index 52e5384..16ddc75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java
@@ -20,8 +20,9 @@ package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY;
-
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 
 import java.io.IOException;
 
@@ -30,10 +31,10 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.IOUtils;
@@ -49,10 +50,8 @@ import org.junit.rules.Timeout;
 public class TestSaslDataTransfer extends SaslDataTransferTestCase {
 
   private static final int BLOCK_SIZE = 4096;
-  private static final int BUFFER_SIZE= 1024;
   private static final int NUM_BLOCKS = 3;
   private static final Path PATH  = new Path("/file1");
-  private static final short REPLICATION = 3;
 
   private MiniDFSCluster cluster;
   private FileSystem fs;
@@ -117,7 +116,7 @@ public class TestSaslDataTransfer extends SaslDataTransferTestCase {
     HdfsConfiguration clusterConf = createSecureConfig(
       "authentication,integrity,privacy");
     // Set short retry timeouts so this test runs faster
-    clusterConf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    clusterConf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     startCluster(clusterConf);
     HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
     clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index b15cb38..84e5c82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.RemotePeerFactory;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -212,7 +213,7 @@ public class TestBlockTokenWithDFS {
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, numDataNodes);
     conf.setInt("ipc.client.connect.max.retries", 0);
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     return conf;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 70deb1b..f6bab7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -70,6 +70,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -630,7 +631,7 @@ public class TestFsck {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     FileSystem fs = null;
     DFSClient dfsClient = null;
     LocatedBlocks blocks = null;
@@ -705,7 +706,7 @@ public class TestFsck {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     // Set minReplication to 2
     short minReplication=2;
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,minReplication);
@@ -1345,8 +1346,6 @@ public class TestFsck {
     short NUM_DN = 1;
     final long blockSize = 512;
     Random random = new Random();
-    DFSClient dfsClient;
-    LocatedBlocks blocks;
     ExtendedBlock block;
     short repFactor = 1;
     String [] racks = {"/rack1"};
@@ -1355,7 +1354,7 @@ public class TestFsck {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index 5d319b4..7118b9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -69,7 +70,7 @@ public class TestListCorruptFileBlocks {
       conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans directories
       conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
       // Set short retry timeouts so this test runs faster
-      conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+      conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
       cluster = new MiniDFSCluster.Builder(conf).build();
       FileSystem fs = cluster.getFileSystem();
 
@@ -148,7 +149,7 @@ public class TestListCorruptFileBlocks {
       conf.setFloat(DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,
                     0f);
       // Set short retry timeouts so this test runs faster
-      conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+      conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
       cluster = new MiniDFSCluster.Builder(conf).waitSafeMode(false).build();
       cluster.getNameNodeRpc().setSafeMode(
           HdfsConstants.SafeModeAction.SAFEMODE_LEAVE, false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41b7a26b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
index 01995bb..151e7d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailoverWithBlockTokensEnabled.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -64,7 +65,7 @@ public class TestFailoverWithBlockTokensEnabled {
     conf = new Configuration();
     conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
     // Set short retry timeouts so this test runs faster
-    conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
+    conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleHATopology())
         .numDataNodes(1)

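All of the hunks above make the same one-line substitution: the deprecated DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE constant gives way to HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY. A minimal sketch of the idiom in isolation (the class name is a placeholder; the 3000 ms default is Retry.WINDOW_BASE_DEFAULT):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class ShortRetryWindow {
      public static Configuration create() {
        Configuration conf = new HdfsConfiguration();
        // Shrink the base of the randomized client retry window from the
        // 3000 ms default to 10 ms so failure-path tests finish quickly.
        conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
        return conf;
      }
    }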

[47/47] hadoop git commit: HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys. Contributed by Haohui Mai.

Posted by zj...@apache.org.
HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1581b37a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1581b37a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1581b37a

Branch: refs/heads/YARN-2928
Commit: 1581b37a524625c30b5192cb313dbe69e08423ef
Parents: 33b8fab
Author: Haohui Mai <wh...@apache.org>
Authored: Thu Apr 9 14:36:27 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:57 2015 -0700

----------------------------------------------------------------------
 .../hdfs/client/HdfsClientConfigKeys.java       | 17 +++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 50 +++++++++++++++-----
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      | 25 +++++-----
 .../hadoop/hdfs/TestDFSClientRetries.java       |  2 +-
 5 files changed, 71 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1581b37a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 604d60e..7316e3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -60,4 +60,21 @@ public interface HdfsClientConfigKeys {
     public static final int     WINDOW_BASE_DEFAULT
         = 3000;
   }
+
+  // WebHDFS retry policy configuration keys
+  interface WebHdfsRetry {
+    String  PREFIX = "dfs.http.client.";
+    String  RETRY_POLICY_ENABLED_KEY = PREFIX + "retry.policy.enabled";
+    boolean RETRY_POLICY_ENABLED_DEFAULT = false;
+    String  RETRY_POLICY_SPEC_KEY = PREFIX + "retry.policy.spec";
+    String  RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,...
+    String  FAILOVER_MAX_ATTEMPTS_KEY = PREFIX + "failover.max.attempts";
+    int     FAILOVER_MAX_ATTEMPTS_DEFAULT = 15;
+    String  RETRY_MAX_ATTEMPTS_KEY = PREFIX + "retry.max.attempts";
+    int     RETRY_MAX_ATTEMPTS_DEFAULT = 10;
+    String  FAILOVER_SLEEPTIME_BASE_KEY = PREFIX + "failover.sleep.base.millis";
+    int     FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
+    String  FAILOVER_SLEEPTIME_MAX_KEY = PREFIX + "failover.sleep.max.millis";
+    int     FAILOVER_SLEEPTIME_MAX_DEFAULT = 15000;
+  }
 }
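
With the keys in place, a minimal sketch (hypothetical class and values) of enabling the spec-driven WebHDFS retry policy from client code:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class WebHdfsRetryConfigExample {
      public static Configuration newConf() {
        Configuration conf = new Configuration();
        // Switch WebHDFS from the failover-based policy to the spec-driven one.
        conf.setBoolean(
            HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_KEY, true);
        // Pairs of (sleep-millis, attempts): 10 s x 6 tries, then 60 s x 10.
        conf.set(HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_SPEC_KEY,
            "10000,6,60000,10");
        return conf;
      }
    }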

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1581b37a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 695dc36..e091a65 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -412,6 +412,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8099. Change "DFSInputStream has been closed already" message to
     debug log level (Charles Lamb via Colin P. McCabe)
 
+    HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys.
+    (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1581b37a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d0ca125..ce08075 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -601,19 +601,43 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long   DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT = 60000;
 
   // WebHDFS retry policy
-  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY = "dfs.http.client.retry.policy.enabled";
-  public static final boolean DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT = false;
-  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY = "dfs.http.client.retry.policy.spec";
-  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,...
-  public static final String  DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = "dfs.http.client.failover.max.attempts";
-  public static final int     DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = 15;
-  public static final String  DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY = "dfs.http.client.retry.max.attempts";
-  public static final int     DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
-  public static final String  DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY = "dfs.http.client.failover.sleep.base.millis";
-  public static final int     DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
-  public static final String  DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY = "dfs.http.client.failover.sleep.max.millis";
-  public static final int     DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT = 15000;
-  
+  @Deprecated
+  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY =
+      HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_KEY;
+  @Deprecated
+  public static final boolean DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT =
+      HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_DEFAULT;
+  @Deprecated
+  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY =
+      HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_SPEC_KEY;
+  @Deprecated
+  public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
+      HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_SPEC_DEFAULT;
+  @Deprecated
+  public static final String  DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY =
+      HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_KEY;
+  @Deprecated
+  public static final int     DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT =
+      HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_DEFAULT;
+  @Deprecated
+  public static final String  DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY =
+      HdfsClientConfigKeys.WebHdfsRetry.RETRY_MAX_ATTEMPTS_KEY;
+  @Deprecated
+  public static final int     DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT =
+      HdfsClientConfigKeys.WebHdfsRetry.RETRY_MAX_ATTEMPTS_DEFAULT;
+  @Deprecated
+  public static final String  DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY =
+      HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_BASE_KEY;
+  @Deprecated
+  public static final int     DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT =
+      HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_BASE_DEFAULT;
+  @Deprecated
+  public static final String  DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY =
+      HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_MAX_KEY;
+  @Deprecated
+  public static final int     DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT =
+      HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_MAX_DEFAULT;
+
   // Handling unresolved DN topology mapping
   public static final String  DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY = 
       "dfs.namenode.reject-unresolved-dn-topology-mapping";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1581b37a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 383f2e4..044403e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -59,6 +59,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
@@ -172,25 +173,25 @@ public class WebHdfsFileSystem extends FileSystem
       this.retryPolicy =
           RetryUtils.getDefaultRetryPolicy(
               conf,
-              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
-              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
-              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
-              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
+              HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_KEY,
+              HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_DEFAULT,
+              HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_SPEC_KEY,
+              HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_SPEC_DEFAULT,
               SafeModeException.class);
     } else {
 
       int maxFailoverAttempts = conf.getInt(
-          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
-          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
+          HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_KEY,
+          HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_DEFAULT);
       int maxRetryAttempts = conf.getInt(
-          DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
-          DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
+          HdfsClientConfigKeys.WebHdfsRetry.RETRY_MAX_ATTEMPTS_KEY,
+          HdfsClientConfigKeys.WebHdfsRetry.RETRY_MAX_ATTEMPTS_DEFAULT);
       int failoverSleepBaseMillis = conf.getInt(
-          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
-          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
+          HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_BASE_KEY,
+          HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_BASE_DEFAULT);
       int failoverSleepMaxMillis = conf.getInt(
-          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
-          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
+          HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_MAX_KEY,
+          HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_MAX_DEFAULT);
 
       this.retryPolicy = RetryPolicies
           .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
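
When the spec-driven policy is disabled, the four failover settings above feed RetryPolicies.failoverOnNetworkException. A hedged sketch of tuning them from client code (host, port, and values are placeholders):

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class WebHdfsFailoverTuning {
      public static FileSystem open() throws IOException {
        Configuration conf = new Configuration();
        // Allow up to 4 failover attempts, backing off from 1 s to an 8 s cap.
        conf.setInt(HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_MAX_ATTEMPTS_KEY, 4);
        conf.setInt(
            HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_BASE_KEY, 1000);
        conf.setInt(
            HdfsClientConfigKeys.WebHdfsRetry.FAILOVER_SLEEPTIME_MAX_KEY, 8000);
        return FileSystem.get(URI.create("webhdfs://nn-host:50070"), conf);
      }
    }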

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1581b37a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index abbcd4d..5d95a8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -875,7 +875,7 @@ public class TestDFSClientRetries {
     final Path dir = new Path("/testNamenodeRestart");
 
     if (isWebHDFS) {
-      conf.setBoolean(DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
+      conf.setBoolean(HdfsClientConfigKeys.WebHdfsRetry.RETRY_POLICY_ENABLED_KEY, true);
     } else {
       conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true);
     }


[13/47] hadoop git commit: HDFS-8080. Separate JSON related routines used by WebHdfsFileSystem to a package local class. Contributed by Haohui Mai.

Posted by zj...@apache.org.
HDFS-8080. Separate JSON related routines used by WebHdfsFileSystem to a package local class. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd778809
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd778809
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd778809

Branch: refs/heads/YARN-2928
Commit: fd7788098039a05c40ed729a986836a9c481b205
Parents: 4937336
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Apr 7 21:23:52 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:58 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    | 444 +----------------
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 485 +++++++++++++++++++
 .../hadoop/hdfs/web/WebHdfsConstants.java       |  30 ++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      |  28 +-
 .../namenode/ha/TestDelegationTokensWithHA.java |  92 ----
 .../apache/hadoop/hdfs/web/TestJsonUtil.java    |  14 +-
 .../hadoop/hdfs/web/TestWebHDFSForHA.java       |  84 ++++
 .../hadoop/hdfs/web/TestWebHdfsTokens.java      |   3 +-
 9 files changed, 625 insertions(+), 558 deletions(-)
----------------------------------------------------------------------
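
The split keeps JSON serialization in JsonUtil (used by the server-side producers) and moves parsing into the package-local JsonUtilClient consumed by WebHdfsFileSystem. A round-trip sketch under that assumption; the class would have to live in org.apache.hadoop.hdfs.web to reach the package-local parser:

    import java.util.Map;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
    import org.codehaus.jackson.map.ObjectMapper;

    class JsonRoundTripSketch {
      static HdfsFileStatus roundTrip(HdfsFileStatus status) throws Exception {
        // Serialize with the routine that stays behind in JsonUtil...
        String json = JsonUtil.toJsonString(status, true);
        Map<?, ?> m = new ObjectMapper().readValue(json, Map.class);
        // ...and parse with the routine relocated to JsonUtilClient.
        return JsonUtilClient.toFileStatus(m, true);
      }
    }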


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd778809/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ac508cb..84e382a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -388,6 +388,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it
     can be easily overrided. (Walter Su via vinayakumarb)
 
+    HDFS-8080. Separate JSON related routines used by WebHdfsFileSystem to a
+    package local class. (wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd778809/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index d53bc31..252b0f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -21,34 +21,22 @@ import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.*;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.ObjectReader;
 
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.*;
 
 /** JSON Utilities */
 public class JsonUtil {
   private static final Object[] EMPTY_OBJECT_ARRAY = {};
-  private static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
 
   /** Convert a token object to a Json string. */
   public static String toJsonString(final Token<? extends TokenIdentifier> token
@@ -67,34 +55,6 @@ public class JsonUtil {
     return m;
   }
 
-  /** Convert a Json map to a Token. */
-  public static Token<? extends TokenIdentifier> toToken(
-      final Map<?, ?> m) throws IOException {
-    if (m == null) {
-      return null;
-    }
-
-    final Token<DelegationTokenIdentifier> token
-        = new Token<DelegationTokenIdentifier>();
-    token.decodeFromUrlString((String)m.get("urlString"));
-    return token;
-  }
-
-  /** Convert a Json map to a Token of DelegationTokenIdentifier. */
-  @SuppressWarnings("unchecked")
-  public static Token<DelegationTokenIdentifier> toDelegationToken(
-      final Map<?, ?> json) throws IOException {
-    final Map<?, ?> m = (Map<?, ?>)json.get(Token.class.getSimpleName());
-    return (Token<DelegationTokenIdentifier>)toToken(m);
-  }
-
-  /** Convert a Json map to a Token of BlockTokenIdentifier. */
-  @SuppressWarnings("unchecked")
-  private static Token<BlockTokenIdentifier> toBlockToken(
-      final Map<?, ?> m) throws IOException {
-    return (Token<BlockTokenIdentifier>)toToken(m);
-  }
-
   /** Convert an exception object to a Json string. */
   public static String toJsonString(final Exception e) {
     final Map<String, Object> m = new TreeMap<String, Object>();
@@ -104,14 +64,6 @@ public class JsonUtil {
     return toJsonString(RemoteException.class, m);
   }
 
-  /** Convert a Json map to a RemoteException. */
-  public static RemoteException toRemoteException(final Map<?, ?> json) {
-    final Map<?, ?> m = (Map<?, ?>)json.get(RemoteException.class.getSimpleName());
-    final String message = (String)m.get("message");
-    final String javaClassName = (String)m.get("javaClassName");
-    return new RemoteException(javaClassName, message);
-  }
-
   private static String toJsonString(final Class<?> clazz, final Object value) {
     return toJsonString(clazz.getSimpleName(), value);
   }
@@ -133,27 +85,6 @@ public class JsonUtil {
     return String.format("%o", permission.toShort());
   }
 
-  /** Convert a string to a FsPermission object. */
-  private static FsPermission toFsPermission(final String s, Boolean aclBit,
-      Boolean encBit) {
-    FsPermission perm = new FsPermission(Short.parseShort(s, 8));
-    final boolean aBit = (aclBit != null) ? aclBit : false;
-    final boolean eBit = (encBit != null) ? encBit : false;
-    if (aBit || eBit) {
-      return new FsPermissionExtension(perm, aBit, eBit);
-    } else {
-      return perm;
-    }
-  }
-
-  static enum PathType {
-    FILE, DIRECTORY, SYMLINK;
-    
-    static PathType valueOf(HdfsFileStatus status) {
-      return status.isDir()? DIRECTORY: status.isSymlink()? SYMLINK: FILE;
-    }
-  }
-
   /** Convert a HdfsFileStatus object to a Json string. */
   public static String toJsonString(final HdfsFileStatus status,
       boolean includeType) {
@@ -162,7 +93,7 @@ public class JsonUtil {
     }
     final Map<String, Object> m = new TreeMap<String, Object>();
     m.put("pathSuffix", status.getLocalName());
-    m.put("type", PathType.valueOf(status));
+    m.put("type", WebHdfsConstants.PathType.valueOf(status));
     if (status.isSymlink()) {
       m.put("symlink", status.getSymlink());
     }
@@ -194,42 +125,6 @@ public class JsonUtil {
     return null;
   }
 
-  /** Convert a Json map to a HdfsFileStatus object. */
-  public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
-    if (json == null) {
-      return null;
-    }
-
-    final Map<?, ?> m = includesType ? 
-        (Map<?, ?>)json.get(FileStatus.class.getSimpleName()) : json;
-    final String localName = (String) m.get("pathSuffix");
-    final PathType type = PathType.valueOf((String) m.get("type"));
-    final byte[] symlink = type != PathType.SYMLINK? null
-        : DFSUtil.string2Bytes((String)m.get("symlink"));
-
-    final long len = ((Number) m.get("length")).longValue();
-    final String owner = (String) m.get("owner");
-    final String group = (String) m.get("group");
-    final FsPermission permission = toFsPermission((String) m.get("permission"),
-      (Boolean)m.get("aclBit"), (Boolean)m.get("encBit"));
-    final long aTime = ((Number) m.get("accessTime")).longValue();
-    final long mTime = ((Number) m.get("modificationTime")).longValue();
-    final long blockSize = ((Number) m.get("blockSize")).longValue();
-    final boolean isLazyPersist = m.containsKey("lazyPersist")
-        ? (Boolean) m.get("lazyPersist") : false;
-    final short replication = ((Number) m.get("replication")).shortValue();
-    final long fileId = m.containsKey("fileId") ?
-        ((Number) m.get("fileId")).longValue() : INodeId.GRANDFATHER_INODE_ID;
-    final int childrenNum = getInt(m, "childrenNum", -1);
-    final byte storagePolicy = m.containsKey("storagePolicy") ?
-        (byte) ((Number) m.get("storagePolicy")).longValue() :
-        BlockStoragePolicySuite.ID_UNSPECIFIED;
-    return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
-        blockSize, mTime, aTime, permission, owner, group,
-        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null,
-        storagePolicy);
-  }
-
   /** Convert an ExtendedBlock to a Json map. */
   private static Map<String, Object> toJsonMap(final ExtendedBlock extendedblock) {
     if (extendedblock == null) {
@@ -244,20 +139,6 @@ public class JsonUtil {
     return m;
   }
 
-  /** Convert a Json map to an ExtendedBlock object. */
-  private static ExtendedBlock toExtendedBlock(final Map<?, ?> m) {
-    if (m == null) {
-      return null;
-    }
-    
-    final String blockPoolId = (String)m.get("blockPoolId");
-    final long blockId = ((Number) m.get("blockId")).longValue();
-    final long numBytes = ((Number) m.get("numBytes")).longValue();
-    final long generationStamp =
-        ((Number) m.get("generationStamp")).longValue();
-    return new ExtendedBlock(blockPoolId, blockId, numBytes, generationStamp);
-  }
-  
   /** Convert a DatanodeInfo to a Json map. */
   static Map<String, Object> toJsonMap(final DatanodeInfo datanodeinfo) {
     if (datanodeinfo == null) {
@@ -291,101 +172,6 @@ public class JsonUtil {
     return m;
   }
 
-  private static int getInt(Map<?, ?> m, String key, final int defaultValue) {
-    Object value = m.get(key);
-    if (value == null) {
-      return defaultValue;
-    }
-    return ((Number) value).intValue();
-  }
-
-  private static long getLong(Map<?, ?> m, String key, final long defaultValue) {
-    Object value = m.get(key);
-    if (value == null) {
-      return defaultValue;
-    }
-    return ((Number) value).longValue();
-  }
-
-  private static String getString(Map<?, ?> m, String key,
-      final String defaultValue) {
-    Object value = m.get(key);
-    if (value == null) {
-      return defaultValue;
-    }
-    return (String) value;
-  }
-
-  static List<?> getList(Map<?, ?> m, String key) {
-    Object list = m.get(key);
-    if (list instanceof List<?>) {
-      return (List<?>) list;
-    } else {
-      return null;
-    }
-  }
-
-  /** Convert a Json map to an DatanodeInfo object. */
-  static DatanodeInfo toDatanodeInfo(final Map<?, ?> m) 
-    throws IOException {
-    if (m == null) {
-      return null;
-    }
-
-    // ipAddr and xferPort are the critical fields for accessing data.
-    // If any one of the two is missing, an exception needs to be thrown.
-
-    // Handle the case of old servers (1.x, 0.23.x) sending 'name' instead
-    //  of ipAddr and xferPort.
-    String ipAddr = getString(m, "ipAddr", null);
-    int xferPort = getInt(m, "xferPort", -1);
-    if (ipAddr == null) {
-      String name = getString(m, "name", null);
-      if (name != null) {
-        int colonIdx = name.indexOf(':');
-        if (colonIdx > 0) {
-          ipAddr = name.substring(0, colonIdx);
-          xferPort = Integer.parseInt(name.substring(colonIdx +1));
-        } else {
-          throw new IOException(
-              "Invalid value in server response: name=[" + name + "]");
-        }
-      } else {
-        throw new IOException(
-            "Missing both 'ipAddr' and 'name' in server response.");
-      }
-      // ipAddr is non-null & non-empty string at this point.
-    }
-
-    // Check the validity of xferPort.
-    if (xferPort == -1) {
-      throw new IOException(
-          "Invalid or missing 'xferPort' in server response.");
-    }
-
-    // TODO: Fix storageID
-    return new DatanodeInfo(
-        ipAddr,
-        (String)m.get("hostName"),
-        (String)m.get("storageID"),
-        xferPort,
-        ((Number) m.get("infoPort")).intValue(),
-        getInt(m, "infoSecurePort", 0),
-        ((Number) m.get("ipcPort")).intValue(),
-
-        getLong(m, "capacity", 0l),
-        getLong(m, "dfsUsed", 0l),
-        getLong(m, "remaining", 0l),
-        getLong(m, "blockPoolUsed", 0l),
-        getLong(m, "cacheCapacity", 0l),
-        getLong(m, "cacheUsed", 0l),
-        getLong(m, "lastUpdate", 0l),
-        getLong(m, "lastUpdateMonotonic", 0l),
-        getInt(m, "xceiverCount", 0),
-        getString(m, "networkLocation", ""),
-        AdminStates.valueOf(getString(m, "adminState", "NORMAL")));
-  }
-
   /** Convert a DatanodeInfo[] to a Json array. */
   private static Object[] toJsonArray(final DatanodeInfo[] array) {
     if (array == null) {
@@ -401,23 +187,6 @@ public class JsonUtil {
     }
   }
 
-  /** Convert an Object[] to a DatanodeInfo[]. */
-  private static DatanodeInfo[] toDatanodeInfoArray(final List<?> objects)
-      throws IOException {
-    if (objects == null) {
-      return null;
-    } else if (objects.isEmpty()) {
-      return EMPTY_DATANODE_INFO_ARRAY;
-    } else {
-      final DatanodeInfo[] array = new DatanodeInfo[objects.size()];
-      int i = 0;
-      for (Object object : objects) {
-        array[i++] = toDatanodeInfo((Map<?, ?>) object);
-      }
-      return array;
-    }
-  }
-  
   /** Convert a LocatedBlock to a Json map. */
   private static Map<String, Object> toJsonMap(final LocatedBlock locatedblock
       ) throws IOException {
@@ -435,26 +204,6 @@ public class JsonUtil {
     return m;
   }
 
-  /** Convert a Json map to LocatedBlock. */
-  private static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
-    if (m == null) {
-      return null;
-    }
-
-    final ExtendedBlock b = toExtendedBlock((Map<?, ?>)m.get("block"));
-    final DatanodeInfo[] locations = toDatanodeInfoArray(
-        getList(m, "locations"));
-    final long startOffset = ((Number) m.get("startOffset")).longValue();
-    final boolean isCorrupt = (Boolean)m.get("isCorrupt");
-    final DatanodeInfo[] cachedLocations = toDatanodeInfoArray(
-        getList(m, "cachedLocations"));
-
-    final LocatedBlock locatedblock = new LocatedBlock(b, locations,
-        null, null, startOffset, isCorrupt, cachedLocations);
-    locatedblock.setBlockToken(toBlockToken((Map<?, ?>)m.get("blockToken")));
-    return locatedblock;
-  }
-
   /** Convert a LocatedBlock[] to a Json array. */
   private static Object[] toJsonArray(final List<LocatedBlock> array
       ) throws IOException {
@@ -471,22 +220,6 @@ public class JsonUtil {
     }
   }
 
-  /** Convert an List of Object to a List of LocatedBlock. */
-  private static List<LocatedBlock> toLocatedBlockList(
-      final List<?> objects) throws IOException {
-    if (objects == null) {
-      return null;
-    } else if (objects.isEmpty()) {
-      return Collections.emptyList();
-    } else {
-      final List<LocatedBlock> list = new ArrayList<>(objects.size());
-      for (Object object : objects) {
-        list.add(toLocatedBlock((Map<?, ?>) object));
-      }
-      return list;
-    }
-  }
-
   /** Convert LocatedBlocks to a Json string. */
   public static String toJsonString(final LocatedBlocks locatedblocks
       ) throws IOException {
@@ -504,25 +237,6 @@ public class JsonUtil {
     return toJsonString(LocatedBlocks.class, m);
   }
 
-  /** Convert a Json map to LocatedBlock. */
-  public static LocatedBlocks toLocatedBlocks(final Map<?, ?> json
-      ) throws IOException {
-    if (json == null) {
-      return null;
-    }
-
-    final Map<?, ?> m = (Map<?, ?>)json.get(LocatedBlocks.class.getSimpleName());
-    final long fileLength = ((Number) m.get("fileLength")).longValue();
-    final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction");
-    final List<LocatedBlock> locatedBlocks = toLocatedBlockList(
-        getList(m, "locatedBlocks"));
-    final LocatedBlock lastLocatedBlock = toLocatedBlock(
-        (Map<?, ?>)m.get("lastLocatedBlock"));
-    final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
-    return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
-        lastLocatedBlock, isLastBlockComplete, null);
-  }
-
   /** Convert a ContentSummary to a Json string. */
   public static String toJsonString(final ContentSummary contentsummary) {
     if (contentsummary == null) {
@@ -539,25 +253,6 @@ public class JsonUtil {
     return toJsonString(ContentSummary.class, m);
   }
 
-  /** Convert a Json map to a ContentSummary. */
-  public static ContentSummary toContentSummary(final Map<?, ?> json) {
-    if (json == null) {
-      return null;
-    }
-
-    final Map<?, ?> m = (Map<?, ?>)json.get(ContentSummary.class.getSimpleName());
-    final long length = ((Number) m.get("length")).longValue();
-    final long fileCount = ((Number) m.get("fileCount")).longValue();
-    final long directoryCount = ((Number) m.get("directoryCount")).longValue();
-    final long quota = ((Number) m.get("quota")).longValue();
-    final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
-    final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
-
-    return new ContentSummary.Builder().length(length).fileCount(fileCount).
-        directoryCount(directoryCount).quota(quota).spaceConsumed(spaceConsumed).
-        spaceQuota(spaceQuota).build();
-  }
-
   /** Convert a MD5MD5CRC32FileChecksum to a Json string. */
   public static String toJsonString(final MD5MD5CRC32FileChecksum checksum) {
     if (checksum == null) {
@@ -571,49 +266,6 @@ public class JsonUtil {
     return toJsonString(FileChecksum.class, m);
   }
 
-  /** Convert a Json map to a MD5MD5CRC32FileChecksum. */
-  public static MD5MD5CRC32FileChecksum toMD5MD5CRC32FileChecksum(
-      final Map<?, ?> json) throws IOException {
-    if (json == null) {
-      return null;
-    }
-
-    final Map<?, ?> m = (Map<?, ?>)json.get(FileChecksum.class.getSimpleName());
-    final String algorithm = (String)m.get("algorithm");
-    final int length = ((Number) m.get("length")).intValue();
-    final byte[] bytes = StringUtils.hexStringToByte((String)m.get("bytes"));
-
-    final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
-    final DataChecksum.Type crcType = 
-        MD5MD5CRC32FileChecksum.getCrcTypeFromAlgorithmName(algorithm);
-    final MD5MD5CRC32FileChecksum checksum;
-
-    // Recreate what DFSClient would have returned.
-    switch(crcType) {
-      case CRC32:
-        checksum = new MD5MD5CRC32GzipFileChecksum();
-        break;
-      case CRC32C:
-        checksum = new MD5MD5CRC32CastagnoliFileChecksum();
-        break;
-      default:
-        throw new IOException("Unknown algorithm: " + algorithm);
-    }
-    checksum.readFields(in);
-
-    //check algorithm name
-    if (!checksum.getAlgorithmName().equals(algorithm)) {
-      throw new IOException("Algorithm not matched. Expected " + algorithm
-          + ", Received " + checksum.getAlgorithmName());
-    }
-    //check length
-    if (length != checksum.getLength()) {
-      throw new IOException("Length not matched: length=" + length
-          + ", checksum.getLength()=" + checksum.getLength());
-    }
-
-    return checksum;
-  }
   /** Convert an AclStatus object to a Json string. */
   public static String toJsonString(final AclStatus status) {
     if (status == null) {
@@ -653,35 +305,6 @@ public class JsonUtil {
     return null;
   }
 
-  /** Convert a Json map to a AclStatus object. */
-  public static AclStatus toAclStatus(final Map<?, ?> json) {
-    if (json == null) {
-      return null;
-    }
-
-    final Map<?, ?> m = (Map<?, ?>) json.get(AclStatus.class.getSimpleName());
-
-    AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
-    aclStatusBuilder.owner((String) m.get("owner"));
-    aclStatusBuilder.group((String) m.get("group"));
-    aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit"));
-    String permString = (String) m.get("permission");
-    if (permString != null) {
-      final FsPermission permission = toFsPermission(permString,
-          (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"));
-      aclStatusBuilder.setPermission(permission);
-    }
-    final List<?> entries = (List<?>) m.get("entries");
-
-    List<AclEntry> aclEntryList = new ArrayList<AclEntry>();
-    for (Object entry : entries) {
-      AclEntry aclEntry = AclEntry.parseAclEntry((String) entry, true);
-      aclEntryList.add(aclEntry);
-    }
-    aclStatusBuilder.addEntries(aclEntryList);
-    return aclStatusBuilder.build();
-  }
-  
   private static Map<String, Object> toJsonMap(final XAttr xAttr,
       final XAttrCodec encoding) throws IOException {
     if (xAttr == null) {
@@ -731,69 +354,4 @@ public class JsonUtil {
     return mapper.writeValueAsString(finalMap);
   }
 
-  public static byte[] getXAttr(final Map<?, ?> json, final String name) 
-      throws IOException {
-    if (json == null) {
-      return null;
-    }
-    
-    Map<String, byte[]> xAttrs = toXAttrs(json);
-    if (xAttrs != null) {
-      return xAttrs.get(name);
-    }
-    
-    return null;
-  }
-
-  public static Map<String, byte[]> toXAttrs(final Map<?, ?> json) 
-      throws IOException {
-    if (json == null) {
-      return null;
-    }
-    return toXAttrMap(getList(json, "XAttrs"));
-  }
-  
-  public static List<String> toXAttrNames(final Map<?, ?> json)
-      throws IOException {
-    if (json == null) {
-      return null;
-    }
-
-    final String namesInJson = (String) json.get("XAttrNames");
-    ObjectReader reader = new ObjectMapper().reader(List.class);
-    final List<Object> xattrs = reader.readValue(namesInJson);
-    final List<String> names =
-      Lists.newArrayListWithCapacity(json.keySet().size());
-
-    for (Object xattr : xattrs) {
-      names.add((String) xattr);
-    }
-    return names;
-  }
-
-  private static Map<String, byte[]> toXAttrMap(final List<?> objects)
-      throws IOException {
-    if (objects == null) {
-      return null;
-    } else if (objects.isEmpty()) {
-      return Maps.newHashMap();
-    } else {
-      final Map<String, byte[]> xAttrs = Maps.newHashMap();
-      for (Object object : objects) {
-        Map<?, ?> m = (Map<?, ?>) object;
-        String name = (String) m.get("name");
-        String value = (String) m.get("value");
-        xAttrs.put(name, decodeXAttrValue(value));
-      }
-      return xAttrs;
-    }
-  }
-  
-  private static byte[] decodeXAttrValue(String value) throws IOException {
-    if (value != null) {
-      return XAttrCodec.decodeValue(value);
-    } else {
-      return new byte[0];
-    }
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd778809/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
new file mode 100644
index 0000000..6fa50fe
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -0,0 +1,485 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
+import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+import org.apache.hadoop.fs.XAttrCodec;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.namenode.INodeId;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.StringUtils;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.ObjectReader;
+
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+
+class JsonUtilClient {
+  static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
+
+  /** Convert a Json map to a RemoteException. */
+  static RemoteException toRemoteException(final Map<?, ?> json) {
+    final Map<?, ?> m = (Map<?, ?>)json.get(RemoteException.class.getSimpleName());
+    final String message = (String)m.get("message");
+    final String javaClassName = (String)m.get("javaClassName");
+    return new RemoteException(javaClassName, message);
+  }
+
+  /** Convert a Json map to a Token. */
+  static Token<? extends TokenIdentifier> toToken(
+      final Map<?, ?> m) throws IOException {
+    if (m == null) {
+      return null;
+    }
+
+    final Token<DelegationTokenIdentifier> token
+        = new Token<>();
+    token.decodeFromUrlString((String)m.get("urlString"));
+    return token;
+  }
+
+  /** Convert a Json map to a Token of BlockTokenIdentifier. */
+  @SuppressWarnings("unchecked")
+  static Token<BlockTokenIdentifier> toBlockToken(
+      final Map<?, ?> m) throws IOException {
+    return (Token<BlockTokenIdentifier>)toToken(m);
+  }
+
+  /** Convert a string to a FsPermission object. */
+  static FsPermission toFsPermission(
+      final String s, Boolean aclBit, Boolean encBit) {
+    FsPermission perm = new FsPermission(Short.parseShort(s, 8));
+    final boolean aBit = (aclBit != null) ? aclBit : false;
+    final boolean eBit = (encBit != null) ? encBit : false;
+    if (aBit || eBit) {
+      return new FsPermissionExtension(perm, aBit, eBit);
+    } else {
+      return perm;
+    }
+  }
+
+  /** Convert a Json map to a HdfsFileStatus object. */
+  static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
+    if (json == null) {
+      return null;
+    }
+
+    final Map<?, ?> m = includesType ?
+        (Map<?, ?>)json.get(FileStatus.class.getSimpleName()) : json;
+    final String localName = (String) m.get("pathSuffix");
+    final WebHdfsConstants.PathType type = WebHdfsConstants.PathType.valueOf((String) m.get("type"));
+    final byte[] symlink = type != WebHdfsConstants.PathType.SYMLINK ? null
+        : DFSUtil.string2Bytes((String) m.get("symlink"));
+
+    final long len = ((Number) m.get("length")).longValue();
+    final String owner = (String) m.get("owner");
+    final String group = (String) m.get("group");
+    final FsPermission permission = toFsPermission((String) m.get("permission"),
+                                                   (Boolean) m.get("aclBit"),
+                                                   (Boolean) m.get("encBit"));
+    final long aTime = ((Number) m.get("accessTime")).longValue();
+    final long mTime = ((Number) m.get("modificationTime")).longValue();
+    final long blockSize = ((Number) m.get("blockSize")).longValue();
+    final short replication = ((Number) m.get("replication")).shortValue();
+    final long fileId = m.containsKey("fileId") ?
+        ((Number) m.get("fileId")).longValue() : INodeId.GRANDFATHER_INODE_ID;
+    final int childrenNum = getInt(m, "childrenNum", -1);
+    final byte storagePolicy = m.containsKey("storagePolicy") ?
+        (byte) ((Number) m.get("storagePolicy")).longValue() :
+        BlockStoragePolicySuite.ID_UNSPECIFIED;
+    return new HdfsFileStatus(len, type == WebHdfsConstants.PathType.DIRECTORY, replication,
+        blockSize, mTime, aTime, permission, owner, group,
+        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null,
+        storagePolicy);
+  }
+
+  /** Convert a Json map to an ExtendedBlock object. */
+  static ExtendedBlock toExtendedBlock(final Map<?, ?> m) {
+    if (m == null) {
+      return null;
+    }
+
+    final String blockPoolId = (String)m.get("blockPoolId");
+    final long blockId = ((Number) m.get("blockId")).longValue();
+    final long numBytes = ((Number) m.get("numBytes")).longValue();
+    final long generationStamp =
+        ((Number) m.get("generationStamp")).longValue();
+    return new ExtendedBlock(blockPoolId, blockId, numBytes, generationStamp);
+  }
+
+  static int getInt(Map<?, ?> m, String key, final int defaultValue) {
+    Object value = m.get(key);
+    if (value == null) {
+      return defaultValue;
+    }
+    return ((Number) value).intValue();
+  }
+
+  static long getLong(Map<?, ?> m, String key, final long defaultValue) {
+    Object value = m.get(key);
+    if (value == null) {
+      return defaultValue;
+    }
+    return ((Number) value).longValue();
+  }
+
+  static String getString(
+      Map<?, ?> m, String key, final String defaultValue) {
+    Object value = m.get(key);
+    if (value == null) {
+      return defaultValue;
+    }
+    return (String) value;
+  }
+
+  static List<?> getList(Map<?, ?> m, String key) {
+    Object list = m.get(key);
+    if (list instanceof List<?>) {
+      return (List<?>) list;
+    } else {
+      return null;
+    }
+  }
+
+  /** Convert a Json map to a DatanodeInfo object. */
+  static DatanodeInfo toDatanodeInfo(final Map<?, ?> m)
+    throws IOException {
+    if (m == null) {
+      return null;
+    }
+
+    // ipAddr and xferPort are the critical fields for accessing data.
+    // If any one of the two is missing, an exception needs to be thrown.
+
+    // Handle the case of old servers (1.x, 0.23.x) sending 'name' instead
+    //  of ipAddr and xferPort.
+    String ipAddr = getString(m, "ipAddr", null);
+    int xferPort = getInt(m, "xferPort", -1);
+    if (ipAddr == null) {
+      String name = getString(m, "name", null);
+      if (name != null) {
+        int colonIdx = name.indexOf(':');
+        if (colonIdx > 0) {
+          ipAddr = name.substring(0, colonIdx);
+          xferPort = Integer.parseInt(name.substring(colonIdx + 1));
+        } else {
+          throw new IOException(
+              "Invalid value in server response: name=[" + name + "]");
+        }
+      } else {
+        throw new IOException(
+            "Missing both 'ipAddr' and 'name' in server response.");
+      }
+      // ipAddr is non-null & non-empty string at this point.
+    }
+
+    // Check the validity of xferPort.
+    if (xferPort == -1) {
+      throw new IOException(
+          "Invalid or missing 'xferPort' in server response.");
+    }
+
+    // TODO: Fix storageID
+    return new DatanodeInfo(
+        ipAddr,
+        (String)m.get("hostName"),
+        (String)m.get("storageID"),
+        xferPort,
+        ((Number) m.get("infoPort")).intValue(),
+        getInt(m, "infoSecurePort", 0),
+        ((Number) m.get("ipcPort")).intValue(),
+
+        getLong(m, "capacity", 0l),
+        getLong(m, "dfsUsed", 0l),
+        getLong(m, "remaining", 0l),
+        getLong(m, "blockPoolUsed", 0l),
+        getLong(m, "cacheCapacity", 0l),
+        getLong(m, "cacheUsed", 0l),
+        getLong(m, "lastUpdate", 0l),
+        getLong(m, "lastUpdateMonotonic", 0l),
+        getInt(m, "xceiverCount", 0),
+        getString(m, "networkLocation", ""),
+        DatanodeInfo.AdminStates.valueOf(getString(m, "adminState", "NORMAL")));
+  }
+
+  /** Convert a List of Json maps to a DatanodeInfo[]. */
+  static DatanodeInfo[] toDatanodeInfoArray(final List<?> objects)
+      throws IOException {
+    if (objects == null) {
+      return null;
+    } else if (objects.isEmpty()) {
+      return EMPTY_DATANODE_INFO_ARRAY;
+    } else {
+      final DatanodeInfo[] array = new DatanodeInfo[objects.size()];
+      int i = 0;
+      for (Object object : objects) {
+        array[i++] = toDatanodeInfo((Map<?, ?>) object);
+      }
+      return array;
+    }
+  }
+
+  /** Convert a Json map to LocatedBlock. */
+  static LocatedBlock toLocatedBlock(final Map<?, ?> m) throws IOException {
+    if (m == null) {
+      return null;
+    }
+
+    final ExtendedBlock b = toExtendedBlock((Map<?, ?>)m.get("block"));
+    final DatanodeInfo[] locations = toDatanodeInfoArray(
+        getList(m, "locations"));
+    final long startOffset = ((Number) m.get("startOffset")).longValue();
+    final boolean isCorrupt = (Boolean)m.get("isCorrupt");
+    final DatanodeInfo[] cachedLocations = toDatanodeInfoArray(
+        getList(m, "cachedLocations"));
+
+    final LocatedBlock locatedblock = new LocatedBlock(b, locations,
+        null, null, startOffset, isCorrupt, cachedLocations);
+    locatedblock.setBlockToken(toBlockToken((Map<?, ?>)m.get("blockToken")));
+    return locatedblock;
+  }
+
+  /** Convert a List of Object to a List of LocatedBlock. */
+  static List<LocatedBlock> toLocatedBlockList(
+      final List<?> objects) throws IOException {
+    if (objects == null) {
+      return null;
+    } else if (objects.isEmpty()) {
+      return Collections.emptyList();
+    } else {
+      final List<LocatedBlock> list = new ArrayList<>(objects.size());
+      for (Object object : objects) {
+        list.add(toLocatedBlock((Map<?, ?>) object));
+      }
+      return list;
+    }
+  }
+
+  /** Convert a Json map to a ContentSummary. */
+  static ContentSummary toContentSummary(final Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+
+    final Map<?, ?> m = (Map<?, ?>)json.get(ContentSummary.class.getSimpleName());
+    final long length = ((Number) m.get("length")).longValue();
+    final long fileCount = ((Number) m.get("fileCount")).longValue();
+    final long directoryCount = ((Number) m.get("directoryCount")).longValue();
+    final long quota = ((Number) m.get("quota")).longValue();
+    final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
+    final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
+
+    return new ContentSummary.Builder().length(length).fileCount(fileCount).
+        directoryCount(directoryCount).quota(quota).spaceConsumed(spaceConsumed).
+        spaceQuota(spaceQuota).build();
+  }
+
+  /** Convert a Json map to an MD5MD5CRC32FileChecksum. */
+  static MD5MD5CRC32FileChecksum toMD5MD5CRC32FileChecksum(
+      final Map<?, ?> json) throws IOException {
+    if (json == null) {
+      return null;
+    }
+
+    final Map<?, ?> m = (Map<?, ?>)json.get(FileChecksum.class.getSimpleName());
+    final String algorithm = (String)m.get("algorithm");
+    final int length = ((Number) m.get("length")).intValue();
+    final byte[] bytes = StringUtils.hexStringToByte((String) m.get("bytes"));
+
+    final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes));
+    final DataChecksum.Type crcType =
+        MD5MD5CRC32FileChecksum.getCrcTypeFromAlgorithmName(algorithm);
+    final MD5MD5CRC32FileChecksum checksum;
+
+    // Recreate what DFSClient would have returned.
+    switch(crcType) {
+      case CRC32:
+        checksum = new MD5MD5CRC32GzipFileChecksum();
+        break;
+      case CRC32C:
+        checksum = new MD5MD5CRC32CastagnoliFileChecksum();
+        break;
+      default:
+        throw new IOException("Unknown algorithm: " + algorithm);
+    }
+    checksum.readFields(in);
+
+    //check algorithm name
+    if (!checksum.getAlgorithmName().equals(algorithm)) {
+      throw new IOException("Algorithm not matched. Expected " + algorithm
+          + ", Received " + checksum.getAlgorithmName());
+    }
+    //check length
+    if (length != checksum.getLength()) {
+      throw new IOException("Length not matched: length=" + length
+          + ", checksum.getLength()=" + checksum.getLength());
+    }
+
+    return checksum;
+  }
+
+  /** Convert a Json map to an AclStatus object. */
+  static AclStatus toAclStatus(final Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+
+    final Map<?, ?> m = (Map<?, ?>) json.get(AclStatus.class.getSimpleName());
+
+    AclStatus.Builder aclStatusBuilder = new AclStatus.Builder();
+    aclStatusBuilder.owner((String) m.get("owner"));
+    aclStatusBuilder.group((String) m.get("group"));
+    aclStatusBuilder.stickyBit((Boolean) m.get("stickyBit"));
+    String permString = (String) m.get("permission");
+    if (permString != null) {
+      final FsPermission permission = toFsPermission(permString,
+          (Boolean) m.get("aclBit"), (Boolean) m.get("encBit"));
+      aclStatusBuilder.setPermission(permission);
+    }
+    final List<?> entries = (List<?>) m.get("entries");
+
+    List<AclEntry> aclEntryList = new ArrayList<>();
+    for (Object entry : entries) {
+      AclEntry aclEntry = AclEntry.parseAclEntry((String) entry, true);
+      aclEntryList.add(aclEntry);
+    }
+    aclStatusBuilder.addEntries(aclEntryList);
+    return aclStatusBuilder.build();
+  }
+
+  static byte[] getXAttr(final Map<?, ?> json, final String name)
+      throws IOException {
+    if (json == null) {
+      return null;
+    }
+
+    Map<String, byte[]> xAttrs = toXAttrs(json);
+    if (xAttrs != null) {
+      return xAttrs.get(name);
+    }
+
+    return null;
+  }
+
+  static Map<String, byte[]> toXAttrs(final Map<?, ?> json)
+      throws IOException {
+    if (json == null) {
+      return null;
+    }
+    return toXAttrMap(getList(json, "XAttrs"));
+  }
+
+  static List<String> toXAttrNames(final Map<?, ?> json)
+      throws IOException {
+    if (json == null) {
+      return null;
+    }
+
+    final String namesInJson = (String) json.get("XAttrNames");
+    ObjectReader reader = new ObjectMapper().reader(List.class);
+    final List<Object> xattrs = reader.readValue(namesInJson);
+    final List<String> names =
+      Lists.newArrayListWithCapacity(json.keySet().size());
+
+    for (Object xattr : xattrs) {
+      names.add((String) xattr);
+    }
+    return names;
+  }
+
+  static Map<String, byte[]> toXAttrMap(final List<?> objects)
+      throws IOException {
+    if (objects == null) {
+      return null;
+    } else if (objects.isEmpty()) {
+      return Maps.newHashMap();
+    } else {
+      final Map<String, byte[]> xAttrs = Maps.newHashMap();
+      for (Object object : objects) {
+        Map<?, ?> m = (Map<?, ?>) object;
+        String name = (String) m.get("name");
+        String value = (String) m.get("value");
+        xAttrs.put(name, decodeXAttrValue(value));
+      }
+      return xAttrs;
+    }
+  }
+
+  static byte[] decodeXAttrValue(String value) throws IOException {
+    if (value != null) {
+      return XAttrCodec.decodeValue(value);
+    } else {
+      return new byte[0];
+    }
+  }
+
+  /** Convert a Json map to a Token of DelegationTokenIdentifier. */
+  @SuppressWarnings("unchecked")
+  static Token<DelegationTokenIdentifier> toDelegationToken(
+      final Map<?, ?> json) throws IOException {
+    final Map<?, ?> m = (Map<?, ?>)json.get(Token.class.getSimpleName());
+    return (Token<DelegationTokenIdentifier>) toToken(m);
+  }
+
+  /** Convert a Json map to LocatedBlocks. */
+  static LocatedBlocks toLocatedBlocks(
+      final Map<?, ?> json) throws IOException {
+    if (json == null) {
+      return null;
+    }
+
+    final Map<?, ?> m = (Map<?, ?>)json.get(LocatedBlocks.class.getSimpleName());
+    final long fileLength = ((Number) m.get("fileLength")).longValue();
+    final boolean isUnderConstruction = (Boolean)m.get("isUnderConstruction");
+    final List<LocatedBlock> locatedBlocks = toLocatedBlockList(
+        getList(m, "locatedBlocks"));
+    final LocatedBlock lastLocatedBlock = toLocatedBlock(
+        (Map<?, ?>) m.get("lastLocatedBlock"));
+    final boolean isLastBlockComplete = (Boolean)m.get("isLastBlockComplete");
+    return new LocatedBlocks(fileLength, isUnderConstruction, locatedBlocks,
+        lastLocatedBlock, isLastBlockComplete, null);
+  }
+
+}
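
For reference, a minimal sketch of how these package-private helpers are
exercised (mirroring what TestJsonUtil does further below): a WebHDFS JSON
response is first deserialized into a Map with Jackson, then handed to the
matching JsonUtilClient converter. The JSON literal here is a hypothetical,
abbreviated FileStatus payload rather than a verbatim server response, and
the class assumes it lives in org.apache.hadoop.hdfs.web since
JsonUtilClient is package-private.

  package org.apache.hadoop.hdfs.web;

  import java.util.Map;

  import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
  import org.codehaus.jackson.map.ObjectMapper;
  import org.codehaus.jackson.map.ObjectReader;

  public class JsonUtilClientSketch {
    public static void main(String[] args) throws Exception {
      // Abbreviated FileStatus response, wrapped the way WebHDFS wraps it.
      String json = "{\"FileStatus\":{\"pathSuffix\":\"f\",\"type\":\"FILE\","
          + "\"length\":1,\"owner\":\"u\",\"group\":\"g\","
          + "\"permission\":\"644\",\"accessTime\":0,"
          + "\"modificationTime\":0,\"blockSize\":134217728,"
          + "\"replication\":3}}";
      ObjectReader reader = new ObjectMapper().reader(Map.class);
      // includesType=true because the map is wrapped in a "FileStatus" key.
      HdfsFileStatus status =
          JsonUtilClient.toFileStatus((Map<?, ?>) reader.readValue(json), true);
      System.out.println(status.getLen());  // prints 1
    }
  }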

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd778809/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
new file mode 100644
index 0000000..544ffe5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+
+class WebHdfsConstants {
+  enum PathType {
+    FILE, DIRECTORY, SYMLINK;
+
+    static PathType valueOf(HdfsFileStatus status) {
+      return status.isDir() ? DIRECTORY : status.isSymlink() ? SYMLINK : FILE;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd778809/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 6a55899..383f2e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -353,7 +353,7 @@ public class WebHdfsFileSystem extends FileSystem
         return m;
       }
 
-      IOException re = JsonUtil.toRemoteException(m);
+      IOException re = JsonUtilClient.toRemoteException(m);
       // extract UGI-related exceptions and unwrap InvalidToken
       // the NN mangles these exceptions but the DN does not and may need
       // to re-fetch a token if either report the token is expired
@@ -841,7 +841,7 @@ public class WebHdfsFileSystem extends FileSystem
     HdfsFileStatus status = new FsPathResponseRunner<HdfsFileStatus>(op, f) {
       @Override
       HdfsFileStatus decodeResponse(Map<?,?> json) {
-        return JsonUtil.toFileStatus(json, true);
+        return JsonUtilClient.toFileStatus(json, true);
       }
     }.run();
     if (status == null) {
@@ -870,7 +870,7 @@ public class WebHdfsFileSystem extends FileSystem
     AclStatus status = new FsPathResponseRunner<AclStatus>(op, f) {
       @Override
       AclStatus decodeResponse(Map<?,?> json) {
-        return JsonUtil.toAclStatus(json);
+        return JsonUtilClient.toAclStatus(json);
       }
     }.run();
     if (status == null) {
@@ -945,7 +945,7 @@ public class WebHdfsFileSystem extends FileSystem
         new XAttrEncodingParam(XAttrCodec.HEX)) {
       @Override
       byte[] decodeResponse(Map<?, ?> json) throws IOException {
-        return JsonUtil.getXAttr(json, name);
+        return JsonUtilClient.getXAttr(json, name);
       }
     }.run();
   }
@@ -957,7 +957,7 @@ public class WebHdfsFileSystem extends FileSystem
         new XAttrEncodingParam(XAttrCodec.HEX)) {
       @Override
       Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
-        return JsonUtil.toXAttrs(json);
+        return JsonUtilClient.toXAttrs(json);
       }
     }.run();
   }
@@ -977,7 +977,7 @@ public class WebHdfsFileSystem extends FileSystem
     return new FsPathResponseRunner<Map<String, byte[]>>(op, parameters, p) {
       @Override
       Map<String, byte[]> decodeResponse(Map<?, ?> json) throws IOException {
-        return JsonUtil.toXAttrs(json);
+        return JsonUtilClient.toXAttrs(json);
       }
     }.run();
   }
@@ -988,7 +988,7 @@ public class WebHdfsFileSystem extends FileSystem
     return new FsPathResponseRunner<List<String>>(op, p) {
       @Override
       List<String> decodeResponse(Map<?, ?> json) throws IOException {
-        return JsonUtil.toXAttrNames(json);
+        return JsonUtilClient.toXAttrNames(json);
       }
     }.run();
   }
@@ -1291,15 +1291,15 @@ public class WebHdfsFileSystem extends FileSystem
       @Override
       FileStatus[] decodeResponse(Map<?,?> json) {
         final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
-        final List<?> array = JsonUtil.getList(
-            rootmap, FileStatus.class.getSimpleName());
+        final List<?> array = JsonUtilClient.getList(rootmap,
+                                                     FileStatus.class.getSimpleName());
 
         //convert FileStatus
         final FileStatus[] statuses = new FileStatus[array.size()];
         int i = 0;
         for (Object object : array) {
           final Map<?, ?> m = (Map<?, ?>) object;
-          statuses[i++] = makeQualified(JsonUtil.toFileStatus(m, false), f);
+          statuses[i++] = makeQualified(JsonUtilClient.toFileStatus(m, false), f);
         }
         return statuses;
       }
@@ -1316,7 +1316,7 @@ public class WebHdfsFileSystem extends FileSystem
       @Override
       Token<DelegationTokenIdentifier> decodeResponse(Map<?,?> json)
           throws IOException {
-        return JsonUtil.toDelegationToken(json);
+        return JsonUtilClient.toDelegationToken(json);
       }
     }.run();
     if (token != null) {
@@ -1384,7 +1384,7 @@ public class WebHdfsFileSystem extends FileSystem
       @Override
       BlockLocation[] decodeResponse(Map<?,?> json) throws IOException {
         return DFSUtil.locatedBlocks2Locations(
-            JsonUtil.toLocatedBlocks(json));
+            JsonUtilClient.toLocatedBlocks(json));
       }
     }.run();
   }
@@ -1403,7 +1403,7 @@ public class WebHdfsFileSystem extends FileSystem
     return new FsPathResponseRunner<ContentSummary>(op, p) {
       @Override
       ContentSummary decodeResponse(Map<?,?> json) {
-        return JsonUtil.toContentSummary(json);        
+        return JsonUtilClient.toContentSummary(json);
       }
     }.run();
   }
@@ -1417,7 +1417,7 @@ public class WebHdfsFileSystem extends FileSystem
     return new FsPathResponseRunner<MD5MD5CRC32FileChecksum>(op, p) {
       @Override
       MD5MD5CRC32FileChecksum decodeResponse(Map<?,?> json) throws IOException {
-        return JsonUtil.toMD5MD5CRC32FileChecksum(json);
+        return JsonUtilClient.toMD5MD5CRC32FileChecksum(json);
       }
     }.run();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd778809/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
index f86e9b0..fced3b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
@@ -33,10 +33,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
-import org.apache.hadoop.hdfs.web.JsonUtil;
-import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.SecurityUtil;
@@ -49,10 +46,7 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
-import org.mortbay.util.ajax.JSON;
 
-import javax.servlet.http.HttpServletResponse;
-import javax.ws.rs.core.Response;
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -61,10 +55,8 @@ import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 import java.util.HashSet;
-import java.util.Map;
 
 import static org.junit.Assert.*;
-import static org.mockito.Mockito.mock;
 
 /**
  * Test case for client support of delegation tokens in an HA cluster.
@@ -373,90 +365,6 @@ public class TestDelegationTokensWithHA {
     token.renew(conf);
     token.cancel(conf);
   }
-  
-  /**
-   * Test if StandbyException can be thrown from StandbyNN, when it's requested for 
-   * password. (HDFS-6475). With StandbyException, the client can failover to try
-   * activeNN.
-   */
-  @Test(timeout = 300000)
-  public void testDelegationTokenStandbyNNAppearFirst() throws Exception {
-    // make nn0 the standby NN, and nn1 the active NN
-    cluster.transitionToStandby(0);
-    cluster.transitionToActive(1);
-
-    final DelegationTokenSecretManager stSecretManager = 
-        NameNodeAdapter.getDtSecretManager(
-            nn1.getNamesystem());
-
-    // create token
-    final Token<DelegationTokenIdentifier> token =
-        getDelegationToken(fs, "JobTracker");
-    final DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
-    byte[] tokenId = token.getIdentifier();
-    identifier.readFields(new DataInputStream(
-             new ByteArrayInputStream(tokenId)));
-
-    assertTrue(null != stSecretManager.retrievePassword(identifier));
-
-    final UserGroupInformation ugi = UserGroupInformation
-        .createRemoteUser("JobTracker");
-    ugi.addToken(token);
-    
-    ugi.doAs(new PrivilegedExceptionAction<Object>() {
-      @Override
-      public Object run() {
-        try {
-          try {
-            byte[] tmppw = dtSecretManager.retrievePassword(identifier);
-            fail("InvalidToken with cause StandbyException is expected"
-                + " since nn0 is standby");
-            return tmppw;
-          } catch (IOException e) {
-            // Mimic the UserProvider class logic (server side) by throwing
-            // SecurityException here
-            throw new SecurityException(
-                SecurityUtil.FAILED_TO_GET_UGI_MSG_HEADER + " " + e, e);
-          }
-        } catch (Exception oe) {
-          //
-          // The exception oe caught here is
-          //     java.lang.SecurityException: Failed to obtain user group
-          //     information: org.apache.hadoop.security.token.
-          //     SecretManager$InvalidToken: StandbyException
-          //
-          HttpServletResponse response = mock(HttpServletResponse.class);
-          ExceptionHandler eh = new ExceptionHandler();
-          eh.initResponse(response);
-          
-          // The Response (resp) below is what the server will send to client          
-          //
-          // BEFORE HDFS-6475 fix, the resp.entity is
-          //     {"RemoteException":{"exception":"SecurityException",
-          //      "javaClassName":"java.lang.SecurityException",
-          //      "message":"Failed to obtain user group information: 
-          //      org.apache.hadoop.security.token.SecretManager$InvalidToken:
-          //        StandbyException"}}
-          // AFTER the fix, the resp.entity is
-          //     {"RemoteException":{"exception":"StandbyException",
-          //      "javaClassName":"org.apache.hadoop.ipc.StandbyException",
-          //      "message":"Operation category READ is not supported in
-          //       state standby"}}
-          //
-          Response resp = eh.toResponse(oe);
-          
-          // Mimic the client side logic by parsing the response from server
-          //
-          Map<?, ?> m = (Map<?, ?>)JSON.parse(resp.getEntity().toString());
-          RemoteException re = JsonUtil.toRemoteException(m);
-          Exception unwrapped = ((RemoteException)re).unwrapRemoteException(
-              StandbyException.class);
-          assertTrue (unwrapped instanceof StandbyException);
-          return null;
-        }
-      }
-    });
-  }
 
   @SuppressWarnings("unchecked")
   private Token<DelegationTokenIdentifier> getDelegationToken(FileSystem fs,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd778809/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index 0ed38f2..dfca023 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -73,7 +73,7 @@ public class TestJsonUtil {
     System.out.println("json    = " + json.replace(",", ",\n  "));
     ObjectReader reader = new ObjectMapper().reader(Map.class);
     final HdfsFileStatus s2 =
-        JsonUtil.toFileStatus((Map<?, ?>) reader.readValue(json), true);
+        JsonUtilClient.toFileStatus((Map<?, ?>) reader.readValue(json), true);
     final FileStatus fs2 = toFileStatus(s2, parent);
     System.out.println("s2      = " + s2);
     System.out.println("fs2     = " + fs2);
@@ -102,7 +102,7 @@ public class TestJsonUtil {
     response.put("cacheCapacity", 123l);
     response.put("cacheUsed", 321l);
     
-    JsonUtil.toDatanodeInfo(response);
+    JsonUtilClient.toDatanodeInfo(response);
   }
 
   @Test
@@ -128,7 +128,7 @@ public class TestJsonUtil {
     response.put("cacheCapacity", 123l);
     response.put("cacheUsed", 321l);
 
-    DatanodeInfo di = JsonUtil.toDatanodeInfo(response);
+    DatanodeInfo di = JsonUtilClient.toDatanodeInfo(response);
     Assert.assertEquals(name, di.getXferAddr());
 
     // The encoded result should contain name, ipAddr and xferPort.
@@ -175,7 +175,7 @@ public class TestJsonUtil {
     aclStatusBuilder.stickyBit(false);
 
     Assert.assertEquals("Should be equal", aclStatusBuilder.build(),
-        JsonUtil.toAclStatus(json));
+        JsonUtilClient.toAclStatus(json));
   }
 
   @Test
@@ -229,7 +229,7 @@ public class TestJsonUtil {
     xAttrs.add(xAttr1);
     xAttrs.add(xAttr2);
     Map<String, byte[]> xAttrMap = XAttrHelper.buildXAttrMap(xAttrs);
-    Map<String, byte[]> parsedXAttrMap = JsonUtil.toXAttrs(json);
+    Map<String, byte[]> parsedXAttrMap = JsonUtilClient.toXAttrs(json);
     
     Assert.assertEquals(xAttrMap.size(), parsedXAttrMap.size());
     Iterator<Entry<String, byte[]>> iter = xAttrMap.entrySet().iterator();
@@ -249,13 +249,13 @@ public class TestJsonUtil {
     Map<?, ?> json = reader.readValue(jsonString);
 
     // Get xattr: user.a2
-    byte[] value = JsonUtil.getXAttr(json, "user.a2");
+    byte[] value = JsonUtilClient.getXAttr(json, "user.a2");
     Assert.assertArrayEquals(XAttrCodec.decodeValue("0x313131"), value);
   }
 
   private void checkDecodeFailure(Map<String, Object> map) {
     try {
-      JsonUtil.toDatanodeInfo(map);
+      JsonUtilClient.toDatanodeInfo(map);
       Assert.fail("Exception not thrown against bad input.");
     } catch (Exception e) {
       // expected

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd778809/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
index 0340b95..da45cbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
@@ -19,9 +19,12 @@
 package org.apache.hadoop.hdfs.web;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
 
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
 import java.io.IOException;
 import java.net.URI;
 import java.util.HashMap;
@@ -38,14 +41,26 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.web.resources.ExceptionHandler;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
+import org.mortbay.util.ajax.JSON;
+
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.Response;
 
 public class TestWebHDFSForHA {
   private static final String LOGICAL_NAME = "minidfs";
@@ -124,6 +139,75 @@ public class TestWebHDFSForHA {
   }
 
   @Test
+  public void testClientFailoverWhenStandbyNNHasStaleCredentials()
+      throws IOException {
+    Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+
+    MiniDFSCluster cluster = null;
+    WebHdfsFileSystem fs = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .nnTopology(topo).numDataNodes(0).build();
+
+      HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
+      cluster.waitActive();
+
+      fs = (WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf);
+
+      cluster.transitionToActive(0);
+      Token<?> token = fs.getDelegationToken(null);
+      final DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+      identifier.readFields(
+          new DataInputStream(new ByteArrayInputStream(token.getIdentifier())));
+      cluster.transitionToStandby(0);
+      cluster.transitionToActive(1);
+
+      final DelegationTokenSecretManager secretManager =
+          NameNodeAdapter.getDtSecretManager(cluster.getNamesystem(0));
+
+      ExceptionHandler eh = new ExceptionHandler();
+      eh.initResponse(mock(HttpServletResponse.class));
+      Response resp = null;
+      try {
+        secretManager.retrievePassword(identifier);
+      } catch (IOException e) {
+        // Mimic the UserProvider class logic (server side) by throwing
+        // SecurityException here
+        Assert.assertTrue(e instanceof SecretManager.InvalidToken);
+        resp = eh.toResponse(new SecurityException(e));
+      }
+      // The Response (resp) below is what the server will send to client
+      //
+      // BEFORE HDFS-6475 fix, the resp.entity is
+      //     {"RemoteException":{"exception":"SecurityException",
+      //      "javaClassName":"java.lang.SecurityException",
+      //      "message":"Failed to obtain user group information:
+      //      org.apache.hadoop.security.token.SecretManager$InvalidToken:
+      //        StandbyException"}}
+      // AFTER the fix, the resp.entity is
+      //     {"RemoteException":{"exception":"StandbyException",
+      //      "javaClassName":"org.apache.hadoop.ipc.StandbyException",
+      //      "message":"Operation category READ is not supported in
+      //       state standby"}}
+      //
+
+      // Mimic the client side logic by parsing the response from server
+      //
+      Map<?, ?> m = (Map<?, ?>) JSON.parse(resp.getEntity().toString());
+      RemoteException re = JsonUtilClient.toRemoteException(m);
+      Exception unwrapped = re.unwrapRemoteException(StandbyException.class);
+      Assert.assertTrue(unwrapped instanceof StandbyException);
+    } finally {
+      IOUtils.cleanup(null, fs);
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  @Test
   public void testFailoverAfterOpen() throws IOException {
     Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
     conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd778809/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
index db08325..6930835 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
@@ -36,7 +36,6 @@ import java.net.URLConnection;
 import java.security.PrivilegedExceptionAction;
 import java.util.Map;
 
-import org.apache.commons.httpclient.HttpConnection;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -281,7 +280,7 @@ public class TestWebHdfsTokens {
             @Override
             Token<DelegationTokenIdentifier> decodeResponse(Map<?, ?> json)
                 throws IOException {
-              return JsonUtil.toDelegationToken(json);
+              return JsonUtilClient.toDelegationToken(json);
             }
           }.run();
 


[06/47] hadoop git commit: HADOOP-11717. Support JWT tokens for web single sign on to the Hadoop servers. (Larry McCay via omalley)

Posted by zj...@apache.org.
HADOOP-11717. Support JWT tokens for web single sign on to the Hadoop
servers. (Larry McCay via omalley)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b1be9e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b1be9e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b1be9e2

Branch: refs/heads/YARN-2928
Commit: 7b1be9e2e55a817c02cb80ba544882e0486215bb
Parents: 2073197
Author: Owen O'Malley <om...@apache.org>
Authored: Tue Apr 7 08:09:41 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:56 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-auth/pom.xml       |  11 +
 .../JWTRedirectAuthenticationHandler.java       | 363 ++++++++++++++++
 .../authentication/util/CertificateUtil.java    |  65 +++
 .../TestJWTRedirectAuthentictionHandler.java    | 418 +++++++++++++++++++
 .../util/TestCertificateUtil.java               |  96 +++++
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 hadoop-project/pom.xml                          |  13 +
 7 files changed, 969 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1be9e2/hadoop-common-project/hadoop-auth/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml
index 5f7d774..3999d5a 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -108,6 +108,17 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>com.nimbusds</groupId>
+      <artifactId>nimbus-jose-jwt</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.bouncycastle</groupId>
+          <artifactId>bcprov-jdk15on</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
       <groupId>org.apache.directory.server</groupId>
       <artifactId>apacheds-kerberos-codec</artifactId>
       <scope>compile</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1be9e2/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java
new file mode 100644
index 0000000..42df6a0
--- /dev/null
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java
@@ -0,0 +1,363 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.server;
+
+import java.io.IOException;
+
+import javax.servlet.http.Cookie;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Properties;
+import java.text.ParseException;
+
+import java.io.ByteArrayInputStream;
+import java.io.UnsupportedEncodingException;
+import java.security.PublicKey;
+import java.security.cert.CertificateFactory;
+import java.security.cert.X509Certificate;
+import java.security.cert.CertificateException;
+import java.security.interfaces.RSAPublicKey;
+
+import org.apache.commons.codec.binary.Base64;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authentication.server.AltKerberosAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.authentication.util.CertificateUtil;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.nimbusds.jwt.SignedJWT;
+import com.nimbusds.jose.JOSEException;
+import com.nimbusds.jose.JWSObject;
+import com.nimbusds.jose.JWSVerifier;
+import com.nimbusds.jose.crypto.RSASSAVerifier;
+
+/**
+ * The {@link JWTRedirectAuthenticationHandler} extends
+ * AltKerberosAuthenticationHandler to add WebSSO behavior for UIs. The expected
+ * SSO token is a JsonWebToken (JWT). The supported algorithm is RS256 which
+ * uses PKI between the token issuer and consumer. The flow requires a redirect
+ * to a configured authentication server URL and a subsequent request with the
+ * expected JWT token. This token is cryptographically verified and validated.
+ * The user identity is then extracted from the token and used to create an
+ * AuthenticationToken - as expected by the AuthenticationFilter.
+ *
+ * <p/>
+ * The supported configuration properties are:
+ * <ul>
+ * <li>authentication.provider.url: the full URL to the authentication server.
+ * This is the URL that the handler will redirect the browser to in order to
+ * authenticate the user. It does not have a default value.</li>
+ * <li>public.key.pem: This is the PEM formatted public key of the issuer of the
+ * JWT token. It is required for verifying that the issuer is a trusted party.
+ * DO NOT include the PEM header and footer portions of the PEM encoded
+ * certificate. It does not have a default value.</li>
+ * <li>expected.jwt.audiences: This is a list of strings that identify
+ * acceptable audiences for the JWT token. The audience is a way for the issuer
+ * to indicate which entities the token is intended for. The default value is
+ * null, which means that all audiences are accepted.</li>
+ * <li>jwt.cookie.name: the name of the cookie that contains the JWT token.
+ * Default value is "hadoop-jwt".</li>
+ * </ul>
+ */
+public class JWTRedirectAuthenticationHandler extends
+    AltKerberosAuthenticationHandler {
+  private static final Logger LOG = LoggerFactory
+      .getLogger(JWTRedirectAuthenticationHandler.class);
+
+  public static final String AUTHENTICATION_PROVIDER_URL = "authentication.provider.url";
+  public static final String PUBLIC_KEY_PEM = "public.key.pem";
+  public static final String EXPECTED_JWT_AUDIENCES = "expected.jwt.audiences";
+  public static final String JWT_COOKIE_NAME = "jwt.cookie.name";
+  private static final String ORIGINAL_URL_QUERY_PARAM = "originalUrl=";
+  private String authenticationProviderUrl = null;
+  private RSAPublicKey publicKey = null;
+  private List<String> audiences = null;
+  private String cookieName = "hadoop-jwt";
+
+  /**
+   * Primarily for testing, this provides a way to set the publicKey for
+   * signature verification without needing to get a PEM encoded value.
+   *
+   * @param pk the RSA public key to use for signature verification
+   */
+  public void setPublicKey(RSAPublicKey pk) {
+    publicKey = pk;
+  }
+
+  /**
+   * Initializes the authentication handler instance.
+   * <p/>
+   * This method is invoked by the {@link AuthenticationFilter#init} method.
+   *
+   * @param config
+   *          configuration properties to initialize the handler.
+   *
+   * @throws ServletException
+   *           thrown if the handler could not be initialized.
+   */
+  @Override
+  public void init(Properties config) throws ServletException {
+    super.init(config);
+    // setup the URL to redirect to for authentication
+    authenticationProviderUrl = config
+        .getProperty(AUTHENTICATION_PROVIDER_URL);
+    if (authenticationProviderUrl == null) {
+      throw new ServletException(
+          "Authentication provider URL must not be null - configure: "
+              + AUTHENTICATION_PROVIDER_URL);
+    }
+
+    // setup the public key of the token issuer for verification
+    if (publicKey == null) {
+      String pemPublicKey = config.getProperty(PUBLIC_KEY_PEM);
+      if (pemPublicKey == null) {
+        throw new ServletException(
+            "Public key for signature validation must be provisioned.");
+      }
+      publicKey = CertificateUtil.parseRSAPublicKey(pemPublicKey);
+    }
+    // setup the list of valid audiences for token validation
+    String auds = config.getProperty(EXPECTED_JWT_AUDIENCES);
+    if (auds != null) {
+      // parse into the list
+      String[] audArray = auds.split(",");
+      audiences = new ArrayList<String>();
+      for (String a : audArray) {
+        audiences.add(a);
+      }
+    }
+
+    // setup custom cookie name if configured
+    String customCookieName = config.getProperty(JWT_COOKIE_NAME);
+    if (customCookieName != null) {
+      cookieName = customCookieName;
+    }
+  }
+
+  @Override
+  public AuthenticationToken alternateAuthenticate(HttpServletRequest request,
+      HttpServletResponse response) throws IOException,
+      AuthenticationException {
+    AuthenticationToken token = null;
+
+    String serializedJWT = getJWTFromCookie(request);
+    if (serializedJWT == null) {
+      String loginURL = constructLoginURL(request, response);
+      LOG.info("sending redirect to: " + loginURL);
+      response.sendRedirect(loginURL);
+    } else {
+      String userName = null;
+      SignedJWT jwtToken = null;
+      boolean valid = false;
+      try {
+        jwtToken = SignedJWT.parse(serializedJWT);
+        valid = validateToken(jwtToken);
+        if (valid) {
+          userName = jwtToken.getJWTClaimsSet().getSubject();
+          LOG.info("USERNAME: " + userName);
+        } else {
+          LOG.warn("jwtToken failed validation: " + jwtToken.serialize());
+        }
+      } catch(ParseException pe) {
+        // unable to parse the token let's try and get another one
+        LOG.warn("Unable to parse the JWT token", pe);
+      }
+      if (valid) {
+        LOG.debug("Issuing AuthenticationToken for user.");
+        token = new AuthenticationToken(userName, userName, getType());
+      } else {
+        String loginURL = constructLoginURL(request, response);
+        LOG.info("token validation failed - sending redirect to: " + loginURL);
+        response.sendRedirect(loginURL);
+      }
+    }
+    return token;
+  }
+
+  /**
+   * Encapsulate the acquisition of the JWT token from HTTP cookies within the
+   * request.
+   *
+   * @param req the request from which to read the JWT cookie
+   * @return the serialized JWT token, or null if no matching cookie is found
+   */
+  protected String getJWTFromCookie(HttpServletRequest req) {
+    String serializedJWT = null;
+    Cookie[] cookies = req.getCookies();
+    if (cookies != null) {
+      for (Cookie cookie : cookies) {
+        if (cookieName.equals(cookie.getName())) {
+          LOG.info(cookieName
+              + " cookie has been found and is being processed");
+          serializedJWT = cookie.getValue();
+          break;
+        }
+      }
+    }
+    return serializedJWT;
+  }
+
+  /**
+   * Create the URL to be used for authentication of the user in the absence of
+   * a JWT token within the incoming request.
+   *
+   * @param request the request for which the login URL is being built
+   * @param response the response associated with the request
+   * @return the URL to use as the login URL for the redirect
+   */
+  protected String constructLoginURL(HttpServletRequest request,
+      HttpServletResponse response) {
+    String delimiter = "?";
+    if (authenticationProviderUrl.contains("?")) {
+      delimiter = "&";
+    }
+    String loginURL = authenticationProviderUrl + delimiter
+        + ORIGINAL_URL_QUERY_PARAM
+        + request.getRequestURL().toString();
+    return loginURL;
+  }
+
+  /**
+   * This method provides a single entry point for validating the JWT during
+   * request processing. Specific aspects of the validation can be customized
+   * by overriding the individual validate methods that it calls, while the
+   * entire validation algorithm can be replaced by overriding this method
+   * itself.
+   *
+   * @param jwtToken the token to validate
+   * @return true if the token is valid, otherwise false
+   */
+  protected boolean validateToken(SignedJWT jwtToken) {
+    boolean sigValid = validateSignature(jwtToken);
+    if (!sigValid) {
+      LOG.warn("Signature could not be verified");
+    }
+    boolean audValid = validateAudiences(jwtToken);
+    if (!audValid) {
+      LOG.warn("Audience validation failed.");
+    }
+    boolean expValid = validateExpiration(jwtToken);
+    if (!expValid) {
+      LOG.info("Expiration validation failed.");
+    }
+
+    return sigValid && audValid && expValid;
+  }
+
+  /**
+   * Verify the signature of the JWT token. This method depends on the public
+   * key that was established during init from the provisioned PEM value.
+   * Override this method in subclasses in order to customize the signature
+   * verification behavior.
+   *
+   * @param jwtToken the token whose signature will be verified
+   * @return true if the signature is valid, otherwise false
+   */
+  protected boolean validateSignature(SignedJWT jwtToken) {
+    boolean valid = false;
+    if (JWSObject.State.SIGNED == jwtToken.getState()) {
+      LOG.debug("JWT token is in a SIGNED state");
+      if (jwtToken.getSignature() != null) {
+        LOG.debug("JWT token signature is not null");
+        try {
+          JWSVerifier verifier = new RSASSAVerifier(publicKey);
+          if (jwtToken.verify(verifier)) {
+            valid = true;
+            LOG.debug("JWT token has been successfully verified");
+          } else {
+            LOG.warn("JWT signature verification failed.");
+          }
+        } catch (JOSEException je) {
+          LOG.warn("Error while validating signature", je);
+        }
+      }
+    }
+    return valid;
+  }
+
+  /**
+   * Validate whether any of the accepted audience claims is present in the
+   * issued token claims list for audience. Override this method in subclasses
+   * in order to customize the audience validation behavior.
+   *
+   * @param jwtToken
+   *          the JWT token where the allowed audiences will be found
+   * @return true if an expected audience is present, otherwise false
+   */
+  protected boolean validateAudiences(SignedJWT jwtToken) {
+    boolean valid = false;
+    try {
+      List<String> tokenAudienceList = jwtToken.getJWTClaimsSet()
+          .getAudience();
+      // if there were no expected audiences configured then just
+      // consider any audience acceptable
+      if (audiences == null) {
+        valid = true;
+      } else {
+        // if any of the configured audiences is found then consider it
+        // acceptable
+        boolean found = false;
+        for (String aud : tokenAudienceList) {
+          if (audiences.contains(aud)) {
+            LOG.debug("JWT token audience has been successfully validated");
+            valid = true;
+            break;
+          }
+        }
+        if (!valid) {
+          LOG.warn("JWT audience validation failed.");
+        }
+      }
+    } catch (ParseException pe) {
+      LOG.warn("Unable to parse the JWT token.", pe);
+    }
+    return valid;
+  }
+
+  /**
+   * Validate that the expiration time of the JWT token has not passed. If it
+   * has, validation fails. Override this method in subclasses in order to
+   * customize the expiration validation behavior.
+   *
+   * @param jwtToken the token whose expiration time will be checked
+   * @return true if the token has not expired, otherwise false
+   */
+  protected boolean validateExpiration(SignedJWT jwtToken) {
+    boolean valid = false;
+    try {
+      Date expires = jwtToken.getJWTClaimsSet().getExpirationTime();
+      if (expires != null && new Date().before(expires)) {
+        LOG.debug("JWT token expiration date has been "
+            + "successfully validated");
+        valid = true;
+      } else {
+        LOG.warn("JWT expiration date validation failed.");
+      }
+    } catch (ParseException pe) {
+      LOG.warn("JWT expiration date validation failed.", pe);
+    }
+    return valid;
+  }
+}
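
To make the configuration contract above concrete, here is a hedged sketch of
wiring the handler up by hand; normally the AuthenticationFilter instantiates
and initializes it. The SSO endpoint is a placeholder, and setPublicKey() is
used, as in the tests below, to sidestep provisioning a PEM value. Note that
init() also runs the inherited AltKerberos/Kerberos initialization, which in
a real deployment needs its own settings (kerberos.principal and
kerberos.keytab).

  import java.security.KeyPairGenerator;
  import java.security.interfaces.RSAPublicKey;
  import java.util.Properties;

  import org.apache.hadoop.security.authentication.server.JWTRedirectAuthenticationHandler;

  public class JWTHandlerWiringSketch {
    public static void main(String[] args) throws Exception {
      JWTRedirectAuthenticationHandler handler =
          new JWTRedirectAuthenticationHandler();

      // Test hook from the patch: supply the issuer's key directly instead
      // of provisioning public.key.pem.
      KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
      kpg.initialize(2048);
      handler.setPublicKey((RSAPublicKey) kpg.generateKeyPair().getPublic());

      Properties config = new Properties();
      // Placeholder endpoint; audiences and cookie name are optional.
      config.setProperty(
          JWTRedirectAuthenticationHandler.AUTHENTICATION_PROVIDER_URL,
          "https://sso.example.com/login");
      config.setProperty(
          JWTRedirectAuthenticationHandler.EXPECTED_JWT_AUDIENCES, "hadoop");

      handler.init(config);
    }
  }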

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1be9e2/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
new file mode 100644
index 0000000..77b2530
--- /dev/null
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/CertificateUtil.java
@@ -0,0 +1,65 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.io.ByteArrayInputStream;
+import java.io.UnsupportedEncodingException;
+import java.security.PublicKey;
+import java.security.cert.CertificateException;
+import java.security.cert.CertificateFactory;
+import java.security.cert.X509Certificate;
+import java.security.interfaces.RSAPublicKey;
+
+import javax.servlet.ServletException;
+
+public class CertificateUtil {
+  private static final String PEM_HEADER = "-----BEGIN CERTIFICATE-----\n";
+  private static final String PEM_FOOTER = "\n-----END CERTIFICATE-----";
+
+  /**
+   * Gets an RSAPublicKey from the provided PEM encoding of an X.509
+   * certificate.
+   *
+   * @param pem
+   *          the PEM encoding from config without the header and footer
+   * @return the RSAPublicKey extracted from the certificate
+   * @throws ServletException thrown when the PEM content cannot be parsed
+   */
+  public static RSAPublicKey parseRSAPublicKey(String pem) throws ServletException {
+    String fullPem = PEM_HEADER + pem + PEM_FOOTER;
+    PublicKey key = null;
+    try {
+      CertificateFactory fact = CertificateFactory.getInstance("X.509");
+      ByteArrayInputStream is = new ByteArrayInputStream(
+          fullPem.getBytes("UTF8"));
+
+      X509Certificate cer = (X509Certificate) fact.generateCertificate(is);
+      key = cer.getPublicKey();
+    } catch (CertificateException ce) {
+      String message = null;
+      if (pem.startsWith(PEM_HEADER)) {
+        message = "CertificateException - be sure not to include PEM header "
+            + "and footer in the PEM configuration element.";
+      } else {
+        message = "CertificateException - PEM may be corrupt";
+      }
+      throw new ServletException(message, ce);
+    } catch (UnsupportedEncodingException uee) {
+      throw new ServletException(uee);
+    }
+    return (RSAPublicKey) key;
+  }
+}
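
The PEM value handed to parseRSAPublicKey must be the bare Base64 certificate
body; the method adds the header and footer itself, which is why the error
path above warns when a header is detected in the input. A usage sketch, where
the property name "public.key.pem" is a hypothetical configuration key:

  import java.security.interfaces.RSAPublicKey;
  import java.util.Properties;

  import javax.servlet.ServletException;

  public final class PemConfigExample {
    // Parse a signature-verification key from configuration; the value must
    // be the Base64 body only, without the BEGIN/END CERTIFICATE lines.
    static RSAPublicKey keyFromConfig(Properties props) throws ServletException {
      return CertificateUtil.parseRSAPublicKey(props.getProperty("public.key.pem"));
    }
  }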

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1be9e2/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestJWTRedirectAuthentictionHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestJWTRedirectAuthentictionHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestJWTRedirectAuthentictionHandler.java
new file mode 100644
index 0000000..4ac9535
--- /dev/null
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestJWTRedirectAuthentictionHandler.java
@@ -0,0 +1,418 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.server;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.security.KeyPair;
+import java.security.KeyPairGenerator;
+import java.security.interfaces.RSAPrivateKey;
+import java.security.interfaces.RSAPublicKey;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Properties;
+
+import javax.servlet.ServletException;
+import javax.servlet.http.Cookie;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
+import org.apache.hadoop.security.authentication.KerberosTestUtils;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import com.nimbusds.jose.*;
+import com.nimbusds.jwt.JWTClaimsSet;
+import com.nimbusds.jwt.SignedJWT;
+import com.nimbusds.jose.crypto.RSASSASigner;
+
+public class TestJWTRedirectAuthentictionHandler extends
+    KerberosSecurityTestcase {
+  private static final String SERVICE_URL = "https://localhost:8888/resource";
+  private static final String REDIRECT_LOCATION =
+      "https://localhost:8443/authserver?originalUrl=" + SERVICE_URL;
+  RSAPublicKey publicKey = null;
+  RSAPrivateKey privateKey = null;
+  JWTRedirectAuthenticationHandler handler = null;
+
+  @Test
+  public void testNoPublicKeyJWT() throws Exception {
+    try {
+      Properties props = getProperties();
+      handler.init(props);
+
+      SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
+          privateKey);
+
+      Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
+      Mockito.when(request.getRequestURL()).thenReturn(
+          new StringBuffer(SERVICE_URL));
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
+          SERVICE_URL);
+
+      AuthenticationToken token = handler.alternateAuthenticate(request,
+          response);
+      fail("alternateAuthentication should have thrown a ServletException");
+    } catch (ServletException se) {
+      assertTrue(se.getMessage().contains(
+          "Public key for signature validation must be provisioned"));
+    } catch (AuthenticationException ae) {
+      fail("alternateAuthentication should NOT have thrown a AuthenticationException");
+    }
+  }
+
+  @Test
+  public void testCustomCookieNameJWT() throws Exception {
+    try {
+      handler.setPublicKey(publicKey);
+
+      Properties props = getProperties();
+      props.put(JWTRedirectAuthenticationHandler.JWT_COOKIE_NAME, "jowt");
+      handler.init(props);
+
+      SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
+          privateKey);
+
+      Cookie cookie = new Cookie("jowt", jwt.serialize());
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
+      Mockito.when(request.getRequestURL()).thenReturn(
+          new StringBuffer(SERVICE_URL));
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
+          SERVICE_URL);
+
+      AuthenticationToken token = handler.alternateAuthenticate(request,
+          response);
+      Assert.assertEquals("bob", token.getUserName());
+    } catch (ServletException se) {
+      fail("alternateAuthentication should NOT have thrown a ServletException: "
+          + se.getMessage());
+    } catch (AuthenticationException ae) {
+      fail("alternateAuthentication should NOT have thrown a AuthenticationException");
+    }
+  }
+
+  @Test
+  public void testNoProviderURLJWT() throws Exception {
+    try {
+      handler.setPublicKey(publicKey);
+
+      Properties props = getProperties();
+      props
+          .remove(JWTRedirectAuthenticationHandler.AUTHENTICATION_PROVIDER_URL);
+      handler.init(props);
+
+      SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
+          privateKey);
+
+      Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
+      Mockito.when(request.getRequestURL()).thenReturn(
+          new StringBuffer(SERVICE_URL));
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
+          SERVICE_URL);
+
+      AuthenticationToken token = handler.alternateAuthenticate(request,
+          response);
+      fail("alternateAuthentication should have thrown an AuthenticationException");
+    } catch (ServletException se) {
+      assertTrue(se.getMessage().contains(
+          "Authentication provider URL must not be null"));
+    } catch (AuthenticationException ae) {
+      fail("alternateAuthentication should NOT have thrown a AuthenticationException");
+    }
+  }
+
+  @Test
+  public void testUnableToParseJWT() throws Exception {
+    try {
+      KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
+      kpg.initialize(2048);
+
+      KeyPair kp = kpg.genKeyPair();
+      RSAPublicKey publicKey = (RSAPublicKey) kp.getPublic();
+
+      handler.setPublicKey(publicKey);
+
+      Properties props = getProperties();
+      handler.init(props);
+
+      SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
+          privateKey);
+
+      Cookie cookie = new Cookie("hadoop-jwt", "ljm" + jwt.serialize());
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
+      Mockito.when(request.getRequestURL()).thenReturn(
+          new StringBuffer(SERVICE_URL));
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
+          SERVICE_URL);
+
+      AuthenticationToken token = handler.alternateAuthenticate(request,
+          response);
+      Mockito.verify(response).sendRedirect(REDIRECT_LOCATION);
+    } catch (ServletException se) {
+      fail("alternateAuthentication should NOT have thrown a ServletException");
+    } catch (AuthenticationException ae) {
+      fail("alternateAuthentication should NOT have thrown a AuthenticationException");
+    }
+  }
+
+  @Test
+  public void testFailedSignatureValidationJWT() throws Exception {
+    try {
+
+      // Create a public key that doesn't match the one needed to
+      // verify the signature - in order to make it fail verification...
+      KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
+      kpg.initialize(2048);
+
+      KeyPair kp = kpg.genKeyPair();
+      RSAPublicKey publicKey = (RSAPublicKey) kp.getPublic();
+
+      handler.setPublicKey(publicKey);
+
+      Properties props = getProperties();
+      handler.init(props);
+
+      SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
+          privateKey);
+
+      Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
+      Mockito.when(request.getRequestURL()).thenReturn(
+          new StringBuffer(SERVICE_URL));
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
+          SERVICE_URL);
+
+      AuthenticationToken token = handler.alternateAuthenticate(request,
+          response);
+      Mockito.verify(response).sendRedirect(REDIRECT_LOCATION);
+    } catch (ServletException se) {
+      fail("alternateAuthentication should NOT have thrown a ServletException");
+    } catch (AuthenticationException ae) {
+      fail("alternateAuthentication should NOT have thrown a AuthenticationException");
+    }
+  }
+
+  @Test
+  public void testExpiredJWT() throws Exception {
+    try {
+      handler.setPublicKey(publicKey);
+
+      Properties props = getProperties();
+      handler.init(props);
+
+      SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() - 1000),
+          privateKey);
+
+      Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
+      Mockito.when(request.getRequestURL()).thenReturn(
+          new StringBuffer(SERVICE_URL));
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
+          SERVICE_URL);
+
+      AuthenticationToken token = handler.alternateAuthenticate(request,
+          response);
+      Mockito.verify(response).sendRedirect(REDIRECT_LOCATION);
+    } catch (ServletException se) {
+      fail("alternateAuthentication should NOT have thrown a ServletException");
+    } catch (AuthenticationException ae) {
+      fail("alternateAuthentication should NOT have thrown a AuthenticationException");
+    }
+  }
+
+  @Test
+  public void testInvalidAudienceJWT() throws Exception {
+    try {
+      handler.setPublicKey(publicKey);
+
+      Properties props = getProperties();
+      props
+          .put(JWTRedirectAuthenticationHandler.EXPECTED_JWT_AUDIENCES, "foo");
+      handler.init(props);
+
+      SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
+          privateKey);
+
+      Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
+      Mockito.when(request.getRequestURL()).thenReturn(
+          new StringBuffer(SERVICE_URL));
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
+          SERVICE_URL);
+
+      AuthenticationToken token = handler.alternateAuthenticate(request,
+          response);
+      Mockito.verify(response).sendRedirect(REDIRECT_LOCATION);
+    } catch (ServletException se) {
+      fail("alternateAuthentication should NOT have thrown a ServletException");
+    } catch (AuthenticationException ae) {
+      fail("alternateAuthentication should NOT have thrown a AuthenticationException");
+    }
+  }
+
+  @Test
+  public void testValidAudienceJWT() throws Exception {
+    try {
+      handler.setPublicKey(publicKey);
+
+      Properties props = getProperties();
+      props
+          .put(JWTRedirectAuthenticationHandler.EXPECTED_JWT_AUDIENCES, "bar");
+      handler.init(props);
+
+      SignedJWT jwt = getJWT("bob", new Date(new Date().getTime() + 5000),
+          privateKey);
+
+      Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
+      Mockito.when(request.getRequestURL()).thenReturn(
+          new StringBuffer(SERVICE_URL));
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
+          SERVICE_URL);
+
+      AuthenticationToken token = handler.alternateAuthenticate(request,
+          response);
+      Assert.assertEquals("bob", token.getUserName());
+    } catch (ServletException se) {
+      fail("alternateAuthentication should NOT have thrown a ServletException");
+    } catch (AuthenticationException ae) {
+      fail("alternateAuthentication should NOT have thrown an AuthenticationException");
+    }
+  }
+
+  @Test
+  public void testValidJWT() throws Exception {
+    try {
+      handler.setPublicKey(publicKey);
+
+      Properties props = getProperties();
+      handler.init(props);
+
+      SignedJWT jwt = getJWT("alice", new Date(new Date().getTime() + 5000),
+          privateKey);
+
+      Cookie cookie = new Cookie("hadoop-jwt", jwt.serialize());
+      HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+      Mockito.when(request.getCookies()).thenReturn(new Cookie[] { cookie });
+      Mockito.when(request.getRequestURL()).thenReturn(
+          new StringBuffer(SERVICE_URL));
+      HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+      Mockito.when(response.encodeRedirectURL(SERVICE_URL)).thenReturn(
+          SERVICE_URL);
+
+      AuthenticationToken token = handler.alternateAuthenticate(request,
+          response);
+      Assert.assertNotNull("Token should not be null.", token);
+      Assert.assertEquals("alice", token.getUserName());
+    } catch (ServletException se) {
+      fail("alternateAuthentication should NOT have thrown a ServletException.");
+    } catch (AuthenticationException ae) {
+      fail("alternateAuthentication should NOT have thrown an AuthenticationException");
+    }
+  }
+
+  @Before
+  public void setup() throws Exception {
+    setupKerberosRequirements();
+
+    KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
+    kpg.initialize(2048);
+
+    KeyPair kp = kpg.genKeyPair();
+    publicKey = (RSAPublicKey) kp.getPublic();
+    privateKey = (RSAPrivateKey) kp.getPrivate();
+
+    handler = new JWTRedirectAuthenticationHandler();
+  }
+
+  protected void setupKerberosRequirements() throws Exception {
+    String[] keytabUsers = new String[] { "HTTP/host1", "HTTP/host2",
+        "HTTP2/host1", "XHTTP/host" };
+    String keytab = KerberosTestUtils.getKeytabFile();
+    getKdc().createPrincipal(new File(keytab), keytabUsers);
+  }
+
+  @After
+  public void teardown() throws Exception {
+    handler.destroy();
+  }
+
+  protected Properties getProperties() {
+    Properties props = new Properties();
+    props.setProperty(
+        JWTRedirectAuthenticationHandler.AUTHENTICATION_PROVIDER_URL,
+        "https://localhost:8443/authserver");
+    props.setProperty("kerberos.principal",
+        KerberosTestUtils.getServerPrincipal());
+    props.setProperty("kerberos.keytab", KerberosTestUtils.getKeytabFile());
+    return props;
+  }
+
+  protected SignedJWT getJWT(String sub, Date expires, RSAPrivateKey privateKey)
+      throws Exception {
+    JWTClaimsSet claimsSet = new JWTClaimsSet();
+    claimsSet.setSubject(sub);
+    claimsSet.setIssueTime(new Date());
+    claimsSet.setIssuer("https://c2id.com");
+    claimsSet.setCustomClaim("scope", "openid");
+    claimsSet.setExpirationTime(expires);
+    List<String> aud = new ArrayList<String>();
+    aud.add("bar");
+    claimsSet.setAudience(aud);
+
+    JWSHeader header = new JWSHeader.Builder(JWSAlgorithm.RS256).build();
+
+    SignedJWT signedJWT = new SignedJWT(header, claimsSet);
+    // sign the JWT with the generated RSA private key
+    JWSSigner signer = new RSASSASigner(privateKey);
+
+    signedJWT.sign(signer);
+
+    return signedJWT;
+  }
+}
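
The tests above drive signature verification only indirectly, through
alternateAuthenticate. For reference, the direct Nimbus check that a handler
performs on such a token looks roughly like the following sketch (class and
method names are hypothetical, not part of the patch):

  import java.security.interfaces.RSAPublicKey;
  import java.text.ParseException;

  import com.nimbusds.jose.JOSEException;
  import com.nimbusds.jose.JWSVerifier;
  import com.nimbusds.jose.crypto.RSASSAVerifier;
  import com.nimbusds.jwt.SignedJWT;

  public final class VerifyExample {
    // Returns true when the serialized token parses and its RS256 signature
    // matches the given public key.
    static boolean verify(String serialized, RSAPublicKey key) {
      try {
        SignedJWT jwt = SignedJWT.parse(serialized);
        JWSVerifier verifier = new RSASSAVerifier(key);
        return jwt.verify(verifier);
      } catch (ParseException | JOSEException e) {
        return false;
      }
    }
  }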

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1be9e2/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
new file mode 100644
index 0000000..f52b6d2
--- /dev/null
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.security.interfaces.RSAPublicKey;
+
+import javax.servlet.ServletException;
+
+import org.junit.Test;
+
+public class TestCertificateUtil {
+
+  @Test
+  public void testInvalidPEMWithHeaderAndFooter() throws Exception {
+    String pem = "-----BEGIN CERTIFICATE-----\n"
+        + "MIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
+        + "CwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRvb3AxDTALBgNVBAsTBFRl"
+        + "c3QxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNTAxMDIyMTE5MjRaFw0xNjAxMDIyMTE5MjRaMF8x"
+        + "CzAJBgNVBAYTAlVTMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRv"
+        + "b3AxDTALBgNVBAsTBFRlc3QxEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOB"
+        + "jQAwgYkCgYEAwpfpLdi7dWTHNzETt+L7618/dWUQFb/C7o1jIxFgbKOVIB6d5YmvUbJck5PYxFkz"
+        + "C25fmU5H71WGOI1Kle5TFDmIo+hqh5xqu1YNRZz9i6D94g+2AyYr9BpvH4ZfdHs7r9AU7c3kq68V"
+        + "7OPuuaHb25J8isiOyA3RiWuJGQlXTdkCAwEAATANBgkqhkiG9w0BAQUFAAOBgQAdRUyCUqE9sdim"
+        + "Fbll9BuZDKV16WXeWGq+kTd7ETe7l0fqXjq5EnrifOai0L/pXwVvS2jrFkKQRlRxRGUNaeEBZ2Wy"
+        + "9aTyR+HGHCfvwoCegc9rAVw/DLaRriSO/jnEXzYK6XLVKH+hx5UXrJ7Oyc7JjZUc3g9kCWORThCX"
+        + "Mzc1xA==" + "\n-----END CERTIFICATE-----";
+    try {
+      CertificateUtil.parseRSAPublicKey(pem);
+      fail("Should not have thrown ServletException");
+    } catch (ServletException se) {
+      assertTrue(se.getMessage().contains("PEM header"));
+    }
+  }
+
+  @Test
+  public void testCorruptPEM() throws Exception {
+    String pem = "LJMLJMMIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
+        + "CwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRvb3AxDTALBgNVBAsTBFRl"
+        + "c3QxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNTAxMDIyMTE5MjRaFw0xNjAxMDIyMTE5MjRaMF8x"
+        + "CzAJBgNVBAYTAlVTMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRv"
+        + "b3AxDTALBgNVBAsTBFRlc3QxEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOB"
+        + "jQAwgYkCgYEAwpfpLdi7dWTHNzETt+L7618/dWUQFb/C7o1jIxFgbKOVIB6d5YmvUbJck5PYxFkz"
+        + "C25fmU5H71WGOI1Kle5TFDmIo+hqh5xqu1YNRZz9i6D94g+2AyYr9BpvH4ZfdHs7r9AU7c3kq68V"
+        + "7OPuuaHb25J8isiOyA3RiWuJGQlXTdkCAwEAATANBgkqhkiG9w0BAQUFAAOBgQAdRUyCUqE9sdim"
+        + "Fbll9BuZDKV16WXeWGq+kTd7ETe7l0fqXjq5EnrifOai0L/pXwVvS2jrFkKQRlRxRGUNaeEBZ2Wy"
+        + "9aTyR+HGHCfvwoCegc9rAVw/DLaRriSO/jnEXzYK6XLVKH+hx5UXrJ7Oyc7JjZUc3g9kCWORThCX"
+        + "Mzc1xA==";
+    try {
+      CertificateUtil.parseRSAPublicKey(pem);
+      fail("Should not have thrown ServletException");
+    } catch (ServletException se) {
+      assertTrue(se.getMessage().contains("corrupt"));
+    }
+  }
+
+  @Test
+  public void testValidPEM() throws Exception {
+    String pem = "MIICOjCCAaOgAwIBAgIJANXi/oWxvJNzMA0GCSqGSIb3DQEBBQUAMF8xCzAJBgNVBAYTAlVTMQ0w"
+        + "CwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRvb3AxDTALBgNVBAsTBFRl"
+        + "c3QxEjAQBgNVBAMTCWxvY2FsaG9zdDAeFw0xNTAxMDIyMTE5MjRaFw0xNjAxMDIyMTE5MjRaMF8x"
+        + "CzAJBgNVBAYTAlVTMQ0wCwYDVQQIEwRUZXN0MQ0wCwYDVQQHEwRUZXN0MQ8wDQYDVQQKEwZIYWRv"
+        + "b3AxDTALBgNVBAsTBFRlc3QxEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOB"
+        + "jQAwgYkCgYEAwpfpLdi7dWTHNzETt+L7618/dWUQFb/C7o1jIxFgbKOVIB6d5YmvUbJck5PYxFkz"
+        + "C25fmU5H71WGOI1Kle5TFDmIo+hqh5xqu1YNRZz9i6D94g+2AyYr9BpvH4ZfdHs7r9AU7c3kq68V"
+        + "7OPuuaHb25J8isiOyA3RiWuJGQlXTdkCAwEAATANBgkqhkiG9w0BAQUFAAOBgQAdRUyCUqE9sdim"
+        + "Fbll9BuZDKV16WXeWGq+kTd7ETe7l0fqXjq5EnrifOai0L/pXwVvS2jrFkKQRlRxRGUNaeEBZ2Wy"
+        + "9aTyR+HGHCfvwoCegc9rAVw/DLaRriSO/jnEXzYK6XLVKH+hx5UXrJ7Oyc7JjZUc3g9kCWORThCX"
+        + "Mzc1xA==";
+    try {
+      RSAPublicKey pk = CertificateUtil.parseRSAPublicKey(pem);
+      assertNotNull(pk);
+      assertEquals("RSA", pk.getAlgorithm());
+    } catch (ServletException se) {
+      fail("Should not have thrown ServletException");
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1be9e2/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index f52e09f..5a8cda4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -481,6 +481,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-9805. Refactor RawLocalFileSystem#rename for improved testability.
     (Jean-Pierre Matsumoto via cnauroth)
 
+    HADOOP-11717. Support JWT tokens for web single sign on to the Hadoop
+    servers. (Larry McCay via omalley)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b1be9e2/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index a2def1a..2c8837c 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -970,6 +970,19 @@
        <scope>test</scope>
      </dependency>
       
+      <dependency>
+          <groupId>com.nimbusds</groupId>
+          <artifactId>nimbus-jose-jwt</artifactId>
+          <version>3.9</version>
+          <scope>compile</scope>
+          <exclusions>
+          <exclusion>
+            <groupId>org.bouncycastle</groupId>
+            <artifactId>bcprov-jdk15on</artifactId>
+          </exclusion>
+        </exclusions>
+      </dependency>
+
     </dependencies>
   </dependencyManagement>
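
With the version and the Bouncy Castle exclusion managed here, a consuming
module (hadoop-auth in this change) only needs the unversioned coordinates; a
sketch of the module-level declaration:

  <dependency>
    <groupId>com.nimbusds</groupId>
    <artifactId>nimbus-jose-jwt</artifactId>
  </dependency>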
 


[24/47] hadoop git commit: YARN-2890. MiniYarnCluster should turn on timeline service if configured to do so. Contributed by Mit Desai.

Posted by zj...@apache.org.
YARN-2890. MiniYarnCluster should turn on timeline service if configured to do so. Contributed by Mit Desai.

Conflicts:
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8118c95f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8118c95f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8118c95f

Branch: refs/heads/YARN-2928
Commit: 8118c95f8cc253a2da0e449fc408705272ab5ea5
Parents: 1c7c608
Author: Hitesh Shah <hi...@apache.org>
Authored: Wed Apr 8 14:13:10 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:14:38 2015 -0700

----------------------------------------------------------------------
 .../jobhistory/TestJobHistoryEventHandler.java  |   2 +-
 .../mapred/TestMRTimelineEventHandling.java     |  52 ++++++++-
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../distributedshell/TestDistributedShell.java  |   2 +-
 .../hadoop/yarn/server/MiniYARNCluster.java     |   6 +-
 .../hadoop/yarn/server/TestMiniYarnCluster.java | 115 +++++++++++++++++++
 6 files changed, 172 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8118c95f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
index 43e3dbe..de260c9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java
@@ -453,7 +453,7 @@ public class TestJobHistoryEventHandler {
     long currentTime = System.currentTimeMillis();
     try {
       yarnCluster = new MiniYARNCluster(
-            TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1, true);
+            TestJobHistoryEventHandler.class.getSimpleName(), 1, 1, 1, 1);
       yarnCluster.init(conf);
       yarnCluster.start();
       jheh.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8118c95f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
index c2ef128..eab9026 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMRTimelineEventHandling.java
@@ -35,6 +35,52 @@ import org.junit.Test;
 public class TestMRTimelineEventHandling {
 
   @Test
+  public void testTimelineServiceStartInMiniCluster() throws Exception {
+    Configuration conf = new YarnConfiguration();
+
+    /*
+     * The timeline service should not start if the config is set to false,
+     * regardless of the value of MAPREDUCE_JOB_EMIT_TIMELINE_DATA.
+     */
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
+    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, true);
+    MiniMRYarnCluster cluster = null;
+    try {
+      cluster = new MiniMRYarnCluster(
+          TestJobHistoryEventHandler.class.getSimpleName(), 1);
+      cluster.init(conf);
+      cluster.start();
+
+      //verify that the timeline service is not started.
+      Assert.assertNull("Timeline Service should not have been started",
+          cluster.getApplicationHistoryServer());
+    } finally {
+      if (cluster != null) {
+        cluster.stop();
+      }
+    }
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
+    conf.setBoolean(MRJobConfig.MAPREDUCE_JOB_EMIT_TIMELINE_DATA, false);
+    cluster = null;
+    try {
+      cluster = new MiniMRYarnCluster(
+          TestJobHistoryEventHandler.class.getSimpleName(), 1);
+      cluster.init(conf);
+      cluster.start();
+
+      //verify that the timeline service is not started.
+      Assert.assertNull("Timeline Service should not have been started",
+          cluster.getApplicationHistoryServer());
+    } finally {
+      if (cluster != null) {
+        cluster.stop();
+      }
+    }
+  }
+
+  @Test
   public void testMRTimelineEventHandling() throws Exception {
     Configuration conf = new YarnConfiguration();
     conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
@@ -42,7 +88,7 @@ public class TestMRTimelineEventHandling {
     MiniMRYarnCluster cluster = null;
     try {
       cluster = new MiniMRYarnCluster(
-              TestJobHistoryEventHandler.class.getSimpleName(), 1, true);
+          TestJobHistoryEventHandler.class.getSimpleName(), 1);
       cluster.init(conf);
       cluster.start();
       TimelineStore ts = cluster.getApplicationHistoryServer()
@@ -96,7 +142,7 @@ public class TestMRTimelineEventHandling {
     MiniMRYarnCluster cluster = null;
     try {
       cluster = new MiniMRYarnCluster(
-          TestJobHistoryEventHandler.class.getSimpleName(), 1, true);
+          TestJobHistoryEventHandler.class.getSimpleName(), 1);
       cluster.init(conf);
       cluster.start();
       TimelineStore ts = cluster.getApplicationHistoryServer()
@@ -133,7 +179,7 @@ public class TestMRTimelineEventHandling {
     cluster = null;
     try {
       cluster = new MiniMRYarnCluster(
-          TestJobHistoryEventHandler.class.getSimpleName(), 1, true);
+          TestJobHistoryEventHandler.class.getSimpleName(), 1);
       cluster.init(conf);
       cluster.start();
       TimelineStore ts = cluster.getApplicationHistoryServer()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8118c95f/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index aaab195..f47be48 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -218,6 +218,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3459. Fix failiure of TestLog4jWarningErrorMetricsAppender.
     (Varun Vasudev via wangda)
 
+    YARN-2890. MiniYarnCluster should turn on timeline service if
+    configured to do so. (Mit Desai via hitesh)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8118c95f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
index cc5f5e2..b887c77 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java
@@ -123,7 +123,7 @@ public class TestDistributedShell {
     if (yarnCluster == null) {
       yarnCluster =
           new MiniYARNCluster(TestDistributedShell.class.getSimpleName(), 1,
-              numNodeManager, 1, 1, enableATSV1);
+              numNodeManager, 1, 1);
       yarnCluster.init(conf);
       
       yarnCluster.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8118c95f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
index 365e0bb..f8b27b3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
@@ -57,7 +57,6 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResp
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryStore;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.MemoryApplicationHistoryStore;
-import org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSWebApp;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
@@ -262,8 +261,9 @@ public class MiniYARNCluster extends CompositeService {
       addService(new NodeManagerWrapper(index));
     }
 
-    if (enableAHS) {
-      addService(new ApplicationHistoryServerWrapper());
+    if (conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED,
+        YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED) || enableAHS) {
+      addService(new ApplicationHistoryServerWrapper());
     }
     
     super.serviceInit(
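
With this change a test needs only the configuration key to get the timeline
service; the enableAHS constructor flag remains for callers that want to force
it on. A minimal sketch, reusing the five-argument constructor shown above:

  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);

  MiniYARNCluster cluster =
      new MiniYARNCluster("timeline-example", 1, 1, 1, 1);
  cluster.init(conf);  // serviceInit sees the flag and adds the AHS wrapper
  cluster.start();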

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8118c95f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java
new file mode 100644
index 0000000..8a3c9e7
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestMiniYarnCluster.java
@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestMiniYarnCluster {
+  @Test
+  public void testTimelineServiceStartInMiniCluster() throws Exception {
+    Configuration conf = new YarnConfiguration();
+    int numNodeManagers = 1;
+    int numLocalDirs = 1;
+    int numLogDirs = 1;
+    boolean enableAHS;
+
+    /*
+     * Timeline service should not start if TIMELINE_SERVICE_ENABLED == false
+     * and enableAHS flag == false
+     */
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
+    enableAHS = false;
+    MiniYARNCluster cluster = null;
+    try {
+      cluster = new MiniYARNCluster(TestMiniYarnCluster.class.getSimpleName(),
+          numNodeManagers, numLocalDirs, numLogDirs, numLogDirs, enableAHS);
+      cluster.init(conf);
+      cluster.start();
+
+      //verify that the timeline service is not started.
+      Assert.assertNull("Timeline Service should not have been started",
+          cluster.getApplicationHistoryServer());
+    } finally {
+      if (cluster != null) {
+        cluster.stop();
+      }
+    }
+
+    /*
+     * Timeline service should start if TIMELINE_SERVICE_ENABLED == true
+     * and enableAHS == false
+     */
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true);
+    enableAHS = false;
+    cluster = null;
+    try {
+      cluster = new MiniYARNCluster(TestMiniYarnCluster.class.getSimpleName(),
+          numNodeManagers, numLocalDirs, numLogDirs, numLogDirs, enableAHS);
+      cluster.init(conf);
+      cluster.start();
+
+      // The timeline service may sometimes take a while to start
+      int wait = 0;
+      while (cluster.getApplicationHistoryServer() == null && wait < 20) {
+        Thread.sleep(500);
+        wait++;
+      }
+      //verify that the timeline service is started.
+      Assert.assertNotNull("Timeline Service should have been started",
+          cluster.getApplicationHistoryServer());
+    } finally {
+      if (cluster != null) {
+        cluster.stop();
+      }
+    }
+
+    /*
+     * Timeline service should start if TIMELINE_SERVICE_ENABLED == false
+     * and enableAHS == true
+     */
+    conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, false);
+    enableAHS = true;
+    cluster = null;
+    try {
+      cluster = new MiniYARNCluster(TestMiniYarnCluster.class.getSimpleName(),
+          numNodeManagers, numLocalDirs, numLogDirs, numLogDirs, enableAHS);
+      cluster.init(conf);
+      cluster.start();
+
+      // The timeline service may sometimes take a while to start
+      int wait = 0;
+      while (cluster.getApplicationHistoryServer() == null && wait < 20) {
+        Thread.sleep(500);
+        wait++;
+      }
+      //verify that the timeline service is started.
+      Assert.assertNotNull("Timeline Service should have been started",
+          cluster.getApplicationHistoryServer());
+    } finally {
+      if (cluster != null) {
+        cluster.stop();
+      }
+    }
+  }
+}
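
The polling loop above bounds the wait at roughly ten seconds (20 iterations
of 500 ms). Extracted into a reusable helper, a sketch (the helper name is
hypothetical):

  // Wait up to maxMillis for the application history server to come up.
  static boolean waitForTimelineService(MiniYARNCluster cluster,
      long maxMillis) throws InterruptedException {
    long deadline = System.currentTimeMillis() + maxMillis;
    while (cluster.getApplicationHistoryServer() == null
        && System.currentTimeMillis() < deadline) {
      Thread.sleep(100);
    }
    return cluster.getApplicationHistoryServer() != null;
  }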


[43/47] hadoop git commit: Adding release 2.7.1 to CHANGES.txt

Posted by zj...@apache.org.
Adding release 2.7.1 to CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33b8fab8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33b8fab8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33b8fab8

Branch: refs/heads/YARN-2928
Commit: 33b8fab8e583dfe1049ef6a02e0ed696c11eab34
Parents: ed83be6
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Thu Apr 9 13:53:35 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:56 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 ++++++++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     | 12 ++++++++++++
 hadoop-mapreduce-project/CHANGES.txt            | 12 ++++++++++++
 hadoop-yarn-project/CHANGES.txt                 | 12 ++++++++++++
 4 files changed, 48 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33b8fab8/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 397161d..f181a96 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -509,6 +509,18 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11800. Clean up some test methods in TestCodec.java.
     (Brahma Reddy Battula via aajisaka)
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33b8fab8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 979534e..695dc36 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -463,6 +463,18 @@ Release 2.8.0 - UNRELEASED
     HDFS-8091: ACLStatus and XAttributes should be presented to
     INodeAttributesProvider before returning to client (asuresh)
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33b8fab8/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index b9a75e3..4a45386 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -327,6 +327,18 @@ Release 2.8.0 - UNRELEASED
     MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL
     (rchiang via rkanter)
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/33b8fab8/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d8c5515..26061de 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -224,6 +224,18 @@ Release 2.8.0 - UNRELEASED
     YARN-3465. Use LinkedHashMap to preserve order of resource requests. 
     (Zhihai Xu via kasha)
 
+Release 2.7.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[36/47] hadoop git commit: HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and even for failed transfers (Contributed by Vinayakumar B)

Posted by zj...@apache.org.
HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and even for failed transfers (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/680c2eea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/680c2eea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/680c2eea

Branch: refs/heads/YARN-2928
Commit: 680c2eea11488f3d1f4afda4299f383b258006de
Parents: 1efbe9c
Author: Vinayakumar B <vi...@apache.org>
Authored: Thu Apr 9 11:58:00 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:54 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/datanode/BPOfferService.java    |  1 -
 .../hadoop/hdfs/server/datanode/DataNode.java   |  2 ++
 .../datanode/metrics/DataNodeMetrics.java       |  4 +--
 .../server/datanode/TestDataNodeMetrics.java    | 37 +++++++++++++++++---
 5 files changed, 40 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/680c2eea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 19f264a..74ed624 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -451,6 +451,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7725. Incorrect "nodes in service" metrics caused all writes to fail.
     (Ming Ma via wang)
 
+    HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and
+    even for failed transfers (vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/680c2eea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index da9642a..1b42b19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -656,7 +656,6 @@ class BPOfferService {
       // Send a copy of a block to another datanode
       dn.transferBlocks(bcmd.getBlockPoolId(), bcmd.getBlocks(),
           bcmd.getTargets(), bcmd.getTargetStorageTypes());
-      dn.metrics.incrBlocksReplicated(bcmd.getBlocks().length);
       break;
     case DatanodeProtocol.DNA_INVALIDATE:
       //

http://git-wip-us.apache.org/repos/asf/hadoop/blob/680c2eea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 50dccb8..8c08871 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2174,6 +2174,8 @@ public class DataNode extends ReconfigurableBase
                   + Arrays.asList(targets));
             }
           }
+        } else {
+          metrics.incrBlocksReplicated();
         }
       } catch (IOException ie) {
         LOG.warn(bpReg + ":Failed to transfer " + b + " to " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/680c2eea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index 2e8eb22..2e62b3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -210,8 +210,8 @@ public class DataNodeMetrics {
     cacheReports.add(latency);
   }
 
-  public void incrBlocksReplicated(int delta) {
-    blocksReplicated.incr(delta);
+  public void incrBlocksReplicated() {
+    blocksReplicated.incr();
   }
 
   public void incrBlocksWritten() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/680c2eea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
index 5d27fe6..0b47344 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
@@ -21,17 +21,15 @@ import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
 import java.io.Closeable;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.util.List;
-import java.util.Map;
 
 import com.google.common.collect.Lists;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -45,6 +43,8 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.util.Time;
@@ -293,4 +293,33 @@ public class TestDataNodeMetrics {
     }
   }
 
+  @Test
+  public void testDatanodeBlocksReplicatedMetric() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      List<DataNode> datanodes = cluster.getDataNodes();
+      assertEquals(1, datanodes.size());
+      DataNode datanode = datanodes.get(0);
+
+      MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
+      long blocksReplicated = getLongCounter("BlocksReplicated", rb);
+      assertEquals("No blocks replicated yet", 0, blocksReplicated);
+
+      Path path = new Path("/counter.txt");
+      DFSTestUtil.createFile(fs, path, 1024, (short) 2, Time.monotonicNow());
+      cluster.startDataNodes(conf, 1, true, StartupOption.REGULAR, null);
+      ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fs, path);
+      DFSTestUtil.waitForReplication(cluster, firstBlock, 1, 2, 0);
+
+      MetricsRecordBuilder rbNew = getMetrics(datanode.getMetrics().name());
+      blocksReplicated = getLongCounter("BlocksReplicated", rbNew);
+      assertEquals("blocks replicated counter incremented", 1, blocksReplicated);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }


[17/47] hadoop git commit: HDFS-5215. dfs.datanode.du.reserved is not considered while computing available space (Brahma Reddy Battula via Yongjun Zhang)

Posted by zj...@apache.org.
HDFS-5215. dfs.datanode.du.reserved is not considered while computing
available space (Brahma Reddy Battula via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6529c50b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6529c50b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6529c50b

Branch: refs/heads/YARN-2928
Commit: 6529c50babbd4e0ac568bb23832bf84832f32cc5
Parents: 273fef2
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Tue Apr 7 19:31:58 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:58 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt         |  3 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java      |  6 +++---
 .../datanode/fsdataset/impl/FsVolumeImpl.java       | 16 ++++++++++++----
 3 files changed, 18 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6529c50b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 51d84f4..46d0217 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -421,6 +421,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
     goes for infinite loop (vinayakumarb)
 
+    HDFS-5215. dfs.datanode.du.reserved is not considered while computing
+    available space (Brahma Reddy Battula via Yongjun Zhang)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6529c50b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 6bcbe5a..6800984 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2494,9 +2494,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    */
   private static class VolumeInfo {
     final String directory;
-    final long usedSpace;
-    final long freeSpace;
-    final long reservedSpace;
+    final long usedSpace; // size of space used by HDFS
+    final long freeSpace; // size of free space excluding reserved space
+    final long reservedSpace; // size of space reserved for non-HDFS and RBW
 
     VolumeInfo(FsVolumeImpl v, long usedSpace, long freeSpace) {
       this.directory = v.toString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6529c50b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 4dbc7f1..922ede5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -305,9 +305,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
   
   /**
-   * Calculate the capacity of the filesystem, after removing any
-   * reserved capacity.
-   * @return the unreserved number of bytes left in this filesystem. May be zero.
+   * Return the configured capacity of the file system if one is set; otherwise
+   * the capacity of the file system, excluding space reserved for non-HDFS.
+   * 
+   * @return the unreserved number of bytes left in this filesystem. May be
+   *         zero.
    */
   @VisibleForTesting
   public long getCapacity() {
@@ -329,10 +331,16 @@ public class FsVolumeImpl implements FsVolumeSpi {
     this.configuredCapacity = capacity;
   }
 
+  /**
+   * Calculate the available space of the filesystem, excluding space reserved
+   * for non-HDFS and space reserved for RBW.
+   * 
+   * @return the available number of bytes left in this filesystem. May be zero.
+   */
   @Override
   public long getAvailable() throws IOException {
     long remaining = getCapacity() - getDfsUsed() - reservedForRbw.get();
-    long available = usage.getAvailable();
+    long available = usage.getAvailable() - reserved - reservedForRbw.get();
     if (remaining > available) {
       remaining = available;
     }
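
To make the effect of this fix concrete, here is a worked example with
made-up numbers (all figures are hypothetical, chosen only to illustrate the
arithmetic in getAvailable() above):

    public class AvailableSpaceDemo {
      public static void main(String[] args) {
        final long G = 1L << 30;
        // Hypothetical 100 GB volume.
        long reserved       = 10 * G;  // dfs.datanode.du.reserved
        long reservedForRbw = 2 * G;   // held for replicas being written (RBW)
        long dfsUsed        = 20 * G;  // finalized HDFS block data
        long diskFree       = 65 * G;  // usage.getAvailable(): raw free space
        long capacity       = 100 * G - reserved;  // getCapacity(): 90 GB

        long remaining = capacity - dfsUsed - reservedForRbw;   // 68 GB
        long available = diskFree - reserved - reservedForRbw;  // 53 GB with fix
        System.out.println(Math.min(remaining, available) / G); // -> 53
      }
    }

Before the change, available was simply usage.getAvailable() (65 GB here), so
space reserved for non-HDFS use and for RBW was still reported as usable;
subtracting both keeps getAvailable() consistent with getCapacity().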


[27/47] hadoop git commit: HDFS-8089. Move o.a.h.hdfs.web.resources.* to the client jars. Contributed by Haohui Mai.

Posted by zj...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
deleted file mode 100644
index 94a7f8e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Integer parameter. */
-abstract class IntegerParam extends Param<Integer, IntegerParam.Domain> {
-  IntegerParam(final Domain domain, final Integer value,
-      final Integer min, final Integer max) {
-    super(domain, value);
-    checkRange(min, max);
-  }
-
-  private void checkRange(final Integer min, final Integer max) {
-    if (value == null) {
-      return;
-    }
-    if (min != null && value < min) {
-      throw new IllegalArgumentException("Invalid parameter range: " + getName()
-          + " = " + domain.toString(value) + " < " + domain.toString(min));
-    }
-    if (max != null && value > max) {
-      throw new IllegalArgumentException("Invalid parameter range: " + getName()
-          + " = " + domain.toString(value) + " > " + domain.toString(max));
-    }
-  }
-  
-  @Override
-  public String toString() {
-    return getName() + "=" + domain.toString(getValue());
-  }
-
-  /** @return the parameter value as a string */
-  @Override
-  public String getValueString() {
-    return domain.toString(getValue());
-  }
-
-  /** The domain of the parameter. */
-  static final class Domain extends Param.Domain<Integer> {
-    /** The radix of the number. */
-    final int radix;
-
-    Domain(final String paramName) {
-      this(paramName, 10);
-    }
-
-    Domain(final String paramName, final int radix) {
-      super(paramName);
-      this.radix = radix;
-    }
-
-    @Override
-    public String getDomain() {
-      return "<" + NULL + " | int in radix " + radix + ">";
-    }
-
-    @Override
-    Integer parse(final String str) {
-      try{
-        return NULL.equals(str) || str == null ? null : Integer.parseInt(str,
-          radix);
-      } catch(NumberFormatException e) {
-        throw new IllegalArgumentException("Failed to parse \"" + str
-            + "\" as a radix-" + radix + " integer.", e);
-      }
-    }
-
-    /** Convert an Integer to a String. */ 
-    String toString(final Integer n) {
-      return n == null? NULL: Integer.toString(n, radix);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
deleted file mode 100644
index 5a609ee..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Length parameter. */
-public class LengthParam extends LongParam {
-  /** Parameter name. */
-  public static final String NAME = "length";
-  /** Default parameter value. */
-  public static final String DEFAULT = NULL;
-
-  private static final Domain DOMAIN = new Domain(NAME);
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public LengthParam(final Long value) {
-    super(DOMAIN, value, 0L, null);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public LengthParam(final String str) {
-    this(DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-
-  public long getLength() {
-    Long v = getValue();
-    return v == null ? -1 : v;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
deleted file mode 100644
index 5f30094..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Long parameter. */
-abstract class LongParam extends Param<Long, LongParam.Domain> {
-  LongParam(final Domain domain, final Long value, final Long min, final Long max) {
-    super(domain, value);
-    checkRange(min, max);
-  }
-
-  private void checkRange(final Long min, final Long max) {
-    if (value == null) {
-      return;
-    }
-    if (min != null && value < min) {
-      throw new IllegalArgumentException("Invalid parameter range: " + getName()
-          + " = " + domain.toString(value) + " < " + domain.toString(min));
-    }
-    if (max != null && value > max) {
-      throw new IllegalArgumentException("Invalid parameter range: " + getName()
-          + " = " + domain.toString(value) + " > " + domain.toString(max));
-    }
-  }
-  
-  @Override
-  public String toString() {
-    return getName() + "=" + domain.toString(getValue());
-  }
-
-  /** @return the parameter value as a string */
-  @Override
-  public String getValueString() {
-    return domain.toString(getValue());
-  }
-
-  /** The domain of the parameter. */
-  static final class Domain extends Param.Domain<Long> {
-    /** The radix of the number. */
-    final int radix;
-
-    Domain(final String paramName) {
-      this(paramName, 10);
-    }
-
-    Domain(final String paramName, final int radix) {
-      super(paramName);
-      this.radix = radix;
-    }
-
-    @Override
-    public String getDomain() {
-      return "<" + NULL + " | long in radix " + radix + ">";
-    }
-
-    @Override
-    Long parse(final String str) {
-      try {
-        return NULL.equals(str) || str == null ? null: Long.parseLong(str,
-          radix);
-      } catch(NumberFormatException e) {
-        throw new IllegalArgumentException("Failed to parse \"" + str
-            + "\" as a radix-" + radix + " long integer.", e);
-      }
-    }
-
-    /** Convert a Long to a String. */ 
-    String toString(final Long n) {
-      return n == null? NULL: Long.toString(n, radix);
-    }
-  }
-}
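
The numeric parameter classes (IntegerParam above, LongParam here, and
ShortParam later in this commit) all share this checkRange() pattern, so an
out-of-range value is rejected when the parameter object is constructed. A
small sketch using the LengthParam shown earlier (illustrative only):

    import org.apache.hadoop.hdfs.web.resources.LengthParam;

    public class RangeCheckDemo {
      public static void main(String[] args) {
        try {
          // LengthParam's minimum is 0L, so a negative length fails fast.
          new LengthParam(-1L);
        } catch (IllegalArgumentException e) {
          System.out.println(e.getMessage());
          // -> Invalid parameter range: length = -1 < 0
        }
      }
    }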

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
deleted file mode 100644
index 59911d7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Modification time parameter. */
-public class ModificationTimeParam extends LongParam {
-  /** Parameter name. */
-  public static final String NAME = "modificationtime";
-  /** Default parameter value. */
-  public static final String DEFAULT = "-1";
-
-  private static final Domain DOMAIN = new Domain(NAME);
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public ModificationTimeParam(final Long value) {
-    super(DOMAIN, value, -1L, null);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public ModificationTimeParam(final String str) {
-    this(DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
deleted file mode 100644
index 83aba9e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/NewLengthParam.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** NewLength parameter. */
-public class NewLengthParam extends LongParam {
-  /** Parameter name. */
-  public static final String NAME = "newlength";
-  /** Default parameter value. */
-  public static final String DEFAULT = NULL;
-
-  private static final Domain DOMAIN = new Domain(NAME);
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public NewLengthParam(final Long value) {
-    super(DOMAIN, value, 0L, null);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public NewLengthParam(final String str) {
-    this(DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
deleted file mode 100644
index 6d88703..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Offset parameter. */
-public class OffsetParam extends LongParam {
-  /** Parameter name. */
-  public static final String NAME = "offset";
-  /** Default parameter value. */
-  public static final String DEFAULT = "0";
-
-  private static final Domain DOMAIN = new Domain(NAME);
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public OffsetParam(final Long value) {
-    super(DOMAIN, value, 0L, null);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public OffsetParam(final String str) {
-    this(DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-
-  public Long getOffset() {
-    Long offset = getValue();
-    return (offset == null) ? Long.valueOf(0) : offset;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OldSnapshotNameParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OldSnapshotNameParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OldSnapshotNameParam.java
deleted file mode 100644
index 511adc7..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OldSnapshotNameParam.java
+++ /dev/null
@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/**
- * The old snapshot name parameter for renameSnapshot operation.
- */
-public class OldSnapshotNameParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "oldsnapshotname";
-
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static final Domain DOMAIN = new Domain(NAME, null);
-  
-  public OldSnapshotNameParam(final String str) {
-    super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
deleted file mode 100644
index f6945bb..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OverwriteParam.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Overwrite parameter. */
-public class OverwriteParam extends BooleanParam {
-  /** Parameter name. */
-  public static final String NAME = "overwrite";
-  /** Default parameter value. */
-  public static final String DEFAULT = FALSE;
-
-  private static final Domain DOMAIN = new Domain(NAME);
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public OverwriteParam(final Boolean value) {
-    super(DOMAIN, value);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public OverwriteParam(final String str) {
-    this(DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java
deleted file mode 100644
index a1c10aa..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OwnerParam.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Owner parameter. */
-public class OwnerParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "owner";
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static final Domain DOMAIN = new Domain(NAME, null);
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public OwnerParam(final String str) {
-    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/Param.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/Param.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/Param.java
deleted file mode 100644
index 79a831b..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/Param.java
+++ /dev/null
@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.io.UnsupportedEncodingException;
-import java.net.URLEncoder;
-import java.util.Arrays;
-import java.util.Comparator;
-
-
-/** Base class of parameters. */
-public abstract class Param<T, D extends Param.Domain<T>> {
-  static final String NULL = "null";
-  
-  static final Comparator<Param<?,?>> NAME_CMP = new Comparator<Param<?,?>>() {
-    @Override
-    public int compare(Param<?, ?> left, Param<?, ?> right) {
-      return left.getName().compareTo(right.getName());
-    }
-  };
-
-  /** Convert the parameters to a sorted String.
-   *
-   * @param separator URI parameter separator character
-   * @param parameters parameters to encode into a string
-   * @return the encoded URI string
-   */
-  public static String toSortedString(final String separator,
-      final Param<?, ?>... parameters) {
-    Arrays.sort(parameters, NAME_CMP);
-    final StringBuilder b = new StringBuilder();
-    try {
-      for(Param<?, ?> p : parameters) {
-        if (p.getValue() != null) {
-          b.append(separator).append(
-              URLEncoder.encode(p.getName(), "UTF-8")
-              + "="
-              + URLEncoder.encode(p.getValueString(), "UTF-8"));
-        }
-      }
-    } catch (UnsupportedEncodingException e) {
-      // Sane systems know about UTF-8, so this should never happen.
-      throw new RuntimeException(e);
-    }
-    return b.toString();
-  }
-
-  /** The domain of the parameter. */
-  final D domain;
-  /** The actual parameter value. */
-  final T value;
-
-  Param(final D domain, final T value) {
-    this.domain = domain;
-    this.value = value;
-  }
-
-  /** @return the parameter value. */
-  public final T getValue() {
-    return value;
-  }
-
-  /** @return the parameter value as a string */
-  public abstract String getValueString();
-
-  /** @return the parameter name. */
-  public abstract String getName();
-
-  @Override
-  public String toString() {
-    return getName() + "=" + value;
-  }
-
-  /** Base class of parameter domains. */
-  static abstract class Domain<T> {
-    /** Parameter name. */
-    final String paramName;
-    
-    Domain(final String paramName) {
-      this.paramName = paramName;
-    }
- 
-    /** @return the parameter name. */
-    public final String getParamName() {
-      return paramName;
-    }
-
-    /** @return a string description of the domain of the parameter. */
-    public abstract String getDomain();
-
-    /** @return the parameter value represented by the string. */
-    abstract T parse(String str);
-
-    /** Parse the given string.
-     * @return the parameter value represented by the string.
-     */
-    public final T parse(final String varName, final String str) {
-      try {
-        return str != null && str.trim().length() > 0 ? parse(str) : null;
-      } catch(Exception e) {
-        throw new IllegalArgumentException("Failed to parse \"" + str
-            + "\" for the parameter " + varName
-            + ".  The value must be in the domain " + getDomain(), e);
-      }
-    }
-  }
-}
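
Param.toSortedString() is the piece that turns a set of these objects into
the query component of a WebHDFS URL: parameters are sorted by name,
URL-encoded, and each is prefixed with the separator, so the result is
intended to be appended after the leading op parameter. A quick sketch
(illustrative only):

    import org.apache.hadoop.hdfs.web.resources.LengthParam;
    import org.apache.hadoop.hdfs.web.resources.OffsetParam;
    import org.apache.hadoop.hdfs.web.resources.Param;

    public class QueryStringDemo {
      public static void main(String[] args) {
        String q = Param.toSortedString("&",
            new OffsetParam(1024L), new LengthParam(4096L));
        System.out.println(q);  // -> &length=4096&offset=1024
      }
    }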

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
deleted file mode 100644
index b22b9c3..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import org.apache.hadoop.fs.permission.FsPermission;
-
-/** Permission parameter, use a Short to represent a FsPermission. */
-public class PermissionParam extends ShortParam {
-  /** Parameter name. */
-  public static final String NAME = "permission";
-  /** Default parameter value. */
-  public static final String DEFAULT = NULL;
-
-  private static final Domain DOMAIN = new Domain(NAME, 8);
-
-  private static final short DEFAULT_PERMISSION = 0755;
-
-  /** @return the default FsPermission. */
-  public static FsPermission getDefaultFsPermission() {
-    return new FsPermission(DEFAULT_PERMISSION);
-  }
-  
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public PermissionParam(final FsPermission value) {
-    super(DOMAIN, value == null? null: value.toShort(), null, null);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public PermissionParam(final String str) {
-    super(DOMAIN, DOMAIN.parse(str), (short)0, (short)01777);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-
-  /** @return the represented FsPermission. */
-  public FsPermission getFsPermission() {
-    final Short v = getValue();
-    return new FsPermission(v != null? v: DEFAULT_PERMISSION);
-  }
-}
\ No newline at end of file
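
Note the Domain(NAME, 8) above: permission strings are parsed in radix 8, so
the parameter accepts the usual Unix octal notation. For illustration (a
sketch):

    import org.apache.hadoop.fs.permission.FsPermission;
    import org.apache.hadoop.hdfs.web.resources.PermissionParam;

    public class PermissionDemo {
      public static void main(String[] args) {
        // The string "755" parses as octal to the short 0755.
        FsPermission p = new PermissionParam("755").getFsPermission();
        System.out.println(p);  // -> rwxr-xr-x
      }
    }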

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
deleted file mode 100644
index 13f792e..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.net.HttpURLConnection;
-
-/** Http POST operation parameter. */
-public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
-  /** Post operations. */
-  public static enum Op implements HttpOpParam.Op {
-    APPEND(true, HttpURLConnection.HTTP_OK),
-
-    CONCAT(false, HttpURLConnection.HTTP_OK),
-
-    TRUNCATE(false, HttpURLConnection.HTTP_OK),
-
-    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
-
-    final boolean doOutputAndRedirect;
-    final int expectedHttpResponseCode;
-
-    Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode) {
-      this.doOutputAndRedirect = doOutputAndRedirect;
-      this.expectedHttpResponseCode = expectedHttpResponseCode;
-    }
-
-    @Override
-    public Type getType() {
-      return Type.POST;
-    }
-    
-    @Override
-    public boolean getRequireAuth() {
-      return false;
-    }
-
-    @Override
-    public boolean getDoOutput() {
-      return doOutputAndRedirect;
-    }
-
-    @Override
-    public boolean getRedirect() {
-      return doOutputAndRedirect;
-    }
-
-    @Override
-    public int getExpectedHttpResponseCode() {
-      return expectedHttpResponseCode;
-    }
-
-    /** @return a URI query string. */
-    @Override
-    public String toQueryString() {
-      return NAME + "=" + this;
-    }
-  }
-
-  private static final Domain<Op> DOMAIN = new Domain<PostOpParam.Op>(NAME, Op.class);
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public PostOpParam(final String str) {
-    super(DOMAIN, DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
deleted file mode 100644
index 7fd2b71..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.net.HttpURLConnection;
-
-/** Http PUT operation parameter. */
-public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
-  /** Put operations. */
-  public static enum Op implements HttpOpParam.Op {
-    CREATE(true, HttpURLConnection.HTTP_CREATED),
-
-    MKDIRS(false, HttpURLConnection.HTTP_OK),
-    CREATESYMLINK(false, HttpURLConnection.HTTP_OK),
-    RENAME(false, HttpURLConnection.HTTP_OK),
-    SETREPLICATION(false, HttpURLConnection.HTTP_OK),
-
-    SETOWNER(false, HttpURLConnection.HTTP_OK),
-    SETPERMISSION(false, HttpURLConnection.HTTP_OK),
-    SETTIMES(false, HttpURLConnection.HTTP_OK),
-    
-    RENEWDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
-    CANCELDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK, true),
-    
-    MODIFYACLENTRIES(false, HttpURLConnection.HTTP_OK),
-    REMOVEACLENTRIES(false, HttpURLConnection.HTTP_OK),
-    REMOVEDEFAULTACL(false, HttpURLConnection.HTTP_OK),
-    REMOVEACL(false, HttpURLConnection.HTTP_OK),
-    SETACL(false, HttpURLConnection.HTTP_OK),
-    
-    SETXATTR(false, HttpURLConnection.HTTP_OK), 
-    REMOVEXATTR(false, HttpURLConnection.HTTP_OK),
-
-    CREATESNAPSHOT(false, HttpURLConnection.HTTP_OK),
-    RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK),
-    
-    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
-
-    final boolean doOutputAndRedirect;
-    final int expectedHttpResponseCode;
-    final boolean requireAuth;
-
-    Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode) {
-      this(doOutputAndRedirect, expectedHttpResponseCode, false);
-    }
-    
-    Op(final boolean doOutputAndRedirect, final int expectedHttpResponseCode,
-       final boolean requireAuth) {
-      this.doOutputAndRedirect = doOutputAndRedirect;
-      this.expectedHttpResponseCode = expectedHttpResponseCode;
-      this.requireAuth = requireAuth;
-    }
-
-    @Override
-    public HttpOpParam.Type getType() {
-      return HttpOpParam.Type.PUT;
-    }
-    
-    @Override
-    public boolean getRequireAuth() {
-      return requireAuth;
-    }
-
-    @Override
-    public boolean getDoOutput() {
-      return doOutputAndRedirect;
-    }
-
-    @Override
-    public boolean getRedirect() {
-      return doOutputAndRedirect;
-    }
-
-    @Override
-    public int getExpectedHttpResponseCode() {
-      return expectedHttpResponseCode;
-    }
-
-    @Override
-    public String toQueryString() {
-      return NAME + "=" + this;
-    }
-  }
-
-  private static final Domain<Op> DOMAIN = new Domain<Op>(NAME, Op.class);
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public PutOpParam(final String str) {
-    super(DOMAIN, DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file
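
Each Op constant carries its HTTP semantics: whether the request writes a
body and follows a redirect (doOutputAndRedirect), whether authentication is
required, and the expected response code, so a WebHDFS client can build and
validate requests generically. Assuming the parameter name "op" defined in
HttpOpParam (not shown in this excerpt), a sketch:

    import org.apache.hadoop.hdfs.web.resources.PutOpParam;

    public class OpDemo {
      public static void main(String[] args) {
        PutOpParam.Op op = PutOpParam.Op.CREATE;
        System.out.println(op.toQueryString());               // -> op=CREATE
        System.out.println(op.getExpectedHttpResponseCode()); // -> 201
        // Typical request shape (host and path are placeholders):
        //   PUT http://<namenode>:<port>/webhdfs/v1/<path>?op=CREATE&overwrite=true
      }
    }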

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java
deleted file mode 100644
index 4890a61..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RecursiveParam.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Recursive parameter. */
-public class RecursiveParam extends BooleanParam {
-  /** Parameter name. */
-  public static final String NAME = "recursive";
-  /** Default parameter value. */
-  public static final String DEFAULT = FALSE;
-
-  private static final Domain DOMAIN = new Domain(NAME);
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public RecursiveParam(final Boolean value) {
-    super(DOMAIN, value);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public RecursiveParam(final String str) {
-    this(DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
deleted file mode 100644
index d7c157d..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenameOptionSetParam.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import org.apache.hadoop.fs.Options;
-
-/** Rename option set parameter. */
-public class RenameOptionSetParam extends EnumSetParam<Options.Rename> {
-  /** Parameter name. */
-  public static final String NAME = "renameoptions";
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static final Domain<Options.Rename> DOMAIN = new Domain<Options.Rename>(
-      NAME, Options.Rename.class);
-
-  /**
-   * Constructor.
-   * @param options rename options.
-   */
-  public RenameOptionSetParam(final Options.Rename... options) {
-    super(DOMAIN, toEnumSet(Options.Rename.class, options));
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public RenameOptionSetParam(final String str) {
-    super(DOMAIN, DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java
deleted file mode 100644
index 750e8bc..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/RenewerParam.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Renewer parameter. */
-public class RenewerParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "renewer";
-  /** Default parameter value. */
-  public static final String DEFAULT = NULL;
-
-  private static final Domain DOMAIN = new Domain(NAME, null);
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public RenewerParam(final String str) {
-    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java
deleted file mode 100644
index 797709a..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-
-import org.apache.hadoop.conf.Configuration;
-
-/** Replication parameter. */
-public class ReplicationParam extends ShortParam {
-  /** Parameter name. */
-  public static final String NAME = "replication";
-  /** Default parameter value. */
-  public static final String DEFAULT = NULL;
-
-  private static final Domain DOMAIN = new Domain(NAME);
-
-  /**
-   * Constructor.
-   * @param value the parameter value.
-   */
-  public ReplicationParam(final Short value) {
-    super(DOMAIN, value, (short)1, null);
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public ReplicationParam(final String str) {
-    this(DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-
-  /** @return the value or, if it is null, return the default from conf. */
-  public short getValue(final Configuration conf) {
-    return getValue() != null? getValue()
-        : (short)conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java
deleted file mode 100644
index 43ebbf4..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/** Short parameter. */
-abstract class ShortParam extends Param<Short, ShortParam.Domain> {
-  ShortParam(final Domain domain, final Short value,
-      final Short min, final Short max) {
-    super(domain, value);
-    checkRange(min, max);
-  }
-
-  private void checkRange(final Short min, final Short max) {
-    if (value == null) {
-      return;
-    }
-    if (min != null && value < min) {
-      throw new IllegalArgumentException("Invalid parameter range: " + getName()
-          + " = " + domain.toString(value) + " < " + domain.toString(min));
-    }
-    if (max != null && value > max) {
-      throw new IllegalArgumentException("Invalid parameter range: " + getName()
-          + " = " + domain.toString(value) + " > " + domain.toString(max));
-    }
-  }
-  
-  @Override
-  public String toString() {
-    return getName() + "=" + domain.toString(getValue());
-  }
-
-  /** @return the parameter value as a string */
-  @Override
-  public final String getValueString() {
-    return domain.toString(getValue());
-  }
-
-  /** The domain of the parameter. */
-  static final class Domain extends Param.Domain<Short> {
-    /** The radix of the number. */
-    final int radix;
-
-    Domain(final String paramName) {
-      this(paramName, 10);
-    }
-
-    Domain(final String paramName, final int radix) {
-      super(paramName);
-      this.radix = radix;
-    }
-
-    @Override
-    public String getDomain() {
-      return "<" + NULL + " | short in radix " + radix + ">";
-    }
-
-    @Override
-    Short parse(final String str) {
-      try {
-        return NULL.equals(str) || str == null ? null : Short.parseShort(str,
-          radix);
-      } catch(NumberFormatException e) {
-        throw new IllegalArgumentException("Failed to parse \"" + str
-            + "\" as a radix-" + radix + " short integer.", e);
-      }
-    }
-
-    /** Convert a Short to a String. */ 
-    String toString(final Short n) {
-      return n == null? NULL: Integer.toString(n, radix);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/SnapshotNameParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/SnapshotNameParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/SnapshotNameParam.java
deleted file mode 100644
index 72589cc..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/SnapshotNameParam.java
+++ /dev/null
@@ -1,41 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/**
- * The snapshot name parameter for createSnapshot and deleteSnapshot operation.
- * Also used to indicate the new snapshot name for renameSnapshot operation.
- */
-public class SnapshotNameParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "snapshotname";
-
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static final Domain DOMAIN = new Domain(NAME, null);
-  
-  public SnapshotNameParam(final String str) {
-    super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java
deleted file mode 100644
index f063120..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/StringParam.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.util.regex.Pattern;
-
-/** String parameter. */
-abstract class StringParam extends Param<String, StringParam.Domain> {
-  StringParam(final Domain domain, String str) {
-    super(domain, domain.parse(str));
-  }
-
-  /** @return the parameter value as a string */
-  @Override
-  public String getValueString() {
-    return value;
-  }
-
-  /** The domain of the parameter. */
-  static final class Domain extends Param.Domain<String> {
-    /** The pattern defining the domain; null means any string is accepted. */
-    private final Pattern pattern;
-
-    Domain(final String paramName, final Pattern pattern) {
-      super(paramName);
-      this.pattern = pattern;
-    }
-
-    @Override
-    public final String getDomain() {
-      return pattern == null ? "<String>" : pattern.pattern();
-    }
-
-    @Override
-    final String parse(final String str) {
-      if (str != null && pattern != null) {
-        if (!pattern.matcher(str).matches()) {
-          throw new IllegalArgumentException("Invalid value: \"" + str
-              + "\" does not belong to the domain " + getDomain());
-        }
-      }
-      return str;
-    }
-  }
-}
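
The pattern check in StringParam.Domain.parse amounts to this self-contained
sketch, assuming only java.util.regex:

    import java.util.regex.Pattern;

    public class PatternDomainDemo {
      // Null strings and null patterns pass through; otherwise the value must match.
      static String parse(String str, Pattern pattern) {
        if (str != null && pattern != null && !pattern.matcher(str).matches()) {
          throw new IllegalArgumentException("Invalid value: \"" + str
              + "\" does not belong to the domain " + pattern.pattern());
        }
        return str;
      }

      public static void main(String[] args) {
        Pattern p = Pattern.compile("[a-z][a-z0-9]*");
        System.out.println(parse("alice", p));   // accepted
        try {
          parse("9bad", p);                      // rejected
        } catch (IllegalArgumentException e) {
          System.out.println(e.getMessage());
        }
      }
    }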

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenArgumentParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenArgumentParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenArgumentParam.java
deleted file mode 100644
index 53b38ac..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenArgumentParam.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-/**
- * Represents the delegation token parameter as a method argument. This is
- * different from {@link DelegationParam}.
- */
-public class TokenArgumentParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "token";
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static final Domain DOMAIN = new Domain(NAME, null);
-
-  /**
-   * Constructor.
-   * @param str A string representation of the parameter value.
-   */
-  public TokenArgumentParam(final String str) {
-    super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
deleted file mode 100644
index 481d8fe..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserParam.java
+++ /dev/null
@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;
-import org.apache.hadoop.security.UserGroupInformation;
-import com.google.common.annotations.VisibleForTesting;
- 
-import java.text.MessageFormat;
-import java.util.regex.Pattern;
-
-/** User parameter. */
-public class UserParam extends StringParam {
-  /** Parameter name. */
-  public static final String NAME = "user.name";
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static Domain domain = new Domain(NAME, Pattern.compile(DFS_WEBHDFS_USER_PATTERN_DEFAULT));
-
-  @VisibleForTesting
-  public static Domain getUserPatternDomain() {
-    return domain;
-  }
-
-  @VisibleForTesting
-  public static void setUserPatternDomain(Domain dm) {
-    domain = dm;
-  }
-
-  public static void setUserPattern(String pattern) {
-    domain = new Domain(NAME, Pattern.compile(pattern));
-  }
-
-  private static String validateLength(String str) {
-    if (str == null) {
-      throw new IllegalArgumentException(
-        MessageFormat.format("Parameter [{0}] cannot be NULL", NAME));
-    }
-    int len = str.length();
-    if (len < 1) {
-      throw new IllegalArgumentException(MessageFormat.format(
-        "Parameter [{0}], it's length must be at least 1", NAME));
-    }
-    return str;
-  }
-
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public UserParam(final String str) {
-    super(domain, str == null || str.equals(DEFAULT)? null : validateLength(str));
-  }
-
-  /**
-   * Construct an object from a UGI.
-   */
-  public UserParam(final UserGroupInformation ugi) {
-    this(ugi.getShortUserName());
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-}
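
Putting the pieces of UserParam together: null and empty names are rejected up
front, then the configured pattern is applied. A minimal sketch of that
two-step check; the default pattern is quoted from memory of
DFS_WEBHDFS_USER_PATTERN_DEFAULT, so treat it as an assumption:

    import java.util.regex.Pattern;

    public class UserParamDemo {
      // Assumed default WebHDFS user pattern (see DFS_WEBHDFS_USER_PATTERN_DEFAULT).
      static final Pattern USER = Pattern.compile("^[A-Za-z_][A-Za-z0-9._-]*[$]?$");

      static String validate(String name) {
        if (name == null || name.isEmpty()) {
          throw new IllegalArgumentException("Parameter [user.name] cannot be NULL or empty");
        }
        if (!USER.matcher(name).matches()) {
          throw new IllegalArgumentException("Invalid value: \"" + name
              + "\" does not belong to the domain " + USER.pattern());
        }
        return name;
      }

      public static void main(String[] args) {
        System.out.println(validate("hdfs"));    // accepted
        System.out.println(validate("_svc$"));   // accepted: trailing $ for service principals
      }
    }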

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java
deleted file mode 100644
index 36057c5..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrEncodingParam.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import org.apache.hadoop.fs.XAttrCodec;
-
-public class XAttrEncodingParam extends EnumParam<XAttrCodec> {
-  /** Parameter name. */
-  public static final String NAME = "encoding";
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-  
-  private static final Domain<XAttrCodec> DOMAIN = 
-      new Domain<XAttrCodec>(NAME, XAttrCodec.class);
-  
-  public XAttrEncodingParam(final XAttrCodec encoding) {
-    super(DOMAIN, encoding);
-  }
-  
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public XAttrEncodingParam(final String str) {
-    super(DOMAIN, str != null && !str.isEmpty() ? DOMAIN.parse(str) : null);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-  
-  @Override
-  public String getValueString() {
-    return value.toString();
-  }
-  
-  public XAttrCodec getEncoding() {
-    return getValue();
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
deleted file mode 100644
index 8137b44..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.util.regex.Pattern;
-
-public class XAttrNameParam extends StringParam {
-  /** Parameter name. **/
-  public static final String NAME = "xattr.name";
-  /** Default parameter value. **/
-  public static final String DEFAULT = "";
-  
-  private static Domain DOMAIN = new Domain(NAME,
-      Pattern.compile(".*"));
-  
-  public XAttrNameParam(final String str) {
-    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-  
-  public String getXAttrName() {
-    final String v = getValue();
-    return v;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java
deleted file mode 100644
index 7fa2982..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrSetFlagParam.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.util.EnumSet;
-
-import org.apache.hadoop.fs.XAttrSetFlag;
-
-public class XAttrSetFlagParam extends EnumSetParam<XAttrSetFlag> {
-  /** Parameter name. */
-  public static final String NAME = "flag";
-  /** Default parameter value. */
-  public static final String DEFAULT = "";
-
-  private static final Domain<XAttrSetFlag> DOMAIN = new Domain<XAttrSetFlag>(
-      NAME, XAttrSetFlag.class);
-
-  public XAttrSetFlagParam(final EnumSet<XAttrSetFlag> flag) {
-    super(DOMAIN, flag);
-  }
-  
-  /**
-   * Constructor.
-   * @param str a string representation of the parameter value.
-   */
-  public XAttrSetFlagParam(final String str) {
-    super(DOMAIN, DOMAIN.parse(str));
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-  
-  public EnumSet<XAttrSetFlag> getFlag() {
-    return getValue();
-  }
-}
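
The flag parameter rides on EnumSetParam, whose domain parses a
comma-separated, case-insensitive list into an EnumSet. Roughly, as a sketch
rather than the exact Hadoop implementation:

    import java.util.EnumSet;
    import java.util.Locale;

    public class EnumSetParseDemo {
      enum Flag { CREATE, REPLACE }

      // Parse e.g. "?flag=CREATE,REPLACE" into an EnumSet of the given type.
      static <E extends Enum<E>> EnumSet<E> parse(Class<E> cls, String str) {
        EnumSet<E> set = EnumSet.noneOf(cls);
        if (str != null && !str.isEmpty()) {
          for (String s : str.split(",")) {
            set.add(Enum.valueOf(cls, s.trim().toUpperCase(Locale.ROOT)));
          }
        }
        return set;
      }

      public static void main(String[] args) {
        System.out.println(parse(Flag.class, "create,REPLACE"));  // [CREATE, REPLACE]
      }
    }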

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21655165/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java
deleted file mode 100644
index 60f86ae..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrValueParam.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.web.resources;
-
-import java.io.IOException;
-
-import org.apache.hadoop.fs.XAttrCodec;
-
-public class XAttrValueParam extends StringParam {
-  /** Parameter name. **/
-  public static final String NAME = "xattr.value";
-  /** Default parameter value. **/
-  public static final String DEFAULT = "";
-  
-  private static Domain DOMAIN = new Domain(NAME, null);
-  
-  public XAttrValueParam(final String str) {
-    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
-  }
-
-  @Override
-  public String getName() {
-    return NAME;
-  }
-  
-  public byte[] getXAttrValue() throws IOException {
-    final String v = getValue();
-    return XAttrCodec.decodeValue(v);
-  }
-}
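
getXAttrValue() above defers to XAttrCodec for decoding. As far as I recall,
XAttrCodec accepts plain text, 0x-prefixed hex, and 0s-prefixed base64; a
small usage sketch under that assumption:

    import java.io.IOException;
    import org.apache.hadoop.fs.XAttrCodec;

    public class XAttrValueDecodeDemo {
      public static void main(String[] args) throws IOException {
        // Both forms should decode to the same two bytes {1, 2},
        // assuming XAttrCodec's prefixes (0x = hex, 0s = base64).
        byte[] fromHex = XAttrCodec.decodeValue("0x0102");
        byte[] fromB64 = XAttrCodec.decodeValue("0sAQI=");
        System.out.println(fromHex.length + " " + fromB64.length);  // 2 2
      }
    }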


[04/47] hadoop git commit: YARN-3110. Few issues in ApplicationHistory web ui. Contributed by Naganarasimha G R

Posted by zj...@apache.org.
YARN-3110. Few issues in ApplicationHistory web ui. Contributed by Naganarasimha G R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e441d5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e441d5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e441d5d

Branch: refs/heads/YARN-2928
Commit: 2e441d5d6716af0ba69441b6b39d30c3d0737f32
Parents: 7b1be9e
Author: Xuan <xg...@apache.org>
Authored: Tue Apr 7 08:22:39 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:56 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                        |  2 ++
 .../ApplicationHistoryManagerOnTimelineStore.java      |  8 +++++---
 .../hadoop/yarn/server/webapp/AppAttemptBlock.java     | 13 +++++++------
 3 files changed, 14 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e441d5d/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1142baf..8a0589f 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -207,6 +207,8 @@ Release 2.8.0 - UNRELEASED
     YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with
     error message:Invalid AMRMToken (zxu via rkanter)
 
+    YARN-3110. Few issues in ApplicationHistory web ui. (Naganarasimha G R via xgong)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e441d5d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 49041c7..db00d2c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -219,10 +219,11 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
     String type = null;
     long createdTime = 0;
     long finishedTime = 0;
+    float progress = 0.0f;
     ApplicationAttemptId latestApplicationAttemptId = null;
     String diagnosticsInfo = null;
     FinalApplicationStatus finalStatus = FinalApplicationStatus.UNDEFINED;
-    YarnApplicationState state = null;
+    YarnApplicationState state = YarnApplicationState.ACCEPTED;
     ApplicationResourceUsageReport appResources = null;
     Map<ApplicationAccessType, String> appViewACLs =
         new HashMap<ApplicationAccessType, String>();
@@ -245,7 +246,7 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
             ConverterUtils.toApplicationId(entity.getEntityId()),
             latestApplicationAttemptId, user, queue, name, null, -1, null, state,
             diagnosticsInfo, null, createdTime, finishedTime, finalStatus, null,
-            null, 1.0F, type, null), appViewACLs);
+            null, progress, type, null), appViewACLs);
       }
       if (entityInfo.containsKey(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)) {
         queue =
@@ -279,6 +280,7 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
           createdTime = event.getTimestamp();
         } else if (event.getEventType().equals(
             ApplicationMetricsConstants.FINISHED_EVENT_TYPE)) {
+          progress = 1.0F;
           finishedTime = event.getTimestamp();
           Map<String, Object> eventInfo = event.getEventInfo();
           if (eventInfo == null) {
@@ -321,7 +323,7 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
         ConverterUtils.toApplicationId(entity.getEntityId()),
         latestApplicationAttemptId, user, queue, name, null, -1, null, state,
         diagnosticsInfo, null, createdTime, finishedTime, finalStatus, appResources,
-        null, 1.0F, type, null), appViewACLs);
+        null, progress, type, null), appViewACLs);
   }
 
   private static ApplicationAttemptReport convertToApplicationAttemptReport(
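
The change above boils down to: report 0% progress and an ACCEPTED state until
a FINISHED event arrives, instead of unconditionally reporting 100% and a null
state. A tiny illustration (names are illustrative):

    public class ProgressDefaultDemo {
      enum EventType { CREATED, FINISHED }

      // Progress starts at 0 and only a FINISHED event bumps it to 1.0F.
      static float progressAfter(EventType... events) {
        float progress = 0.0f;
        for (EventType e : events) {
          if (e == EventType.FINISHED) {
            progress = 1.0f;
          }
        }
        return progress;
      }

      public static void main(String[] args) {
        System.out.println(progressAfter(EventType.CREATED));                      // 0.0
        System.out.println(progressAfter(EventType.CREATED, EventType.FINISHED)); // 1.0
      }
    }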

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e441d5d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
index 8df94e6..8695c6c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
@@ -194,17 +194,18 @@ public class AppAttemptBlock extends HtmlBlock {
   protected void generateOverview(ApplicationAttemptReport appAttemptReport,
       Collection<ContainerReport> containers, AppAttemptInfo appAttempt,
       String node) {
+    String amContainerId = appAttempt.getAmContainerId();
     info("Application Attempt Overview")
       ._(
         "Application Attempt State:",
         appAttempt.getAppAttemptState() == null ? UNAVAILABLE : appAttempt
           .getAppAttemptState())
-      ._(
-        "AM Container:",
-        appAttempt.getAmContainerId() == null || containers == null
-            || !hasAMContainer(appAttemptReport.getAMContainerId(), containers)
-            ? null : root_url("container", appAttempt.getAmContainerId()),
-        String.valueOf(appAttempt.getAmContainerId()))
+      ._("AM Container:",
+          amContainerId == null
+              || containers == null
+              || !hasAMContainer(appAttemptReport.getAMContainerId(),
+                  containers) ? null : root_url("container", amContainerId),
+          amContainerId == null ? "N/A" : amContainerId)
       ._("Node:", node)
       ._(
         "Tracking URL:",


[03/47] hadoop git commit: YARN-2666. Commit 53959e6

Posted by zj...@apache.org.
YARN-2666. Commit 53959e6


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ac57481
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ac57481
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ac57481

Branch: refs/heads/YARN-2928
Commit: 9ac574817f94944a1b6dcf09ee517fbda9f45fb2
Parents: 2e441d5
Author: Karthik Kambatla <ka...@apache.org>
Authored: Tue Apr 7 09:07:57 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:56 2015 -0700

----------------------------------------------------------------------

----------------------------------------------------------------------



[33/47] hadoop git commit: HADOOP-11814. Reformat hadoop-annotations, o.a.h.classification.tools. Contributed by Li Lu.

Posted by zj...@apache.org.
HADOOP-11814. Reformat hadoop-annotations, o.a.h.classification.tools. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c4c2dd4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c4c2dd4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c4c2dd4

Branch: refs/heads/YARN-2928
Commit: 9c4c2dd49486dd83057fa5a9d4c0a995675d3dab
Parents: 2165516
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Apr 8 17:56:23 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:54 2015 -0700

----------------------------------------------------------------------
 .../ExcludePrivateAnnotationsJDiffDoclet.java   |   2 +-
 ...ExcludePrivateAnnotationsStandardDoclet.java |   2 +-
 .../classification/tools/RootDocProcessor.java  | 250 +++++++++----------
 .../classification/tools/StabilityOptions.java  |  12 +-
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 5 files changed, 136 insertions(+), 133 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c4c2dd4/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
index 66913ff..5cc422f 100644
--- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsJDiffDoclet.java
@@ -38,7 +38,7 @@ public class ExcludePrivateAnnotationsJDiffDoclet {
   
   public static boolean start(RootDoc root) {
     System.out.println(
-	ExcludePrivateAnnotationsJDiffDoclet.class.getSimpleName());
+        ExcludePrivateAnnotationsJDiffDoclet.class.getSimpleName());
     return JDiff.start(RootDocProcessor.process(root));
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c4c2dd4/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
index 62c44ea..2176ea5 100644
--- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/ExcludePrivateAnnotationsStandardDoclet.java
@@ -37,7 +37,7 @@ public class ExcludePrivateAnnotationsStandardDoclet {
   
   public static boolean start(RootDoc root) {
     System.out.println(
-	ExcludePrivateAnnotationsStandardDoclet.class.getSimpleName());
+        ExcludePrivateAnnotationsStandardDoclet.class.getSimpleName());
     return Standard.start(RootDocProcessor.process(root));
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c4c2dd4/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
index a6ce035..8042f17 100644
--- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
@@ -48,47 +48,47 @@ import org.apache.hadoop.classification.InterfaceStability;
  * Based on code from http://www.sixlegs.com/blog/java/exclude-javadoc-tag.html.
  */
 class RootDocProcessor {
-  
+
   static String stability = StabilityOptions.UNSTABLE_OPTION;
   static boolean treatUnannotatedClassesAsPrivate = false;
-  
+
   public static RootDoc process(RootDoc root) {
     return (RootDoc) process(root, RootDoc.class);
   }
-  
-  private static Object process(Object obj, Class<?> type) { 
-    if (obj == null) { 
-      return null; 
-    } 
-    Class<?> cls = obj.getClass(); 
-    if (cls.getName().startsWith("com.sun.")) { 
-      return getProxy(obj); 
-    } else if (obj instanceof Object[]) { 
-      Class<?> componentType = type.isArray() ? type.getComponentType() 
-	  : cls.getComponentType();
+
+  private static Object process(Object obj, Class<?> type) {
+    if (obj == null) {
+      return null;
+    }
+    Class<?> cls = obj.getClass();
+    if (cls.getName().startsWith("com.sun.")) {
+      return getProxy(obj);
+    } else if (obj instanceof Object[]) {
+      Class<?> componentType = type.isArray() ? type.getComponentType()
+          : cls.getComponentType();
       Object[] array = (Object[]) obj;
       Object[] newArray = (Object[]) Array.newInstance(componentType,
-	  array.length); 
+          array.length);
       for (int i = 0; i < array.length; ++i) {
         newArray[i] = process(array[i], componentType);
       }
       return newArray;
-    } 
-    return obj; 
+    }
+    return obj;
   }
-  
+
   private static Map<Object, Object> proxies =
-    new WeakHashMap<Object, Object>(); 
-  
-  private static Object getProxy(Object obj) { 
-    Object proxy = proxies.get(obj); 
-    if (proxy == null) { 
-      proxy = Proxy.newProxyInstance(obj.getClass().getClassLoader(), 
-        obj.getClass().getInterfaces(), new ExcludeHandler(obj)); 
-      proxies.put(obj, proxy); 
-    } 
-    return proxy; 
-  } 
+    new WeakHashMap<Object, Object>();
+
+  private static Object getProxy(Object obj) {
+    Object proxy = proxies.get(obj);
+    if (proxy == null) {
+      proxy = Proxy.newProxyInstance(obj.getClass().getClassLoader(),
+        obj.getClass().getInterfaces(), new ExcludeHandler(obj));
+      proxies.put(obj, proxy);
+    }
+    return proxy;
+  }
 
   private static class ExcludeHandler implements InvocationHandler {
     private Object target;
@@ -96,116 +96,116 @@ class RootDocProcessor {
     public ExcludeHandler(Object target) {
       this.target = target;
     }
-    
+
     @Override
     public Object invoke(Object proxy, Method method, Object[] args)
-	throws Throwable {
+        throws Throwable {
       String methodName = method.getName();
       if (target instanceof Doc) {
-	if (methodName.equals("isIncluded")) {
-	  Doc doc = (Doc) target;
-	  return !exclude(doc) && doc.isIncluded();
-	}
-	if (target instanceof RootDoc) {
-	  if (methodName.equals("classes")) {
-	    return filter(((RootDoc) target).classes(), ClassDoc.class);
-	  } else if (methodName.equals("specifiedClasses")) {
-	    return filter(((RootDoc) target).specifiedClasses(), ClassDoc.class);
-	  } else if (methodName.equals("specifiedPackages")) {
-	    return filter(((RootDoc) target).specifiedPackages(), PackageDoc.class);
-	  }
-	} else if (target instanceof ClassDoc) {
-	  if (isFiltered(args)) {
-	    if (methodName.equals("methods")) {
-	      return filter(((ClassDoc) target).methods(true), MethodDoc.class);
-	    } else if (methodName.equals("fields")) {
-	      return filter(((ClassDoc) target).fields(true), FieldDoc.class);
-	    } else if (methodName.equals("innerClasses")) {
-	      return filter(((ClassDoc) target).innerClasses(true),
-		  ClassDoc.class);
-	    } else if (methodName.equals("constructors")) {
-	      return filter(((ClassDoc) target).constructors(true),
-		  ConstructorDoc.class);
-	    }
-	  }
-	} else if (target instanceof PackageDoc) {
-	  if (methodName.equals("allClasses")) {
-	    if (isFiltered(args)) {
-	      return filter(((PackageDoc) target).allClasses(true),
-		ClassDoc.class);
-	    } else {
-	      return filter(((PackageDoc) target).allClasses(), ClassDoc.class);  
-	    }
-	  } else if (methodName.equals("annotationTypes")) {
-	    return filter(((PackageDoc) target).annotationTypes(),
-		AnnotationTypeDoc.class);
-	  } else if (methodName.equals("enums")) {
-	    return filter(((PackageDoc) target).enums(),
-		ClassDoc.class);
-	  } else if (methodName.equals("errors")) {
-	    return filter(((PackageDoc) target).errors(),
-		ClassDoc.class);
-	  } else if (methodName.equals("exceptions")) {
-	    return filter(((PackageDoc) target).exceptions(),
-		ClassDoc.class);
-	  } else if (methodName.equals("interfaces")) {
-	    return filter(((PackageDoc) target).interfaces(),
-		ClassDoc.class);
-	  } else if (methodName.equals("ordinaryClasses")) {
-	    return filter(((PackageDoc) target).ordinaryClasses(),
-		ClassDoc.class);
-	  }
-	}
+        if (methodName.equals("isIncluded")) {
+          Doc doc = (Doc) target;
+          return !exclude(doc) && doc.isIncluded();
+        }
+        if (target instanceof RootDoc) {
+          if (methodName.equals("classes")) {
+            return filter(((RootDoc) target).classes(), ClassDoc.class);
+          } else if (methodName.equals("specifiedClasses")) {
+            return filter(((RootDoc) target).specifiedClasses(), ClassDoc.class);
+          } else if (methodName.equals("specifiedPackages")) {
+            return filter(((RootDoc) target).specifiedPackages(), PackageDoc.class);
+          }
+        } else if (target instanceof ClassDoc) {
+          if (isFiltered(args)) {
+            if (methodName.equals("methods")) {
+              return filter(((ClassDoc) target).methods(true), MethodDoc.class);
+            } else if (methodName.equals("fields")) {
+              return filter(((ClassDoc) target).fields(true), FieldDoc.class);
+            } else if (methodName.equals("innerClasses")) {
+              return filter(((ClassDoc) target).innerClasses(true),
+                  ClassDoc.class);
+            } else if (methodName.equals("constructors")) {
+              return filter(((ClassDoc) target).constructors(true),
+                  ConstructorDoc.class);
+            }
+          }
+        } else if (target instanceof PackageDoc) {
+          if (methodName.equals("allClasses")) {
+            if (isFiltered(args)) {
+              return filter(((PackageDoc) target).allClasses(true),
+                  ClassDoc.class);
+            } else {
+              return filter(((PackageDoc) target).allClasses(), ClassDoc.class);
+            }
+          } else if (methodName.equals("annotationTypes")) {
+            return filter(((PackageDoc) target).annotationTypes(),
+                AnnotationTypeDoc.class);
+          } else if (methodName.equals("enums")) {
+            return filter(((PackageDoc) target).enums(),
+                ClassDoc.class);
+          } else if (methodName.equals("errors")) {
+            return filter(((PackageDoc) target).errors(),
+                ClassDoc.class);
+          } else if (methodName.equals("exceptions")) {
+            return filter(((PackageDoc) target).exceptions(),
+                ClassDoc.class);
+          } else if (methodName.equals("interfaces")) {
+            return filter(((PackageDoc) target).interfaces(),
+                ClassDoc.class);
+          } else if (methodName.equals("ordinaryClasses")) {
+            return filter(((PackageDoc) target).ordinaryClasses(),
+                ClassDoc.class);
+          }
+        }
       }
 
       if (args != null) {
-	if (methodName.equals("compareTo") || methodName.equals("equals")
-	    || methodName.equals("overrides")
-	    || methodName.equals("subclassOf")) {
-	  args[0] = unwrap(args[0]);
-	}
+        if (methodName.equals("compareTo") || methodName.equals("equals")
+            || methodName.equals("overrides")
+            || methodName.equals("subclassOf")) {
+          args[0] = unwrap(args[0]);
+        }
       }
       try {
-	return process(method.invoke(target, args), method.getReturnType());
+        return process(method.invoke(target, args), method.getReturnType());
       } catch (InvocationTargetException e) {
-	throw e.getTargetException();
+        throw e.getTargetException();
       }
     }
-      
+
     private static boolean exclude(Doc doc) {
       AnnotationDesc[] annotations = null;
       if (doc instanceof ProgramElementDoc) {
-	annotations = ((ProgramElementDoc) doc).annotations();
+        annotations = ((ProgramElementDoc) doc).annotations();
       } else if (doc instanceof PackageDoc) {
-	annotations = ((PackageDoc) doc).annotations();
+        annotations = ((PackageDoc) doc).annotations();
       }
       if (annotations != null) {
-	for (AnnotationDesc annotation : annotations) {
-	  String qualifiedTypeName = annotation.annotationType().qualifiedTypeName();
-	  if (qualifiedTypeName.equals(
-	        InterfaceAudience.Private.class.getCanonicalName())
-	    || qualifiedTypeName.equals(
-                InterfaceAudience.LimitedPrivate.class.getCanonicalName())) {
-	    return true;
-	  }
-	  if (stability.equals(StabilityOptions.EVOLVING_OPTION)) {
-	    if (qualifiedTypeName.equals(
-		InterfaceStability.Unstable.class.getCanonicalName())) {
-	      return true;
-	    }
-	  }
-	  if (stability.equals(StabilityOptions.STABLE_OPTION)) {
-	    if (qualifiedTypeName.equals(
-		InterfaceStability.Unstable.class.getCanonicalName())
+        for (AnnotationDesc annotation : annotations) {
+          String qualifiedTypeName = annotation.annotationType().qualifiedTypeName();
+          if (qualifiedTypeName.equals(
+              InterfaceAudience.Private.class.getCanonicalName())
               || qualifiedTypeName.equals(
-  		InterfaceStability.Evolving.class.getCanonicalName())) {
-	      return true;
-	    }
-	  }
-	}
+              InterfaceAudience.LimitedPrivate.class.getCanonicalName())) {
+            return true;
+          }
+          if (stability.equals(StabilityOptions.EVOLVING_OPTION)) {
+            if (qualifiedTypeName.equals(
+                InterfaceStability.Unstable.class.getCanonicalName())) {
+              return true;
+            }
+          }
+          if (stability.equals(StabilityOptions.STABLE_OPTION)) {
+            if (qualifiedTypeName.equals(
+                InterfaceStability.Unstable.class.getCanonicalName())
+                || qualifiedTypeName.equals(
+                InterfaceStability.Evolving.class.getCanonicalName())) {
+              return true;
+            }
+          }
+        }
         for (AnnotationDesc annotation : annotations) {
           String qualifiedTypeName =
-            annotation.annotationType().qualifiedTypeName();
+              annotation.annotationType().qualifiedTypeName();
           if (qualifiedTypeName.equals(
               InterfaceAudience.Public.class.getCanonicalName())) {
             return false;
@@ -217,27 +217,27 @@ class RootDocProcessor {
       }
       return false;
     }
-      
+
     private static Object[] filter(Doc[] array, Class<?> componentType) {
       if (array == null || array.length == 0) {
-	return array;
+        return array;
       }
       List<Object> list = new ArrayList<Object>(array.length);
       for (Doc entry : array) {
-	if (!exclude(entry)) {
-	  list.add(process(entry, componentType));
-	}
+        if (!exclude(entry)) {
+          list.add(process(entry, componentType));
+        }
       }
       return list.toArray((Object[]) Array.newInstance(componentType, list
-	  .size()));
+          .size()));
     }
 
     private Object unwrap(Object proxy) {
       if (proxy instanceof Proxy)
-	return ((ExcludeHandler) Proxy.getInvocationHandler(proxy)).target;
+        return ((ExcludeHandler) Proxy.getInvocationHandler(proxy)).target;
       return proxy;
     }
-      
+
     private boolean isFiltered(Object[] args) {
       return args != null && Boolean.TRUE.equals(args[0]);
     }
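
Aside from the whitespace cleanup, RootDocProcessor is a nice example of
filtering through a java.lang.reflect dynamic proxy. A minimal self-contained
sketch of the same pattern (illustrative names, not the doclet API):

    import java.lang.reflect.InvocationHandler;
    import java.lang.reflect.Proxy;
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class FilterProxyDemo {
      interface Repo { List<String> items(); }

      public static void main(String[] args) {
        final Repo real = new Repo() {
          @Override public List<String> items() {
            return Arrays.asList("public-a", "private-b", "public-c");
          }
        };
        InvocationHandler h = (proxy, method, margs) -> {
          Object result = method.invoke(real, margs);
          if (method.getName().equals("items")) {
            // Filter the returned view, much as RootDocProcessor hides @Private elements.
            List<String> filtered = new ArrayList<>();
            for (String s : (List<String>) result) {
              if (!s.startsWith("private")) { filtered.add(s); }
            }
            return filtered;
          }
          return result;
        };
        Repo proxied = (Repo) Proxy.newProxyInstance(
            Repo.class.getClassLoader(), new Class<?>[] { Repo.class }, h);
        System.out.println(proxied.items());  // [public-a, public-c]
      }
    }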

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c4c2dd4/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/StabilityOptions.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/StabilityOptions.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/StabilityOptions.java
index 657dbce..5b2d70d 100644
--- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/StabilityOptions.java
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/StabilityOptions.java
@@ -41,11 +41,11 @@ class StabilityOptions {
     for (int i = 0; i < options.length; i++) {
       String opt = options[i][0].toLowerCase(Locale.ENGLISH);
       if (opt.equals(UNSTABLE_OPTION)) {
-	RootDocProcessor.stability = UNSTABLE_OPTION;
+        RootDocProcessor.stability = UNSTABLE_OPTION;
       } else if (opt.equals(EVOLVING_OPTION)) {
-	RootDocProcessor.stability = EVOLVING_OPTION;
+        RootDocProcessor.stability = EVOLVING_OPTION;
       } else if (opt.equals(STABLE_OPTION)) {
-	RootDocProcessor.stability = STABLE_OPTION;	
+        RootDocProcessor.stability = STABLE_OPTION;
       }
     }
   }
@@ -54,9 +54,9 @@ class StabilityOptions {
     List<String[]> optionsList = new ArrayList<String[]>();
     for (int i = 0; i < options.length; i++) {
       if (!options[i][0].equalsIgnoreCase(UNSTABLE_OPTION)
-	  && !options[i][0].equalsIgnoreCase(EVOLVING_OPTION)
-	  && !options[i][0].equalsIgnoreCase(STABLE_OPTION)) {
-	optionsList.add(options[i]);
+          && !options[i][0].equalsIgnoreCase(EVOLVING_OPTION)
+          && !options[i][0].equalsIgnoreCase(STABLE_OPTION)) {
+        optionsList.add(options[i]);
       }
     }
     String[][] filteredOptions = new String[optionsList.size()][];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c4c2dd4/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index ce292b2..50fb4d7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -487,6 +487,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11717. Support JWT tokens for web single sign on to the Hadoop
     servers. (Larry McCay via omalley)
 
+    HADOOP-11814. Reformat hadoop-annotations, o.a.h.classification.tools.
+    (Li Lu via wheat9)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp


[32/47] hadoop git commit: Revert HDFS-7813.

Posted by zj...@apache.org.
Revert HDFS-7813.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09afdc25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09afdc25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09afdc25

Branch: refs/heads/YARN-2928
Commit: 09afdc25d72219ad767798adb99d7419ee81229d
Parents: 6e10f2b
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Apr 8 16:02:45 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:53 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 3 ---
 .../org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java   | 1 -
 2 files changed, 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09afdc25/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d4a8c0b..b203770 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1241,9 +1241,6 @@ Release 2.7.0 - UNRELEASED
     HDFS-7814. Fix usage string of storageType parameter for
     "dfsadmin -setSpaceQuota/clrSpaceQuota". (Xiaoyu Yao via cnauroth)
 
-    HDFS-7813. TestDFSHAAdminMiniCluster#testFencer testcase is failing
-    frequently. (Rakesh R via cnauroth)
-
     HDFS-7009. Active NN and standby NN have different live nodes.
     (Ming Ma via cnauroth)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09afdc25/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
index 2910004..ee1c184 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java
@@ -155,7 +155,6 @@ public class TestDFSHAAdminMiniCluster {
     tool.setConf(conf);
     assertEquals(0, runTool("-transitionToActive", "nn1"));
     assertEquals(0, runTool("-failover", "nn1", "nn2"));
-    assertEquals(0, runTool("-failover", "nn2", "nn1"));
     
     // Test failover with fencer and nameservice
     assertEquals(0, runTool("-ns", "minidfs-ns", "-failover", "nn2", "nn1"));


[40/47] hadoop git commit: HDFS-8099. Change "DFSInputStream has been closed already" message to debug log level (Charles Lamb via Colin P. McCabe)

Posted by zj...@apache.org.
HDFS-8099. Change "DFSInputStream has been closed already" message to debug log level (Charles Lamb via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9c639ab1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9c639ab1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9c639ab1

Branch: refs/heads/YARN-2928
Commit: 9c639ab1e940ff893b3c8cb3c138e085ffc4995e
Parents: 00fc4a0
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Thu Apr 9 10:50:44 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:55 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java      | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c639ab1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 727bec7..59cab03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -409,6 +409,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
     classes at runtime. (Sean Busbey via atm)
 
+    HDFS-8099. Change "DFSInputStream has been closed already" message to
+    debug log level (Charles Lamb via Colin P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9c639ab1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index a9f2746..41b9d50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -666,7 +666,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   @Override
   public synchronized void close() throws IOException {
     if (!closed.compareAndSet(false, true)) {
-      DFSClient.LOG.warn("DFSInputStream has been closed already");
+      DFSClient.LOG.debug("DFSInputStream has been closed already");
       return;
     }
     dfsClient.checkOpen();
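
The log-level change works because close() is guarded by an AtomicBoolean:
only the first caller performs the teardown, and later calls just log and
return. The pattern in isolation:

    import java.util.concurrent.atomic.AtomicBoolean;

    public class IdempotentCloseDemo implements AutoCloseable {
      private final AtomicBoolean closed = new AtomicBoolean(false);

      @Override
      public void close() {
        // Only the first caller flips false -> true; later calls log quietly.
        if (!closed.compareAndSet(false, true)) {
          System.out.println("already closed");  // stands in for LOG.debug(...)
          return;
        }
        System.out.println("releasing resources");
      }

      public static void main(String[] args) {
        IdempotentCloseDemo d = new IdempotentCloseDemo();
        d.close();  // releasing resources
        d.close();  // already closed
      }
    }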


[02/47] hadoop git commit: YARN-3294. Allow dumping of Capacity Scheduler debug logs via web UI for a fixed time period. Contributed by Varun Vasudev

Posted by zj...@apache.org.
YARN-3294. Allow dumping of Capacity Scheduler debug logs via web UI for
a fixed time period. Contributed by Varun Vasudev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cfe3ba35
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cfe3ba35
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cfe3ba35

Branch: refs/heads/YARN-2928
Commit: cfe3ba35a3bee853f1f73fff269be1f59116adb3
Parents: 9ac5748
Author: Xuan <xg...@apache.org>
Authored: Tue Apr 7 09:52:36 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:56 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../apache/hadoop/yarn/util/AdHocLogDumper.java | 131 +++++++++++++++++++
 .../hadoop/yarn/util/TestAdHocLogDumper.java    |  86 ++++++++++++
 .../webapp/CapacitySchedulerPage.java           |  34 +++++
 .../resourcemanager/webapp/RMWebServices.java   |  26 ++++
 5 files changed, 280 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfe3ba35/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8a0589f..af86f02 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -160,6 +160,9 @@ Release 2.8.0 - UNRELEASED
     YARN-2901. Add errors and warning metrics page to RM, NM web UI. 
     (Varun Vasudev via wangda)
 
+    YARN-3294. Allow dumping of Capacity Scheduler debug logs via
+    web UI for a fixed time period. (Varun Vasudev via xgong)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfe3ba35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
new file mode 100644
index 0000000..d2e4c74
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.log4j.*;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.*;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class AdHocLogDumper {
+
+  private static final Log LOG = LogFactory.getLog(AdHocLogDumper.class);
+
+  private String name;
+  private String targetFilename;
+  private Map<String, Priority> appenderLevels;
+  private Level currentLogLevel;
+  public static final String AD_HOC_DUMPER_APPENDER = "ad-hoc-dumper-appender";
+  private static boolean logFlag = false;
+  private static final Object lock = new Object();
+
+  public AdHocLogDumper(String name, String targetFilename) {
+    this.name = name;
+    this.targetFilename = targetFilename;
+    appenderLevels = new HashMap<>();
+  }
+
+  public void dumpLogs(String level, int timePeriod)
+      throws YarnRuntimeException, IOException {
+    synchronized (lock) {
+      if (logFlag) {
+        LOG.info("Attempt to dump logs when appender is already running");
+        throw new YarnRuntimeException("Appender is already dumping logs");
+      }
+      Level targetLevel = Level.toLevel(level);
+      Log log = LogFactory.getLog(name);
+      appenderLevels.clear();
+      if (log instanceof Log4JLogger) {
+        Logger packageLogger = ((Log4JLogger) log).getLogger();
+        currentLogLevel = packageLogger.getLevel();
+        Level currentEffectiveLevel = packageLogger.getEffectiveLevel();
+
+        // make sure we can create the appender first
+        Layout layout = new PatternLayout("%d{ISO8601} %p %c: %m%n");
+        FileAppender fApp;
+        File file =
+            new File(System.getProperty("yarn.log.dir"), targetFilename);
+        try {
+          fApp = new FileAppender(layout, file.getAbsolutePath(), false);
+        } catch (IOException ie) {
+          LOG
+            .warn(
+              "Error creating file, can't dump logs to "
+                  + file.getAbsolutePath(), ie);
+          throw ie;
+        }
+        fApp.setName(AdHocLogDumper.AD_HOC_DUMPER_APPENDER);
+        fApp.setThreshold(targetLevel);
+
+        // get current threshold of all appenders and set it to the effective
+        // level
+        for (Enumeration appenders = Logger.getRootLogger().getAllAppenders(); appenders
+          .hasMoreElements();) {
+          Object obj = appenders.nextElement();
+          if (obj instanceof AppenderSkeleton) {
+            AppenderSkeleton appender = (AppenderSkeleton) obj;
+            appenderLevels.put(appender.getName(), appender.getThreshold());
+            appender.setThreshold(currentEffectiveLevel);
+          }
+        }
+
+        packageLogger.addAppender(fApp);
+        LOG.info("Dumping adhoc logs for " + name + " to "
+            + file.getAbsolutePath() + " for " + timePeriod + " milliseconds");
+        packageLogger.setLevel(targetLevel);
+        logFlag = true;
+
+        TimerTask restoreLogLevel = new RestoreLogLevel();
+        Timer restoreLogLevelTimer = new Timer();
+        restoreLogLevelTimer.schedule(restoreLogLevel, timePeriod);
+      }
+    }
+  }
+
+  class RestoreLogLevel extends TimerTask {
+    @Override
+    public void run() {
+      Log log = LogFactory.getLog(name);
+      if (log instanceof Log4JLogger) {
+        Logger logger = ((Log4JLogger) log).getLogger();
+        logger.removeAppender(AD_HOC_DUMPER_APPENDER);
+        logger.setLevel(currentLogLevel);
+        for (Enumeration appenders = Logger.getRootLogger().getAllAppenders();
+            appenders.hasMoreElements();) {
+          Object obj = appenders.nextElement();
+          if (obj instanceof AppenderSkeleton) {
+            AppenderSkeleton appender = (AppenderSkeleton) obj;
+            appender.setThreshold(appenderLevels.get(appender.getName()));
+          }
+        }
+        logFlag = false;
+        LOG.info("Done dumping adhoc logs for " + name);
+      }
+    }
+  }
+}
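
For illustration, a minimal caller sketch (not part of the patch; it assumes
the yarn.log.dir system property points at a writable directory and that the
named logger is backed by log4j):

    // Hypothetical driver for AdHocLogDumper.
    System.setProperty("yarn.log.dir", "/tmp");
    AdHocLogDumper dumper = new AdHocLogDumper(
        "org.apache.hadoop.yarn.server.resourcemanager.scheduler",
        "scheduler-debug.log");
    // Capture DEBUG output for 60 seconds; a timer task then restores the
    // original levels and removes the appender.
    dumper.dumpLogs("DEBUG", 60 * 1000);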

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfe3ba35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestAdHocLogDumper.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestAdHocLogDumper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestAdHocLogDumper.java
new file mode 100644
index 0000000..046c94e
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestAdHocLogDumper.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.util.Time;
+import org.apache.log4j.Appender;
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.Logger;
+import org.apache.log4j.Priority;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.File;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Map;
+
+public class TestAdHocLogDumper {
+
+  private static final Log LOG = LogFactory.getLog(TestAdHocLogDumper.class);
+
+  @Test
+  public void testDumpingSchedulerLogs() throws Exception {
+
+    Map<Appender, Priority> levels = new HashMap<>();
+    String logHierarchy = TestAdHocLogDumper.class.getName();
+    String logFilename = "test.log";
+    Log log = LogFactory.getLog(logHierarchy);
+    if (log instanceof Log4JLogger) {
+      for (Enumeration appenders = Logger.getRootLogger().getAllAppenders();
+          appenders.hasMoreElements();) {
+        Object obj = appenders.nextElement();
+        if (obj instanceof AppenderSkeleton) {
+          AppenderSkeleton appender = (AppenderSkeleton) obj;
+          levels.put(appender, appender.getThreshold());
+        }
+      }
+    }
+
+    AdHocLogDumper dumper = new AdHocLogDumper(logHierarchy, logFilename);
+    dumper.dumpLogs("DEBUG", 1000);
+    LOG.debug("test message 1");
+    LOG.info("test message 2");
+    File logFile = new File(logFilename);
+    Assert.assertTrue(logFile.exists());
+    Thread.sleep(2000);
+    long lastWrite = logFile.lastModified();
+    Assert.assertTrue(lastWrite < Time.now());
+    Assert.assertTrue(logFile.length() != 0);
+
+    // make sure levels are set back to their original values
+    if (log instanceof Log4JLogger) {
+      for (Enumeration appenders = Logger.getRootLogger().getAllAppenders();
+          appenders.hasMoreElements();) {
+        Object obj = appenders.nextElement();
+        if (obj instanceof AppenderSkeleton) {
+          AppenderSkeleton appender = (AppenderSkeleton) obj;
+          Assert.assertEquals(levels.get(appender), appender.getThreshold());
+        }
+      }
+    }
+    boolean del = logFile.delete();
+    if (!del) {
+      LOG.info("Couldn't clean up after test");
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfe3ba35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
index e62fd70..f1e1e8c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
@@ -196,6 +196,40 @@ class CapacitySchedulerPage extends RmView {
     @Override
     public void render(Block html) {
       html._(MetricsOverviewTable.class);
+      // Dump CapacityScheduler debug logs
+      html.div()
+          .button()
+          .$onclick("confirmAction()").b("Dump scheduler logs")._()
+          .select().$id("time")
+            .option().$value("60")._("1 min")._()
+            .option().$value("300")._("5 min")._()
+            .option().$value("600")._("10 min")._()
+          ._()._();
+
+      StringBuilder script = new StringBuilder();
+      script.append("function confirmAction() {")
+          .append(" var b = confirm(\"Are you sure you wish to generate scheduler logs?\");")
+          .append(" if (b) {")
+          .append(" var timePeriod = $(\"#time\").val();")
+          .append(" $.ajax({")
+          .append(" type: 'POST',")
+          .append(" url: '/ws/v1/cluster/scheduler/logs',")
+          .append(" contentType: 'text/plain',")
+          .append(" data: 'time=' + timePeriod,")
+          .append(" dataType: 'text'")
+          .append(" }).done(function(data){")
+          .append(" setTimeout(function(){")
+          .append(" alert(\"Scheduler log is being generated.\");")
+          .append(" }, 1000);")
+          .append(" }).fail(function(data){")
+          .append(" alert(\"Scheduler log generation failed. Please check the ResourceManager log for more information.\");")
+          .append(" console.log(data);")
+          .append(" });")
+          .append(" }")
+          .append("}");
+
+      html.script().$type("text/javascript")._(script.toString())._();
+
       UL<DIV<DIV<Hamlet>>> ul = html.
         div("#cs-wrapper.ui-widget").
           div(".ui-widget-header.ui-corner-top").

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfe3ba35/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
index 36f2b1d..584da7d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
@@ -38,6 +38,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
+import javax.ws.rs.FormParam;
 import javax.ws.rs.GET;
 import javax.ws.rs.POST;
 import javax.ws.rs.PUT;
@@ -139,6 +140,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.StatisticsItemIn
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+import org.apache.hadoop.yarn.util.AdHocLogDumper;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 import org.apache.hadoop.yarn.webapp.NotFoundException;
@@ -238,6 +240,30 @@ public class RMWebServices {
     return new SchedulerTypeInfo(sinfo);
   }
 
+  @POST
+  @Path("/scheduler/logs")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public String dumpSchedulerLogs(@FormParam("time") String time)
+      throws IOException {
+    init();
+    ResourceScheduler rs = rm.getResourceScheduler();
+    int period = Integer.parseInt(time);
+    if (period <= 0) {
+      throw new BadRequestException("Period must be greater than 0");
+    }
+    final String logHierarchy =
+        "org.apache.hadoop.yarn.server.resourcemanager.scheduler";
+    String logfile = "yarn-scheduler-debug.log";
+    if (rs instanceof CapacityScheduler) {
+      logfile = "yarn-capacity-scheduler-debug.log";
+    } else if (rs instanceof FairScheduler) {
+      logfile = "yarn-fair-scheduler-debug.log";
+    }
+    AdHocLogDumper dumper = new AdHocLogDumper(logHierarchy, logfile);
+    // time period is sent to us in seconds
+    dumper.dumpLogs("DEBUG", period * 1000);
+    return "Scheduler logs are being created.";
+  }
+
   /**
    * Returns all nodes in the cluster. If the states param is given, returns
    * all nodes that are in the comma-separated list of states.
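
A hedged sketch of a standalone Java client for the new endpoint (the host,
port and form encoding are assumptions; the handler binds the form parameter
"time" in seconds):

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;
    import java.nio.charset.StandardCharsets;

    public class DumpSchedulerLogsClient {
      public static void main(String[] args) throws Exception {
        // rm-host:8088 is a placeholder for the RM web address.
        URL url = new URL("http://rm-host:8088/ws/v1/cluster/scheduler/logs");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        conn.setRequestProperty("Content-Type",
            "application/x-www-form-urlencoded");
        try (OutputStream out = conn.getOutputStream()) {
          out.write("time=60".getBytes(StandardCharsets.UTF_8)); // 60 seconds
        }
        System.out.println("HTTP " + conn.getResponseCode());
      }
    }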


[46/47] hadoop git commit: YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token renewal of applications part of a bigger workflow. Contributed by Daryn Sharp.

Posted by zj...@apache.org.
YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token renewal of applications part of a bigger workflow. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87bd06a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87bd06a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87bd06a1

Branch: refs/heads/YARN-2928
Commit: 87bd06a1f3ed94a14dc535221feb7096e6ba56c7
Parents: e77547e
Author: Vinod Kumar Vavilapalli <vi...@apache.org>
Authored: Thu Apr 9 13:08:53 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:56 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |   3 +
 .../security/DelegationTokenRenewer.java        | 137 ++++++++++++-------
 .../security/TestDelegationTokenRenewer.java    |  87 +++++++++++-
 3 files changed, 173 insertions(+), 54 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87bd06a1/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5e77b20..d8c5515 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -971,6 +971,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3466. Fix RM nodes web page to sort by node HTTP-address, #containers 
     and node-label column (Jason Lowe via wangda)
 
+    YARN-3055. Fixed ResourceManager's DelegationTokenRenewer to not stop token
+    renewal of applications part of a bigger workflow. (Daryn Sharp via vinodkv)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87bd06a1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index 2619971..d49ecfc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -22,6 +22,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
@@ -229,15 +230,16 @@ public class DelegationTokenRenewer extends AbstractService {
   @VisibleForTesting
   protected static class DelegationTokenToRenew {
     public final Token<?> token;
-    public final ApplicationId applicationId;
+    public final Collection<ApplicationId> referringAppIds;
     public final Configuration conf;
     public long expirationDate;
-    public TimerTask timerTask;
+    public RenewalTimerTask timerTask;
     public volatile boolean shouldCancelAtEnd;
     public long maxDate;
     public String user;
 
-    public DelegationTokenToRenew(ApplicationId jId, Token<?> token,
+    public DelegationTokenToRenew(Collection<ApplicationId> applicationIds,
+        Token<?> token,
         Configuration conf, long expirationDate, boolean shouldCancelAtEnd,
         String user) {
       this.token = token;
@@ -251,20 +253,33 @@ public class DelegationTokenRenewer extends AbstractService {
           throw new YarnRuntimeException(e);
         }
       }
-      this.applicationId = jId;
+      this.referringAppIds = Collections.synchronizedSet(
+          new HashSet<ApplicationId>(applicationIds));
       this.conf = conf;
       this.expirationDate = expirationDate;
       this.timerTask = null;
       this.shouldCancelAtEnd = shouldCancelAtEnd;
     }
     
-    public void setTimerTask(TimerTask tTask) {
+    public void setTimerTask(RenewalTimerTask tTask) {
       timerTask = tTask;
     }
-    
+
+    @VisibleForTesting
+    public void cancelTimer() {
+      if (timerTask != null) {
+        timerTask.cancel();
+      }
+    }
+
+    @VisibleForTesting
+    public boolean isTimerCancelled() {
+      return (timerTask != null) && timerTask.cancelled.get();
+    }
+
     @Override
     public String toString() {
-      return token + ";exp=" + expirationDate;
+      return token + ";exp=" + expirationDate + "; apps=" + referringAppIds;
     }
     
     @Override
@@ -415,19 +430,16 @@ public class DelegationTokenRenewer extends AbstractService {
         }
 
         DelegationTokenToRenew dttr = allTokens.get(token);
-        if (dttr != null) {
-          // If any of the jobs sharing the same token doesn't want to cancel
-          // the token, we should not cancel the token.
-          if (!evt.shouldCancelAtEnd) {
-            dttr.shouldCancelAtEnd = evt.shouldCancelAtEnd;
-            LOG.info("Set shouldCancelAtEnd=" + shouldCancelAtEnd
-                + " for token " + dttr.token);
+        if (dttr == null) {
+          dttr = new DelegationTokenToRenew(Arrays.asList(applicationId), token,
+              getConfig(), now, shouldCancelAtEnd, evt.getUser());
+          try {
+            renewToken(dttr);
+          } catch (IOException ioe) {
+            throw new IOException("Failed to renew token: " + dttr.token, ioe);
           }
-          continue;
         }
-
-        tokenList.add(new DelegationTokenToRenew(applicationId, token,
-          getConfig(), now, shouldCancelAtEnd, evt.getUser()));
+        tokenList.add(dttr);
       }
     }
 
@@ -436,21 +448,21 @@ public class DelegationTokenRenewer extends AbstractService {
       // If user provides incorrect token then it should not be added for
       // renewal.
       for (DelegationTokenToRenew dtr : tokenList) {
-        try {
-          renewToken(dtr);
-        } catch (IOException ioe) {
-          throw new IOException("Failed to renew token: " + dtr.token, ioe);
+        DelegationTokenToRenew currentDtr =
+            allTokens.putIfAbsent(dtr.token, dtr);
+        if (currentDtr != null) {
+          // another job beat us
+          currentDtr.referringAppIds.add(applicationId);
+          appTokens.get(applicationId).add(currentDtr);
+        } else {
+          appTokens.get(applicationId).add(dtr);
+          setTimerForTokenRenewal(dtr);
         }
       }
-      for (DelegationTokenToRenew dtr : tokenList) {
-        appTokens.get(applicationId).add(dtr);
-        allTokens.put(dtr.token, dtr);
-        setTimerForTokenRenewal(dtr);
-      }
     }
 
     if (!hasHdfsToken) {
-      requestNewHdfsDelegationToken(applicationId, evt.getUser(),
+      requestNewHdfsDelegationToken(Arrays.asList(applicationId), evt.getUser(),
         shouldCancelAtEnd);
     }
   }
@@ -478,7 +490,7 @@ public class DelegationTokenRenewer extends AbstractService {
       try {
         requestNewHdfsDelegationTokenIfNeeded(dttr);
         // if the token is not replaced by a new token, renew the token
-        if (appTokens.get(dttr.applicationId).contains(dttr)) {
+        if (!dttr.isTimerCancelled()) {
           renewToken(dttr);
           setTimerForTokenRenewal(dttr);// set the next one
         } else {
@@ -508,12 +520,12 @@ public class DelegationTokenRenewer extends AbstractService {
     long expiresIn = token.expirationDate - System.currentTimeMillis();
     long renewIn = token.expirationDate - expiresIn/10; // little bit before the expiration
     // need to create new task every time
-    TimerTask tTask = new RenewalTimerTask(token);
+    RenewalTimerTask tTask = new RenewalTimerTask(token);
     token.setTimerTask(tTask); // keep reference to the timer
 
     renewalTimer.schedule(token.timerTask, new Date(renewIn));
     LOG.info("Renew " + token + " in " + expiresIn + " ms, appId = "
-        + token.applicationId);
+        + token.referringAppIds);
   }
 
   // renew a token
@@ -535,7 +547,7 @@ public class DelegationTokenRenewer extends AbstractService {
       throw new IOException(e);
     }
     LOG.info("Renewed delegation-token= [" + dttr + "], for "
-        + dttr.applicationId);
+        + dttr.referringAppIds);
   }
 
   // Request new hdfs token if the token is about to expire, and remove the old
@@ -548,30 +560,37 @@ public class DelegationTokenRenewer extends AbstractService {
         && dttr.maxDate - dttr.expirationDate < credentialsValidTimeRemaining
         && dttr.token.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
 
+      final Collection<ApplicationId> applicationIds;
+      synchronized (dttr.referringAppIds) {
+        applicationIds = new HashSet<>(dttr.referringAppIds);
+        dttr.referringAppIds.clear();
+      }
       // remove all old expiring hdfs tokens for this application.
-      Set<DelegationTokenToRenew> tokenSet = appTokens.get(dttr.applicationId);
-      if (tokenSet != null && !tokenSet.isEmpty()) {
+      for (ApplicationId appId : applicationIds) {
+        Set<DelegationTokenToRenew> tokenSet = appTokens.get(appId);
+        if (tokenSet == null || tokenSet.isEmpty()) {
+          continue;
+        }
         Iterator<DelegationTokenToRenew> iter = tokenSet.iterator();
         synchronized (tokenSet) {
           while (iter.hasNext()) {
             DelegationTokenToRenew t = iter.next();
             if (t.token.getKind().equals(new Text("HDFS_DELEGATION_TOKEN"))) {
               iter.remove();
-              if (t.timerTask != null) {
-                t.timerTask.cancel();
-              }
+              t.cancelTimer();
               LOG.info("Removed expiring token " + t);
             }
           }
         }
       }
       LOG.info("Token= (" + dttr + ") is expiring, request new token.");
-      requestNewHdfsDelegationToken(dttr.applicationId, dttr.user,
-        dttr.shouldCancelAtEnd);
+      requestNewHdfsDelegationToken(applicationIds, dttr.user,
+          dttr.shouldCancelAtEnd);
     }
   }
 
-  private void requestNewHdfsDelegationToken(ApplicationId applicationId,
+  private void requestNewHdfsDelegationToken(
+      Collection<ApplicationId> referringAppIds,
       String user, boolean shouldCancelAtEnd) throws IOException,
       InterruptedException {
     if (!hasProxyUserPrivileges) {
@@ -583,18 +602,20 @@ public class DelegationTokenRenewer extends AbstractService {
     Token<?>[] newTokens = obtainSystemTokensForUser(user, credentials);
 
     // Add new tokens to the toRenew list.
-    LOG.info("Received new tokens for " + applicationId + ". Received "
+    LOG.info("Received new tokens for " + referringAppIds + ". Received "
         + newTokens.length + " tokens.");
     if (newTokens.length > 0) {
       for (Token<?> token : newTokens) {
         if (token.isManaged()) {
           DelegationTokenToRenew tokenToRenew =
-              new DelegationTokenToRenew(applicationId, token, getConfig(),
+              new DelegationTokenToRenew(referringAppIds, token, getConfig(),
                 Time.now(), shouldCancelAtEnd, user);
           // renew the token to get the next expiration date.
           renewToken(tokenToRenew);
           setTimerForTokenRenewal(tokenToRenew);
-          appTokens.get(applicationId).add(tokenToRenew);
+          for (ApplicationId applicationId : referringAppIds) {
+            appTokens.get(applicationId).add(tokenToRenew);
+          }
           LOG.info("Received new token " + token);
         }
       }
@@ -602,7 +623,9 @@ public class DelegationTokenRenewer extends AbstractService {
     DataOutputBuffer dob = new DataOutputBuffer();
     credentials.writeTokenStorageToStream(dob);
     ByteBuffer byteBuffer = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
-    rmContext.getSystemCredentialsForApps().put(applicationId, byteBuffer);
+    for (ApplicationId applicationId : referringAppIds) {
+      rmContext.getSystemCredentialsForApps().put(applicationId, byteBuffer);
+    }
   }
 
   @VisibleForTesting
@@ -644,16 +667,18 @@ public class DelegationTokenRenewer extends AbstractService {
    * removing failed DT
    */
   private void removeFailedDelegationToken(DelegationTokenToRenew t) {
-    ApplicationId applicationId = t.applicationId;
-    LOG.error("removing failed delegation token for appid=" + applicationId
-        + ";t=" + t.token.getService());
-    appTokens.get(applicationId).remove(t);
+    Collection<ApplicationId> applicationIds = t.referringAppIds;
+    synchronized (applicationIds) {
+      LOG.error("removing failed delegation token for appid=" + applicationIds
+          + ";t=" + t.token.getService());
+      for (ApplicationId applicationId : applicationIds) {
+        appTokens.get(applicationId).remove(t);
+      }
+    }
     allTokens.remove(t.token);
 
     // cancel the timer
-    if (t.timerTask != null) {
-      t.timerTask.cancel();
-    }
+    t.cancelTimer();
   }
 
   /**
@@ -706,9 +731,15 @@ public class DelegationTokenRenewer extends AbstractService {
                 + "; token=" + dttr.token.getService());
           }
 
+          // continue if the app list isn't empty
+          synchronized(dttr.referringAppIds) {
+            dttr.referringAppIds.remove(applicationId);
+            if (!dttr.referringAppIds.isEmpty()) {
+              continue;
+            }
+          }
           // cancel the timer
-          if (dttr.timerTask != null)
-            dttr.timerTask.cancel();
+          dttr.cancelTimer();
 
           // cancel the token
           cancelToken(dttr);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87bd06a1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
index 99a506a..bc9c295 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
@@ -89,6 +89,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
+import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer.DelegationTokenToRenew;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -123,7 +124,7 @@ public class TestDelegationTokenRenewer {
       counter = 0;
       lastRenewed = null;
       tokenToRenewIn2Sec = null;
-
+      cancelled = false;
     }
 
     @Override
@@ -1046,4 +1047,88 @@ public class TestDelegationTokenRenewer {
     delegationTokenRenewer.obtainSystemTokensForUser(user, credentials);
     Assert.assertEquals(oldCounter, MyFS.getInstanceCounter());
   }
+  
+  // Test submitting an application with the token obtained by a previously
+  // submitted application that is set to be cancelled. The token should be
+  // renewed while any of the apps is still running, and cancelled once all
+  // of them complete.
+  @Test (timeout = 30000)
+  public void testCancelWithMultipleAppSubmissions() throws Exception {
+    MockRM rm = new TestSecurityMockRM(conf, null);
+    rm.start();
+    final MockNM nm1 =
+        new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService());
+    nm1.registerNode();
+
+    // create Token1:
+    Text userText1 = new Text("user");
+    DelegationTokenIdentifier dtId1 =
+        new DelegationTokenIdentifier(userText1, new Text("renewer1"),
+          userText1);
+    final Token<DelegationTokenIdentifier> token1 =
+        new Token<DelegationTokenIdentifier>(dtId1.getBytes(),
+          "password1".getBytes(), dtId1.getKind(), new Text("service1"));
+
+    Credentials credentials = new Credentials();
+    credentials.addToken(token1.getService(), token1);
+
+    DelegationTokenRenewer renewer =
+        rm.getRMContext().getDelegationTokenRenewer();
+    Assert.assertTrue(renewer.getAllTokens().isEmpty());
+    Assert.assertFalse(Renewer.cancelled);
+
+    RMApp app1 =
+        rm.submitApp(200, "name", "user", null, false, null, 2, credentials,
+          null, true, false, false, null, 0, null, true);
+    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1);
+    rm.waitForState(app1.getApplicationId(), RMAppState.RUNNING);
+
+    DelegationTokenToRenew dttr = renewer.getAllTokens().get(token1);
+    Assert.assertNotNull(dttr);
+    Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId()));
+    RMApp app2 =
+        rm.submitApp(200, "name", "user", null, false, null, 2, credentials,
+          null, true, false, false, null, 0, null, true);
+    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1);
+    rm.waitForState(app2.getApplicationId(), RMAppState.RUNNING);
+    Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
+    Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId()));
+    Assert.assertTrue(dttr.referringAppIds.contains(app2.getApplicationId()));
+    Assert.assertFalse(Renewer.cancelled);
+
+    MockRM.finishAMAndVerifyAppState(app2, rm, nm1, am2);
+    // app2 completes, app1 is still running, check the token is not cancelled
+    Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
+    Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId()));
+    Assert.assertFalse(dttr.referringAppIds.contains(app2.getApplicationId()));
+    Assert.assertFalse(dttr.isTimerCancelled());
+    Assert.assertFalse(Renewer.cancelled);
+
+    RMApp app3 =
+        rm.submitApp(200, "name", "user", null, false, null, 2, credentials,
+          null, true, false, false, null, 0, null, true);
+    MockAM am3 = MockRM.launchAndRegisterAM(app3, rm, nm1);
+    rm.waitForState(app3.getApplicationId(), RMAppState.RUNNING);
+    Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
+    Assert.assertTrue(dttr.referringAppIds.contains(app1.getApplicationId()));
+    Assert.assertTrue(dttr.referringAppIds.contains(app3.getApplicationId()));
+    Assert.assertFalse(dttr.isTimerCancelled());
+    Assert.assertFalse(Renewer.cancelled);
+
+    MockRM.finishAMAndVerifyAppState(app1, rm, nm1, am1);
+    Assert.assertTrue(renewer.getAllTokens().containsKey(token1));
+    Assert.assertFalse(dttr.referringAppIds.contains(app1.getApplicationId()));
+    Assert.assertTrue(dttr.referringAppIds.contains(app3.getApplicationId()));
+    Assert.assertFalse(dttr.isTimerCancelled());
+    Assert.assertFalse(Renewer.cancelled);
+
+    MockRM.finishAMAndVerifyAppState(app3, rm, nm1, am3);
+    Assert.assertFalse(renewer.getAllTokens().containsKey(token1));
+    Assert.assertTrue(dttr.referringAppIds.isEmpty());
+    Assert.assertTrue(dttr.isTimerCancelled());
+    Assert.assertTrue(Renewer.cancelled);
+  }
 }
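
The core of the fix: a token is now shared by every application that
submitted it and is cancelled only when the last of them finishes. A
self-contained sketch of that reference-counting policy, with generic
stand-in types (the real code keys concurrent maps by Token and
ApplicationId):

    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Sketch only: T stands in for the token type, A for the application id.
    class TokenRefCounter<T, A> {
      private final ConcurrentMap<T, Set<A>> refs = new ConcurrentHashMap<>();

      void register(T token, A appId) {
        Set<A> fresh = Collections.synchronizedSet(new HashSet<A>());
        Set<A> existing = refs.putIfAbsent(token, fresh);
        // If another app registered the token first, attach to its entry.
        (existing != null ? existing : fresh).add(appId);
      }

      /** Returns true when the last referrer is gone and the token
       *  may be cancelled. */
      boolean release(T token, A appId) {
        Set<A> apps = refs.get(token);
        if (apps == null) {
          return false;
        }
        synchronized (apps) {
          apps.remove(appId);
          if (!apps.isEmpty()) {
            return false;   // other apps still depend on the token
          }
        }
        refs.remove(token); // last app finished: cancel timer and token
        return true;
      }
    }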


[41/47] hadoop git commit: HADOOP-11815. HttpServer2 should destroy SignerSecretProvider when it stops. Contributed by Rohith.

Posted by zj...@apache.org.
HADOOP-11815. HttpServer2 should destroy SignerSecretProvider when it stops. Contributed by Rohith.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00fc4a05
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00fc4a05
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00fc4a05

Branch: refs/heads/YARN-2928
Commit: 00fc4a05202bee78a9871e131e52e78bbc01bf5e
Parents: 90f14ae
Author: Haohui Mai <wh...@apache.org>
Authored: Thu Apr 9 10:58:12 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:55 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                 | 5 ++++-
 .../src/main/java/org/apache/hadoop/http/HttpServer2.java       | 5 ++++-
 2 files changed, 8 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00fc4a05/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 50fb4d7..397161d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1222,7 +1222,10 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows.
     (Xiaoyu Yao via cnauroth)
-    
+
+    HADOOP-11815. HttpServer2 should destroy SignerSecretProvider when it
+    stops. (Rohith via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00fc4a05/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index 0f1c222..6fd34d5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -140,6 +140,7 @@ public final class HttpServer2 implements FilterContainer {
   protected final List<String> filterNames = new ArrayList<>();
   static final String STATE_DESCRIPTION_ALIVE = " - alive";
   static final String STATE_DESCRIPTION_NOT_LIVE = " - not live";
+  private final SignerSecretProvider secretProvider;
 
   /**
    * Class to construct instances of HTTP server with specific options.
@@ -335,7 +336,7 @@ public final class HttpServer2 implements FilterContainer {
     this.adminsAcl = b.adminsAcl;
     this.webAppContext = createWebAppContext(b.name, b.conf, adminsAcl, appDir);
     try {
-      SignerSecretProvider secretProvider =
+      this.secretProvider =
           constructSecretProvider(b, webAppContext.getServletContext());
       this.webAppContext.getServletContext().setAttribute
           (AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE,
@@ -913,6 +914,8 @@ public final class HttpServer2 implements FilterContainer {
     }
 
     try {
+      // explicitly destroy the secret provider
+      secretProvider.destroy();
       // clear & stop webAppContext attributes to avoid memory leaks.
       webAppContext.clearAttributes();
       webAppContext.stop();
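
The shape of the fix is the usual acquire-in-constructor, release-in-stop
lifecycle. A minimal sketch with the types reduced to the essentials:

    // Sketch: holding the provider in a field lets stop() release the
    // threads and timers it owns; before the fix it was a constructor local.
    final class LifecycleSketch {
      interface SecretProvider { void destroy(); }

      private final SecretProvider secretProvider;

      LifecycleSketch(SecretProvider provider) {
        this.secretProvider = provider;
      }

      void stop() {
        secretProvider.destroy(); // explicit cleanup, as in HttpServer2#stop
        // ... then stop the web context and connectors as before
      }
    }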


[07/47] hadoop git commit: HDFS-8071. Redundant checkFileProgress() in PART II of getAdditionalBlock(). Contributed by Konstantin Shvachko.

Posted by zj...@apache.org.
HDFS-8071. Redundant checkFileProgress() in PART II of getAdditionalBlock(). Contributed by Konstantin Shvachko.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/20731976
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/20731976
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/20731976

Branch: refs/heads/YARN-2928
Commit: 207319762d205d40dc5baee61382833c695021cb
Parents: 5ed6b71
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Mon Apr 6 16:52:52 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:56 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../hdfs/server/namenode/FSNamesystem.java      | 35 +++++++++-----------
 .../hdfs/server/namenode/TestAddBlockRetry.java | 17 +++++++++-
 3 files changed, 34 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/20731976/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 52325a2..7d20060 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -880,6 +880,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7811. Avoid recursive call getStoragePolicyID in
     INodeFile#computeQuotaUsage. (Xiaoyu Yao and jing9)
 
+    HDFS-8071. Redundant checkFileProgress() in PART II of getAdditionalBlock().
+    (shv)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/20731976/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a77c382..62d5f67 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3032,6 +3032,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       FileState fileState = analyzeFileState(
           src, fileId, clientName, previous, onRetryBlock);
       final INodeFile pendingFile = fileState.inode;
+      // Check if the penultimate block is minimally replicated
+      if (!checkFileProgress(src, pendingFile, false)) {
+        throw new NotReplicatedYetException("Not replicated yet: " + src);
+      }
       src = fileState.path;
 
       if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
@@ -3244,11 +3248,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
             "last block in file " + lastBlockInFile);
       }
     }
-
-    // Check if the penultimate block is minimally replicated
-    if (!checkFileProgress(src, pendingFile, false)) {
-      throw new NotReplicatedYetException("Not replicated yet: " + src);
-    }
     return new FileState(pendingFile, src, iip);
   }
 
@@ -3550,21 +3549,17 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * replicated.  If not, return false. If checkall is true, then check
    * all blocks, otherwise check only penultimate block.
    */
-  private boolean checkFileProgress(String src, INodeFile v, boolean checkall) {
-    readLock();
-    try {
-      if (checkall) {
-        return blockManager.checkBlocksProperlyReplicated(src, v
-            .getBlocks());
-      } else {
-        // check the penultimate block of this file
-        BlockInfoContiguous b = v.getPenultimateBlock();
-        return b == null ||
-            blockManager.checkBlocksProperlyReplicated(
-                src, new BlockInfoContiguous[] { b });
-      }
-    } finally {
-      readUnlock();
+  boolean checkFileProgress(String src, INodeFile v, boolean checkall) {
+    assert hasReadLock();
+    if (checkall) {
+      return blockManager.checkBlocksProperlyReplicated(src, v
+          .getBlocks());
+    } else {
+      // check the penultimate block of this file
+      BlockInfoContiguous b = v.getPenultimateBlock();
+      return b == null ||
+          blockManager.checkBlocksProperlyReplicated(
+              src, new BlockInfoContiguous[] { b });
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/20731976/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
index cf37a54..671f61d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.spy;
 
+import java.io.IOException;
 import java.lang.reflect.Field;
 import java.util.EnumSet;
 import java.util.HashSet;
@@ -90,7 +91,7 @@ public class TestAddBlockRetry {
   public void testRetryAddBlockWhileInChooseTarget() throws Exception {
     final String src = "/testRetryAddBlockWhileInChooseTarget";
 
-    FSNamesystem ns = cluster.getNamesystem();
+    final FSNamesystem ns = cluster.getNamesystem();
     BlockManager spyBM = spy(ns.getBlockManager());
     final NamenodeProtocols nn = cluster.getNameNodeRpc();
 
@@ -107,11 +108,15 @@ public class TestAddBlockRetry {
         LOG.info("chooseTarget for " + src);
         DatanodeStorageInfo[] ret =
             (DatanodeStorageInfo[]) invocation.callRealMethod();
+        assertTrue("Penultimate block must be complete",
+            checkFileProgress(src, false));
         count++;
         if(count == 1) { // run second addBlock()
           LOG.info("Starting second addBlock for " + src);
           nn.addBlock(src, "clientName", null, null,
               INodeId.GRANDFATHER_INODE_ID, null);
+          assertTrue("Penultimate block must be complete",
+              checkFileProgress(src, false));
           LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
           assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
           lb2 = lbs.get(0);
@@ -142,6 +147,16 @@ public class TestAddBlockRetry {
     assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
   }
 
+  boolean checkFileProgress(String src, boolean checkall) throws IOException {
+    final FSNamesystem ns = cluster.getNamesystem();
+    ns.readLock();
+    try {
+      return ns.checkFileProgress(src, ns.dir.getINode(src).asFile(), checkall);
+    } finally {
+      ns.readUnlock();
+    }
+  }
+
   /*
    * Since NameNode will not persist any locations of the block, addBlock()
    * retry call after restart NN should re-select the locations and return to
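
The patch replaces re-acquiring the namesystem read lock inside
checkFileProgress() with an assertion that the caller already holds it, so
the check and the decision that depends on it run under one critical
section. The convention in miniature (names are illustrative):

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class LockConventionSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      private boolean hasReadLock() {
        return lock.getReadHoldCount() > 0;
      }

      boolean checkProgress() {
        assert hasReadLock();     // caller must hold the lock
        return true;              // real replication check elided
      }

      void caller() {
        lock.readLock().lock();
        try {
          if (!checkProgress()) { // evaluated under the caller's own lock
            throw new IllegalStateException("not replicated yet");
          }
        } finally {
          lock.readLock().unlock();
        }
      }
    }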


[11/47] hadoop git commit: HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in platform-specific format. Contributed by Xiaoyu Yao.

Posted by zj...@apache.org.
HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in platform-specific format. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd66d1b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd66d1b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd66d1b5

Branch: refs/heads/YARN-2928
Commit: cd66d1b54d48308cee2cbd2d7bbe9c6fe56c63f9
Parents: 6f55a1b
Author: cnauroth <cn...@apache.org>
Authored: Tue Apr 7 13:33:11 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:57 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                  | 3 +++
 .../tools/offlineImageViewer/PBImageDelimitedTextWriter.java | 8 +++++---
 .../hdfs/tools/offlineImageViewer/PBImageTextWriter.java     | 7 +++++--
 3 files changed, 13 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd66d1b5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3edc80e..51d84f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1388,6 +1388,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl
     lock for a very long time (sinago via cmccabe)
 
+    HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in
+    platform-specific format. (Xiaoyu Yao via cnauroth)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd66d1b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
index 350967d..fbe7f3a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.text.SimpleDateFormat;
@@ -79,8 +79,10 @@ public class PBImageDelimitedTextWriter extends PBImageTextWriter {
   @Override
   public String getEntry(String parent, INode inode) {
     StringBuffer buffer = new StringBuffer();
-    String path = new File(parent, inode.getName().toStringUtf8()).toString();
-    buffer.append(path);
+    String inodeName = inode.getName().toStringUtf8();
+    Path path = new Path(parent.isEmpty() ? "/" : parent,
+      inodeName.isEmpty() ? "/" : inodeName);
+    buffer.append(path.toString());
     PermissionStatus p = null;
 
     switch (inode.getType()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd66d1b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
index d228920..d2ccc5c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
@@ -21,6 +21,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatPBINode;
 import org.apache.hadoop.hdfs.server.namenode.FSImageFormatProtobuf;
@@ -145,7 +146,8 @@ abstract class PBImageTextWriter implements Closeable {
           return "/";
         }
         if (this.path == null) {
-          this.path = new File(parent.getPath(), name).toString();
+          this.path = new Path(parent.getPath(), name.isEmpty() ? "/" : name).
+              toString();
           this.name = null;
         }
         return this.path;
@@ -364,7 +366,8 @@ abstract class PBImageTextWriter implements Closeable {
         }
         String parentName = toString(bytes);
         String parentPath =
-            new File(getParentPath(parent), parentName).toString();
+            new Path(getParentPath(parent),
+                parentName.isEmpty()? "/" : parentName).toString();
         dirPathCache.put(parent, parentPath);
       }
       return dirPathCache.get(parent);
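
The underlying gotcha: java.io.File joins names with the platform separator,
while org.apache.hadoop.fs.Path always uses '/'. A small sketch (the Windows
output shown is the failure mode this patch addresses):

    import java.io.File;
    import org.apache.hadoop.fs.Path;

    public class PathJoinSketch {
      public static void main(String[] args) {
        // "/dir/child" on Linux, but "\dir\child" on Windows.
        System.out.println(new File("/dir", "child"));
        // "/dir/child" on every platform, as an HDFS path should be.
        System.out.println(new Path("/dir", "child"));
      }
    }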


[08/47] hadoop git commit: HADOOP-11801. Update BUILDING.txt for Ubuntu. (Contributed by Gabor Liptak)

Posted by zj...@apache.org.
HADOOP-11801. Update BUILDING.txt for Ubuntu. (Contributed by Gabor Liptak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/273fef2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/273fef2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/273fef2f

Branch: refs/heads/YARN-2928
Commit: 273fef2fea54b3c179bd1cfdc99df2b9e6a03ff0
Parents: a48dec6
Author: Arpit Agarwal <ar...@apache.org>
Authored: Tue Apr 7 18:06:05 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 20:55:57 2015 -0700

----------------------------------------------------------------------
 BUILDING.txt                                    | 10 ++--------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 2 files changed, 5 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/273fef2f/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index f3b6853..3ca9fae 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -28,14 +28,8 @@ Installing required packages for clean install of Ubuntu 14.04 LTS Desktop:
   $ sudo apt-get -y install maven
 * Native libraries
   $ sudo apt-get -y install build-essential autoconf automake libtool cmake zlib1g-dev pkg-config libssl-dev
-* ProtocolBuffer 2.5.0
-  $ wget https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz
-  $ tar -zxvf protobuf-2.5.0.tar.gz
-  $ cd protobuf-2.5.0.tar.gz
-  $ ./configure
-  $ make
-  $ sudo make install
-  $ sudo ldconfig
+* ProtocolBuffer 2.5.0 (required)
+  $ sudo apt-get -y install libprotobuf-dev protobuf-compiler
 
 Optional packages:
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/273fef2f/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 67050e7..412bad7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -747,6 +747,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-10670. Allow AuthenticationFilters to load secret from signature
     secret files. (Kai Zheng via wheat9)
 
+    HADOOP-11801. Update BUILDING.txt for Ubuntu. (Gabor Liptak via
+    Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.


[38/47] hadoop git commit: HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki via Colin P. McCabe)

Posted by zj...@apache.org.
HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9963096d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9963096d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9963096d

Branch: refs/heads/YARN-2928
Commit: 9963096d802338b35ea4a38d971e3da8a58c24c7
Parents: 9c639ab
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Thu Apr 9 11:28:02 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Apr 9 21:21:55 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../org/apache/hadoop/tracing/TestTracing.java  | 125 +++++++------------
 2 files changed, 45 insertions(+), 83 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9963096d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 59cab03..4b22fa4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1427,6 +1427,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-8072. Reserved RBW space is not released if client terminates while
     writing block. (Arpit Agarwal)
 
+    HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki
+    via Colin P. McCabe)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9963096d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
index 01361b5..f6fef5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -31,7 +31,7 @@ import org.apache.htrace.Span;
 import org.apache.htrace.SpanReceiver;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -56,27 +56,26 @@ public class TestTracing {
   private static SpanReceiverHost spanReceiverHost;
 
   @Test
-  public void testGetSpanReceiverHost() throws Exception {
-    Configuration c = new Configuration();
+  public void testTracing() throws Exception {
     // getting instance already loaded.
-    c.set(SpanReceiverHost.SPAN_RECEIVERS_CONF_KEY, "");
-    SpanReceiverHost s = SpanReceiverHost.getInstance(c);
-    Assert.assertEquals(spanReceiverHost, s);
+    Assert.assertEquals(spanReceiverHost,
+        SpanReceiverHost.getInstance(new Configuration()));
+
+    // write and read without tracing started
+    String fileName = "testTracingDisabled.dat";
+    writeTestFile(fileName);
+    Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
+    readTestFile(fileName);
+    Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
+
+    writeWithTracing();
+    readWithTracing();
   }
 
-  @Test
-  public void testWriteTraceHooks() throws Exception {
+  public void writeWithTracing() throws Exception {
     long startTime = System.currentTimeMillis();
     TraceScope ts = Trace.startSpan("testWriteTraceHooks", Sampler.ALWAYS);
-    Path file = new Path("traceWriteTest.dat");
-    FSDataOutputStream stream = dfs.create(file);
-
-    for (int i = 0; i < 10; i++) {
-      byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
-      stream.write(data);
-    }
-    stream.hflush();
-    stream.close();
+    writeTestFile("testWriteTraceHooks.dat");
     long endTime = System.currentTimeMillis();
     ts.close();
 
@@ -125,55 +124,17 @@ public class TestTracing {
         Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
       }
     }
+    SetSpanReceiver.SetHolder.spans.clear();
   }
 
-  @Test
-  public void testWriteWithoutTraceHooks() throws Exception {
-    Path file = new Path("withoutTraceWriteTest.dat");
-    FSDataOutputStream stream = dfs.create(file);
-    for (int i = 0; i < 10; i++) {
-      byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
-      stream.write(data);
-    }
-    stream.hflush();
-    stream.close();
-    Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
-  }
-
-  @Test
-  public void testReadTraceHooks() throws Exception {
-    String fileName = "traceReadTest.dat";
-    Path filePath = new Path(fileName);
-
-    // Create the file.
-    FSDataOutputStream ostream = dfs.create(filePath);
-    for (int i = 0; i < 50; i++) {
-      byte[] data = RandomStringUtils.randomAlphabetic(10240).getBytes();
-      ostream.write(data);
-    }
-    ostream.close();
-
-
+  public void readWithTracing() throws Exception {
+    String fileName = "testReadTraceHooks.dat";
+    writeTestFile(fileName);
     long startTime = System.currentTimeMillis();
     TraceScope ts = Trace.startSpan("testReadTraceHooks", Sampler.ALWAYS);
-    FSDataInputStream istream = dfs.open(filePath, 10240);
-    ByteBuffer buf = ByteBuffer.allocate(10240);
-
-    int count = 0;
-    try {
-      while (istream.read(buf) > 0) {
-        count += 1;
-        buf.clear();
-        istream.seek(istream.getPos() + 5);
-      }
-    } catch (IOException ioe) {
-      // Ignore this it's probably a seek after eof.
-    } finally {
-      istream.close();
-    }
-    ts.getSpan().addTimelineAnnotation("count: " + count);
-    long endTime = System.currentTimeMillis();
+    readTestFile(fileName);
     ts.close();
+    long endTime = System.currentTimeMillis();
 
     String[] expectedSpanNames = {
       "testReadTraceHooks",
@@ -198,21 +159,22 @@ public class TestTracing {
     for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
       Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
     }
+    SetSpanReceiver.SetHolder.spans.clear();
   }
 
-  @Test
-  public void testReadWithoutTraceHooks() throws Exception {
-    String fileName = "withoutTraceReadTest.dat";
-    Path filePath = new Path(fileName);
-
-    // Create the file.
-    FSDataOutputStream ostream = dfs.create(filePath);
-    for (int i = 0; i < 50; i++) {
-      byte[] data = RandomStringUtils.randomAlphabetic(10240).getBytes();
-      ostream.write(data);
+  private void writeTestFile(String testFileName) throws Exception {
+    Path filePath = new Path(testFileName);
+    FSDataOutputStream stream = dfs.create(filePath);
+    for (int i = 0; i < 10; i++) {
+      byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
+      stream.write(data);
     }
-    ostream.close();
+    stream.hsync();
+    stream.close();
+  }
 
+  private void readTestFile(String testFileName) throws Exception {
+    Path filePath = new Path(testFileName);
     FSDataInputStream istream = dfs.open(filePath, 10240);
     ByteBuffer buf = ByteBuffer.allocate(10240);
 
@@ -228,32 +190,29 @@ public class TestTracing {
     } finally {
       istream.close();
     }
-    Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
-  }
-
-  @Before
-  public void cleanSet() {
-    SetSpanReceiver.SetHolder.spans.clear();
   }
 
   @BeforeClass
-  public static void setupCluster() throws IOException {
+  public static void setup() throws IOException {
     conf = new Configuration();
     conf.setLong("dfs.blocksize", 100 * 1024);
     conf.set(SpanReceiverHost.SPAN_RECEIVERS_CONF_KEY,
         SetSpanReceiver.class.getName());
+    spanReceiverHost = SpanReceiverHost.getInstance(conf);
+  }
 
+  @Before
+  public void startCluster() throws IOException {
     cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(3)
         .build();
     cluster.waitActive();
-
     dfs = cluster.getFileSystem();
-    spanReceiverHost = SpanReceiverHost.getInstance(conf);
+    SetSpanReceiver.SetHolder.spans.clear();
   }
 
-  @AfterClass
-  public static void shutDown() throws IOException {
+  @After
+  public void shutDown() throws IOException {
     cluster.shutdown();
   }
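
The merged testTracing() above depends on an assert-then-clear discipline:
the untraced write/read runs first and must leave the shared span set empty,
and each traced phase clears SetSpanReceiver.SetHolder.spans before the next
phase asserts on it. A dependency-free sketch of that discipline, assuming a
hypothetical list collector in place of the HTrace receiver:

  import java.util.ArrayList;
  import java.util.List;

  public class PhaseOrderingSketch {

    // Plays the role of SetSpanReceiver.SetHolder.spans.
    private static final List<String> spans = new ArrayList<String>();

    // Records a span only when tracing is on, like the instrumented
    // DFS client paths exercised by writeTestFile()/readTestFile().
    private static void doIo(boolean traced) {
      if (traced) {
        spans.add("span");
      }
    }

    public static void main(String[] args) {
      // Phase 1: untraced I/O must record nothing.
      doIo(false);
      check(spans.isEmpty(), "untraced I/O leaked spans");

      // Phase 2: traced write; assert, then clear before the next phase
      // (mirrors SetSpanReceiver.SetHolder.spans.clear() in the patch).
      doIo(true);
      check(!spans.isEmpty(), "traced write recorded no spans");
      spans.clear();

      // Phase 3: traced read sees only its own spans.
      doIo(true);
      check(spans.size() == 1, "stale spans from an earlier phase");
      System.out.println("phase ordering holds");
    }

    private static void check(boolean ok, String msg) {
      if (!ok) {
        throw new AssertionError(msg);
      }
    }
  }

Note also that writeTestFile() switches hflush() to hsync(): hflush() only
guarantees the data has reached the datanodes' buffers, while hsync()
additionally flushes it to disk on each datanode.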