Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/03/27 07:34:03 UTC

[01/50] [abbrv] hadoop git commit: MAPREDUCE-6287. Deprecated methods in org.apache.hadoop.examples.Sort. Contributed by Chao Zhang.

Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 db2f02389 -> ee3526587


MAPREDUCE-6287. Deprecated methods in org.apache.hadoop.examples.Sort. Contributed by Chao Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86011837
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86011837
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86011837

Branch: refs/heads/YARN-2928
Commit: 860118375311153f6b0b4c3823ef8ca067731022
Parents: 78b1b38
Author: Harsh J <ha...@cloudera.com>
Authored: Mon Mar 23 03:48:36 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:42 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                          | 3 +++
 .../src/main/java/org/apache/hadoop/examples/Sort.java        | 7 ++++---
 2 files changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86011837/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index b75d8aa..20505b6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -256,6 +256,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    MAPREDUCE-6287. Deprecated methods in org.apache.hadoop.examples.Sort
+    (Chao Zhang via harsh)
+
     MAPREDUCE-5190. Unnecessary condition test in RandomSampler.
     (Jingguo Yao via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86011837/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java
index a90c02b..0382c09 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/Sort.java
@@ -24,7 +24,7 @@ import java.util.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.mapreduce.filecache.DistributedCache;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.Writable;
@@ -160,13 +160,14 @@ public class Sort<K,V> extends Configured implements Tool {
       System.out.println("Sampling input to effect total-order sort...");
       job.setPartitionerClass(TotalOrderPartitioner.class);
       Path inputDir = FileInputFormat.getInputPaths(job)[0];
-      inputDir = inputDir.makeQualified(inputDir.getFileSystem(conf));
+      FileSystem fs = inputDir.getFileSystem(conf);
+      inputDir = inputDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
       Path partitionFile = new Path(inputDir, "_sortPartitioning");
       TotalOrderPartitioner.setPartitionFile(conf, partitionFile);
       InputSampler.<K,V>writePartitionFile(job, sampler);
       URI partitionUri = new URI(partitionFile.toString() +
                                  "#" + "_sortPartitioning");
-      DistributedCache.addCacheFile(partitionUri, conf);
+      job.addCacheFile(partitionUri);
     }
 
     System.out.println("Running on " +

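For readers tracking the API change: the deprecated calls were Path.makeQualified(FileSystem) and the static DistributedCache.addCacheFile(URI, Configuration); the replacements are Path.makeQualified(URI, Path) and Job.addCacheFile(URI). A minimal sketch of the new calls follows; the class and method names here are hypothetical wrappers, not part of the patch.

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.Job;

    // Hypothetical helper mirroring what Sort.java now does for the
    // TotalOrderPartitioner's partition file.
    class TotalOrderSetupSketch {
      static void addPartitionFile(Job job, Path inputDir) throws Exception {
        Configuration conf = job.getConfiguration();
        FileSystem fs = inputDir.getFileSystem(conf);
        // Replaces the deprecated inputDir.makeQualified(fs):
        Path qualified = inputDir.makeQualified(fs.getUri(), fs.getWorkingDirectory());
        Path partitionFile = new Path(qualified, "_sortPartitioning");
        // Replaces the deprecated DistributedCache.addCacheFile(uri, conf):
        job.addCacheFile(new URI(partitionFile + "#_sortPartitioning"));
      }
    }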

[41/50] [abbrv] hadoop git commit: HADOOP-11719. [Fsshell] Remove bin/hadoop reference from GenericOptionsParser default help text. Contributed by Brahma Reddy Battula.

Posted by zj...@apache.org.
HADOOP-11719. [Fsshell] Remove bin/hadoop reference from GenericOptionsParser default help text. Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a9ce4ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a9ce4ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a9ce4ac

Branch: refs/heads/YARN-2928
Commit: 0a9ce4acf40ad71cfec10f31e573e552986a22e5
Parents: 56b24ba
Author: Harsh J <ha...@cloudera.com>
Authored: Thu Mar 26 11:27:21 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:48 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                  | 4 ++++
 .../main/java/org/apache/hadoop/util/GenericOptionsParser.java   | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a9ce4ac/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 667a010..5f43236 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -447,6 +447,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    HADOOP-11719. [Fsshell] Remove bin/hadoop reference from
+    GenericOptionsParser default help text.
+    (Brahma Reddy Battula via harsh)
+
     HADOOP-11692. Improve authentication failure WARN message to avoid user
     confusion. (Yongjun Zhang)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a9ce4ac/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index 0a46a7a..925aad6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -513,7 +513,7 @@ public class GenericOptionsParser {
                 "specify comma separated archives to be unarchived" +
                 " on the compute machines.\n");
     out.println("The general command line syntax is");
-    out.println("bin/hadoop command [genericOptions] [commandOptions]\n");
+    out.println("command [genericOptions] [commandOptions]\n");
   }
   
 }

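The reworded help line reflects that tools using GenericOptionsParser are not necessarily launched through bin/hadoop. As a reminder of where that text applies, here is a minimal, hypothetical Tool driver; ToolRunner feeds the arguments through GenericOptionsParser, so generic options such as -D, -files, and -archives are consumed before the tool sees its own command options.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.conf.Configured;
    import org.apache.hadoop.util.Tool;
    import org.apache.hadoop.util.ToolRunner;

    // Hypothetical tool, used only to illustrate
    // "command [genericOptions] [commandOptions]".
    public class EchoConfTool extends Configured implements Tool {
      @Override
      public int run(String[] args) throws Exception {
        // args holds only the commandOptions; genericOptions have already
        // been applied to getConf() by GenericOptionsParser.
        System.out.println("fs.defaultFS = " + getConf().get("fs.defaultFS"));
        return 0;
      }

      public static void main(String[] args) throws Exception {
        System.exit(ToolRunner.run(new Configuration(), new EchoConfTool(), args));
      }
    }

Whether this runs as "hadoop jar tool.jar EchoConfTool -D fs.defaultFS=... extra" or through some other launcher, the generic/command option split is the same, which is why the hard-coded bin/hadoop prefix was dropped.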

[40/50] [abbrv] hadoop git commit: HADOOP-11724. DistCp throws NPE when the target directory is root. (Lei Eddy Xu via Yongjun Zhang)

Posted by zj...@apache.org.
HADOOP-11724. DistCp throws NPE when the target directory is root. (Lei Eddy Xu via Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8aec0d12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8aec0d12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8aec0d12

Branch: refs/heads/YARN-2928
Commit: 8aec0d1200c82761fc36f064ffde498fde2f3a69
Parents: 19bc19e
Author: Yongjun Zhang <yz...@cloudera.com>
Authored: Wed Mar 25 15:45:45 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:48 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                   | 3 +++
 .../main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java   | 3 +++
 2 files changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aec0d12/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 46dfee4..2e26b0a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -469,6 +469,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-10027. *Compressor_deflateBytesDirect passes instance instead of
     jclass to GetStaticObjectField. (Hui Zheng via cnauroth)
 
+    HADOOP-11724. DistCp throws NPE when the target directory is root.
+    (Lei Eddy Xu via Yongjun Zhang) 
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aec0d12/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index f36ef77..9ec57f4 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -133,6 +133,9 @@ public class CopyCommitter extends FileOutputCommitter {
   private void deleteAttemptTempFiles(Path targetWorkPath,
                                       FileSystem targetFS,
                                       String jobId) throws IOException {
+    if (targetWorkPath == null) {
+      return;
+    }
 
     FileStatus[] tempFiles = targetFS.globStatus(
         new Path(targetWorkPath, ".distcp.tmp." + jobId.replaceAll("job","attempt") + "*"));

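One plausible way CopyCommitter ends up with a null work path when the target is root: org.apache.hadoop.fs.Path.getParent() returns null at the filesystem root, so any path derived by walking up from "/" disappears. The guard added above simply makes deleteAttemptTempFiles() a no-op in that case. A small illustration (the class name is hypothetical):

    import org.apache.hadoop.fs.Path;

    class RootParentSketch {
      public static void main(String[] args) {
        Path root = new Path("/");
        // The root path has no parent:
        System.out.println(root.getParent());   // prints "null"

        // Defensive pattern mirroring the patch: return early rather than
        // building a glob pattern from a null work path.
        Path targetWorkPath = root.getParent();
        if (targetWorkPath == null) {
          return;
        }
        System.out.println(new Path(targetWorkPath, ".distcp.tmp.attempt_x"));
      }
    }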

[13/50] [abbrv] hadoop git commit: HDFS-7956. Improve logging for DatanodeRegistration. Contributed by Plamen Jeliazkov.

Posted by zj...@apache.org.
HDFS-7956. Improve logging for DatanodeRegistration. Contributed by Plamen Jeliazkov.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ca6d0020
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ca6d0020
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ca6d0020

Branch: refs/heads/YARN-2928
Commit: ca6d00202dd2b2eaf91bbce87b16016a904cb94f
Parents: 4d360bb
Author: Plamen Jeliazkov <pl...@gmail.com>
Authored: Mon Mar 23 23:04:04 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:44 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                       | 3 +++
 .../apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java  | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca6d0020/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3dd5fb3..3ea1346 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -777,6 +777,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7917. Use file to replace data dirs in test to simulate a disk failure.
     (Lei (Eddy) Xu via cnauroth)
 
+    HDFS-7956. Improve logging for DatanodeRegistration.
+    (Plamen Jeliazkov via shv)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ca6d0020/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
index e788137..7119738 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
@@ -100,7 +100,7 @@ public class DatanodeRegistration extends DatanodeID
   @Override
   public String toString() {
     return getClass().getSimpleName()
-      + "(" + getIpAddr()
+      + "(" + super.toString()
       + ", datanodeUuid=" + getDatanodeUuid()
       + ", infoPort=" + getInfoPort()
       + ", infoSecurePort=" + getInfoSecurePort()

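The one-line change swaps the bare IP for super.toString(), so the registration log line now carries whatever identifying fields DatanodeID prints (typically the transfer address) instead of just getIpAddr(). The delegation pattern, shown with hypothetical classes rather than the real HDFS types:

    // Hypothetical classes illustrating toString() delegation to the superclass.
    class NodeId {
      private final String host;
      private final int port;
      NodeId(String host, int port) { this.host = host; this.port = port; }
      @Override
      public String toString() { return host + ":" + port; }
    }

    class NodeRegistration extends NodeId {
      private final String uuid;
      NodeRegistration(String host, int port, String uuid) {
        super(host, port);
        this.uuid = uuid;
      }
      @Override
      public String toString() {
        // Reuse the superclass rendering instead of re-printing one field.
        return getClass().getSimpleName() + "(" + super.toString()
            + ", uuid=" + uuid + ")";
      }
    }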

[34/50] [abbrv] hadoop git commit: MAPREDUCE-6292. Use org.junit package instead of junit.framework in TestCombineFileInputFormat. (aajisaka)

Posted by zj...@apache.org.
MAPREDUCE-6292. Use org.junit package instead of junit.framework in TestCombineFileInputFormat. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3946d71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3946d71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3946d71

Branch: refs/heads/YARN-2928
Commit: b3946d71d0bb1dd349073f277e9ec7951e854c60
Parents: b51b366
Author: Akira Ajisaka <aa...@apache.org>
Authored: Wed Mar 25 19:00:35 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:47 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 ++
 .../lib/input/TestCombineFileInputFormat.java   | 36 ++++++++++++--------
 2 files changed, 25 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3946d71/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index f81a13f..9d6f1d4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -389,6 +389,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6265. Make ContainerLauncherImpl.INITIAL_POOL_SIZE configurable 
     to better control to launch/kill containers. (Zhihai Xu via ozawa)
 
+    MAPREDUCE-6292. Use org.junit package instead of junit.framework in
+    TestCombineFileInputFormat. (aajisaka)
+
   OPTIMIZATIONS
 
     MAPREDUCE-6169. MergeQueue should release reference to the current item 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3946d71/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
index db51ec6..85c675c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestCombineFileInputFormat.java
@@ -29,9 +29,6 @@ import java.util.TreeMap;
 import java.util.concurrent.TimeoutException;
 import java.util.zip.GZIPOutputStream;
 
-import org.junit.Assert;
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -60,7 +57,11 @@ import org.junit.Test;
 
 import com.google.common.collect.HashMultiset;
 
-public class TestCombineFileInputFormat extends TestCase {
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class TestCombineFileInputFormat {
 
   private static final String rack1[] = new String[] {
     "/r1"
@@ -221,6 +222,7 @@ public class TestCombineFileInputFormat extends TestCase {
     }
   }
 
+  @Test
   public void testRecordReaderInit() throws InterruptedException, IOException {
     // Test that we properly initialize the child recordreader when
     // CombineFileInputFormat and CombineFileRecordReader are used.
@@ -258,6 +260,7 @@ public class TestCombineFileInputFormat extends TestCase {
       rr.getCurrentKey().toString());
   }
 
+  @Test
   public void testReinit() throws Exception {
     // Test that a split containing multiple files works correctly,
     // with the child RecordReader getting its initialize() method
@@ -296,6 +299,7 @@ public class TestCombineFileInputFormat extends TestCase {
     assertFalse(rr.nextKeyValue());
   }
 
+  @Test
   public void testSplitPlacement() throws Exception {
     MiniDFSCluster dfs = null;
     FileSystem fileSys = null;
@@ -725,6 +729,7 @@ public class TestCombineFileInputFormat extends TestCase {
     DFSTestUtil.waitReplication(fileSys, name, replication);
   }
 
+  @Test
   public void testNodeDistribution() throws IOException, InterruptedException {
     DummyInputFormat inFormat = new DummyInputFormat();
     int numBlocks = 60;
@@ -774,20 +779,21 @@ public class TestCombineFileInputFormat extends TestCase {
         maxSplitSize, minSizeNode, minSizeRack, splits);
 
     int expectedSplitCount = (int) (totLength / maxSplitSize);
-    Assert.assertEquals(expectedSplitCount, splits.size());
+    assertEquals(expectedSplitCount, splits.size());
 
     // Ensure 90+% of the splits have node local blocks.
     // 100% locality may not always be achieved.
     int numLocalSplits = 0;
     for (InputSplit inputSplit : splits) {
-      Assert.assertEquals(maxSplitSize, inputSplit.getLength());
+      assertEquals(maxSplitSize, inputSplit.getLength());
       if (inputSplit.getLocations().length == 1) {
         numLocalSplits++;
       }
     }
-    Assert.assertTrue(numLocalSplits >= 0.9 * splits.size());
+    assertTrue(numLocalSplits >= 0.9 * splits.size());
   }
-  
+
+  @Test
   public void testNodeInputSplit() throws IOException, InterruptedException {
     // Regression test for MAPREDUCE-4892. There are 2 nodes with all blocks on 
     // both nodes. The grouping ensures that both nodes get splits instead of 
@@ -826,18 +832,19 @@ public class TestCombineFileInputFormat extends TestCase {
                           maxSize, minSizeNode, minSizeRack, splits);
     
     int expectedSplitCount = (int)(totLength/maxSize);
-    Assert.assertEquals(expectedSplitCount, splits.size());
+    assertEquals(expectedSplitCount, splits.size());
     HashMultiset<String> nodeSplits = HashMultiset.create();
     for(int i=0; i<expectedSplitCount; ++i) {
       InputSplit inSplit = splits.get(i);
-      Assert.assertEquals(maxSize, inSplit.getLength());
-      Assert.assertEquals(1, inSplit.getLocations().length);
+      assertEquals(maxSize, inSplit.getLength());
+      assertEquals(1, inSplit.getLocations().length);
       nodeSplits.add(inSplit.getLocations()[0]);
     }
-    Assert.assertEquals(3, nodeSplits.count(locations[0]));
-    Assert.assertEquals(3, nodeSplits.count(locations[1]));
+    assertEquals(3, nodeSplits.count(locations[0]));
+    assertEquals(3, nodeSplits.count(locations[1]));
   }
-  
+
+  @Test
   public void testSplitPlacementForCompressedFiles() throws Exception {
     MiniDFSCluster dfs = null;
     FileSystem fileSys = null;
@@ -1190,6 +1197,7 @@ public class TestCombineFileInputFormat extends TestCase {
   /**
    * Test that CFIF can handle missing blocks.
    */
+  @Test
   public void testMissingBlocks() throws Exception {
     String namenode = null;
     MiniDFSCluster dfs = null;

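The test changes above are the standard JUnit 3 to JUnit 4 migration: drop "extends TestCase", import the asserts statically from org.junit.Assert, and annotate every test method with @Test so the runner still discovers it. A condensed sketch with a hypothetical test class:

    // JUnit 3 style (before): discovery by the test* naming convention.
    //   public class TestSomething extends junit.framework.TestCase {
    //     public void testAnswer() { assertEquals(42, 6 * 7); }
    //   }

    // JUnit 4 style (after): annotation-driven discovery, static asserts.
    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class TestSomething {
      @Test
      public void testAnswer() {
        assertEquals(42, 6 * 7);
      }
    }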

[09/50] [abbrv] hadoop git commit: HDFS-3325. When configuring 'dfs.namenode.safemode.threshold-pct' to a value greater or equal to 1 there is mismatch in the UI report (Contributed by J.Andreina)

Posted by zj...@apache.org.
HDFS-3325. When configuring 'dfs.namenode.safemode.threshold-pct' to a value greater or equal to 1 there is mismatch in the UI report (Contributed by J.Andreina)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37ba77f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37ba77f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37ba77f9

Branch: refs/heads/YARN-2928
Commit: 37ba77f91c1c31dcfaafa6571a4aa32da708ae6c
Parents: 2c5a7b6
Author: Vinayakumar B <vi...@apache.org>
Authored: Tue Mar 24 12:12:01 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:44 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                      | 4 ++++
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java     | 2 +-
 .../src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java       | 2 +-
 .../apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java    | 2 +-
 4 files changed, 7 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37ba77f9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3ea1346..ee9a5db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -338,6 +338,10 @@ Release 2.8.0 - UNRELEASED
     HDFS-7867. Update action param from "start" to "prepare" in rolling upgrade
     javadoc (J.Andreina via vinayakumarb)
 
+    HDFS-3325. When configuring "dfs.namenode.safemode.threshold-pct" to a value
+    greater or equal to 1 there is mismatch in the UI report
+    (J.Andreina via vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37ba77f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 71c84b1..34b5e95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -5417,7 +5417,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         msg += String.format(
           "The reported blocks %d needs additional %d"
           + " blocks to reach the threshold %.4f of total blocks %d.%n",
-          blockSafe, (blockThreshold - blockSafe) + 1, threshold, blockTotal);
+                blockSafe, (blockThreshold - blockSafe), threshold, blockTotal);
         thresholdsMet = false;
       } else {
         msg += String.format("The reported blocks %d has reached the threshold"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37ba77f9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
index 2d5bef2..80fe9ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
@@ -198,7 +198,7 @@ public class TestSafeMode {
     
     String status = nn.getNamesystem().getSafemode();
     assertEquals("Safe mode is ON. The reported blocks 0 needs additional " +
-        "15 blocks to reach the threshold 0.9990 of total blocks 15." + NEWLINE +
+        "14 blocks to reach the threshold 0.9990 of total blocks 15." + NEWLINE +
         "The number of live datanodes 0 has reached the minimum number 0. " +
         "Safe mode will be turned off automatically once the thresholds " +
         "have been reached.", status);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37ba77f9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
index c5aad9c..86f3e7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
@@ -498,7 +498,7 @@ public class TestHASafeMode {
             + nodeThresh + ". In safe mode extension. "
             + "Safe mode will be turned off automatically"));
     } else {
-      int additional = total - safe;
+      int additional = (int) (total * 0.9990) - safe;
       assertTrue("Bad safemode status: '" + status + "'",
           status.startsWith(
               "Safe mode is ON. " +

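The off-by-one being removed is easiest to see with the numbers from TestSafeMode above: blockTotal = 15, threshold = 0.9990, blockSafe = 0. Assuming the safe-mode exit check compares blockSafe against blockThreshold with the int truncation shown below (an assumption chosen to match the updated test expectations), the message arithmetic works out as follows:

    public class SafeModeMathSketch {
      public static void main(String[] args) {
        int blockTotal = 15;
        double threshold = 0.9990;
        int blockSafe = 0;

        // Assumed rounding of the block threshold, consistent with the tests.
        int blockThreshold = (int) (blockTotal * threshold);   // 14

        int oldReport = (blockThreshold - blockSafe) + 1;      // 15: one more than the check requires
        int newReport = blockThreshold - blockSafe;            // 14: matches the updated test string
        System.out.println(oldReport + " -> " + newReport);
      }
    }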

[48/50] [abbrv] hadoop git commit: HADOOP-11553. Formalize the shell API (aw)

Posted by zj...@apache.org.
HADOOP-11553. Formalize the shell API (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43227bc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43227bc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43227bc9

Branch: refs/heads/YARN-2928
Commit: 43227bc904c254edd47cd29b6814a278a9f963c0
Parents: b151c56
Author: Allen Wittenauer <aw...@apache.org>
Authored: Thu Mar 26 15:09:51 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:49 2015 -0700

----------------------------------------------------------------------
 dev-support/shelldocs.py                        | 250 +++++++++++
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 hadoop-common-project/hadoop-common/pom.xml     |  57 ++-
 .../src/main/bin/hadoop-functions.sh            | 411 ++++++++++++++++---
 .../src/site/markdown/UnixShellGuide.md         |  98 +++++
 hadoop-project/src/site/site.xml                |   4 +-
 6 files changed, 765 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43227bc9/dev-support/shelldocs.py
----------------------------------------------------------------------
diff --git a/dev-support/shelldocs.py b/dev-support/shelldocs.py
new file mode 100755
index 0000000..2547450
--- /dev/null
+++ b/dev-support/shelldocs.py
@@ -0,0 +1,250 @@
+#!/usr/bin/python
+#
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+import re
+import sys
+import string
+from optparse import OptionParser
+
+def docstrip(key,string):
+  string=re.sub("^## @%s " % key ,"",string)
+  string=string.lstrip()
+  string=string.rstrip()
+  return string
+
+def toc(list):
+  tocout=[]
+  header=()
+  for i in list:
+    if header != i.getinter():
+      header=i.getinter()
+      line="  * %s\n" % (i.headerbuild())
+      tocout.append(line)
+    line="    * [%s](#%s)\n" % (i.getname().replace("_","\_"),i.getname())
+    tocout.append(line)
+  return tocout
+
+class ShellFunction:
+  def __init__(self):
+    self.reset()
+
+  def __cmp__(self,other):
+    if (self.audience == other.audience):
+      if (self.stability == other.stability):
+        if (self.replaceb == other.replaceb):
+          return(cmp(self.name,other.name))
+        else:
+          if (self.replaceb == "Yes"):
+            return -1
+          else:
+            return 1
+      else:
+          if (self.stability == "Stable"):
+            return -1
+          else:
+            return 1
+    else:
+      if (self.audience == "Public"):
+        return -1
+      else:
+        return 1
+
+  def reset(self):
+    self.name=None
+    self.audience=None
+    self.stability=None
+    self.replaceb=None
+    self.returnt=None
+    self.desc=None
+    self.params=None
+
+  def setname(self,text):
+    definition=text.split();
+    self.name=definition[1]
+
+  def getname(self):
+    if (self.name is None):
+      return "None"
+    else:
+      return self.name
+
+  def setaudience(self,text):
+    self.audience=docstrip("audience",text)
+    self.audience=self.audience.capitalize()
+
+  def getaudience(self):
+    if (self.audience is None):
+      return "None"
+    else:
+      return self.audience
+
+  def setstability(self,text):
+    self.stability=docstrip("stability",text)
+    self.stability=self.stability.capitalize()
+
+  def getstability(self):
+    if (self.stability is None):
+      return "None"
+    else:
+      return self.stability
+
+  def setreplace(self,text):
+    self.replaceb=docstrip("replaceable",text)
+    self.replaceb=self.replaceb.capitalize()
+
+  def getreplace(self):
+    if (self.replaceb is None):
+      return "None"
+    else:
+      return self.replaceb
+
+  def getinter(self):
+    return( (self.getaudience(), self.getstability(), self.getreplace()))
+
+  def addreturn(self,text):
+    if (self.returnt is None):
+      self.returnt = []
+    self.returnt.append(docstrip("return",text))
+
+  def getreturn(self):
+    if (self.returnt is None):
+      return "Nothing"
+    else:
+      return "\n\n".join(self.returnt)
+
+  def adddesc(self,text):
+    if (self.desc is None):
+      self.desc = []
+    self.desc.append(docstrip("description",text))
+
+  def getdesc(self):
+    if (self.desc is None):
+      return "None"
+    else:
+      return " ".join(self.desc)
+
+  def addparam(self,text):
+    if (self.params is None):
+      self.params = []
+    self.params.append(docstrip("param",text))
+
+  def getparams(self):
+    if (self.params is None):
+      return ""
+    else:
+      return " ".join(self.params)
+
+  def getusage(self):
+    line="%s %s" % (self.name, self.getparams())
+    return line
+
+  def headerbuild(self):
+    if self.getreplace() == "Yes":
+      replacetext="Replaceable"
+    else:
+      replacetext="Not Replaceable"
+    line="%s/%s/%s" % (self.getaudience(), self.getstability(), replacetext)
+    return(line)
+
+  def getdocpage(self):
+    line="### `%s`\n\n"\
+         "* Synopsis\n\n"\
+         "```\n%s\n"\
+         "```\n\n" \
+         "* Description\n\n" \
+         "%s\n\n" \
+         "* Returns\n\n" \
+         "%s\n\n" \
+         "| Classification | Level |\n" \
+         "| :--- | :--- |\n" \
+         "| Audience | %s |\n" \
+         "| Stability | %s |\n" \
+         "| Replaceable | %s |\n\n" \
+         % (self.getname(),
+            self.getusage(),
+            self.getdesc(),
+            self.getreturn(),
+            self.getaudience(),
+            self.getstability(),
+            self.getreplace())
+    return line
+
+  def __str__(self):
+    line="{%s %s %s %s}" \
+      % (self.getname(),
+         self.getaudience(),
+         self.getstability(),
+         self.getreplace())
+    return line
+
+def main():
+  parser=OptionParser(usage="usage: %prog --skipprnorep --output OUTFILE --input INFILE [--input INFILE ...]")
+  parser.add_option("-o","--output", dest="outfile",
+     action="store", type="string",
+     help="file to create", metavar="OUTFILE")
+  parser.add_option("-i","--input", dest="infile",
+     action="append", type="string",
+     help="file to read", metavar="INFILE")
+  parser.add_option("--skipprnorep", dest="skipprnorep",
+     action="store_true", help="Skip Private & Not Replaceable")
+
+  (options, args)=parser.parse_args()
+
+  allfuncs=[]
+  for filename in options.infile:
+    with open(filename,"r") as shellcode:
+      funcdef=ShellFunction()
+      for line in shellcode:
+        if line.startswith('## @description'):
+          funcdef.adddesc(line)
+        elif line.startswith('## @audience'):
+          funcdef.setaudience(line)
+        elif line.startswith('## @stability'):
+          funcdef.setstability(line)
+        elif line.startswith('## @replaceable'):
+          funcdef.setreplace(line)
+        elif line.startswith('## @param'):
+          funcdef.addparam(line)
+        elif line.startswith('## @return'):
+          funcdef.addreturn(line)
+        elif line.startswith('function'):
+          funcdef.setname(line)
+          if options.skipprnorep:
+            if funcdef.getaudience() == "Private" and \
+               funcdef.getreplace() == "No":
+               pass
+            else:
+              allfuncs.append(funcdef)
+          funcdef=ShellFunction()
+
+  allfuncs=sorted(allfuncs)
+
+  outfile=open(options.outfile, "w")
+  for line in toc(allfuncs):
+    outfile.write(line)
+
+  outfile.write("\n------\n\n")
+
+  header=[]
+  for funcs in allfuncs:
+    if header != funcs.getinter():
+      header=funcs.getinter()
+      line="## %s\n" % (funcs.headerbuild())
+      outfile.write(line)
+    outfile.write(funcs.getdocpage())
+  outfile.close()
+
+if __name__ == "__main__":
+  main()
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43227bc9/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5f43236..dbe9e55 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -19,6 +19,8 @@ Trunk (Unreleased)
     HADOOP-11657. Align the output of `hadoop fs -du` to be more Unix-like.
     (aajisaka)
 
+    HADOOP-11553. Formalize the shell API (aw)
+
   NEW FEATURES
 
     HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via aw)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43227bc9/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 3ae78f4..706f5b5 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -471,10 +471,10 @@
               <goal>run</goal>
             </goals>
             <configuration>
-              <tasks>
+              <target>
                 <copy file="src/main/resources/core-default.xml" todir="src/site/resources"/>
                 <copy file="src/main/xsl/configuration.xsl" todir="src/site/resources"/>
-              </tasks>
+              </target>
             </configuration>
           </execution>
         </executions>
@@ -509,6 +509,53 @@
           </excludes>
         </configuration>
       </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <executions>
+            <execution>
+                <id>shelldocs</id>
+                <phase>pre-site</phase>
+                <goals>
+                    <goal>exec</goal>
+                </goals>
+                <configuration>
+                    <executable>python</executable>
+                    <workingDirectory>src/site/markdown</workingDirectory>
+                    <arguments>
+                        <argument>${basedir}/../../dev-support/shelldocs.py</argument>
+                        <argument>--skipprnorep</argument>
+                        <argument>--output</argument>
+                        <argument>${basedir}/src/site/markdown/UnixShellAPI.md</argument>
+                        <argument>--input</argument>
+                        <argument>${basedir}/src/main/bin/hadoop-functions.sh</argument>
+                    </arguments>
+                </configuration>
+            </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <artifactId>maven-clean-plugin</artifactId>
+        <configuration>
+          <filesets>
+            <fileset>
+              <directory>src/site/markdown</directory>
+                <includes>
+                  <include>UnixShellAPI.md</include>
+                </includes>
+              <followSymlinks>false</followSymlinks>
+            </fileset>
+            <fileset>
+              <directory>src/site/resources</directory>
+                <includes>
+                  <include>configuration.xsl</include>
+                  <include>core-default.xml</include>
+                </includes>
+              <followSymlinks>false</followSymlinks>
+            </fileset>
+          </filesets>
+        </configuration>
+      </plugin>
     </plugins>
   </build>
 
@@ -550,7 +597,7 @@
                       <family>unix</family>
                       <message>native build only supported on Mac or Unix</message>
                     </requireOS>
-                  </rules>  
+                  </rules>
                   <fail>true</fail>
                 </configuration>
               </execution>
@@ -670,7 +717,7 @@
                       <family>windows</family>
                       <message>native-win build only supported on Windows</message>
                     </requireOS>
-                  </rules>  
+                  </rules>
                   <fail>true</fail>
                 </configuration>
               </execution>
@@ -786,7 +833,7 @@
                       <family>mac</family>
                       <family>unix</family>
                     </requireOS>
-                  </rules>  
+                  </rules>
                   <fail>true</fail>
                 </configuration>
               </execution>

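For reference, the exec-maven-plugin stanza above runs the generator during the pre-site phase. The same documentation can be produced by hand; assuming a Python 2 interpreter (the script's __cmp__-based sorting suggests it targets Python 2), the equivalent invocation from the source root would be:

    python dev-support/shelldocs.py --skipprnorep \
        --output hadoop-common-project/hadoop-common/src/site/markdown/UnixShellAPI.md \
        --input  hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh

This regenerates UnixShellAPI.md from the ## @description/@audience/@stability/@replaceable annotations added to hadoop-functions.sh; the maven-clean-plugin fileset above removes the generated file on mvn clean.
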
http://git-wip-us.apache.org/repos/asf/hadoop/blob/43227bc9/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 616e706..85f8200 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -14,13 +14,21 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+## @description  Print a message to stderr
+## @audience     public
+## @stability    stable
+## @replaceable  no
+## @param        string
 function hadoop_error
 {
-  # NOTE: This function is not user replaceable.
-
   echo "$*" 1>&2
 }
 
+## @description  Print a message to stderr if --debug is turned on
+## @audience     public
+## @stability    stable
+## @replaceable  no
+## @param        string
 function hadoop_debug
 {
   if [[ -n "${HADOOP_SHELL_SCRIPT_DEBUG}" ]]; then
@@ -28,10 +36,14 @@ function hadoop_debug
   fi
 }
 
+## @description  Replace `oldvar` with `newvar` if `oldvar` exists.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        oldvar
+## @param        newvar
 function hadoop_deprecate_envvar
 {
-  #
-  # Deprecate $1 with $2
   local oldvar=$1
   local newvar=$2
   local oldval=${!oldvar}
@@ -50,10 +62,12 @@ function hadoop_deprecate_envvar
   fi
 }
 
+## @description  Bootstraps the Hadoop shell environment
+## @audience     private
+## @stability    evolving
+## @replaceable  no
 function hadoop_bootstrap
 {
-  # NOTE: This function is not user replaceable.
-
   # the root of the Hadoop installation
   # See HADOOP-6255 for the expected directory structure layout
 
@@ -94,14 +108,14 @@ function hadoop_bootstrap
   hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
 }
 
+## @description  Locate Hadoop's configuration directory
+## @audience     private
+## @stability    evolving
+## @replaceable  no
 function hadoop_find_confdir
 {
-  # NOTE: This function is not user replaceable.
-
   local conf_dir
-  # Look for the basic hadoop configuration area.
-  #
-  #
+
   # An attempt at compatibility with some Hadoop 1.x
   # installs.
   if [[ -e "${HADOOP_PREFIX}/conf/hadoop-env.sh" ]]; then
@@ -114,6 +128,11 @@ function hadoop_find_confdir
   hadoop_debug "HADOOP_CONF_DIR=${HADOOP_CONF_DIR}"
 }
 
+## @description  Validate ${HADOOP_CONF_DIR}
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @return       will exit on failure conditions
 function hadoop_verify_confdir
 {
   # Check only log4j.properties by default.
@@ -123,10 +142,12 @@ function hadoop_verify_confdir
   fi
 }
 
+## @description  Import the hadoop-env.sh settings
+## @audience     private
+## @stability    evolving
+## @replaceable  no
 function hadoop_exec_hadoopenv
 {
-  # NOTE: This function is not user replaceable.
-
   if [[ -z "${HADOOP_ENV_PROCESSED}" ]]; then
     if [[ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]]; then
       export HADOOP_ENV_PROCESSED=true
@@ -135,26 +156,35 @@ function hadoop_exec_hadoopenv
   fi
 }
 
+## @description  Import the replaced functions
+## @audience     private
+## @stability    evolving
+## @replaceable  no
 function hadoop_exec_userfuncs
 {
-  # NOTE: This function is not user replaceable.
-
   if [[ -e "${HADOOP_CONF_DIR}/hadoop-user-functions.sh" ]]; then
     . "${HADOOP_CONF_DIR}/hadoop-user-functions.sh"
   fi
 }
 
+## @description  Read the user's settings.  This provides for users to
+## @description  override and/or append hadoop-env.sh. It is not meant
+## @description  as a complete system override.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_exec_hadooprc
 {
-  # Read the user's settings.  This provides for users to override
-  # and/or append hadoop-env.sh. It is not meant as a complete system override.
-
   if [[ -f "${HOME}/.hadooprc" ]]; then
     hadoop_debug "Applying the user's .hadooprc"
     . "${HOME}/.hadooprc"
   fi
 }
 
+## @description  Import shellprofile.d content
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_import_shellprofiles
 {
   local i
@@ -180,6 +210,10 @@ function hadoop_import_shellprofiles
   done
 }
 
+## @description  Initialize the registered shell profiles
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_shellprofiles_init
 {
   local i
@@ -194,6 +228,10 @@ function hadoop_shellprofiles_init
   done
 }
 
+## @description  Apply the shell profile classpath additions
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_shellprofiles_classpath
 {
   local i
@@ -208,6 +246,10 @@ function hadoop_shellprofiles_classpath
   done
 }
 
+## @description  Apply the shell profile native library additions
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_shellprofiles_nativelib
 {
   local i
@@ -222,6 +264,10 @@ function hadoop_shellprofiles_nativelib
   done
 }
 
+## @description  Apply the shell profile final configuration
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_shellprofiles_finalize
 {
   local i
@@ -236,6 +282,11 @@ function hadoop_shellprofiles_finalize
   done
 }
 
+## @description  Initialize the Hadoop shell environment, now that
+## @description  user settings have been imported
+## @audience     private
+## @stability    evolving
+## @replaceable  no
 function hadoop_basic_init
 {
   # Some of these are also set in hadoop-env.sh.
@@ -290,10 +341,15 @@ function hadoop_basic_init
   HADOOP_SSH_PARALLEL=${HADOOP_SSH_PARALLEL:-10}
 }
 
-function hadoop_populate_slaves_file()
+## @description  Set the slave support information to the contents
+## @description  of `filename`
+## @audience     public
+## @stability    stable
+## @replaceable  no
+## @param        filename
+## @return       will exit if file does not exist
+function hadoop_populate_slaves_file
 {
-  # NOTE: This function is not user replaceable.
-
   local slavesfile=$1
   shift
   if [[ -f "${slavesfile}" ]]; then
@@ -308,10 +364,17 @@ function hadoop_populate_slaves_file()
   fi
 }
 
+## @description  Rotates the given `file` until `number` of
+## @description  files exist.
+## @audience     public
+## @stability    stable
+## @replaceable  no
+## @param        filename
+## @param        [number]
+## @return       $? will contain last mv's return value
 function hadoop_rotate_log
 {
   #
-  # log rotation (mainly used for .out files)
   # Users are likely to replace this one for something
   # that gzips or uses dates or who knows what.
   #
@@ -334,6 +397,13 @@ function hadoop_rotate_log
   fi
 }
 
+## @description  Via ssh, log into `hostname` and run `command`
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        hostname
+## @param        command
+## @param        [...]
 function hadoop_actual_ssh
 {
   # we are passing this function to xargs
@@ -345,6 +415,13 @@ function hadoop_actual_ssh
   ssh ${HADOOP_SSH_OPTS} ${slave} $"${@// /\\ }" 2>&1 | sed "s/^/$slave: /"
 }
 
+## @description  Connect to ${HADOOP_SLAVES} or ${HADOOP_SLAVE_NAMES}
+## @description  and execute command.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        command
+## @param        [...]
 function hadoop_connect_to_hosts
 {
   # shellcheck disable=SC2124
@@ -405,6 +482,11 @@ function hadoop_connect_to_hosts
   fi
 }
 
+## @description  Utility routine to handle --slaves mode
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        commandarray
 function hadoop_common_slave_mode_execute
 {
   #
@@ -431,6 +513,14 @@ function hadoop_common_slave_mode_execute
   hadoop_connect_to_hosts -- "${argv[@]}"
 }
 
+## @description  Verify that a shell command was passed a valid
+## @description  class name
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        classname
+## @return       0 = success
+## @return       1 = failure w/user message
 function hadoop_validate_classname
 {
   local class=$1
@@ -445,6 +535,14 @@ function hadoop_validate_classname
   return 0
 }
 
+## @description  Append the `appendstring` if `checkstring` is not
+## @description  present in the given `envvar`
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        envvar
+## @param        checkstring
+## @param        appendstring
 function hadoop_add_param
 {
   #
@@ -466,21 +564,30 @@ function hadoop_add_param
   fi
 }
 
+## @description  Register the given `shellprofile` to the Hadoop
+## @description  shell subsystem
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        shellprofile
 function hadoop_add_profile
 {
   # shellcheck disable=SC2086
   hadoop_add_param HADOOP_SHELL_PROFILES $1 $1
 }
 
+## @description  Add a file system object (directory, file,
+## @description  wildcard, ...) to the classpath. Optionally provide
+## @description  a hint as to where in the classpath it should go.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        object
+## @param        [before|after]
+## @return       0 = success (added or duplicate)
+## @return       1 = failure (doesn't exist or some other reason)
 function hadoop_add_classpath
 {
-  # two params:
-  # $1 = directory, file, wildcard, whatever to add
-  # $2 = before or after, which determines where in the
-  #      classpath this object should go. default is after
-  # return 0 = success (added or duplicate)
-  # return 1 = failure (doesn't exist, whatever)
-
   # However, with classpath (& JLP), we can do dedupe
   # along with some sanity checking (e.g., missing directories)
   # since we have a better idea of what is legal
@@ -517,15 +624,23 @@ function hadoop_add_classpath
   return 0
 }
 
+## @description  Add a file system object (directory, file,
+## @description  wildcard, ...) to the colonpath.  Optionally provide
+## @description  a hint as to where in the colonpath it should go.
+## @description  Prior to adding, objects are checked for duplication
+## @description  and check for existence.  Many other functions use
+## @description  this function as their base implementation
+## @description  including `hadoop_add_javalibpath` and `hadoop_add_ldlibpath`.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        envvar
+## @param        object
+## @param        [before|after]
+## @return       0 = success (added or duplicate)
+## @return       1 = failure (doesn't exist or some other reason)
 function hadoop_add_colonpath
 {
-  # two params:
-  # $1 = directory, file, wildcard, whatever to add
-  # $2 = before or after, which determines where in the
-  #      classpath this object should go
-  # return 0 = success
-  # return 1 = failure (duplicate)
-
   # this is CLASSPATH, JLP, etc but with dedupe but no
   # other checking
   if [[ -d "${2}" ]] && [[ ":${!1}:" != *":$2:"* ]]; then
@@ -548,12 +663,34 @@ function hadoop_add_colonpath
   return 1
 }
 
+## @description  Add a file system object (directory, file,
+## @description  wildcard, ...) to the Java JNI path.  Optionally
+## @description  provide a hint as to where in the Java JNI path
+## @description  it should go.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        object
+## @param        [before|after]
+## @return       0 = success (added or duplicate)
+## @return       1 = failure (doesn't exist or some other reason)
 function hadoop_add_javalibpath
 {
   # specialized function for a common use case
   hadoop_add_colonpath JAVA_LIBRARY_PATH "$1" "$2"
 }
 
+## @description  Add a file system object (directory, file,
+## @description  wildcard, ...) to the LD_LIBRARY_PATH.  Optionally
+## @description  provide a hint as to where in the LD_LIBRARY_PATH
+## @description  it should go.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        object
+## @param        [before|after]
+## @return       0 = success (added or duplicate)
+## @return       1 = failure (doesn't exist or some other reason)
 function hadoop_add_ldlibpath
 {
   # specialized function for a common use case
@@ -563,6 +700,11 @@ function hadoop_add_ldlibpath
   export LD_LIBRARY_PATH
 }
 
+## @description  Add the common/core Hadoop components to the
+## @description  environment
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_add_common_to_classpath
 {
   #
@@ -582,6 +724,11 @@ function hadoop_add_common_to_classpath
   hadoop_add_classpath "${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR}"'/*'
 }
 
+## @description  Add the user's custom classpath settings to the
+## @description  environment
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_add_to_classpath_userpath
 {
   # Add the user-specified HADOOP_CLASSPATH to the
@@ -619,13 +766,15 @@ function hadoop_add_to_classpath_userpath
   fi
 }
 
+## @description  Routine to configure any OS-specific settings.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @return       may exit on failure conditions
 function hadoop_os_tricks
 {
   local bindv6only
 
-  # Some OSes have special needs.  Here's some out of the box examples for OS X,
-  # Linux and Windows on Cygwin.
-  # Vendors, replace this with your special sauce.
   HADOOP_IS_CYGWIN=false
   case ${HADOOP_OS_TYPE} in
     Darwin)
@@ -664,6 +813,11 @@ function hadoop_os_tricks
   esac
 }
 
+## @description  Configure/verify ${JAVA_HOME}
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @return       may exit on failure conditions
 function hadoop_java_setup
 {
   # Bail if we did not detect it
@@ -685,6 +839,10 @@ function hadoop_java_setup
   fi
 }
 
+## @description  Finish Java JNI paths prior to execution
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_finalize_libpaths
 {
   if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
@@ -695,6 +853,10 @@ function hadoop_finalize_libpaths
   fi
 }
 
+## @description  Finish Java heap parameters prior to execution
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_finalize_hadoop_heap
 {
   if [[ -n "${HADOOP_HEAPSIZE_MAX}" ]]; then
@@ -720,9 +882,15 @@ function hadoop_finalize_hadoop_heap
   fi
 }
 
-# Accepts a variable name.  If running on Cygwin, sets the variable value to the
-# equivalent translated Windows path by running the cygpath utility.  If the
-# second argument is true, then the variable is treated as a path list.
+## @description  Converts the contents of the variable name
+## @description  `varnameref` into the equivalent Windows path.
+## @description  If the second parameter is true, then `varnameref`
+## @description  is treated as though it was a path list.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        varnameref
+## @param        [true]
 function hadoop_translate_cygwin_path
 {
   if [[ "${HADOOP_IS_CYGWIN}" = "true" ]]; then
@@ -736,9 +904,11 @@ function hadoop_translate_cygwin_path
   fi
 }
 
-#
-# fill in any last minute options that might not have been defined yet
-#
+## @description  Finish configuring Hadoop specific system properties
+## @description  prior to executing Java
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_finalize_hadoop_opts
 {
   hadoop_translate_cygwin_path HADOOP_LOG_DIR
@@ -754,6 +924,10 @@ function hadoop_finalize_hadoop_opts
   hadoop_add_param HADOOP_OPTS hadoop.security.logger "-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER}"
 }
 
+## @description  Finish Java classpath prior to execution
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_finalize_classpath
 {
   hadoop_add_classpath "${HADOOP_CONF_DIR}" before
@@ -764,6 +938,10 @@ function hadoop_finalize_classpath
   hadoop_translate_cygwin_path CLASSPATH true
 }
 
+## @description  Finish Catalina configuration prior to execution
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_finalize_catalina_opts
 {
 
@@ -783,9 +961,14 @@ function hadoop_finalize_catalina_opts
   hadoop_add_param CATALINA_OPTS "${prefix}.ssl.keystore.file" "-D${prefix}.ssl.keystore.file=${HADOOP_CATALINA_SSL_KEYSTORE_FILE}"
 }
 
+## @description  Finish all the remaining environment settings prior
+## @description  to executing Java.  This is a wrapper that calls
+## @description  the other `finalize` routines.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_finalize
 {
-
   hadoop_shellprofiles_finalize
 
   hadoop_finalize_classpath
@@ -801,10 +984,15 @@ function hadoop_finalize
   hadoop_translate_cygwin_path HADOOP_MAPRED_HOME
 }
 
+## @description  Print usage information and exit with the passed
+## @description  `exitcode`
+## @audience     public
+## @stability    stable
+## @replaceable  no
+## @param        exitcode
+## @return       This function will always exit.
 function hadoop_exit_with_usage
 {
-  # NOTE: This function is not user replaceable.
-
   local exitcode=$1
   if [[ -z $exitcode ]]; then
     exitcode=1
@@ -819,6 +1007,12 @@ function hadoop_exit_with_usage
   exit $exitcode
 }
 
+## @description  Verify that prerequisites have been met prior to
+## @description  executing a privileged program.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @return       This routine may exit.
 function hadoop_verify_secure_prereq
 {
   # if you are on an OS like Illumos that has functional roles
@@ -834,6 +1028,9 @@ function hadoop_verify_secure_prereq
   fi
 }
 
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_setup_secure_service
 {
   # need a more complicated setup? replace me!
@@ -842,6 +1039,9 @@ function hadoop_setup_secure_service
   HADOOP_LOG_DIR=${HADOOP_SECURE_LOG_DIR}
 }
 
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_verify_piddir
 {
   if [[ -z "${HADOOP_PID_DIR}" ]]; then
@@ -864,6 +1064,9 @@ function hadoop_verify_piddir
   rm "${HADOOP_PID_DIR}/$$" >/dev/null 2>&1
 }
 
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
 function hadoop_verify_logdir
 {
   if [[ -z "${HADOOP_LOG_DIR}" ]]; then
@@ -886,7 +1089,14 @@ function hadoop_verify_logdir
   rm "${HADOOP_LOG_DIR}/$$" >/dev/null 2>&1
 }
 
-function hadoop_status_daemon()
+## @description  Determine the status of the daemon referenced
+## @description  by `pidfile`
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        pidfile
+## @return       (mostly) LSB 4.1.0 compatible status
+function hadoop_status_daemon
 {
   #
   # LSB 4.1.0 compatible status command (1)
@@ -919,6 +1129,14 @@ function hadoop_status_daemon()
   return 3
 }
 
+## @description  Execute the Java `class`, passing along any `options`.
+## @description  Additionally, set the Java property -Dproc_`command`.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        command
+## @param        class
+## @param        [options]
 function hadoop_java_exec
 {
   # run a java command.  this is used for
@@ -936,6 +1154,14 @@ function hadoop_java_exec
   exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
 }
 
+## @description  Start a non-privileged daemon in the foreground.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        command
+## @param        class
+## @param        pidfile
+## @param        [options]
 function hadoop_start_daemon
 {
   # this is our non-privileged daemon starter
@@ -961,10 +1187,17 @@ function hadoop_start_daemon
   exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
 }
 
+## @description  Start a non-privileged daemon in the background.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        command
+## @param        class
+## @param        pidfile
+## @param        outfile
+## @param        [options]
 function hadoop_start_daemon_wrapper
 {
-  # this is our non-privileged daemon start
-  # that fires up a daemon in the *background*
   local daemonname=$1
   local class=$2
   local pidfile=$3
@@ -1019,6 +1252,17 @@ function hadoop_start_daemon_wrapper
   return 0
 }
 
+## @description  Start a privileged daemon in the foreground.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        command
+## @param        class
+## @param        daemonpidfile
+## @param        daemonoutfile
+## @param        daemonerrfile
+## @param        wrapperpidfile
+## @param        [options]
 function hadoop_start_secure_daemon
 {
   # this is used to launch a secure daemon in the *foreground*
@@ -1075,6 +1319,18 @@ function hadoop_start_secure_daemon
     "${class}" "$@"
 }
 
+## @description  Start a privileged daemon in the background.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        command
+## @param        class
+## @param        daemonpidfile
+## @param        daemonoutfile
+## @param        wrapperpidfile
+## @param        wrapperoutfile
+## @param        daemonerrfile
+## @param        [options]
 function hadoop_start_secure_daemon_wrapper
 {
   # this wraps hadoop_start_secure_daemon to take care
@@ -1155,6 +1411,13 @@ function hadoop_start_secure_daemon_wrapper
   return 0
 }
 
+## @description  Stop the non-privileged `command` daemon that is
+## @description  running at `pidfile`.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        command
+## @param        pidfile
 function hadoop_stop_daemon
 {
   local cmd=$1
@@ -1180,6 +1443,15 @@ function hadoop_stop_daemon
   fi
 }
 
+## @description  Stop the privileged `command` daemon that is
+## @description  running at `daemonpidfile` and launched with
+## @description  the wrapper at `wrapperpidfile`.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        command
+## @param        daemonpidfile
+## @param        wrapperpidfile
 function hadoop_stop_secure_daemon
 {
   local command=$1
@@ -1194,6 +1466,16 @@ function hadoop_stop_secure_daemon
   return ${ret}
 }
 
+## @description  Manage a non-privileged daemon.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        [start|stop|status|default]
+## @param        command
+## @param        class
+## @param        daemonpidfile
+## @param        daemonoutfile
+## @param        [options]
 function hadoop_daemon_handler
 {
   local daemonmode=$1
@@ -1238,6 +1520,19 @@ function hadoop_daemon_handler
   esac
 }
 
+## @description  Manage a privileged daemon.
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        [start|stop|status|default]
+## @param        command
+## @param        class
+## @param        daemonpidfile
+## @param        daemonoutfile
+## @param        wrapperpidfile
+## @param        wrapperoutfile
+## @param        wrappererrfile
+## @param        [options]
 function hadoop_secure_daemon_handler
 {
   local daemonmode=$1
@@ -1290,6 +1585,13 @@ function hadoop_secure_daemon_handler
   esac
 }
 
+## @description  Verify that ${USER} is allowed to execute the
+## @description  given subcommand.
+## @audience     public
+## @stability    stable
+## @replaceable  yes
+## @param        subcommand
+## @return       will exit on failure conditions
 function hadoop_verify_user
 {
   local command=$1
@@ -1303,6 +1605,13 @@ function hadoop_verify_user
   fi
 }
 
+## @description  Perform the 'hadoop classpath', etc. subcommand with the given
+## @description  parameters
+## @audience     private
+## @stability    evolving
+## @replaceable  yes
+## @param        [parameters]
+## @return       will print & exit with no params
 function hadoop_do_classpath_subcommand
 {
   if [[ "$#" -gt 1 ]]; then

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43227bc9/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md b/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md
new file mode 100644
index 0000000..006fca5
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/UnixShellGuide.md
@@ -0,0 +1,98 @@
+<!---
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+# Unix Shell Guide
+
+Much of Hadoop's functionality is controlled via [the shell](CommandsManual.html).  There are several ways to modify the default behavior of how these commands execute.
+
+## Important End-User Environment Variables
+
+Hadoop has many environment variables that control various aspects of the software.  (See `hadoop-env.sh` and related files.)  Some of these environment variables are dedicated to helping end users manage their runtime.
+
+### `HADOOP_CLIENT_OPTS`
+
+This environment variable is used for almost all end-user operations.  It can be used to set any Java options as well as any Hadoop options via a system property definition. For example:
+
+```bash
+HADOOP_CLIENT_OPTS="-Xmx1g -Dhadoop.socks.server=localhost:4000" hadoop fs -ls /tmp
+```
+
+will increase the client JVM's maximum heap size and send this command via a SOCKS proxy server.
+
+### `HADOOP_USER_CLASSPATH`
+
+The Hadoop scripts can inject more content into the classpath of the running command by setting this environment variable.  It should be a colon-delimited list of directories, files, or wildcard locations.
+
+```bash
+HADOOP_USER_CLASSPATH=${HOME}/lib/myjars/*.jar hadoop classpath
+```
+
+A user can provide a hint about where these paths should go via the `HADOOP_USER_CLASSPATH_FIRST` variable.  Setting it to any value tells the system to try to push these paths near the front of the classpath.
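+
+As a sketch (the jar path here is hypothetical), the two variables can be combined to push a locally patched jar ahead of the bundled classes for a single invocation:
+
+```bash
+# Prefer a locally patched jar over the bundled classes for this command only.
+HADOOP_USER_CLASSPATH=${HOME}/patched/my-patched-lib.jar \
+HADOOP_USER_CLASSPATH_FIRST=yes \
+hadoop classpath
+```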
+
+### Auto-setting of Variables
+
+If a user has a common set of settings, they can be put into the `${HOME}/.hadooprc` file.  This file is always read to initialize and override any variables that the user may want to customize.  It uses bash syntax, similar to the `.bashrc` file.
+
+For example:
+
+```bash
+#
+# my custom Hadoop settings!
+#
+
+HADOOP_USER_CLASSPATH=${HOME}/hadoopjars/*
+HADOOP_USER_CLASSPATH_FIRST=yes
+HADOOP_CLIENT_OPTS="-Xmx1g"
+```
+
+The `.hadooprc` file can also be used to extend functionality and teach Hadoop new tricks.  For example, to have hadoop commands use the configuration for the server referenced in the environment variable `${HADOOP_SERVER}`, the following in the `.hadooprc` will do just that:
+
+```bash
+
+if [[ -n ${HADOOP_SERVER} ]]; then
+  HADOOP_CONF_DIR=/etc/hadoop.${HADOOP_SERVER}
+fi
+```
+
+## Administrator Environment
+
+There are many environment variables that impact how the system operates.  By far, the most important are the series of `_OPTS` variables that control how daemons work.  These variables should contain all of the relevant settings for those daemons.
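+
+As a sketch (the flag values are illustrative and `HADOOP_NAMENODE_OPTS` is assumed to be the stock per-daemon hook in `hadoop-env.sh`), an administrator might size the NameNode heap and capture GC logs like this:
+
+```bash
+# In hadoop-env.sh: JVM settings that apply only to the NameNode daemon.
+export HADOOP_NAMENODE_OPTS="-Xms4g -Xmx4g -Xloggc:${HADOOP_LOG_DIR}/namenode-gc.log ${HADOOP_NAMENODE_OPTS}"
+```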
+
+More detailed information is contained in `hadoop-env.sh` and the other env.sh files.
+
+Advanced administrators may wish to supplement the existing scripts or apply platform-specific fixes to them.  On some systems, this means copying the errant script or creating a custom build with these changes.  Hadoop provides the capability to do function overrides so that the existing code base may be changed in place without all of that work.  Replacing functions is covered later under the Shell API documentation.
+
+## Developer and Advanced Administrator Environment
+
+### Shell Profiles
+
+Apache Hadoop allows for third parties to easily add new features through a variety of pluggable interfaces.  This includes a shell code subsystem that makes it easy to inject the necessary content into the base installation.
+
+Core to this functionality is the concept of a shell profile.  Shell profiles are shell snippets that can do things such as add jars to the classpath, configure Java system properties and more.
+
+Shell profiles may be installed in either `${HADOOP_CONF_DIR}/shellprofile.d` or `${HADOOP_PREFIX}/libexec/shellprofile.d`.  Shell profiles in the `libexec` directory are part of the base installation and cannot be overridden by the user.  Shell profiles in the configuration directory may be ignored if the end user changes the configuration directory at runtime.
+
+An example of a shell profile is in the libexec directory.
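+
+As a minimal sketch (the profile name and jar location are hypothetical, and the registration pattern is assumed to follow the bundled example), a profile that adds extra jars to every command's classpath might look like:
+
+```bash
+# ${HADOOP_CONF_DIR}/shellprofile.d/myjars.sh
+# Register this profile so its hooks run during shell initialization.
+hadoop_add_profile myjars
+
+# Classpath hook: called while the final classpath is being assembled.
+function _myjars_hadoop_classpath
+{
+  hadoop_add_classpath "${HOME}/myjars/*"
+}
+```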
+
+## Shell API
+
+Hadoop's shell code has a [function library](./UnixShellAPI.html) that is open for administrators and developers to use to assist in their configuration and advanced feature management.  These APIs follow the standard [Hadoop Interface Classification](./InterfaceClassification.html), with one addition: Replaceable.
+
+The shell code allows core functions to be overridden.  However, not all functions can be or are safe to be replaced.  A function that is not safe to replace is marked with the attribute Replaceable: No; a function that is safe to replace is marked Replaceable: Yes.
+
+To replace a function, create a file called `hadoop-user-functions.sh` in the `${HADOOP_CONF_DIR}` directory.  Simply define the replacement function in this file and the system will pick it up automatically.  There may be as many replacement functions as needed in this file.  Examples of function replacement are in the `hadoop-user-functions.sh.examples` file.
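+
+As a sketch (the `hdfsadmin` account and the policy itself are hypothetical, not the stock behavior), a replacement for the Replaceable: Yes function `hadoop_verify_user` could look like this:
+
+```bash
+# ${HADOOP_CONF_DIR}/hadoop-user-functions.sh
+# Site-specific replacement: only the hdfsadmin account may run subcommands.
+function hadoop_verify_user
+{
+  local command=$1
+
+  if [[ "${USER}" != "hdfsadmin" ]]; then
+    hadoop_error "ERROR: ${USER} is not permitted to run ${command}"
+    exit 1
+  fi
+}
+```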
+
+
+Functions that are marked Public and Stable are safe to use in shell profiles as-is.  Other functions may change in a minor release.
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43227bc9/hadoop-project/src/site/site.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index a90c41d..2e098ef 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -68,6 +68,7 @@
       <item name="HTTP Authentication" href="hadoop-project-dist/hadoop-common/HttpAuthentication.html"/>
       <item name="Hadoop KMS" href="hadoop-kms/index.html"/>
       <item name="Tracing" href="hadoop-project-dist/hadoop-common/Tracing.html"/>
+      <item name="Unix Shell Guide" href="hadoop-project-dist/hadoop-common/UnixShellGuide.html"/>
     </menu>
     
     <menu name="HDFS" inherit="top">
@@ -159,7 +160,8 @@
 
     <menu name="Reference" inherit="top">
       <item name="Release Notes" href="hadoop-project-dist/hadoop-common/releasenotes.html"/>
-      <item name="API docs" href="api/index.html"/>
+      <item name="Java API docs" href="api/index.html"/>
+      <item name="Unix Shell API" href="hadoop-project-dist/hadoop-common/UnixShellAPI.html"/>
       <item name="Common CHANGES.txt" href="hadoop-project-dist/hadoop-common/CHANGES.txt"/>
       <item name="HDFS CHANGES.txt" href="hadoop-project-dist/hadoop-hdfs/CHANGES.txt"/>
       <item name="MapReduce CHANGES.txt" href="hadoop-project-dist/hadoop-mapreduce/CHANGES.txt"/>


[47/50] [abbrv] hadoop git commit: HDFS-7410. Support CreateFlags with append() to support hsync() for appending streams (Vinayakumar B via Colin P. McCabe)

Posted by zj...@apache.org.
HDFS-7410. Support CreateFlags with append() to support hsync() for appending streams (Vinayakumar B via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b151c566
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b151c566
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b151c566

Branch: refs/heads/YARN-2928
Commit: b151c5660d48ffad1d9491af35c7c6d76bef4bd1
Parents: 555cd96
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Thu Mar 26 13:21:09 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:49 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java   |  8 ++++----
 .../org/apache/hadoop/hdfs/DFSOutputStream.java  | 13 ++++++++-----
 .../hadoop/hdfs/server/datanode/TestHSync.java   | 19 ++++++++++++++++++-
 4 files changed, 33 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b151c566/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e16348a..dff8bd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -806,6 +806,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7976. Update NFS user guide for mount option "sync" to minimize or
     avoid reordered writes. (brandonli)
 
+    HDFS-7410. Support CreateFlags with append() to support hsync() for
+    appending streams (Vinayakumar B via Colin P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b151c566/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 5d67eed..29bb604 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1827,10 +1827,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     try {
       LastBlockWithStatus blkWithStatus = namenode.append(src, clientName,
           new EnumSetWritable<>(flag, CreateFlag.class));
-      return DFSOutputStream.newStreamForAppend(this, src,
-          flag.contains(CreateFlag.NEW_BLOCK),
-          buffersize, progress, blkWithStatus.getLastBlock(),
-          blkWithStatus.getFileStatus(), dfsClientConf.createChecksum(), favoredNodes);
+      return DFSOutputStream.newStreamForAppend(this, src, flag, buffersize,
+          progress, blkWithStatus.getLastBlock(),
+          blkWithStatus.getFileStatus(), dfsClientConf.createChecksum(),
+          favoredNodes);
     } catch(RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
                                      FileNotFoundException.class,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b151c566/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index ee3e6f6..933d8e6 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -278,11 +278,14 @@ public class DFSOutputStream extends FSOutputSummer
   }
 
   /** Construct a new output stream for append. */
-  private DFSOutputStream(DFSClient dfsClient, String src, boolean toNewBlock,
-      Progressable progress, LocatedBlock lastBlock, HdfsFileStatus stat,
-      DataChecksum checksum) throws IOException {
+  private DFSOutputStream(DFSClient dfsClient, String src,
+      EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
+      HdfsFileStatus stat, DataChecksum checksum) throws IOException {
     this(dfsClient, src, progress, stat, checksum);
     initialFileSize = stat.getLen(); // length of file when opened
+    this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);
+
+    boolean toNewBlock = flags.contains(CreateFlag.NEW_BLOCK);
 
     this.fileEncryptionInfo = stat.getFileEncryptionInfo();
 
@@ -338,13 +341,13 @@ public class DFSOutputStream extends FSOutputSummer
   }
 
   static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
-      boolean toNewBlock, int bufferSize, Progressable progress,
+      EnumSet<CreateFlag> flags, int bufferSize, Progressable progress,
       LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum,
       String[] favoredNodes) throws IOException {
     TraceScope scope =
         dfsClient.getPathTraceScope("newStreamForAppend", src);
     try {
-      final DFSOutputStream out = new DFSOutputStream(dfsClient, src, toNewBlock,
+      final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
           progress, lastBlock, stat, checksum);
       if (favoredNodes != null && favoredNodes.length != 0) {
         out.streamer.setFavoredNodes(favoredNodes);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b151c566/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java
index b293075..10f371b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestHSync.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 
+import java.io.IOException;
 import java.util.EnumSet;
 import java.util.Random;
 
@@ -30,6 +31,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.AppendTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.io.RandomDatum;
@@ -51,15 +53,30 @@ public class TestHSync {
   /** Test basic hsync cases */
   @Test
   public void testHSync() throws Exception {
+    testHSyncOperation(false);
+  }
+
+  @Test
+  public void testHSyncWithAppend() throws Exception {
+    testHSyncOperation(true);
+  }
+
+  private void testHSyncOperation(boolean testWithAppend) throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
-    final FileSystem fs = cluster.getFileSystem();
+    final DistributedFileSystem fs = cluster.getFileSystem();
 
     final Path p = new Path("/testHSync/foo");
     final int len = 1 << 16;
     FSDataOutputStream out = fs.create(p, FsPermission.getDefault(),
         EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK),
         4096, (short) 1, len, null);
+    if (testWithAppend) {
+      // re-open the file with append call
+      out.close();
+      out = fs.append(p, EnumSet.of(CreateFlag.APPEND, CreateFlag.SYNC_BLOCK),
+          4096, null);
+    }
     out.hflush();
     // hflush does not sync
     checkSyncMetric(cluster, 0);


[21/50] [abbrv] hadoop git commit: HADOOP-11609. Correct credential commands info in CommandsManual.html#credential. Contributed by Varun Saxena.

Posted by zj...@apache.org.
HADOOP-11609. Correct credential commands info in CommandsManual.html#credential. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8507bec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8507bec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8507bec

Branch: refs/heads/YARN-2928
Commit: a8507bece7350d678e2a5fdca77d2a419905d511
Parents: 37ba77f
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Tue Mar 24 20:57:39 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:45 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                  | 3 +++
 .../java/org/apache/hadoop/security/alias/CredentialShell.java   | 2 +-
 .../hadoop-common/src/site/markdown/CommandsManual.md            | 4 ++--
 3 files changed, 6 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8507bec/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 430015d..4f0cf97 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1136,6 +1136,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11729. Fix link to cgroups doc in site.xml. (Masatake Iwasaki via
     ozawa)
 
+    HADOOP-11609. Correct credential commands info in
+    CommandsManual.html#credential. (Varun Saxena via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8507bec/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
index e8a721f..265ed16 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
@@ -81,7 +81,7 @@ public class CredentialShell extends Configured implements Tool {
    * <pre>
    * % hadoop credential create alias [-provider providerPath]
    * % hadoop credential list [-provider providerPath]
-   * % hadoop credential delete alias [-provider providerPath] [-i]
+   * % hadoop credential delete alias [-provider providerPath] [-f]
    * </pre>
    * @param args
    * @return 0 if the argument(s) were recognized, 1 otherwise

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8507bec/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
index 33986ae..207160e 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
@@ -128,8 +128,8 @@ Usage: `hadoop credential <subcommand> [options]`
 
 | COMMAND\_OPTION | Description |
 |:---- |:---- |
-| create *alias* [-v *value*][-provider *provider-path*] | Prompts the user for a credential to be stored as the given alias when a value is not provided via `-v`. The *hadoop.security.credential.provider.path* within the core-site.xml file will be used unless a `-provider` is indicated. |
-| delete *alias* [-i][-provider *provider-path*] | Deletes the credential with the provided alias and optionally warns the user when `--interactive` is used. The *hadoop.security.credential.provider.path* within the core-site.xml file will be used unless a `-provider` is indicated. |
+| create *alias* [-provider *provider-path*] | Prompts the user for a credential to be stored as the given alias. The *hadoop.security.credential.provider.path* within the core-site.xml file will be used unless a `-provider` is indicated. |
+| delete *alias* [-provider *provider-path*] [-f] | Deletes the credential with the provided alias. The *hadoop.security.credential.provider.path* within the core-site.xml file will be used unless a `-provider` is indicated. The command asks for confirmation unless `-f` is specified. |
 | list [-provider *provider-path*] | Lists all of the credential aliases The *hadoop.security.credential.provider.path* within the core-site.xml file will be used unless a `-provider` is indicated. |
 
 Command to manage credentials, passwords and secrets within credential providers.


[35/50] [abbrv] hadoop git commit: YARN-2213. Change proxy-user cookie log in AmIpFilter to DEBUG. Contributed by Varun Saxena

Posted by zj...@apache.org.
YARN-2213. Change proxy-user cookie log in AmIpFilter to DEBUG.
Contributed by Varun Saxena


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28b129a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28b129a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28b129a5

Branch: refs/heads/YARN-2928
Commit: 28b129a5450613328ee4fe85a5ae371297edf7c6
Parents: b3946d7
Author: Xuan <xg...@apache.org>
Authored: Wed Mar 25 04:49:43 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:47 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                | 3 +++
 .../hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java       | 6 ++++--
 2 files changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28b129a5/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e8c36a4..d84948d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -887,6 +887,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3393. Getting application(s) goes wrong when app finishes before
     starting the attempt. (Zhijie Shen via xgong)
 
+    YARN-2213. Change proxy-user cookie log in AmIpFilter to DEBUG.
+    (Varun Saxena via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28b129a5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
index f1a8be6..e7617f0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
@@ -152,8 +152,10 @@ public class AmIpFilter implements Filter {
       }
     }
     if (user == null) {
-      LOG.warn("Could not find " + WebAppProxyServlet.PROXY_USER_COOKIE_NAME
-               + " cookie, so user will not be set");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Could not find " + WebAppProxyServlet.PROXY_USER_COOKIE_NAME
+                 + " cookie, so user will not be set");
+      }
       chain.doFilter(req, resp);
     } else {
       final AmIpPrincipal principal = new AmIpPrincipal(user);


[24/50] [abbrv] hadoop git commit: HDFS-7977. NFS couldn't take percentile intervals. Contributed by Brandon Li

Posted by zj...@apache.org.
HDFS-7977. NFS couldn't take percentile intervals. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41c4dab7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41c4dab7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41c4dab7

Branch: refs/heads/YARN-2928
Commit: 41c4dab794b1da8c233cf0c90d959a13de48de13
Parents: bb39451
Author: Brandon Li <br...@apache.org>
Authored: Tue Mar 24 10:49:16 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:46 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java |  1 -
 .../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java   | 13 ++++++-------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt            |  2 ++
 .../hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md    | 12 ++++++++++++
 4 files changed, 20 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41c4dab7/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
index 09ee579..05cc0b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/conf/NfsConfigKeys.java
@@ -72,7 +72,6 @@ public class NfsConfigKeys {
   public static final String NFS_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + NFS_HTTPS_PORT_DEFAULT;
   
   public static final String  NFS_METRICS_PERCENTILES_INTERVALS_KEY = "nfs.metrics.percentiles.intervals";
-  public static final String  NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT = "";
   
   /*
    * HDFS super-user is the user with the same identity as NameNode process

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41c4dab7/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
index d36ea73..880a8a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Metrics.java
@@ -90,9 +90,9 @@ public class Nfs3Metrics {
       readNanosQuantiles[i] = registry.newQuantiles("readProcessNanos"
           + interval + "s", "Read process in ns", "ops", "latency", interval);
       writeNanosQuantiles[i] = registry.newQuantiles("writeProcessNanos"
-          + interval + "s", " process in ns", "ops", "latency", interval);
+          + interval + "s", "Write process in ns", "ops", "latency", interval);
       commitNanosQuantiles[i] = registry.newQuantiles("commitProcessNanos"
-          + interval + "s", "Read process in ns", "ops", "latency", interval);
+          + interval + "s", "Commit process in ns", "ops", "latency", interval);
     }
   }
 
@@ -101,10 +101,9 @@ public class Nfs3Metrics {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     JvmMetrics jm = JvmMetrics.create(gatewayName, sessionId, ms);
 
-    // Percentile measurement is [,,,] by default 
-    int[] intervals = conf.getInts(conf.get(
-        NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY,
-        NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_DEFAULT));
+    // Percentile measurement is [50th,75th,90th,95th,99th] currently 
+    int[] intervals = conf
+        .getInts(NfsConfigKeys.NFS_METRICS_PERCENTILES_INTERVALS_KEY);
     return ms.register(new Nfs3Metrics(gatewayName, sessionId, intervals, jm));
   }
   
@@ -217,4 +216,4 @@ public class Nfs3Metrics {
     }
   }
 
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41c4dab7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3725a03..5dae029 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1260,6 +1260,8 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via wang)
 
+    HDFS-7977. NFS couldn't take percentile intervals (brandonli)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41c4dab7/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
index 9c95287..a49d168 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
@@ -170,6 +170,18 @@ It's strongly recommended for the users to update a few configuration properties
           <value>the_name_of_hdfs_superuser</value>
         </property>
 
+*   Metrics. Like other HDFS daemons, the gateway exposes runtime metrics. They are available at `http://gateway-ip:50079/jmx` as a JSON document.
+    The NFS handler related metrics are exposed under the name "Nfs3Metrics". The latency histograms can be enabled by adding the following
+    property to the hdfs-site.xml file.
+
+        <property>
+          <name>nfs.metrics.percentiles.intervals</name>
+          <value>100</value>
+          <description>Enable the latency histograms for read, write and
+             commit requests. The interval is 100 seconds in this example.
+          </description>
+        </property>
+
 *   JVM and log settings. You can export JVM settings (e.g., heap size and GC log) in
     HADOOP\_NFS3\_OPTS. More NFS related settings can be found in hadoop-env.sh.
     To get NFS debug trace, you can edit the log4j.property file


[15/50] [abbrv] hadoop git commit: HDFS-7976. Update NFS user guide for mount option 'sync' to minimize or avoid reordered writes. Contributed by Brandon Li

Posted by zj...@apache.org.
HDFS-7976. Update NFS user guide for mount option 'sync' to minimize or avoid reordered writes. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1141b15b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1141b15b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1141b15b

Branch: refs/heads/YARN-2928
Commit: 1141b15baf0e97724abd37127f9c41540c1716ab
Parents: 7eaa30c
Author: Brandon Li <br...@apache.org>
Authored: Tue Mar 24 10:28:38 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:45 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt               |  3 +++
 .../hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md       | 10 +++++++---
 2 files changed, 10 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1141b15b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 70be18a..4f3937a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -784,6 +784,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7956. Improve logging for DatanodeRegistration.
     (Plamen Jeliazkov via shv)
 
+    HDFS-7976. Update NFS user guide for mount option "sync" to minimize or
+    avoid reordered writes. (brandonli)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1141b15b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
index b7e1733..9c95287 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
@@ -126,7 +126,8 @@ It's strongly recommended for the users to update a few configuration properties
         </property>
 
 *   Users are expected to update the file dump directory. NFS client often
-    reorders writes. Sequential writes can arrive at the NFS gateway at random
+    reorders writes, especially when the export is not mounted with "sync" option.
+    Sequential writes can arrive at the NFS gateway at random
     order. This directory is used to temporarily save out-of-order writes
     before writing to HDFS. For each file, the out-of-order writes are dumped after
     they are accumulated to exceed certain threshold (e.g., 1MB) in memory.
@@ -254,13 +255,16 @@ Verify validity of NFS related services
 Mount the export "/"
 --------------------
 
-Currently NFS v3 only uses TCP as the transportation protocol. NLM is not supported so mount option "nolock" is needed. It's recommended to use hard mount. This is because, even after the client sends all data to NFS gateway, it may take NFS gateway some extra time to transfer data to HDFS when writes were reorderd by NFS client Kernel.
+Currently NFS v3 only uses TCP as the transport protocol. NLM is not supported, so mount option "nolock" is needed.
+Mount option "sync" is strongly recommended since it can minimize or avoid reordered writes, which results in more predictable throughput.
+ Not specifying the sync option may cause unreliable behavior when uploading large files.
+ It's recommended to use hard mount. This is because, even after the client sends all data to the NFS gateway, it may take the NFS gateway some extra time to transfer data to HDFS when writes were reordered by the NFS client kernel.
 
 If soft mount has to be used, the user should give it a relatively long timeout (at least no less than the default timeout on the host) .
 
 The users can mount the HDFS namespace as shown below:
 
-     [root]>mount -t nfs -o vers=3,proto=tcp,nolock,noacl $server:/  $mount_point
+     [root]>mount -t nfs -o vers=3,proto=tcp,nolock,noacl,sync $server:/  $mount_point
 
 Then the users can access HDFS as part of the local file system except that, hard link and random write are not supported yet. To optimize the performance of large file I/O, one can increase the NFS transfer size(rsize and wsize) during mount. By default, NFS gateway supports 1MB as the maximum transfer size. For larger data transfer size, one needs to update "nfs.rtmax" and "nfs.rtmax" in hdfs-site.xml.
 


[23/50] [abbrv] hadoop git commit: HDFS-7985. WebHDFS should be always enabled. Contributed by Li Lu.

Posted by zj...@apache.org.
HDFS-7985. WebHDFS should be always enabled. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6bcb7ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6bcb7ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6bcb7ea

Branch: refs/heads/YARN-2928
Commit: e6bcb7eafd53882c6e4053aa49e8ba8460464e19
Parents: 6ec1a4a
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Mar 24 21:55:56 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:46 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 -
 .../server/namenode/NameNodeHttpServer.java     | 46 ++++++++++----------
 .../hadoop/hdfs/web/WebHdfsFileSystem.java      |  7 ---
 .../src/main/native/libhdfs/native_mini_dfs.c   | 16 -------
 .../org/apache/hadoop/fs/TestSymlinkHdfs.java   |  1 -
 .../hadoop/hdfs/TestDistributedFileSystem.java  |  1 -
 .../java/org/apache/hadoop/hdfs/TestQuota.java  |  4 +-
 .../hdfs/security/TestDelegationToken.java      |  1 -
 .../TestDelegationTokenForProxyUser.java        |  1 -
 .../hdfs/server/namenode/TestAuditLogs.java     |  1 -
 .../TestNameNodeRespectsBindHostKeys.java       |  1 -
 .../hdfs/web/TestFSMainOperationsWebHdfs.java   |  1 -
 .../hadoop/hdfs/web/TestHttpsFileSystem.java    |  1 -
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 12 -----
 .../hdfs/web/TestWebHdfsFileSystemContract.java |  1 -
 .../hadoop/hdfs/web/TestWebHdfsTokens.java      |  1 -
 .../web/TestWebHdfsWithMultipleNameNodes.java   |  2 -
 .../apache/hadoop/hdfs/web/WebHdfsTestUtil.java |  1 -
 19 files changed, 25 insertions(+), 77 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4bed2ab..8d7a4e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -18,6 +18,8 @@ Trunk (Unreleased)
     option since it may incorrectly finalize an ongoing rolling upgrade.
     (Kai Sasaki via szetszwo)
 
+    HDFS-7985. WebHDFS should be always enabled. (Li Lu via wheat9)
+
   NEW FEATURES
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b5bbe5f..d714276 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -227,8 +227,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT = 4;
   public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY = "dfs.web.authentication.filter";
   public static final String  DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT = AuthFilter.class.getName();
-  public static final String  DFS_WEBHDFS_ENABLED_KEY = "dfs.webhdfs.enabled";
-  public static final boolean DFS_WEBHDFS_ENABLED_DEFAULT = true;
   public static final String  DFS_WEBHDFS_USER_PATTERN_KEY = "dfs.webhdfs.user.provider.user.pattern";
   public static final String  DFS_WEBHDFS_USER_PATTERN_DEFAULT = "^[A-Za-z_][A-Za-z0-9._-]*[$]?$";
   public static final String  DFS_PERMISSIONS_ENABLED_KEY = "dfs.permissions.enabled";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 662c0e9..a671d21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -67,30 +67,28 @@ public class NameNodeHttpServer {
   }
 
   private void initWebHdfs(Configuration conf) throws IOException {
-    if (WebHdfsFileSystem.isEnabled(conf, HttpServer2.LOG)) {
-      // set user pattern based on configuration file
-      UserParam.setUserPattern(conf.get(
-          DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
-          DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
-
-      // add authentication filter for webhdfs
-      final String className = conf.get(
-          DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
-          DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
-      final String name = className;
-
-      final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
-      Map<String, String> params = getAuthFilterParams(conf);
-      HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
-          params, new String[] { pathSpec });
-      HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
-          + ")");
-
-      // add webhdfs packages
-      httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
-          .getPackage().getName() + ";" + Param.class.getPackage().getName(),
-          pathSpec);
-    }
+    // set user pattern based on configuration file
+    UserParam.setUserPattern(conf.get(
+        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
+        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
+
+    // add authentication filter for webhdfs
+    final String className = conf.get(
+        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_KEY,
+        DFSConfigKeys.DFS_WEBHDFS_AUTHENTICATION_FILTER_DEFAULT);
+    final String name = className;
+
+    final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
+    Map<String, String> params = getAuthFilterParams(conf);
+    HttpServer2.defineFilter(httpServer.getWebAppContext(), name, className,
+        params, new String[] { pathSpec });
+    HttpServer2.LOG.info("Added filter '" + name + "' (class=" + className
+        + ")");
+
+    // add webhdfs packages
+    httpServer.addJerseyResourcePackage(NamenodeWebHdfsMethods.class
+        .getPackage().getName() + ";" + Param.class.getPackage().getName(),
+        pathSpec);
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 739e701..12adb05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -211,13 +211,6 @@ public class WebHdfsFileSystem extends FileSystem
     return super.getCanonicalUri();
   }
 
-  /** Is WebHDFS enabled in conf? */
-  public static boolean isEnabled(final Configuration conf, final Log log) {
-    final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
-        DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
-    return b;
-  }
-
   TokenSelector<DelegationTokenIdentifier> tokenSelector =
       new AbstractDelegationTokenSelector<DelegationTokenIdentifier>(getTokenKind()){};
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
index b37ebcc..ab6abda 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/native_mini_dfs.c
@@ -128,22 +128,6 @@ struct NativeMiniDfsCluster* nmdCreate(struct NativeMiniDfsConf *conf)
             "nmdCreate: new Configuration");
         goto error;
     }
-    if (conf->webhdfsEnabled) {
-        jthr = newJavaStr(env, DFS_WEBHDFS_ENABLED_KEY, &jconfStr);
-        if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                  "nmdCreate: new String");
-            goto error;
-        }
-        jthr = invokeMethod(env, NULL, INSTANCE, cobj, HADOOP_CONF,
-                            "setBoolean", "(Ljava/lang/String;Z)V",
-                            jconfStr, conf->webhdfsEnabled);
-        if (jthr) {
-            printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
-                                  "nmdCreate: Configuration::setBoolean");
-            goto error;
-        }
-    }
     if (jthr) {
         printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                               "nmdCreate: Configuration::setBoolean");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
index 0c3abec..2ff7050 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java
@@ -85,7 +85,6 @@ abstract public class TestSymlinkHdfs extends SymlinkBaseTest {
   @BeforeClass
   public static void beforeClassSetup() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(FsPermission.UMASK_LABEL, "000");
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0);
     cluster = new MiniDFSCluster.Builder(conf).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index da81d2f..5be492f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -503,7 +503,6 @@ public class TestDistributedFileSystem {
     RAN.setSeed(seed);
 
     final Configuration conf = getTestConfiguration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
 
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem hdfs = cluster.getFileSystem();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index d108d59..163378c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -851,8 +851,7 @@ public class TestQuota {
     Configuration conf = new HdfsConfiguration();
     final int BLOCK_SIZE = 6 * 1024;
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
-    MiniDFSCluster cluster = 
+    MiniDFSCluster cluster =
       new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
@@ -913,7 +912,6 @@ public class TestQuota {
     Configuration conf = new HdfsConfiguration();
     final int BLOCK_SIZE = 6 * 1024;
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     // Make it relinquish locks. When run serially, the result should
     // be identical.
     conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 2);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
index 79b02e1..dff8fa4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
@@ -67,7 +67,6 @@ public class TestDelegationToken {
   @Before
   public void setUp() throws Exception {
     config = new HdfsConfiguration();
-    config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
     config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
index e6493a2..3bbd6f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
@@ -97,7 +97,6 @@ public class TestDelegationTokenForProxyUser {
   @BeforeClass
   public static void setUp() throws Exception {
     config = new HdfsConfiguration();
-    config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     config.setLong(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
     config.setLong(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
index 98297ca..7d06241 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
@@ -114,7 +114,6 @@ public class TestAuditLogs {
     final long precision = 1L;
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
     util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
         setNumFiles(20).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
index 571d719..55926cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java
@@ -193,7 +193,6 @@ public class TestNameNodeRespectsBindHostKeys {
 
   private static void setupSsl() throws Exception {
     Configuration conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
     conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
index 80369fd..20b25f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java
@@ -71,7 +71,6 @@ public class TestFSMainOperationsWebHdfs extends FSMainOperationsBaseTest {
   @BeforeClass
   public static void setupCluster() {
     final Configuration conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
index 7612de3..3405c68 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
@@ -52,7 +52,6 @@ public class TestHttpsFileSystem {
   @BeforeClass
   public static void setUp() throws Exception {
     conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
     conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index b308607..2d8892c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -332,18 +332,6 @@ public class TestWebHDFS {
       }
     }
   }
-  
-  /**
-   * WebHdfs should be enabled by default after HDFS-5532
-   * 
-   * @throws Exception
-   */
-  @Test
-  public void testWebHdfsEnabledByDefault() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    Assert.assertTrue(conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
-        false));
-  }
 
   /**
    * Test snapshot creation through WebHdfs

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
index 027fda0..b2250fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
@@ -60,7 +60,6 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
   private UserGroupInformation ugi;
 
   static {
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       cluster.waitActive();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
index d55f2b1..db08325 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTokens.java
@@ -205,7 +205,6 @@ public class TestWebHdfsTokens {
       String keystoresDir;
       String sslConfDir;
 	    
-      clusterConf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
       clusterConf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
       clusterConf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
       clusterConf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
index 11abd2c..aeda32c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java
@@ -71,8 +71,6 @@ public class TestWebHdfsWithMultipleNameNodes {
       throws Exception {
     LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);
 
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
-
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
         .numDataNodes(nDataNodes)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6bcb7ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
index 369285d..70f9735 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
@@ -42,7 +42,6 @@ public class WebHdfsTestUtil {
 
   public static Configuration createConf() {
     final Configuration conf = new Configuration();
-    conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     return conf;
   }
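
The hunks above all drop the same line from test setup: with WebHDFS treated as always available, forcing dfs.webhdfs.enabled no longer buys anything, and the testWebHdfsEnabledByDefault test that asserted the old default goes away with it. A minimal sketch, not part of the patch, of what a WebHDFS test setup looks like after this cleanup; the WebHdfsTestUtil helper is the one these tests already use, though its exact signature may vary by branch:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.HdfsConfiguration;
  import org.apache.hadoop.hdfs.MiniDFSCluster;
  import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
  import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;

  public class WebHdfsDefaultSketch {
    public static void main(String[] args) throws Exception {
      // No dfs.webhdfs.enabled override anywhere: the cluster comes up with
      // WebHDFS reachable out of the box.
      Configuration conf = new HdfsConfiguration();
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      try {
        cluster.waitActive();
        FileSystem webhdfs =
            WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME);
        // A trivial round trip over the REST endpoint proves the default.
        System.out.println(webhdfs.mkdirs(new Path("/webhdfs-smoke")));
      } finally {
        cluster.shutdown();
      }
    }
  }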
 


[20/50] [abbrv] hadoop git commit: Fix CHANGES.txt for HADOOP-11602.

Posted by zj...@apache.org.
Fix CHANGES.txt for HADOOP-11602.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f070575a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f070575a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f070575a

Branch: refs/heads/YARN-2928
Commit: f070575a0f83e50c78bf85b692c718c9f1f170ea
Parents: a8507be
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Tue Mar 24 21:06:26 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:45 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f070575a/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 4f0cf97..cdb88d2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -415,8 +415,6 @@ Trunk (Unreleased)
     HADOOP-10774. Update KerberosTestUtils for hadoop-auth tests when using
     IBM Java (sangamesh via aw)
 
-    HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
-
     HADOOP-11653. shellprofiles should require .sh extension
     (Brahma Reddy Battula via aw)
 
@@ -1105,6 +1103,8 @@ Release 2.7.0 - UNRELEASED
 
     HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)
 
+    HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
+
     HADOOP-11686. MiniKDC cannot change ORG_NAME or ORG_DOMAIN.
     (Duo Zhang via wheat9)
 


[03/50] [abbrv] hadoop git commit: YARN-3384. TestLogAggregationService.verifyContainerLogs fails after YARN-2777. Contributed by Naganarasimha G R.

Posted by zj...@apache.org.
YARN-3384. TestLogAggregationService.verifyContainerLogs fails after YARN-2777. Contributed by Naganarasimha G R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ecc7dca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ecc7dca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ecc7dca

Branch: refs/heads/YARN-2928
Commit: 3ecc7dcac7c39c3ca2dd105171bfe50a6f1e30eb
Parents: c10da18
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Tue Mar 24 00:25:30 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:43 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                  | 3 +++
 .../logaggregation/TestLogAggregationService.java                | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ecc7dca/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6e21e34..acdc09c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -866,6 +866,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3369. Missing NullPointer check in AppSchedulingInfo causes RM to die.
     (Brahma Reddy Battula via wangda)
 
+    YARN-3384. TestLogAggregationService.verifyContainerLogs fails after
+    YARN-2777. (Naganarasimha G R via ozawa)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ecc7dca/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
index 9cbf153..b1de9cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
@@ -804,7 +804,9 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
         Map<String, String> thisContainerMap = logMap.remove(containerStr);
         Assert.assertEquals(numOfContainerLogs, thisContainerMap.size());
         for (String fileType : logFiles) {
-          String expectedValue = containerStr + " Hello " + fileType + "!";
+          String expectedValue =
+              containerStr + " Hello " + fileType + "!End of LogType:"
+                  + fileType;
           LOG.info("Expected log-content : " + new String(expectedValue));
           String foundValue = thisContainerMap.remove(fileType);
           Assert.assertNotNull(cId + " " + fileType


[22/50] [abbrv] hadoop git commit: HADOOP-11741. Add LOG.isDebugEnabled() guard for some LOG.debug(). Contributed by Walter Su.

Posted by zj...@apache.org.
HADOOP-11741. Add LOG.isDebugEnabled() guard for some LOG.debug(). Contributed by Walter Su.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86682fb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86682fb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86682fb8

Branch: refs/heads/YARN-2928
Commit: 86682fb89f15ed7006aca9585589fbf266a3e399
Parents: e6bcb7e
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Mar 25 16:36:10 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:46 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +
 .../org/apache/hadoop/conf/Configuration.java   |  4 +-
 .../hadoop/crypto/key/JavaKeyStoreProvider.java | 14 +++--
 .../apache/hadoop/ha/ActiveStandbyElector.java  | 65 +++++++++++++-------
 .../main/java/org/apache/hadoop/ipc/RPC.java    |  9 ++-
 .../main/java/org/apache/hadoop/ipc/Server.java |  8 ++-
 .../metrics/ganglia/GangliaContext31.java       |  6 +-
 .../security/ssl/FileBasedKeyStoresFactory.java | 22 +++++--
 8 files changed, 90 insertions(+), 41 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86682fb8/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index cdb88d2..e3cadf5 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -459,6 +459,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-11737. mockito's version in hadoop-nfs’ pom.xml shouldn't be
     specified. (Kengo Seki via ozawa)
 
+    HADOOP-11741. Add LOG.isDebugEnabled() guard for some LOG.debug().
+    (Walter Su via ozawa)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86682fb8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 753f515..8a312ff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -2479,7 +2479,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   private Document parse(DocumentBuilder builder, URL url)
       throws IOException, SAXException {
     if (!quietmode) {
-      LOG.debug("parsing URL " + url);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("parsing URL " + url);
+      }
     }
     if (url == null) {
       return null;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86682fb8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
index c0d510d..091cab5 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
@@ -214,9 +214,11 @@ public class JavaKeyStoreProvider extends KeyProvider {
         renameOrFail(path, new Path(path.toString() + "_CORRUPTED_"
             + System.currentTimeMillis()));
         renameOrFail(backupPath, path);
-        LOG.debug(String.format(
-            "KeyStore loaded successfully from '%s' since '%s'"
-                + "was corrupted !!", backupPath, path));
+        if (LOG.isDebugEnabled()) {
+          LOG.debug(String.format(
+              "KeyStore loaded successfully from '%s' since '%s'"
+                  + "was corrupted !!", backupPath, path));
+        }
       } else {
         throw ioe;
       }
@@ -265,8 +267,10 @@ public class JavaKeyStoreProvider extends KeyProvider {
     try {
       perm = loadFromPath(pathToLoad, password);
       renameOrFail(pathToLoad, path);
-      LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
-          pathToLoad));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(String.format("KeyStore loaded successfully from '%s'!!",
+            pathToLoad));
+      }
       if (fs.exists(pathToDelete)) {
         fs.delete(pathToDelete, true);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86682fb8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
index 947baa9..e520a16 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
@@ -256,7 +256,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     appData = new byte[data.length];
     System.arraycopy(data, 0, appData, 0, data.length);
 
-    LOG.debug("Attempting active election for " + this);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Attempting active election for " + this);
+    }
     joinElectionInternal();
   }
   
@@ -406,9 +408,11 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   public synchronized void processResult(int rc, String path, Object ctx,
       String name) {
     if (isStaleClient(ctx)) return;
-    LOG.debug("CreateNode result: " + rc + " for path: " + path
-        + " connectionState: " + zkConnectionState +
-        "  for " + this);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("CreateNode result: " + rc + " for path: " + path
+          + " connectionState: " + zkConnectionState +
+          "  for " + this);
+    }
 
     Code code = Code.get(rc);
     if (isSuccess(code)) {
@@ -467,10 +471,11 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     
     assert wantToBeInElection :
         "Got a StatNode result after quitting election";
-    
-    LOG.debug("StatNode result: " + rc + " for path: " + path
-        + " connectionState: " + zkConnectionState + " for " + this);
-        
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("StatNode result: " + rc + " for path: " + path
+          + " connectionState: " + zkConnectionState + " for " + this);
+    }
 
     Code code = Code.get(rc);
     if (isSuccess(code)) {
@@ -535,10 +540,12 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   synchronized void processWatchEvent(ZooKeeper zk, WatchedEvent event) {
     Event.EventType eventType = event.getType();
     if (isStaleClient(zk)) return;
-    LOG.debug("Watcher event type: " + eventType + " with state:"
-        + event.getState() + " for path:" + event.getPath()
-        + " connectionState: " + zkConnectionState
-        + " for " + this);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Watcher event type: " + eventType + " with state:"
+          + event.getState() + " for path:" + event.getPath()
+          + " connectionState: " + zkConnectionState
+          + " for " + this);
+    }
 
     if (eventType == Event.EventType.None) {
       // the connection state has changed
@@ -597,7 +604,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         monitorActiveStatus();
         break;
       default:
-        LOG.debug("Unexpected node event: " + eventType + " for path: " + path);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Unexpected node event: " + eventType + " for path: " + path);
+        }
         monitorActiveStatus();
       }
 
@@ -646,7 +655,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   private void monitorActiveStatus() {
     assert wantToBeInElection;
-    LOG.debug("Monitoring active leader for " + this);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Monitoring active leader for " + this);
+    }
     statRetryCount = 0;
     monitorLockNodeAsync();
   }
@@ -737,7 +748,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     int connectionRetryCount = 0;
     boolean success = false;
     while(!success && connectionRetryCount < maxRetryNum) {
-      LOG.debug("Establishing zookeeper connection for " + this);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Establishing zookeeper connection for " + this);
+      }
       try {
         createConnection();
         success = true;
@@ -765,7 +778,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       watcher = null;
     }
     zkClient = getNewZooKeeper();
-    LOG.debug("Created new connection for " + this);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Created new connection for " + this);
+    }
   }
 
   @InterfaceAudience.Private
@@ -773,7 +788,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     if (zkClient == null) {
       return;
     }
-    LOG.debug("Terminating ZK connection for " + this);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Terminating ZK connection for " + this);
+    }
     ZooKeeper tempZk = zkClient;
     zkClient = null;
     watcher = null;
@@ -800,8 +817,10 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
     try {
       Stat oldBreadcrumbStat = fenceOldActive();
       writeBreadCrumbNode(oldBreadcrumbStat);
-      
-      LOG.debug("Becoming active for " + this);
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Becoming active for " + this);
+      }
       appClient.becomeActive();
       state = State.ACTIVE;
       return true;
@@ -906,7 +925,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   private void becomeStandby() {
     if (state != State.STANDBY) {
-      LOG.debug("Becoming standby for " + this);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Becoming standby for " + this);
+      }
       state = State.STANDBY;
       appClient.becomeStandby();
     }
@@ -914,7 +935,9 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
   private void enterNeutralMode() {
     if (state != State.NEUTRAL) {
-      LOG.debug("Entering neutral mode for " + this);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Entering neutral mode for " + this);
+      }
       state = State.NEUTRAL;
       appClient.enterNeutralMode();
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86682fb8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 8ada0ff..4766175 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -876,9 +876,12 @@ public class RPC {
 
      getProtocolImplMap(rpcKind).put(new ProtoNameVer(protocolName, version),
          new ProtoClassProtoImpl(protocolClass, protocolImpl)); 
-     LOG.debug("RpcKind = " + rpcKind + " Protocol Name = " + protocolName +  " version=" + version +
-         " ProtocolImpl=" + protocolImpl.getClass().getName() + 
-         " protocolClass=" + protocolClass.getName());
+     if (LOG.isDebugEnabled()) {
+       LOG.debug("RpcKind = " + rpcKind + " Protocol Name = " + protocolName +
+           " version=" + version +
+           " ProtocolImpl=" + protocolImpl.getClass().getName() +
+           " protocolClass=" + protocolClass.getName());
+     }
    }
    
    static class VerProtocolImpl {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86682fb8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index d2d61b3..9aa362e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -234,9 +234,11 @@ public abstract class Server {
       throw new IllegalArgumentException("ReRegistration of rpcKind: " +
           rpcKind);      
     }
-    LOG.debug("rpcKind=" + rpcKind + 
-        ", rpcRequestWrapperClass=" + rpcRequestWrapperClass + 
-        ", rpcInvoker=" + rpcInvoker);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("rpcKind=" + rpcKind +
+          ", rpcRequestWrapperClass=" + rpcRequestWrapperClass +
+          ", rpcInvoker=" + rpcInvoker);
+    }
   }
   
   public Class<? extends Writable> getRpcRequestWrapper(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86682fb8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java
index f35ad18..9ad2d39 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/ganglia/GangliaContext31.java
@@ -82,8 +82,10 @@ public class GangliaContext31 extends GangliaContext {
       return;
     }
 
-    LOG.debug("Emitting metric " + name + ", type " + type + ", value " + 
-      value + " from hostname" + hostName);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Emitting metric " + name + ", type " + type + ", value " +
+          value + " from hostname" + hostName);
+    }
 
     String units = getUnits(name);
     int slope = getSlope(name);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/86682fb8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
index 609c71f..41634a8 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileBasedKeyStoresFactory.java
@@ -164,7 +164,9 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
       // configuration property for key password.
       keystoreKeyPassword = getPassword(
           conf, keyPasswordProperty, keystorePassword);
-      LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(mode.toString() + " KeyStore: " + keystoreLocation);
+      }
 
       InputStream is = new FileInputStream(keystoreLocation);
       try {
@@ -172,7 +174,9 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
       } finally {
         is.close();
       }
-      LOG.debug(mode.toString() + " Loaded KeyStore: " + keystoreLocation);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(mode.toString() + " Loaded KeyStore: " + keystoreLocation);
+      }
     } else {
       keystore.load(null, null);
     }
@@ -204,18 +208,24 @@ public class FileBasedKeyStoresFactory implements KeyStoresFactory {
               resolvePropertyName(mode, SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY),
               DEFAULT_SSL_TRUSTSTORE_RELOAD_INTERVAL);
 
-      LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(mode.toString() + " TrustStore: " + truststoreLocation);
+      }
 
       trustManager = new ReloadingX509TrustManager(truststoreType,
           truststoreLocation,
           truststorePassword,
           truststoreReloadInterval);
       trustManager.init();
-      LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(mode.toString() + " Loaded TrustStore: " + truststoreLocation);
+      }
       trustManagers = new TrustManager[]{trustManager};
     } else {
-      LOG.debug("The property '" + locationProperty + "' has not been set, " +
-          "no TrustStore will be loaded");
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("The property '" + locationProperty + "' has not been set, " +
+            "no TrustStore will be loaded");
+      }
       trustManagers = null;
     }
   }
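
Every hunk in this patch applies the same mechanical transformation: a LOG.debug() whose message is assembled by string concatenation gets wrapped in an isDebugEnabled() check, so the concatenation cost is only paid when debug logging is actually on. A minimal standalone sketch of the pattern (the class and method names here are illustrative, not from the patch):

  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;

  public class DebugGuardSketch {
    private static final Log LOG = LogFactory.getLog(DebugGuardSketch.class);

    void report(String path, int state) {
      // Unguarded form: the message string is built on every call,
      // even when debug logging is disabled.
      //   LOG.debug("state=" + state + " for path: " + path);

      // Guarded form used throughout this patch: build the message only
      // if it will actually be emitted.
      if (LOG.isDebugEnabled()) {
        LOG.debug("state=" + state + " for path: " + path);
      }
    }
  }

The guard matters most on hot paths such as the RPC registration and ActiveStandbyElector callbacks touched above, where the debug message would otherwise be concatenated on every invocation.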


[49/50] [abbrv] hadoop git commit: HADOOP-11691. X86 build of libwinutils is broken. Contributed by Kiran Kumar M R.

Posted by zj...@apache.org.
HADOOP-11691. X86 build of libwinutils is broken. Contributed by Kiran Kumar M R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee352658
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee352658
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee352658

Branch: refs/heads/YARN-2928
Commit: ee3526587af879e68cebbd284e3b1740df10a81c
Parents: 526c90e
Author: cnauroth <cn...@apache.org>
Authored: Thu Mar 26 21:56:31 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:50 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../src/main/winutils/win8sdk.props             | 29 ++++++++++++++++----
 2 files changed, 27 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee352658/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index e739a8f..a7d4adc 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1166,6 +1166,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11748. The secrets of auth cookies should not be specified in
     configuration in clear text. (Li Lu and Haohui Mai via wheat9)
 
+    HADOOP-11691. X86 build of libwinutils is broken.
+    (Kiran Kumar M R via cnauroth)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee352658/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props b/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
index 503b37a..29ae2b7 100644
--- a/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
+++ b/hadoop-common-project/hadoop-common/src/main/winutils/win8sdk.props
@@ -19,10 +19,29 @@
  <ImportGroup Label="PropertySheets" />
  <PropertyGroup Label="UserMacros" />
  <PropertyGroup>
-   <ExecutablePath>$(VCInstallDir)bin\x86_amd64;$(VCInstallDir)bin;$(WindowsSdkDir)bin\NETFX 4.0 Tools;$(MSBuildProgramFiles32)\Windows Kits\8.1\bin\x86;$(VSInstallDir)Common7\Tools\bin;$(VSInstallDir)Common7\tools;$(VSInstallDir)Common7\ide;$(MSBuildProgramFiles32)\HTML Help Workshop;$(FrameworkSDKDir)\bin;$(MSBuildToolsPath32);$(VSInstallDir);$(SystemRoot)\SysWow64;$(FxCopDir);$(PATH)</ExecutablePath>
-   <IncludePath>$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\um;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\shared;$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(FrameworkSDKDir)\include;</IncludePath>
-   <LibraryPath>$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(MSBuildProgramFiles32)\Windows Kits\8.1\lib\win8\um\x64;$(MSBuildProgramFiles32)\Windows Kits\8.1\Lib\winv6.3\um\x64;$(FrameworkSDKDir)\lib\x64</LibraryPath>
-   <ExcludePath>$(VCInstallDir)include;$(VCInstallDir)atlmfc\include;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\um;$(MSBuildProgramFiles32)\Windows Kits\8.1\Include\shared;$(FrameworkSDKDir)\include;$(MSBuildToolsPath32);$(VCInstallDir)atlmfc\lib;$(VCInstallDir)lib;</ExcludePath>
- </PropertyGroup>
+     <Windows8SdkDir>$(MSBuildProgramFiles32)\Windows Kits\8.1\</Windows8SdkDir>
+  </PropertyGroup>
+
+  <PropertyGroup>
+    <Windows8SDK_IncludePath>$(Windows8SdkDir)Include\um;$(Windows8SdkDir)Include\shared;</Windows8SDK_IncludePath>
+    <Windows8SDK_ExecutablePath_x86>$(Windows8SdkDir)bin\x86;</Windows8SDK_ExecutablePath_x86>
+    <Windows8SDK_ExecutablePath_x64>$(Windows8SdkDir)bin\x64;</Windows8SDK_ExecutablePath_x64>
+    <Windows8SDK_LibraryPath_x86>$(Windows8SdkDir)lib\winv6.3\um\x86;</Windows8SDK_LibraryPath_x86>
+    <Windows8SDK_LibraryPath_x64>$(Windows8SdkDir)lib\winv6.3\um\x64;</Windows8SDK_LibraryPath_x64>
+  </PropertyGroup>
+
+ <PropertyGroup Condition="'$(Platform)' == 'Win32'">
+   <ExecutablePath>$(ExecutablePath);$(Windows8SDK_ExecutablePath_x86);</ExecutablePath>
+   <IncludePath>$(Windows8SDK_IncludePath);$(IncludePath);</IncludePath>
+   <LibraryPath>$(LibraryPath);$(Windows8SDK_LibraryPath_x86);</LibraryPath>
+   <ExcludePath>$(ExcludePath);$(Windows8SDK_IncludePath);$(Windows8SDK_LibraryPath_x86);</ExcludePath>
+  </PropertyGroup>
+
+  <PropertyGroup Condition="'$(Platform)' == 'x64'">
+   <ExecutablePath>$(ExecutablePath);$(Windows8SDK_ExecutablePath_x64);</ExecutablePath>
+   <IncludePath>$(Windows8SDK_IncludePath);$(IncludePath);</IncludePath>
+   <LibraryPath>$(LibraryPath);$(Windows8SDK_LibraryPath_x64);</LibraryPath>
+   <ExcludePath>$(ExcludePath);$(Windows8SDK_IncludePath);$(Windows8SDK_LibraryPath_x64);</ExcludePath>
+  </PropertyGroup>
 <ItemDefinitionGroup />
 </Project>


[06/50] [abbrv] hadoop git commit: YARN-3336. FileSystem memory leak in DelegationTokenRenewer.

Posted by zj...@apache.org.
YARN-3336. FileSystem memory leak in DelegationTokenRenewer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a879a90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a879a90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a879a90

Branch: refs/heads/YARN-2928
Commit: 9a879a90e605a941985c108a0e25537a50f28f85
Parents: 77e82eb
Author: cnauroth <cn...@apache.org>
Authored: Mon Mar 23 10:45:50 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:43 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 .../security/DelegationTokenRenewer.java        | 13 +++++++--
 .../security/TestDelegationTokenRenewer.java    | 30 ++++++++++++++++++--
 3 files changed, 41 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a879a90/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index acdc09c..90d906b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -869,6 +869,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3384. TestLogAggregationService.verifyContainerLogs fails after
     YARN-2777. (Naganarasimha G R via ozawa)
 
+    YARN-3336. FileSystem memory leak in DelegationTokenRenewer.
+    (Zhihai Xu via cnauroth)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a879a90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
index cb456d8..2619971 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java
@@ -605,6 +605,7 @@ public class DelegationTokenRenewer extends AbstractService {
     rmContext.getSystemCredentialsForApps().put(applicationId, byteBuffer);
   }
 
+  @VisibleForTesting
   protected Token<?>[] obtainSystemTokensForUser(String user,
       final Credentials credentials) throws IOException, InterruptedException {
     // Get new hdfs tokens on behalf of this user
@@ -615,8 +616,16 @@ public class DelegationTokenRenewer extends AbstractService {
         proxyUser.doAs(new PrivilegedExceptionAction<Token<?>[]>() {
           @Override
           public Token<?>[] run() throws Exception {
-            return FileSystem.get(getConfig()).addDelegationTokens(
-              UserGroupInformation.getLoginUser().getUserName(), credentials);
+            FileSystem fs = FileSystem.get(getConfig());
+            try {
+              return fs.addDelegationTokens(
+                  UserGroupInformation.getLoginUser().getUserName(),
+                  credentials);
+            } finally {
+              // Close the FileSystem created by the new proxy user,
+              // So that we don't leave an entry in the FileSystem cache
+              fs.close();
+            }
           }
         });
     return newTokens;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9a879a90/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
index 5d31404..99a506a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java
@@ -287,9 +287,16 @@ public class TestDelegationTokenRenewer {
    * exception
    */
   static class MyFS extends DistributedFileSystem {
-    
-    public MyFS() {}
-    public void close() {}
+    private static AtomicInteger instanceCounter = new AtomicInteger();
+    public MyFS() {
+      instanceCounter.incrementAndGet();
+    }
+    public void close() {
+      instanceCounter.decrementAndGet();
+    }
+    public static int getInstanceCounter() {
+      return instanceCounter.get();
+    }
     @Override
     public void initialize(URI uri, Configuration conf) throws IOException {}
     
@@ -299,6 +306,11 @@ public class TestDelegationTokenRenewer {
       LOG.info("Called MYDFS.getdelegationtoken " + result);
       return result;
     }
+
+    public Token<?>[] addDelegationTokens(
+        final String renewer, Credentials credentials) throws IOException {
+      return new Token<?>[0];
+    }
   }
   
   /**
@@ -1022,4 +1034,16 @@ public class TestDelegationTokenRenewer {
     // app2 completes, app1 is still running, check the token is not cancelled
     Assert.assertFalse(Renewer.cancelled);
   }
+
+  // Test FileSystem memory leak in obtainSystemTokensForUser.
+  @Test
+  public void testFSLeakInObtainSystemTokensForUser() throws Exception{
+    Credentials credentials = new Credentials();
+    String user = "test";
+    int oldCounter = MyFS.getInstanceCounter();
+    delegationTokenRenewer.obtainSystemTokensForUser(user, credentials);
+    delegationTokenRenewer.obtainSystemTokensForUser(user, credentials);
+    delegationTokenRenewer.obtainSystemTokensForUser(user, credentials);
+    Assert.assertEquals(oldCounter, MyFS.getInstanceCounter());
+  }
 }
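
The leak being fixed comes from the FileSystem cache: FileSystem.get() keys cached instances by scheme, authority and UGI, so every call made under a freshly created proxy user adds a cache entry that nothing ever releases. The patch closes that instance in a finally block once the delegation tokens have been fetched. A minimal sketch of the same discipline outside the RM code (class and method names here are illustrative):

  import java.security.PrivilegedExceptionAction;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.security.Credentials;
  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.security.token.Token;

  public class ProxyUserTokenSketch {
    static Token<?>[] fetchTokens(final Configuration conf, String user,
        final Credentials creds) throws Exception {
      UserGroupInformation proxy = UserGroupInformation.createProxyUser(
          user, UserGroupInformation.getLoginUser());
      return proxy.doAs(new PrivilegedExceptionAction<Token<?>[]>() {
        @Override
        public Token<?>[] run() throws Exception {
          // New cache entry, keyed by the proxy UGI.
          FileSystem fs = FileSystem.get(conf);
          try {
            return fs.addDelegationTokens(
                UserGroupInformation.getLoginUser().getUserName(), creds);
          } finally {
            // Release the entry; otherwise it lives until process exit.
            fs.close();
          }
        }
      });
    }
  }

The accompanying test change counts MyFS instances across repeated calls and asserts the count returns to its starting value, which is exactly what the close() in the finally block guarantees.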


[10/50] [abbrv] hadoop git commit: YARN-1880. Cleanup TestApplicationClientProtocolOnHA. Contributed by ozawa.

Posted by zj...@apache.org.
YARN-1880. Cleanup TestApplicationClientProtocolOnHA. Contributed by ozawa.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c5a7b67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c5a7b67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c5a7b67

Branch: refs/heads/YARN-2928
Commit: 2c5a7b67e339ecc1f1d7d786a2002c27788877ee
Parents: ca6d002
Author: Harsh J <ha...@cloudera.com>
Authored: Tue Mar 24 11:57:28 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:44 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                     |  3 +++
 .../client/TestApplicationClientProtocolOnHA.java   | 16 ++++++++++------
 2 files changed, 13 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c5a7b67/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 70b81d4..0605477 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -108,6 +108,9 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    YARN-1880. Cleanup TestApplicationClientProtocolOnHA
+    (ozawa via harsh)
+
     YARN-3243. CapacityScheduler should pass headroom from parent to children
     to make sure ParentQueue obey its capacity limits. (Wangda Tan via jianhe)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c5a7b67/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
index bfc6656..8e00554 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/TestApplicationClientProtocolOnHA.java
@@ -93,7 +93,8 @@ public class TestApplicationClientProtocolOnHA extends ProtocolHATestBase {
   public void testGetApplicationsOnHA() throws Exception {
     List<ApplicationReport> reports =
         client.getApplications();
-    Assert.assertTrue(reports != null && !reports.isEmpty());
+    Assert.assertTrue(reports != null);
+    Assert.assertFalse(reports.isEmpty());
     Assert.assertEquals(cluster.createFakeAppReports(),
         reports);
   }
@@ -101,7 +102,8 @@ public class TestApplicationClientProtocolOnHA extends ProtocolHATestBase {
   @Test(timeout = 15000)
   public void testGetClusterNodesOnHA() throws Exception {
     List<NodeReport> reports = client.getNodeReports(NodeState.RUNNING);
-    Assert.assertTrue(reports != null && !reports.isEmpty());
+    Assert.assertTrue(reports != null);
+    Assert.assertFalse(reports.isEmpty());
     Assert.assertEquals(cluster.createFakeNodeReports(),
         reports);
   }
@@ -117,8 +119,8 @@ public class TestApplicationClientProtocolOnHA extends ProtocolHATestBase {
   @Test(timeout = 15000)
   public void testGetQueueUserAclsOnHA() throws Exception {
     List<QueueUserACLInfo> queueUserAclsList = client.getQueueAclsInfo();
-    Assert.assertTrue(queueUserAclsList != null
-        && !queueUserAclsList.isEmpty());
+    Assert.assertTrue(queueUserAclsList != null);
+    Assert.assertFalse(queueUserAclsList.isEmpty());
     Assert.assertEquals(cluster.createFakeQueueUserACLInfoList(),
         queueUserAclsList);
   }
@@ -136,7 +138,8 @@ public class TestApplicationClientProtocolOnHA extends ProtocolHATestBase {
   public void testGetApplicationAttemptsOnHA() throws Exception {
     List<ApplicationAttemptReport> reports =
         client.getApplicationAttempts(cluster.createFakeAppId());
-    Assert.assertTrue(reports != null && !reports.isEmpty());
+    Assert.assertTrue(reports != null);
+    Assert.assertFalse(reports.isEmpty());
     Assert.assertEquals(cluster.createFakeApplicationAttemptReports(),
         reports);
   }
@@ -153,7 +156,8 @@ public class TestApplicationClientProtocolOnHA extends ProtocolHATestBase {
   public void testGetContainersOnHA() throws Exception {
     List<ContainerReport> reports =
         client.getContainers(cluster.createFakeApplicationAttemptId());
-    Assert.assertTrue(reports != null && !reports.isEmpty());
+    Assert.assertTrue(reports != null);
+    Assert.assertFalse(reports.isEmpty());
     Assert.assertEquals(cluster.createFakeContainerReports(),
         reports);
   }
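
The cleanup splits each compound assertTrue(x != null && !x.isEmpty()) into two assertions, so a failing run reports whether the list came back null or merely empty instead of a bare AssertionError. A minimal sketch of the before and after shapes (Assert.assertNotNull would be an equally idiomatic first check; the patch keeps assertTrue for minimal churn):

  import java.util.List;
  import org.junit.Assert;

  public class AssertionSplitSketch {
    static void checkReports(List<?> reports) {
      // Before: a single compound check with an uninformative failure.
      //   Assert.assertTrue(reports != null && !reports.isEmpty());

      // After: each condition fails on its own line with its own message.
      Assert.assertTrue(reports != null);
      Assert.assertFalse(reports.isEmpty());
    }
  }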


[29/50] [abbrv] hadoop git commit: HDFS-6353. Check and make checkpoint before stopping the NameNode. Contributed by Jing Zhao.

Posted by zj...@apache.org.
HDFS-6353. Check and make checkpoint before stopping the NameNode. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8cdede2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8cdede2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8cdede2

Branch: refs/heads/YARN-2928
Commit: e8cdede27c4c228a742ef5e46fa663c47a1d3bf0
Parents: 1350262
Author: Jing Zhao <ji...@apache.org>
Authored: Wed Mar 25 10:38:00 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:47 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../bkjournal/TestBootstrapStandbyWithBKJM.java |  2 +-
 .../hadoop-hdfs/src/main/bin/hdfs               |  8 +++
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  6 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +
 .../hadoop/hdfs/DistributedFileSystem.java      | 22 ++++--
 .../hadoop/hdfs/protocol/ClientProtocol.java    | 11 ++-
 ...tNamenodeProtocolServerSideTranslatorPB.java | 14 ++--
 .../ClientNamenodeProtocolTranslatorPB.java     |  6 +-
 .../hadoop/hdfs/server/namenode/FSImage.java    | 30 +++++++-
 .../hdfs/server/namenode/FSNamesystem.java      | 14 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  7 +-
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 76 +++++++++++++-------
 .../src/main/proto/ClientNamenodeProtocol.proto |  5 +-
 .../org/apache/hadoop/hdfs/TestFetchImage.java  |  2 +-
 .../apache/hadoop/hdfs/UpgradeUtilities.java    |  2 +-
 .../hdfs/server/namenode/FSAclBaseTest.java     |  2 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |  2 +-
 .../hdfs/server/namenode/TestCheckpoint.java    |  8 +--
 .../hdfs/server/namenode/TestEditLogRace.java   |  6 +-
 .../hdfs/server/namenode/TestINodeFile.java     |  2 +-
 .../TestNNStorageRetentionFunctional.java       |  2 +-
 .../server/namenode/TestParallelImageWrite.java |  2 +-
 .../hdfs/server/namenode/TestSaveNamespace.java | 67 ++++++++++++++---
 .../hdfs/server/namenode/TestStartup.java       |  4 +-
 .../namenode/metrics/TestNameNodeMetrics.java   |  2 +-
 .../snapshot/TestSnapshotBlocksMap.java         |  2 +-
 27 files changed, 221 insertions(+), 87 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8d7a4e1..cd2ca4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -339,6 +339,8 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-7713. Implement mkdirs in the HDFS Web UI. (Ravi Prakash via wheat9)
 
+    HDFS-6353. Check and make checkpoint before stopping the NameNode. (jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java
index ded9e0e..18dedc8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBootstrapStandbyWithBKJM.java
@@ -111,7 +111,7 @@ public class TestBootstrapStandbyWithBKJM {
     cluster.shutdownNameNode(1);
     deleteEditLogIfExists(confNN1);
     cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_ENTER, true);
-    cluster.getNameNodeRpc(0).saveNamespace();
+    cluster.getNameNodeRpc(0).saveNamespace(0, 0);
     cluster.getNameNodeRpc(0).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, true);
 
     // check without -skipSharedEditsCheck, Bootstrap should fail for BKJM

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 830ca36..ececbb4 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -281,6 +281,14 @@ fi
 hadoop_finalize
 
 if [[ -n "${supportdaemonization}" ]]; then
+  if [[ "${COMMAND}" == "namenode" ]] &&
+     [[ "${HADOOP_DAEMON_MODE}" == "stop" ]]; then
+    hadoop_debug "Do checkpoint if necessary before stopping NameNode"
+    export CLASSPATH
+    "${JAVA}" "-Dproc_dfsadmin" ${HADOOP_OPTS} "org.apache.hadoop.hdfs.tools.DFSAdmin" "-safemode" "enter"
+    "${JAVA}" "-Dproc_dfsadmin" ${HADOOP_OPTS} "org.apache.hadoop.hdfs.tools.DFSAdmin" "-saveNamespace" "-beforeShutdown"
+    "${JAVA}" "-Dproc_dfsadmin" ${HADOOP_OPTS} "org.apache.hadoop.hdfs.tools.DFSAdmin" "-safemode" "leave"
+  fi
   if [[ -n "${secure_service}" ]]; then
     hadoop_secure_daemon_handler \
     "${HADOOP_DAEMON_MODE}" "${COMMAND}" "${CLASS}"\

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 70f66bd..5d67eed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2840,12 +2840,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   /**
    * Save namespace image.
    * 
-   * @see ClientProtocol#saveNamespace()
+   * @see ClientProtocol#saveNamespace(long, long)
    */
-  void saveNamespace() throws AccessControlException, IOException {
+  boolean saveNamespace(long timeWindow, long txGap) throws IOException {
     TraceScope scope = Trace.startSpan("saveNamespace", traceSampler);
     try {
-      namenode.saveNamespace();
+      return namenode.saveNamespace(timeWindow, txGap);
     } catch(RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class);
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d714276..610932a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -201,6 +201,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long    DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT = 1000000;
   public static final String  DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY = "dfs.namenode.checkpoint.max-retries";
   public static final int     DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_DEFAULT = 3;
+  public static final String  DFS_NAMENODE_MISSING_CHECKPOINT_PERIODS_BEFORE_SHUTDOWN_KEY = "dfs.namenode.missing.checkpoint.periods.before.shutdown";
+  public static final int     DFS_NAMENODE_MISSING_CHECKPOINT_PERIODS_BEFORE_SHUTDONW_DEFAULT = 3;
   public static final String  DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval";
   public static final int     DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT = 5*60*1000;
   public static final String  DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_KEY = "dfs.namenode.tolerate.heartbeat.multiplier";
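
The two keys above control how stale the last checkpoint may be before the pre-shutdown save is forced: a NameNode being stopped tolerates up to N missed checkpoint periods and N times the checkpoint transaction count. For reference, a minimal sketch of tuning that tolerance programmatically; the key string and its default of 3 come from this diff, while the value 5 and the wrapper class are illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class ToleranceExample {
      public static void main(String[] args) {
        // Key string matches the new DFS_NAMENODE_MISSING_CHECKPOINT_PERIODS_BEFORE_SHUTDOWN_KEY;
        // 5 is an arbitrary example value (the default added in this patch is 3).
        Configuration conf = new HdfsConfiguration();
        conf.setInt("dfs.namenode.missing.checkpoint.periods.before.shutdown", 5);
        System.out.println(conf.getInt(
            "dfs.namenode.missing.checkpoint.periods.before.shutdown", 3));
      }
    }

The same value can equally be set in hdfs-site.xml instead of in code.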

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index c750e79..432e4ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -68,6 +68,7 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
@@ -1181,13 +1182,24 @@ public class DistributedFileSystem extends FileSystem {
 
   /**
    * Save namespace image.
-   * 
-   * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
+   *
+   * @param timeWindow NameNode can ignore this command if the latest
+   *                   checkpoint was done within the given time period (in
+   *                   seconds).
+   * @return true if a new checkpoint has been made
+   * @see ClientProtocol#saveNamespace(long, long)
    */
-  public void saveNamespace() throws AccessControlException, IOException {
-    dfs.saveNamespace();
+  public boolean saveNamespace(long timeWindow, long txGap) throws IOException {
+    return dfs.saveNamespace(timeWindow, txGap);
   }
-  
+
+  /**
+   * Save namespace image. NameNode always does the checkpoint.
+   */
+  public void saveNamespace() throws IOException {
+    saveNamespace(0, 0);
+  }
+
   /**
    * Rolls the edit log on the active NameNode.
    * Requires super-user privileges.
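
With the new overload above, a client can ask for a checkpoint only when the existing one is stale. A minimal usage sketch, not part of the patch: the safe-mode enter/leave pattern mirrors the tests later in this commit, and the 3600/1000000 thresholds are arbitrary example values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

    public class ConditionalSaveNamespace {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
        // saveNamespace still requires safe mode, same as the old no-arg variant.
        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
        try {
          // Checkpoint only if the last image is older than 1 hour (3600 s) or
          // more than 1,000,000 transactions behind the edit log.
          boolean saved = dfs.saveNamespace(3600, 1000000);
          System.out.println(saved ? "new fsimage written"
                                   : "recent checkpoint reused, nothing saved");
        } finally {
          dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
        }
      }
    }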

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 2b07789..bafb02b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -798,12 +798,17 @@ public interface ClientProtocol {
    * <p>
    * Saves current namespace into storage directories and reset edits log.
    * Requires superuser privilege and safe mode.
-   * 
-   * @throws AccessControlException if the superuser privilege is violated.
+   *
+   * @param timeWindow NameNode does a checkpoint if the latest checkpoint was
+   *                   done beyond the given time period (in seconds).
+   * @param txGap NameNode does a checkpoint if the gap between the latest
+   *              checkpoint and the latest transaction id is greater than this gap.
+   * @return whether an extra checkpoint has been done
+   *
    * @throws IOException if image creation failed.
    */
   @AtMostOnce
-  public void saveNamespace() throws AccessControlException, IOException;
+  public boolean saveNamespace(long timeWindow, long txGap) throws IOException;
 
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index ce8c392..e26158b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -277,10 +277,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   private static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE = 
   RenewLeaseResponseProto.newBuilder().build();
 
-  private static final SaveNamespaceResponseProto VOID_SAVENAMESPACE_RESPONSE = 
-  SaveNamespaceResponseProto.newBuilder().build();
-
-  private static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE = 
+  private static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE =
   RefreshNodesResponseProto.newBuilder().build();
 
   private static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE = 
@@ -748,14 +745,15 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public SaveNamespaceResponseProto saveNamespace(RpcController controller,
       SaveNamespaceRequestProto req) throws ServiceException {
     try {
-      server.saveNamespace();
-      return VOID_SAVENAMESPACE_RESPONSE;
+      final long timeWindow = req.hasTimeWindow() ? req.getTimeWindow() : 0;
+      final long txGap = req.hasTxGap() ? req.getTxGap() : 0;
+      boolean saved = server.saveNamespace(timeWindow, txGap);
+      return SaveNamespaceResponseProto.newBuilder().setSaved(saved).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
-
   }
-  
+
   @Override
   public RollEditsResponseProto rollEdits(RpcController controller,
       RollEditsRequestProto request) throws ServiceException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index e970293..4ec6f9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -670,9 +670,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
 
   @Override
-  public void saveNamespace() throws AccessControlException, IOException {
+  public boolean saveNamespace(long timeWindow, long txGap) throws IOException {
     try {
-      rpcProxy.saveNamespace(null, VOID_SAVE_NAMESPACE_REQUEST);
+      SaveNamespaceRequestProto req = SaveNamespaceRequestProto.newBuilder()
+          .setTimeWindow(timeWindow).setTxGap(txGap).build();
+      return rpcProxy.saveNamespace(null, req).getSaved();
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
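
Because both proto fields are optional with a default of 0 (see the .proto change further down), the wire format stays compatible in both directions. A small illustrative sketch using only the generated builder methods that appear in this diff: a request built the old way, with neither field set, is interpreted by the new server-side translator as "always checkpoint".

    // Hypothetical compatibility check, using only generated methods visible in
    // this patch (newBuilder/hasTimeWindow/getTimeWindow/hasTxGap/getTxGap).
    SaveNamespaceRequestProto oldStyleReq = SaveNamespaceRequestProto.newBuilder().build();
    long timeWindow = oldStyleReq.hasTimeWindow() ? oldStyleReq.getTimeWindow() : 0; // -> 0
    long txGap = oldStyleReq.hasTxGap() ? oldStyleReq.getTxGap() : 0;                // -> 0
    // timeWindow == 0 && txGap == 0 reproduces the pre-HDFS-6353 behaviour of
    // unconditionally saving the namespace.

Similarly, an old server that returns a SaveNamespaceResponseProto without the new "saved" field is read by the new client as saved == true, thanks to the [default = true] declaration.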

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 7e9d244..7454850 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -793,7 +793,7 @@ public class FSImage implements Closeable {
         DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
         DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
     final long checkpointTxnCount = conf.getLong(
-        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 
+        DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
         DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
     long checkpointAge = Time.now() - imageFile.lastModified();
 
@@ -1062,11 +1062,35 @@ public class FSImage implements Closeable {
   }
 
   /**
+   * @param timeWindow a checkpoint is done if the latest checkpoint
+   *                   was done more than this number of seconds ago.
+   * @param txGap a checkpoint is done also if the gap between the latest tx id
+   *              and the latest checkpoint is greater than this number.
+   * @return true if a checkpoint has been made
    * @see #saveNamespace(FSNamesystem, NameNodeFile, Canceler)
    */
-  public synchronized void saveNamespace(FSNamesystem source)
-      throws IOException {
+  public synchronized boolean saveNamespace(long timeWindow, long txGap,
+      FSNamesystem source) throws IOException {
+    if (timeWindow > 0 || txGap > 0) {
+      final FSImageStorageInspector inspector = storage.readAndInspectDirs(
+          EnumSet.of(NameNodeFile.IMAGE, NameNodeFile.IMAGE_ROLLBACK),
+          StartupOption.REGULAR);
+      FSImageFile image = inspector.getLatestImages().get(0);
+      File imageFile = image.getFile();
+
+      final long checkpointTxId = image.getCheckpointTxId();
+      final long checkpointAge = Time.now() - imageFile.lastModified();
+      if (checkpointAge <= timeWindow * 1000 &&
+          checkpointTxId >= this.getLastAppliedOrWrittenTxId() - txGap) {
+        return false;
+      }
+    }
     saveNamespace(source, NameNodeFile.IMAGE, null);
+    return true;
+  }
+
+  public void saveNamespace(FSNamesystem source) throws IOException {
+    saveNamespace(0, 0, source);
   }
   
   /**
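
The skip decision above boils down to a single predicate over the latest on-disk image: keep the existing checkpoint only if it is both recent enough and close enough to the tip of the edit log. A standalone restatement (hypothetical helper, same logic and units as the code above: timeWindow is in seconds, the image file mtime in milliseconds):

    // Not part of the patch; mirrors the test in saveNamespace(long, long, FSNamesystem).
    static boolean canSkipCheckpoint(long timeWindowSec, long txGap,
        long imageMtimeMs, long checkpointTxId, long lastAppliedOrWrittenTxId) {
      long checkpointAgeMs = System.currentTimeMillis() - imageMtimeMs;
      return checkpointAgeMs <= timeWindowSec * 1000
          && checkpointTxId >= lastAppliedOrWrittenTxId - txGap;
    }

If either condition fails (the image is older than the window, or too many transactions behind), a full saveNamespace(source, NameNodeFile.IMAGE, null) is performed and the method returns true.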

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 9235425..1226a26 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4947,14 +4947,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * Save namespace image.
    * This will save current namespace into fsimage file and empty edits file.
    * Requires superuser privilege and safe mode.
-   * 
-   * @throws AccessControlException if superuser privilege is violated.
-   * @throws IOException if 
    */
-  void saveNamespace() throws AccessControlException, IOException {
+  boolean saveNamespace(final long timeWindow, final long txGap)
+      throws IOException {
     checkOperation(OperationCategory.UNCHECKED);
     checkSuperuserPrivilege();
 
+    boolean saved = false;
     cpLock();  // Block if a checkpointing is in progress on standby.
     readLock();
     try {
@@ -4964,12 +4963,15 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         throw new IOException("Safe mode should be turned ON "
             + "in order to create namespace image.");
       }
-      getFSImage().saveNamespace(this);
+      saved = getFSImage().saveNamespace(timeWindow, txGap, this);
     } finally {
       readUnlock();
       cpUnlock();
     }
-    LOG.info("New namespace image has been created");
+    if (saved) {
+      LOG.info("New namespace image has been created");
+    }
+    return saved;
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 1788335..7ab8b86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1070,19 +1070,20 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override // ClientProtocol
-  public void saveNamespace() throws IOException {
+  public boolean saveNamespace(long timeWindow, long txGap) throws IOException {
     checkNNStartup();
     CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
     if (cacheEntry != null && cacheEntry.isSuccess()) {
-      return; // Return previous response
+      return true; // Return previous response
     }
     boolean success = false;
     try {
-      namesystem.saveNamespace();
+      namesystem.saveNamespace(timeWindow, txGap);
       success = true;
     } finally {
       RetryCache.setState(cacheEntry, success);
     }
+    return true;
   }
   
   @Override // ClientProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index e80b4c0..b8dcbbf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.tools;
 
 import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
@@ -393,7 +392,7 @@ public class DFSAdmin extends FsShell {
   private static final String commonUsageSummary =
     "\t[-report [-live] [-dead] [-decommissioning]]\n" +
     "\t[-safemode <enter | leave | get | wait>]\n" +
-    "\t[-saveNamespace]\n" +
+    "\t[-saveNamespace [-beforeShutdown]]\n" +
     "\t[-rollEdits]\n" +
     "\t[-restoreFailedStorage true|false|check]\n" +
     "\t[-refreshNodes]\n" +
@@ -694,34 +693,57 @@ public class DFSAdmin extends FsShell {
   /**
    * Command to ask the namenode to save the namespace.
    * Usage: hdfs dfsadmin -saveNamespace
-   * @exception IOException 
-   * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
+   * @see ClientProtocol#saveNamespace(long, long)
    */
-  public int saveNamespace() throws IOException {
-    int exitCode = -1;
+  public int saveNamespace(String[] argv) throws IOException {
+    final DistributedFileSystem dfs = getDFS();
+    final Configuration dfsConf = dfs.getConf();
+
+    long timeWindow = 0;
+    long txGap = 0;
+    if (argv.length > 1 && "-beforeShutdown".equals(argv[1])) {
+      final long checkpointPeriod = dfsConf.getLong(
+          DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY,
+          DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
+      final long checkpointTxnCount = dfsConf.getLong(
+          DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
+          DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
+      final int toleratePeriodNum = dfsConf.getInt(
+          DFSConfigKeys.DFS_NAMENODE_MISSING_CHECKPOINT_PERIODS_BEFORE_SHUTDOWN_KEY,
+          DFSConfigKeys.DFS_NAMENODE_MISSING_CHECKPOINT_PERIODS_BEFORE_SHUTDONW_DEFAULT);
+      timeWindow = checkpointPeriod * toleratePeriodNum;
+      txGap = checkpointTxnCount * toleratePeriodNum;
+      System.out.println("Do checkpoint if necessary before stopping " +
+          "namenode. The time window is " + timeWindow + " seconds, and the " +
+          "transaction gap is " + txGap);
+    }
 
-    DistributedFileSystem dfs = getDFS();
-    Configuration dfsConf = dfs.getConf();
     URI dfsUri = dfs.getUri();
     boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);
-
     if (isHaEnabled) {
       String nsId = dfsUri.getHost();
       List<ProxyAndInfo<ClientProtocol>> proxies =
           HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
           nsId, ClientProtocol.class);
       for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
-        proxy.getProxy().saveNamespace();
-        System.out.println("Save namespace successful for " +
-            proxy.getAddress());
+        boolean saved = proxy.getProxy().saveNamespace(timeWindow, txGap);
+        if (saved) {
+          System.out.println("Save namespace successful for " +
+              proxy.getAddress());
+        } else {
+          System.out.println("No extra checkpoint has been made for "
+              + proxy.getAddress());
+        }
       }
     } else {
-      dfs.saveNamespace();
-      System.out.println("Save namespace successful");
+      boolean saved = dfs.saveNamespace(timeWindow, txGap);
+      if (saved) {
+        System.out.println("Save namespace successful");
+      } else {
+        System.out.println("No extra checkpoint has been made");
+      }
     }
-    exitCode = 0;
-   
-    return exitCode;
+    return 0;
   }
 
   public int rollEdits() throws IOException {
@@ -902,9 +924,14 @@ public class DFSAdmin extends FsShell {
       "\t\tcondition.  Safe mode can also be entered manually, but then\n" +
       "\t\tit can only be turned off manually as well.\n";
 
-    String saveNamespace = "-saveNamespace:\t" +
-    "Save current namespace into storage directories and reset edits log.\n" +
-    "\t\tRequires safe mode.\n";
+    String saveNamespace = "-saveNamespace [-beforeShutdown]:\t" +
+        "Save current namespace into storage directories and reset edits \n" +
+        "\t\t log. Requires safe mode.\n" +
+        "\t\tIf the \"beforeShutdown\" option is given, the NameNode does a \n" +
+        "\t\tcheckpoint if and only if there is no checkpoint done during \n" +
+        "\t\ta time window (a configurable number of checkpoint periods).\n" +
+        "\t\tThis is usually used before shutting down the NameNode to \n" +
+        "\t\tprevent potential fsimage/editlog corruption.\n";
 
     String rollEdits = "-rollEdits:\t" +
     "Rolls the edit log.\n";
@@ -1546,10 +1573,9 @@ public class DFSAdmin extends FsShell {
           + " [-disallowSnapshot <snapshotDir>]");
     } else if ("-saveNamespace".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
-                         + " [-saveNamespace]");
+          + " [-saveNamespace [-beforeShutdown]]");
     } else if ("-rollEdits".equals(cmd)) {
-      System.err.println("Usage: hdfs dfsadmin"
-                         + " [-rollEdits]");
+      System.err.println("Usage: hdfs dfsadmin [-rollEdits]");
     } else if ("-restoreFailedStorage".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
           + " [-restoreFailedStorage true|false|check ]");
@@ -1668,7 +1694,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if ("-saveNamespace".equals(cmd)) {
-      if (argv.length != 1) {
+      if (argv.length != 1 && argv.length != 2) {
         printUsage(cmd);
         return exitCode;
       }
@@ -1788,7 +1814,7 @@ public class DFSAdmin extends FsShell {
       } else if ("-disallowSnapshot".equalsIgnoreCase(cmd)) {
         disallowSnapshot(argv);
       } else if ("-saveNamespace".equals(cmd)) {
-        exitCode = saveNamespace();
+        exitCode = saveNamespace(argv);
       } else if ("-rollEdits".equals(cmd)) {
         exitCode = rollEdits();
       } else if ("-restoreFailedStorage".equals(cmd)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index 82709a6..b44c556 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -350,10 +350,13 @@ message SetSafeModeResponseProto {
   required bool result = 1;
 }
 
-message SaveNamespaceRequestProto { // no parameters
+message SaveNamespaceRequestProto {
+  optional uint64 timeWindow = 1 [default = 0];
+  optional uint64 txGap = 2 [default = 0];
 }
 
 message SaveNamespaceResponseProto { // void response
+  optional bool saved = 1 [default = true];
 }
 
 message RollEditsRequestProto { // no parameters

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
index 0d44357..6f61003 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFetchImage.java
@@ -70,7 +70,7 @@ public class TestFetchImage {
       
       cluster.getNameNodeRpc()
           .setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
-      cluster.getNameNodeRpc().saveNamespace();
+      cluster.getNameNodeRpc().saveNamespace(0, 0);
       cluster.getNameNodeRpc()
           .setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
       

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index dac26a0..2e5348e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -144,7 +144,7 @@ public class UpgradeUtilities {
       
       // save image
       namenode.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
-      namenode.saveNamespace();
+      namenode.saveNamespace(0, 0);
       namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
       
       // write more files

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
index f481bc1..002f7c0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
@@ -1560,7 +1560,7 @@ public abstract class FSAclBaseTest {
       //restart by loading fsimage
       cluster.getNameNodeRpc()
           .setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
-      cluster.getNameNodeRpc().saveNamespace();
+      cluster.getNameNodeRpc().saveNamespace(0, 0);
       cluster.getNameNodeRpc()
           .setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
       cluster.restartNameNode(true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index fa23fbf..2540834 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -83,7 +83,7 @@ public class NameNodeAdapter {
   
   public static void saveNamespace(NameNode namenode)
       throws AccessControlException, IOException {
-    namenode.getNamesystem().saveNamespace();
+    namenode.getNamesystem().saveNamespace(0, 0);
   }
   
   public static void enterSafeMode(NameNode namenode, boolean resourcesLow)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
index 95da838..5a51cb7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
@@ -1607,7 +1607,7 @@ public class TestCheckpoint {
       // Make sure the on-disk fsimage on the NN has txid > 0.
       FSNamesystem fsns = cluster.getNamesystem();
       fsns.enterSafeMode(false);
-      fsns.saveNamespace();
+      fsns.saveNamespace(0, 0);
       fsns.leaveSafeMode();
       
       secondary = startSecondaryNameNode(conf);
@@ -2239,7 +2239,7 @@ public class TestCheckpoint {
       NamenodeProtocols nn = cluster.getNameNodeRpc();
       nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
       for (int i = 0; i < 3; i++) {
-        nn.saveNamespace();
+        nn.saveNamespace(0, 0);
       }
       nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
       
@@ -2324,7 +2324,7 @@ public class TestCheckpoint {
       // therefore needs to download a new fsimage the next time it performs a
       // checkpoint.
       cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
-      cluster.getNameNodeRpc().saveNamespace();
+      cluster.getNameNodeRpc().saveNamespace(0, 0);
       cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
       
       // Ensure that the 2NN can still perform a checkpoint.
@@ -2369,7 +2369,7 @@ public class TestCheckpoint {
       // therefore needs to download a new fsimage the next time it performs a
       // checkpoint.
       cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
-      cluster.getNameNodeRpc().saveNamespace();
+      cluster.getNameNodeRpc().saveNamespace(0, 0);
       cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
       
       // Ensure that the 2NN can still perform a checkpoint.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
index 8b3c7ae4..052c23f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
@@ -291,7 +291,7 @@ public class TestEditLogRace {
 
 
         LOG.info("Save " + i + ": saving namespace");
-        namesystem.saveNamespace();
+        namesystem.saveNamespace(0, 0);
         LOG.info("Save " + i + ": leaving safemode");
 
         long savedImageTxId = fsimage.getStorage().getMostRecentCheckpointTxId();
@@ -421,7 +421,7 @@ public class TestEditLogRace {
       assertTrue(et - st > (BLOCK_TIME - 1)*1000);
 
       // Once we're in safe mode, save namespace.
-      namesystem.saveNamespace();
+      namesystem.saveNamespace(0, 0);
 
       LOG.info("Joining on edit thread...");
       doAnEditThread.join();
@@ -515,7 +515,7 @@ public class TestEditLogRace {
       assertTrue(et - st > (BLOCK_TIME - 1)*1000);
 
       // Once we're in safe mode, save namespace.
-      namesystem.saveNamespace();
+      namesystem.saveNamespace(0, 0);
 
       LOG.info("Joining on edit thread...");
       doAnEditThread.join();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index 61d2b3e..daac442 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -474,7 +474,7 @@ public class TestINodeFile {
 
       // Apply editlogs to fsimage, ensure inodeUnderConstruction is handled
       fsn.enterSafeMode(false);
-      fsn.saveNamespace();
+      fsn.saveNamespace(0, 0);
       fsn.leaveSafeMode();
 
       outStream.close();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
index dfd878e..b8dc44e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
@@ -152,7 +152,7 @@ public class TestNNStorageRetentionFunctional {
   private static void doSaveNamespace(NameNode nn) throws IOException {
     LOG.info("Saving namespace...");
     nn.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
-    nn.getRpcServer().saveNamespace();
+    nn.getRpcServer().saveNamespace(0, 0);
     nn.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
index 4200261..86ae642 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
@@ -112,7 +112,7 @@ public class TestParallelImageWrite {
       files.cleanup(fs, dir);
       files.createFiles(fs, dir);
       fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      cluster.getNameNodeRpc().saveNamespace();
+      cluster.getNameNodeRpc().saveNamespace(0, 0);
       final String checkAfterModify = checkImages(fsn, numNamenodeDirs);
       assertFalse("Modified namespace should change fsimage contents. " +
           "was: " + checkAfterRestart + " now: " + checkAfterModify,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
index 1821e98..f43edfb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.log4j.Level;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.internal.util.reflection.Whitebox;
@@ -184,7 +185,7 @@ public class TestSaveNamespace {
       // Save namespace - this may fail, depending on fault injected
       fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
       try {
-        fsn.saveNamespace();
+        fsn.saveNamespace(0, 0);
         if (shouldFail) {
           fail("Did not fail!");
         }
@@ -256,7 +257,7 @@ public class TestSaveNamespace {
       // Save namespace - should mark the first storage dir as faulty
       // since it's not traversable.
       LOG.info("Doing the first savenamespace.");
-      fsn.saveNamespace();
+      fsn.saveNamespace(0, 0);
       LOG.info("First savenamespace sucessful.");      
       
       assertTrue("Savenamespace should have marked one directory as bad." +
@@ -270,7 +271,7 @@ public class TestSaveNamespace {
       // erroneous directory back to fs.name.dir. This command should
       // be successful.
       LOG.info("Doing the second savenamespace.");
-      fsn.saveNamespace();
+      fsn.saveNamespace(0, 0);
       LOG.warn("Second savenamespace sucessful.");
       assertTrue("Savenamespace should have been successful in removing " +
                  " bad directories from Image."  +
@@ -393,7 +394,7 @@ public class TestSaveNamespace {
       // Save namespace
       fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
       try {
-        fsn.saveNamespace();
+        fsn.saveNamespace(0, 0);
         fail("saveNamespace did not fail even when all directories failed!");
       } catch (IOException ioe) {
         LOG.info("Got expected exception", ioe);
@@ -403,7 +404,7 @@ public class TestSaveNamespace {
       if (restoreStorageAfterFailure) {
         Mockito.reset(spyImage);
         spyStorage.setRestoreFailedStorage(true);
-        fsn.saveNamespace();
+        fsn.saveNamespace(0, 0);
         checkEditExists(fsn, 1);
       }
 
@@ -441,7 +442,7 @@ public class TestSaveNamespace {
 
       // Save namespace
       fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      fsn.saveNamespace();
+      fsn.saveNamespace(0, 0);
 
       // Now shut down and restart the NN
       fsn.close();
@@ -475,7 +476,7 @@ public class TestSaveNamespace {
       assertEquals(2, fsn.getEditLog().getLastWrittenTxId());
       
       fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      fsn.saveNamespace();
+      fsn.saveNamespace(0, 0);
 
       // 2 more txns: END the first segment, BEGIN a new one
       assertEquals(4, fsn.getEditLog().getLastWrittenTxId());
@@ -597,7 +598,7 @@ public class TestSaveNamespace {
       fs.rename(new Path("/test-source/"), new Path("/test-target/"));
 
       fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      cluster.getNameNodeRpc().saveNamespace();
+      cluster.getNameNodeRpc().saveNamespace(0, 0);
       fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
     } finally {
       IOUtils.cleanup(LOG, out, fs);
@@ -616,7 +617,7 @@ public class TestSaveNamespace {
     try {
       cluster.getNamesystem().leaseManager.addLease("me", "/non-existent");      
       fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      cluster.getNameNodeRpc().saveNamespace();
+      cluster.getNameNodeRpc().saveNamespace(0, 0);
       fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
     } finally {
       if (cluster != null) {
@@ -625,6 +626,54 @@ public class TestSaveNamespace {
     }
   }
 
+  @Test
+  public void testSaveNamespaceBeforeShutdown() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem fs = cluster.getFileSystem();
+
+    try {
+      final FSImage fsimage = cluster.getNameNode().getFSImage();
+      final long before = fsimage.getStorage().getMostRecentCheckpointTxId();
+
+      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      // set the time window to 1 hour and the tx gap to 1000, which means
+      // that if there was a checkpoint during the past hour or the number of
+      // transactions since the latest checkpoint is <= 1000, this
+      // saveNamespace request will be ignored
+      cluster.getNameNodeRpc().saveNamespace(3600, 1000);
+
+      // make sure no new checkpoint was done
+      long after = fsimage.getStorage().getMostRecentCheckpointTxId();
+      Assert.assertEquals(before, after);
+
+      Thread.sleep(1000);
+      // do another checkpoint. this time set the timewindow to 1s
+      // we should see a new checkpoint
+      cluster.getNameNodeRpc().saveNamespace(1, 1000);
+      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+      after = fsimage.getStorage().getMostRecentCheckpointTxId();
+      Assert.assertTrue(after > before);
+
+      fs.mkdirs(new Path("/foo/bar/baz")); // 3 new tx
+
+      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      cluster.getNameNodeRpc().saveNamespace(3600, 5); // 3 + end/start segment
+      long after2 = fsimage.getStorage().getMostRecentCheckpointTxId();
+      // no checkpoint should be made
+      Assert.assertEquals(after, after2);
+      cluster.getNameNodeRpc().saveNamespace(3600, 3);
+      after2 = fsimage.getStorage().getMostRecentCheckpointTxId();
+      // a new checkpoint should be done
+      Assert.assertTrue(after2 > after);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   private void doAnEdit(FSNamesystem fsn, int id) throws IOException {
     // Make an edit
     fsn.mkdirs(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 8b903af..01621ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -425,7 +425,7 @@ public class TestStartup {
     NamenodeProtocols nnRpc = namenode.getRpcServer();
     assertTrue(nnRpc.getFileInfo("/test").isDir());
     nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
-    nnRpc.saveNamespace();
+    nnRpc.saveNamespace(0, 0);
     namenode.stop();
     namenode.join();
 
@@ -455,7 +455,7 @@ public class TestStartup {
     NamenodeProtocols nnRpc = namenode.getRpcServer();
     assertTrue(nnRpc.getFileInfo("/test").isDir());
     nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
-    nnRpc.saveNamespace();
+    nnRpc.saveNamespace(0, 0);
     namenode.stop();
     namenode.join();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 9e96a8f..011db3c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -427,7 +427,7 @@ public class TestNameNodeMetrics {
     assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
     
     cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
-    cluster.getNameNodeRpc().saveNamespace();
+    cluster.getNameNodeRpc().saveNamespace(0, 0);
     cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
     
     long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8cdede2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
index c6c8dad..85072d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
@@ -431,6 +431,6 @@ public class TestSnapshotBlocksMap {
     // Now make sure that the NN can still save an fsimage successfully.
     cluster.getNameNode().getRpcServer().setSafeMode(
         SafeModeAction.SAFEMODE_ENTER, false);
-    cluster.getNameNode().getRpcServer().saveNamespace();
+    cluster.getNameNode().getRpcServer().saveNamespace(0, 0);
   }
 }


[45/50] [abbrv] hadoop git commit: HADOOP-11553 addendum fix the typo in the changes file

Posted by zj...@apache.org.
HADOOP-11553 addendum fix the typo in the changes file


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fee5961b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fee5961b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fee5961b

Branch: refs/heads/YARN-2928
Commit: fee5961ba64036cd9c02a41537d6352b2abbd2bc
Parents: 43227bc
Author: Allen Wittenauer <aw...@apache.org>
Authored: Thu Mar 26 15:18:03 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:49 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fee5961b/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index dbe9e55..40b4f84 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -19,7 +19,7 @@ Trunk (Unreleased)
     HADOOP-11657. Align the output of `hadoop fs -du` to be more Unix-like.
     (aajisaka)
 
-    HADOOP-11553. Foramlize the shell API (aw)
+    HADOOP-11553. Formalize the shell API (aw)
 
   NEW FEATURES
 


[27/50] [abbrv] hadoop git commit: HDFS-7854. Separate class DataStreamer out of DFSOutputStream. Contributed by Li Bo.

Posted by zj...@apache.org.
HDFS-7854. Separate class DataStreamer out of DFSOutputStream. Contributed by Li Bo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb2eb773
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb2eb773
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb2eb773

Branch: refs/heads/YARN-2928
Commit: cb2eb773fa60d6fd88a4bcbae17c0f91195c2335
Parents: 41c4dab
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Mar 24 11:06:13 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:46 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |    3 +
 .../dev-support/findbugsExcludeFile.xml         |    2 +-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 1694 ++---------------
 .../org/apache/hadoop/hdfs/DataStreamer.java    | 1754 ++++++++++++++++++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java     |    2 +-
 .../apache/hadoop/hdfs/TestDFSOutputStream.java |    5 +-
 .../apache/hadoop/hdfs/TestFileCreation.java    |   18 +-
 7 files changed, 1893 insertions(+), 1585 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb2eb773/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5dae029..4ec0891 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -332,6 +332,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-7829. Code clean up for LocatedBlock. (Takanobu Asanuma via jing9)
 
+    HDFS-7854. Separate class DataStreamer out of DFSOutputStream. (Li Bo via
+    jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb2eb773/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
index dedeece..224d2fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
@@ -77,7 +77,7 @@
       ResponseProccessor is thread that is designed to catch RuntimeException.
      -->
      <Match>
-       <Class name="org.apache.hadoop.hdfs.DFSOutputStream$DataStreamer$ResponseProcessor" />
+       <Class name="org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor" />
        <Method name="run" />
        <Bug pattern="REC_CATCH_EXCEPTION" />
      </Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb2eb773/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index d7d59af..ee3e6f6 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -17,29 +17,12 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
-
-import java.io.BufferedOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.InterruptedIOException;
-import java.io.OutputStream;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.nio.channels.ClosedChannelException;
-import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.EnumSet;
-import java.util.HashSet;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -52,64 +35,37 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
-import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
-import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
-import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
-import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
-import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
-import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.RetryStartFileException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.util.ByteArrayManager;
 import org.apache.hadoop.io.EnumSetWritable;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.NullScope;
 import org.apache.htrace.Sampler;
-import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
-import org.apache.htrace.TraceInfo;
 import org.apache.htrace.TraceScope;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
 
 
 /****************************************************************
@@ -121,19 +77,11 @@ import com.google.common.cache.RemovalNotification;
  * is typically 512 bytes and has an associated checksum with it.
  *
  * When a client application fills up the currentPacket, it is
- * enqueued into dataQueue.  The DataStreamer thread picks up
- * packets from the dataQueue, sends it to the first datanode in
- * the pipeline and moves it from the dataQueue to the ackQueue.
- * The ResponseProcessor receives acks from the datanodes. When an
- * successful ack for a packet is received from all datanodes, the
- * ResponseProcessor removes the corresponding packet from the
- * ackQueue.
+ * enqueued into the dataQueue of DataStreamer. DataStreamer is a
+ * thread that picks up packets from the dataQueue and sends them to
+ * the first datanode in the pipeline.
  *
- * In case of error, all outstanding packets and moved from
- * ackQueue. A new pipeline is setup by eliminating the bad
- * datanode from the original pipeline. The DataStreamer now
- * starts sending packets from the dataQueue.
-****************************************************************/
+ ****************************************************************/
 @InterfaceAudience.Private
 public class DFSOutputStream extends FSOutputSummer
     implements Syncable, CanSetDropBehind {
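
A minimal, self-contained sketch of the hand-off the new class javadoc
describes: the writer thread enqueues filled packets and a streamer thread
drains them under the same lock/wait/notify discipline used by the dataQueue
code in this diff. The SketchStreamer name and the byte[]-as-packet payload
are invented for illustration; this is not the HDFS DataStreamer API.

import java.util.LinkedList;

public class SketchStreamer extends Thread {
  private final LinkedList<byte[]> dataQueue = new LinkedList<>();
  private volatile boolean closed = false;

  // Called by the writer once a packet buffer is full.
  public void enqueue(byte[] packet) {
    synchronized (dataQueue) {
      dataQueue.addLast(packet);
      dataQueue.notifyAll();            // wake the streamer thread
    }
  }

  @Override
  public void run() {
    while (!closed) {
      byte[] one;
      synchronized (dataQueue) {
        while (dataQueue.isEmpty() && !closed) {
          try {
            dataQueue.wait(1000);       // bounded wait, then re-check
          } catch (InterruptedException e) {
            return;
          }
        }
        if (dataQueue.isEmpty()) {
          continue;                     // woken by shutdown() with nothing left
        }
        one = dataQueue.removeFirst();
      }
      send(one);                        // stand-in for the pipeline write
    }
  }

  private void send(byte[] packet) {
    // In HDFS this would be a write to the first datanode's output stream.
  }

  public void shutdown() {
    closed = true;
    synchronized (dataQueue) {
      dataQueue.notifyAll();
    }
  }

  public static void main(String[] args) throws InterruptedException {
    SketchStreamer s = new SketchStreamer();
    s.start();
    s.enqueue(new byte[64 * 1024]);     // one "full" packet
    Thread.sleep(100);
    s.shutdown();
    s.join();
  }
}
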
@@ -148,45 +96,25 @@ public class DFSOutputStream extends FSOutputSummer
       CryptoProtocolVersion.supported();
 
   private final DFSClient dfsClient;
-  private final long dfsclientSlowLogThresholdMs;
   private final ByteArrayManager byteArrayManager;
-  private Socket s;
   // closed is accessed by different threads under different locks.
   private volatile boolean closed = false;
 
-  private String src;
+  private final String src;
   private final long fileId;
   private final long blockSize;
-  /** Only for DataTransferProtocol.writeBlock(..) */
-  private final DataChecksum checksum4WriteBlock;
-  private final int bytesPerChecksum; 
+  private final int bytesPerChecksum;
 
-  // both dataQueue and ackQueue are protected by dataQueue lock
-  private final LinkedList<DFSPacket> dataQueue = new LinkedList<DFSPacket>();
-  private final LinkedList<DFSPacket> ackQueue = new LinkedList<DFSPacket>();
   private DFSPacket currentPacket = null;
   private DataStreamer streamer;
-  private long currentSeqno = 0;
-  private long lastQueuedSeqno = -1;
-  private long lastAckedSeqno = -1;
-  private long bytesCurBlock = 0; // bytes written in current block
   private int packetSize = 0; // write packet size, not including the header.
   private int chunksPerPacket = 0;
-  private final AtomicReference<IOException> lastException = new AtomicReference<IOException>();
-  private long artificialSlowdown = 0;
   private long lastFlushOffset = 0; // offset when flush was invoked
-  //persist blocks on namenode
-  private final AtomicBoolean persistBlocks = new AtomicBoolean(false);
-  private volatile boolean appendChunk = false;   // appending to existing partial block
   private long initialFileSize = 0; // at time of file open
-  private final Progressable progress;
   private final short blockReplication; // replication factor of file
   private boolean shouldSyncBlock = false; // force blocks to disk upon close
   private final AtomicReference<CachingStrategy> cachingStrategy;
-  private boolean failPacket = false;
   private FileEncryptionInfo fileEncryptionInfo;
-  private static final BlockStoragePolicySuite blockStoragePolicySuite =
-      BlockStoragePolicySuite.createDefaultSuite();
 
   /** Use {@link ByteArrayManager} to create buffer for non-heartbeat packets.*/
   private DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
@@ -207,1326 +135,10 @@ public class DFSOutputStream extends FSOutputSummer
                          getChecksumSize(), lastPacketInBlock);
   }
 
-  /**
-   * For heartbeat packets, create buffer directly by new byte[]
-   * since heartbeats should not be blocked.
-   */
-  private DFSPacket createHeartbeatPacket() throws InterruptedIOException {
-    final byte[] buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN];
-    return new DFSPacket(buf, 0, 0, DFSPacket.HEART_BEAT_SEQNO,
-                         getChecksumSize(), false);
-  }
-
-
-  //
-  // The DataStreamer class is responsible for sending data packets to the
-  // datanodes in the pipeline. It retrieves a new blockid and block locations
-  // from the namenode, and starts streaming packets to the pipeline of
-  // Datanodes. Every packet has a sequence number associated with
-  // it. When all the packets for a block are sent out and acks for each
-  // if them are received, the DataStreamer closes the current block.
-  //
-  class DataStreamer extends Daemon {
-    private volatile boolean streamerClosed = false;
-    private ExtendedBlock block; // its length is number of bytes acked
-    private Token<BlockTokenIdentifier> accessToken;
-    private DataOutputStream blockStream;
-    private DataInputStream blockReplyStream;
-    private ResponseProcessor response = null;
-    private volatile DatanodeInfo[] nodes = null; // list of targets for current block
-    private volatile StorageType[] storageTypes = null;
-    private volatile String[] storageIDs = null;
-    private final LoadingCache<DatanodeInfo, DatanodeInfo> excludedNodes =
-        CacheBuilder.newBuilder()
-        .expireAfterWrite(
-            dfsClient.getConf().excludedNodesCacheExpiry,
-            TimeUnit.MILLISECONDS)
-        .removalListener(new RemovalListener<DatanodeInfo, DatanodeInfo>() {
-          @Override
-          public void onRemoval(
-              RemovalNotification<DatanodeInfo, DatanodeInfo> notification) {
-            DFSClient.LOG.info("Removing node " +
-                notification.getKey() + " from the excluded nodes list");
-          }
-        })
-        .build(new CacheLoader<DatanodeInfo, DatanodeInfo>() {
-          @Override
-          public DatanodeInfo load(DatanodeInfo key) throws Exception {
-            return key;
-          }
-        });
-    private String[] favoredNodes;
-    volatile boolean hasError = false;
-    volatile int errorIndex = -1;
-    // Restarting node index
-    AtomicInteger restartingNodeIndex = new AtomicInteger(-1);
-    private long restartDeadline = 0; // Deadline of DN restart
-    private BlockConstructionStage stage;  // block construction stage
-    private long bytesSent = 0; // number of bytes that've been sent
-    private final boolean isLazyPersistFile;
-
-    /** Nodes have been used in the pipeline before and have failed. */
-    private final List<DatanodeInfo> failed = new ArrayList<DatanodeInfo>();
-    /** The last ack sequence number before pipeline failure. */
-    private long lastAckedSeqnoBeforeFailure = -1;
-    private int pipelineRecoveryCount = 0;
-    /** Has the current block been hflushed? */
-    private boolean isHflushed = false;
-    /** Append on an existing block? */
-    private final boolean isAppend;
-
-    private DataStreamer(HdfsFileStatus stat, ExtendedBlock block) {
-      isAppend = false;
-      isLazyPersistFile = isLazyPersist(stat);
-      this.block = block;
-      stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
-    }
-    
-    /**
-     * Construct a data streamer for appending to the last partial block
-     * @param lastBlock last block of the file to be appended
-     * @param stat status of the file to be appended
-     * @param bytesPerChecksum number of bytes per checksum
-     * @throws IOException if error occurs
-     */
-    private DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat,
-        int bytesPerChecksum) throws IOException {
-      isAppend = true;
-      stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
-      block = lastBlock.getBlock();
-      bytesSent = block.getNumBytes();
-      accessToken = lastBlock.getBlockToken();
-      isLazyPersistFile = isLazyPersist(stat);
-      long usedInLastBlock = stat.getLen() % blockSize;
-      int freeInLastBlock = (int)(blockSize - usedInLastBlock);
-
-      // calculate the amount of free space in the pre-existing 
-      // last crc chunk
-      int usedInCksum = (int)(stat.getLen() % bytesPerChecksum);
-      int freeInCksum = bytesPerChecksum - usedInCksum;
-
-      // if there is space in the last block, then we have to 
-      // append to that block
-      if (freeInLastBlock == blockSize) {
-        throw new IOException("The last block for file " + 
-            src + " is full.");
-      }
-
-      if (usedInCksum > 0 && freeInCksum > 0) {
-        // if there is space in the last partial chunk, then 
-        // setup in such a way that the next packet will have only 
-        // one chunk that fills up the partial chunk.
-        //
-        computePacketChunkSize(0, freeInCksum);
-        setChecksumBufSize(freeInCksum);
-        appendChunk = true;
-      } else {
-        // if the remaining space in the block is smaller than 
-        // that expected size of of a packet, then create 
-        // smaller size packet.
-        //
-        computePacketChunkSize(Math.min(dfsClient.getConf().writePacketSize, freeInLastBlock), 
-            bytesPerChecksum);
-      }
-
-      // setup pipeline to append to the last block XXX retries??
-      setPipeline(lastBlock);
-      errorIndex = -1;   // no errors yet.
-      if (nodes.length < 1) {
-        throw new IOException("Unable to retrieve blocks locations " +
-            " for last block " + block +
-            "of file " + src);
-
-      }
-    }
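
The packet sizing logic in the append constructor above is easier to follow
with concrete numbers. This is a worked example with illustrative values, not
HDFS code.

public class AppendSpaceExample {
  public static void main(String[] args) {
    long blockSize = 128L * 1024 * 1024;   // 128 MB block
    int bytesPerChecksum = 512;            // one checksum per 512-byte chunk
    long fileLen = 1000;                   // file ends 1000 bytes into a block

    long usedInLastBlock = fileLen % blockSize;            // 1000
    int freeInLastBlock = (int) (blockSize - usedInLastBlock);

    int usedInCksum = (int) (fileLen % bytesPerChecksum);  // 1000 % 512 = 488
    int freeInCksum = bytesPerChecksum - usedInCksum;      // 512 - 488 = 24

    // usedInCksum > 0 and freeInCksum > 0, so the first appended packet is
    // limited to 24 bytes: it completes the partial crc chunk before
    // full-size packets resume.
    System.out.println("freeInLastBlock=" + freeInLastBlock
        + ", freeInCksum=" + freeInCksum);
  }
}
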
-
-    private void setPipeline(LocatedBlock lb) {
-      setPipeline(lb.getLocations(), lb.getStorageTypes(), lb.getStorageIDs());
-    }
-    private void setPipeline(DatanodeInfo[] nodes, StorageType[] storageTypes,
-        String[] storageIDs) {
-      this.nodes = nodes;
-      this.storageTypes = storageTypes;
-      this.storageIDs = storageIDs;
-    }
-
-    private void setFavoredNodes(String[] favoredNodes) {
-      this.favoredNodes = favoredNodes;
-    }
-
-    /**
-     * Initialize for data streaming
-     */
-    private void initDataStreaming() {
-      this.setName("DataStreamer for file " + src +
-          " block " + block);
-      response = new ResponseProcessor(nodes);
-      response.start();
-      stage = BlockConstructionStage.DATA_STREAMING;
-    }
-    
-    private void endBlock() {
-      if(DFSClient.LOG.isDebugEnabled()) {
-        DFSClient.LOG.debug("Closing old block " + block);
-      }
-      this.setName("DataStreamer for file " + src);
-      closeResponder();
-      closeStream();
-      setPipeline(null, null, null);
-      stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
-    }
-    
-    /*
-     * streamer thread is the only thread that opens streams to datanode, 
-     * and closes them. Any error recovery is also done by this thread.
-     */
-    @Override
-    public void run() {
-      long lastPacket = Time.monotonicNow();
-      TraceScope scope = NullScope.INSTANCE;
-      while (!streamerClosed && dfsClient.clientRunning) {
-        // if the Responder encountered an error, shutdown Responder
-        if (hasError && response != null) {
-          try {
-            response.close();
-            response.join();
-            response = null;
-          } catch (InterruptedException  e) {
-            DFSClient.LOG.warn("Caught exception ", e);
-          }
-        }
-
-        DFSPacket one;
-        try {
-          // process datanode IO errors if any
-          boolean doSleep = false;
-          if (hasError && (errorIndex >= 0 || restartingNodeIndex.get() >= 0)) {
-            doSleep = processDatanodeError();
-          }
-
-          synchronized (dataQueue) {
-            // wait for a packet to be sent.
-            long now = Time.monotonicNow();
-            while ((!streamerClosed && !hasError && dfsClient.clientRunning 
-                && dataQueue.size() == 0 && 
-                (stage != BlockConstructionStage.DATA_STREAMING || 
-                 stage == BlockConstructionStage.DATA_STREAMING && 
-                 now - lastPacket < dfsClient.getConf().socketTimeout/2)) || doSleep ) {
-              long timeout = dfsClient.getConf().socketTimeout/2 - (now-lastPacket);
-              timeout = timeout <= 0 ? 1000 : timeout;
-              timeout = (stage == BlockConstructionStage.DATA_STREAMING)?
-                 timeout : 1000;
-              try {
-                dataQueue.wait(timeout);
-              } catch (InterruptedException  e) {
-                DFSClient.LOG.warn("Caught exception ", e);
-              }
-              doSleep = false;
-              now = Time.monotonicNow();
-            }
-            if (streamerClosed || hasError || !dfsClient.clientRunning) {
-              continue;
-            }
-            // get packet to be sent.
-            if (dataQueue.isEmpty()) {
-              one = createHeartbeatPacket();
-              assert one != null;
-            } else {
-              one = dataQueue.getFirst(); // regular data packet
-              long parents[] = one.getTraceParents();
-              if (parents.length > 0) {
-                scope = Trace.startSpan("dataStreamer", new TraceInfo(0, parents[0]));
-                // TODO: use setParents API once it's available from HTrace 3.2
-//                scope = Trace.startSpan("dataStreamer", Sampler.ALWAYS);
-//                scope.getSpan().setParents(parents);
-              }
-            }
-          }
-
-          // get new block from namenode.
-          if (stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
-            if(DFSClient.LOG.isDebugEnabled()) {
-              DFSClient.LOG.debug("Allocating new block");
-            }
-            setPipeline(nextBlockOutputStream());
-            initDataStreaming();
-          } else if (stage == BlockConstructionStage.PIPELINE_SETUP_APPEND) {
-            if(DFSClient.LOG.isDebugEnabled()) {
-              DFSClient.LOG.debug("Append to block " + block);
-            }
-            setupPipelineForAppendOrRecovery();
-            initDataStreaming();
-          }
-
-          long lastByteOffsetInBlock = one.getLastByteOffsetBlock();
-          if (lastByteOffsetInBlock > blockSize) {
-            throw new IOException("BlockSize " + blockSize +
-                " is smaller than data size. " +
-                " Offset of packet in block " + 
-                lastByteOffsetInBlock +
-                " Aborting file " + src);
-          }
-
-          if (one.isLastPacketInBlock()) {
-            // wait for all data packets have been successfully acked
-            synchronized (dataQueue) {
-              while (!streamerClosed && !hasError && 
-                  ackQueue.size() != 0 && dfsClient.clientRunning) {
-                try {
-                  // wait for acks to arrive from datanodes
-                  dataQueue.wait(1000);
-                } catch (InterruptedException  e) {
-                  DFSClient.LOG.warn("Caught exception ", e);
-                }
-              }
-            }
-            if (streamerClosed || hasError || !dfsClient.clientRunning) {
-              continue;
-            }
-            stage = BlockConstructionStage.PIPELINE_CLOSE;
-          }
-          
-          // send the packet
-          Span span = null;
-          synchronized (dataQueue) {
-            // move packet from dataQueue to ackQueue
-            if (!one.isHeartbeatPacket()) {
-              span = scope.detach();
-              one.setTraceSpan(span);
-              dataQueue.removeFirst();
-              ackQueue.addLast(one);
-              dataQueue.notifyAll();
-            }
-          }
-
-          if (DFSClient.LOG.isDebugEnabled()) {
-            DFSClient.LOG.debug("DataStreamer block " + block +
-                " sending packet " + one);
-          }
-
-          // write out data to remote datanode
-          TraceScope writeScope = Trace.startSpan("writeTo", span);
-          try {
-            one.writeTo(blockStream);
-            blockStream.flush();   
-          } catch (IOException e) {
-            // HDFS-3398 treat primary DN is down since client is unable to 
-            // write to primary DN. If a failed or restarting node has already
-            // been recorded by the responder, the following call will have no 
-            // effect. Pipeline recovery can handle only one node error at a
-            // time. If the primary node fails again during the recovery, it
-            // will be taken out then.
-            tryMarkPrimaryDatanodeFailed();
-            throw e;
-          } finally {
-            writeScope.close();
-          }
-          lastPacket = Time.monotonicNow();
-          
-          // update bytesSent
-          long tmpBytesSent = one.getLastByteOffsetBlock();
-          if (bytesSent < tmpBytesSent) {
-            bytesSent = tmpBytesSent;
-          }
-
-          if (streamerClosed || hasError || !dfsClient.clientRunning) {
-            continue;
-          }
-
-          // Is this block full?
-          if (one.isLastPacketInBlock()) {
-            // wait for the close packet has been acked
-            synchronized (dataQueue) {
-              while (!streamerClosed && !hasError && 
-                  ackQueue.size() != 0 && dfsClient.clientRunning) {
-                dataQueue.wait(1000);// wait for acks to arrive from datanodes
-              }
-            }
-            if (streamerClosed || hasError || !dfsClient.clientRunning) {
-              continue;
-            }
-
-            endBlock();
-          }
-          if (progress != null) { progress.progress(); }
-
-          // This is used by unit test to trigger race conditions.
-          if (artificialSlowdown != 0 && dfsClient.clientRunning) {
-            Thread.sleep(artificialSlowdown); 
-          }
-        } catch (Throwable e) {
-          // Log warning if there was a real error.
-          if (restartingNodeIndex.get() == -1) {
-            // Since their messages are descriptive enough, do not always
-            // log a verbose stack-trace WARN for quota exceptions.
-            if (e instanceof QuotaExceededException) {
-              DFSClient.LOG.debug("DataStreamer Quota Exception", e);
-            } else {
-              DFSClient.LOG.warn("DataStreamer Exception", e);
-            }
-          }
-          if (e instanceof IOException) {
-            setLastException((IOException)e);
-          } else {
-            setLastException(new IOException("DataStreamer Exception: ",e));
-          }
-          hasError = true;
-          if (errorIndex == -1 && restartingNodeIndex.get() == -1) {
-            // Not a datanode issue
-            streamerClosed = true;
-          }
-        } finally {
-          scope.close();
-        }
-      }
-      closeInternal();
-    }
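
A small sketch of the wait-time rule the run() loop above applies before
falling through to send a heartbeat packet. The class and method are invented
and the timeout values are examples only.

public class HeartbeatTimeoutExample {
  // While streaming, wait at most half the socket timeout measured from the
  // last packet sent; outside the streaming stage, poll once a second.
  static long waitMillis(boolean streaming, long socketTimeoutMs,
                         long nowMs, long lastPacketMs) {
    if (!streaming) {
      return 1000;
    }
    long timeout = socketTimeoutMs / 2 - (nowMs - lastPacketMs);
    return timeout <= 0 ? 1000 : timeout;
  }

  public static void main(String[] args) {
    // 60s socket timeout and 25s since the last packet: wait up to 5s more,
    // then fall through and send a heartbeat so the pipeline stays alive.
    System.out.println(waitMillis(true, 60_000, 25_000, 0));
  }
}
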
-
-    private void closeInternal() {
-      closeResponder();       // close and join
-      closeStream();
-      streamerClosed = true;
-      setClosed();
-      synchronized (dataQueue) {
-        dataQueue.notifyAll();
-      }
-    }
-
-    /*
-     * close both streamer and DFSOutputStream, should be called only 
-     * by an external thread and only after all data to be sent has 
-     * been flushed to datanode.
-     * 
-     * Interrupt this data streamer if force is true
-     * 
-     * @param force if this data stream is forced to be closed 
-     */
-    void close(boolean force) {
-      streamerClosed = true;
-      synchronized (dataQueue) {
-        dataQueue.notifyAll();
-      }
-      if (force) {
-        this.interrupt();
-      }
-    }
-
-    private void closeResponder() {
-      if (response != null) {
-        try {
-          response.close();
-          response.join();
-        } catch (InterruptedException  e) {
-          DFSClient.LOG.warn("Caught exception ", e);
-        } finally {
-          response = null;
-        }
-      }
-    }
-
-    private void closeStream() {
-      if (blockStream != null) {
-        try {
-          blockStream.close();
-        } catch (IOException e) {
-          setLastException(e);
-        } finally {
-          blockStream = null;
-        }
-      }
-      if (blockReplyStream != null) {
-        try {
-          blockReplyStream.close();
-        } catch (IOException e) {
-          setLastException(e);
-        } finally {
-          blockReplyStream = null;
-        }
-      }
-      if (null != s) {
-        try {
-          s.close();
-        } catch (IOException e) {
-          setLastException(e);
-        } finally {
-          s = null;
-        }
-      }
-    }
-
-    // The following synchronized methods are used whenever 
-    // errorIndex or restartingNodeIndex is set. This is because
-    // check & set needs to be atomic. Simply reading variables
-    // does not require a synchronization. When responder is
-    // not running (e.g. during pipeline recovery), there is no
-    // need to use these methods.
-
-    /** Set the error node index. Called by responder */
-    synchronized void setErrorIndex(int idx) {
-      errorIndex = idx;
-    }
-
-    /** Set the restarting node index. Called by responder */
-    synchronized void setRestartingNodeIndex(int idx) {
-      restartingNodeIndex.set(idx);
-      // If the data streamer has already set the primary node
-      // bad, clear it. It is likely that the write failed due to
-      // the DN shutdown. Even if it was a real failure, the pipeline
-      // recovery will take care of it.
-      errorIndex = -1;      
-    }
-
-    /**
-     * This method is used when no explicit error report was received,
-     * but something failed. When the primary node is a suspect or
-     * unsure about the cause, the primary node is marked as failed.
-     */
-    synchronized void tryMarkPrimaryDatanodeFailed() {
-      // There should be no existing error and no ongoing restart.
-      if ((errorIndex == -1) && (restartingNodeIndex.get() == -1)) {
-        errorIndex = 0;
-      }
-    }
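
The comment above on atomic check-and-set can be shown with a stripped-down
class: writes that test the current value first go through synchronized
methods, while plain reads rely on volatile visibility. Field names mirror the
streamer, but this class is illustrative and not part of HDFS.

public class ErrorIndexExample {
  private volatile int errorIndex = -1;
  private volatile int restartingNodeIndex = -1;

  // Plain read: no lock needed, volatile gives visibility.
  int getErrorIndex() {
    return errorIndex;
  }

  // Check-and-set: the test and the assignment must be one atomic step.
  synchronized void tryMarkPrimaryFailed() {
    if (errorIndex == -1 && restartingNodeIndex == -1) {
      errorIndex = 0;     // blame the primary only if nothing else is pending
    }
  }

  synchronized void setRestarting(int idx) {
    restartingNodeIndex = idx;
    errorIndex = -1;      // a restart supersedes a previously suspected node
  }

  public static void main(String[] args) {
    ErrorIndexExample e = new ErrorIndexExample();
    e.tryMarkPrimaryFailed();
    System.out.println(e.getErrorIndex());   // 0
    e.setRestarting(2);
    System.out.println(e.getErrorIndex());   // -1, cleared by the restart
  }
}
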
-
-    /**
-     * Examine whether it is worth waiting for a node to restart.
-     * @param index the node index
-     */
-    boolean shouldWaitForRestart(int index) {
-      // Only one node in the pipeline.
-      if (nodes.length == 1) {
-        return true;
-      }
-
-      // Is it a local node?
-      InetAddress addr = null;
-      try {
-        addr = InetAddress.getByName(nodes[index].getIpAddr());
-      } catch (java.net.UnknownHostException e) {
-        // we are passing an ip address. this should not happen.
-        assert false;
-      }
-
-      if (addr != null && NetUtils.isLocalAddress(addr)) {
-        return true;
-      }
-      return false;
-    }
-
-    //
-    // Processes responses from the datanodes.  A packet is removed
-    // from the ackQueue when its response arrives.
-    //
-    private class ResponseProcessor extends Daemon {
-
-      private volatile boolean responderClosed = false;
-      private DatanodeInfo[] targets = null;
-      private boolean isLastPacketInBlock = false;
-
-      ResponseProcessor (DatanodeInfo[] targets) {
-        this.targets = targets;
-      }
-
-      @Override
-      public void run() {
-
-        setName("ResponseProcessor for block " + block);
-        PipelineAck ack = new PipelineAck();
-
-        TraceScope scope = NullScope.INSTANCE;
-        while (!responderClosed && dfsClient.clientRunning && !isLastPacketInBlock) {
-          // process responses from datanodes.
-          try {
-            // read an ack from the pipeline
-            long begin = Time.monotonicNow();
-            ack.readFields(blockReplyStream);
-            long duration = Time.monotonicNow() - begin;
-            if (duration > dfsclientSlowLogThresholdMs
-                && ack.getSeqno() != DFSPacket.HEART_BEAT_SEQNO) {
-              DFSClient.LOG
-                  .warn("Slow ReadProcessor read fields took " + duration
-                      + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms); ack: "
-                      + ack + ", targets: " + Arrays.asList(targets));
-            } else if (DFSClient.LOG.isDebugEnabled()) {
-              DFSClient.LOG.debug("DFSClient " + ack);
-            }
-
-            long seqno = ack.getSeqno();
-            // processes response status from datanodes.
-            for (int i = ack.getNumOfReplies()-1; i >=0  && dfsClient.clientRunning; i--) {
-              final Status reply = PipelineAck.getStatusFromHeader(ack
-                .getReply(i));
-              // Restart will not be treated differently unless it is
-              // the local node or the only one in the pipeline.
-              if (PipelineAck.isRestartOOBStatus(reply) &&
-                  shouldWaitForRestart(i)) {
-                restartDeadline = dfsClient.getConf().datanodeRestartTimeout
-                    + Time.monotonicNow();
-                setRestartingNodeIndex(i);
-                String message = "A datanode is restarting: " + targets[i];
-                DFSClient.LOG.info(message);
-               throw new IOException(message);
-              }
-              // node error
-              if (reply != SUCCESS) {
-                setErrorIndex(i); // first bad datanode
-                throw new IOException("Bad response " + reply +
-                    " for block " + block +
-                    " from datanode " + 
-                    targets[i]);
-              }
-            }
-            
-            assert seqno != PipelineAck.UNKOWN_SEQNO : 
-              "Ack for unknown seqno should be a failed ack: " + ack;
-            if (seqno == DFSPacket.HEART_BEAT_SEQNO) {  // a heartbeat ack
-              continue;
-            }
-
-            // a success ack for a data packet
-            DFSPacket one;
-            synchronized (dataQueue) {
-              one = ackQueue.getFirst();
-            }
-            if (one.getSeqno() != seqno) {
-              throw new IOException("ResponseProcessor: Expecting seqno " +
-                                    " for block " + block +
-                                    one.getSeqno() + " but received " + seqno);
-            }
-            isLastPacketInBlock = one.isLastPacketInBlock();
-
-            // Fail the packet write for testing in order to force a
-            // pipeline recovery.
-            if (DFSClientFaultInjector.get().failPacket() &&
-                isLastPacketInBlock) {
-              failPacket = true;
-              throw new IOException(
-                    "Failing the last packet for testing.");
-            }
-              
-            // update bytesAcked
-            block.setNumBytes(one.getLastByteOffsetBlock());
-
-            synchronized (dataQueue) {
-              scope = Trace.continueSpan(one.getTraceSpan());
-              one.setTraceSpan(null);
-              lastAckedSeqno = seqno;
-              ackQueue.removeFirst();
-              dataQueue.notifyAll();
-
-              one.releaseBuffer(byteArrayManager);
-            }
-          } catch (Exception e) {
-            if (!responderClosed) {
-              if (e instanceof IOException) {
-                setLastException((IOException)e);
-              }
-              hasError = true;
-              // If no explicit error report was received, mark the primary
-              // node as failed.
-              tryMarkPrimaryDatanodeFailed();
-              synchronized (dataQueue) {
-                dataQueue.notifyAll();
-              }
-              if (restartingNodeIndex.get() == -1) {
-                DFSClient.LOG.warn("DFSOutputStream ResponseProcessor exception "
-                     + " for block " + block, e);
-              }
-              responderClosed = true;
-            }
-          } finally {
-            scope.close();
-          }
-        }
-      }
-
-      void close() {
-        responderClosed = true;
-        this.interrupt();
-      }
-    }
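
A compact model of the in-order ack accounting the ResponseProcessor above
performs: each ack must match the packet at the head of the ack queue before
that packet is retired. The Deque-of-seqnos representation is an illustration,
not the real DFSPacket/ackQueue types.

import java.io.IOException;
import java.util.ArrayDeque;
import java.util.Deque;

public class AckQueueExample {
  private final Deque<Long> ackQueue = new ArrayDeque<>();
  private long lastAckedSeqno = -1;

  void sent(long seqno) {
    ackQueue.addLast(seqno);          // moved from dataQueue when sent
  }

  void acked(long seqno) throws IOException {
    Long head = ackQueue.peekFirst();
    if (head == null || head != seqno) {
      throw new IOException("Expecting seqno " + head
          + " but received " + seqno);
    }
    ackQueue.removeFirst();           // retire the packet
    lastAckedSeqno = seqno;
  }

  public static void main(String[] args) throws IOException {
    AckQueueExample q = new AckQueueExample();
    q.sent(0);
    q.sent(1);
    q.acked(0);                       // in order: accepted
    q.acked(1);                       // an out-of-order ack would throw
    System.out.println("lastAckedSeqno=" + q.lastAckedSeqno);
  }
}
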
-
-    // If this stream has encountered any errors so far, shutdown 
-    // threads and mark stream as closed. Returns true if we should
-    // sleep for a while after returning from this call.
-    //
-    private boolean processDatanodeError() throws IOException {
-      if (response != null) {
-        DFSClient.LOG.info("Error Recovery for " + block +
-        " waiting for responder to exit. ");
-        return true;
-      }
-      closeStream();
-
-      // move packets from ack queue to front of the data queue
-      synchronized (dataQueue) {
-        dataQueue.addAll(0, ackQueue);
-        ackQueue.clear();
-      }
-
-      // Record the new pipeline failure recovery.
-      if (lastAckedSeqnoBeforeFailure != lastAckedSeqno) {
-         lastAckedSeqnoBeforeFailure = lastAckedSeqno;
-         pipelineRecoveryCount = 1;
-      } else {
-        // If we had to recover the pipeline five times in a row for the
-        // same packet, this client likely has corrupt data or corrupting
-        // during transmission.
-        if (++pipelineRecoveryCount > 5) {
-          DFSClient.LOG.warn("Error recovering pipeline for writing " +
-              block + ". Already retried 5 times for the same packet.");
-          lastException.set(new IOException("Failing write. Tried pipeline " +
-              "recovery 5 times without success."));
-          streamerClosed = true;
-          return false;
-        }
-      }
-      boolean doSleep = setupPipelineForAppendOrRecovery();
-      
-      if (!streamerClosed && dfsClient.clientRunning) {
-        if (stage == BlockConstructionStage.PIPELINE_CLOSE) {
-
-          // If we had an error while closing the pipeline, we go through a fast-path
-          // where the BlockReceiver does not run. Instead, the DataNode just finalizes
-          // the block immediately during the 'connect ack' process. So, we want to pull
-          // the end-of-block packet from the dataQueue, since we don't actually have
-          // a true pipeline to send it over.
-          //
-          // We also need to set lastAckedSeqno to the end-of-block Packet's seqno, so that
-          // a client waiting on close() will be aware that the flush finished.
-          synchronized (dataQueue) {
-            DFSPacket endOfBlockPacket = dataQueue.remove();  // remove the end of block packet
-            Span span = endOfBlockPacket.getTraceSpan();
-            if (span != null) {
-              // Close any trace span associated with this Packet
-              TraceScope scope = Trace.continueSpan(span);
-              scope.close();
-            }
-            assert endOfBlockPacket.isLastPacketInBlock();
-            assert lastAckedSeqno == endOfBlockPacket.getSeqno() - 1;
-            lastAckedSeqno = endOfBlockPacket.getSeqno();
-            dataQueue.notifyAll();
-          }
-          endBlock();
-        } else {
-          initDataStreaming();
-        }
-      }
-      
-      return doSleep;
-    }
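
The two recovery rules applied above, requeueing unacked packets and capping
repeated recoveries of the same packet at five, in a self-contained sketch.
The class and its Integer "packets" are invented for the example.

import java.util.ArrayDeque;
import java.util.Deque;

public class RecoveryExample {
  private final Deque<Integer> dataQueue = new ArrayDeque<>();
  private final Deque<Integer> ackQueue = new ArrayDeque<>();
  private long lastAckedSeqno = -1;              // advanced by the responder
  private long lastAckedSeqnoBeforeFailure = -1;
  private int pipelineRecoveryCount = 0;

  // Returns false if the write should be failed instead of recovered.
  boolean onPipelineError() {
    // Rule 1: everything sent but not acked must be resent, ahead of
    // packets that were never sent.
    while (!ackQueue.isEmpty()) {
      dataQueue.addFirst(ackQueue.removeLast());
    }
    // Rule 2: bound the number of recoveries for the same acked position.
    if (lastAckedSeqnoBeforeFailure != lastAckedSeqno) {
      lastAckedSeqnoBeforeFailure = lastAckedSeqno;
      pipelineRecoveryCount = 1;
      return true;
    }
    return ++pipelineRecoveryCount <= 5;
  }

  public static void main(String[] args) {
    RecoveryExample r = new RecoveryExample();
    r.dataQueue.addLast(7);           // not yet sent
    r.ackQueue.addLast(5);            // sent, awaiting ack
    r.ackQueue.addLast(6);
    System.out.println(r.onPipelineError());    // true: recover and resend
    System.out.println(r.dataQueue);            // [5, 6, 7]
  }
}
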
-
-    private void setHflush() {
-      isHflushed = true;
-    }
-
-    private int findNewDatanode(final DatanodeInfo[] original
-        ) throws IOException {
-      if (nodes.length != original.length + 1) {
-        throw new IOException(
-            new StringBuilder()
-            .append("Failed to replace a bad datanode on the existing pipeline ")
-            .append("due to no more good datanodes being available to try. ")
-            .append("(Nodes: current=").append(Arrays.asList(nodes))
-            .append(", original=").append(Arrays.asList(original)).append("). ")
-            .append("The current failed datanode replacement policy is ")
-            .append(dfsClient.dtpReplaceDatanodeOnFailure).append(", and ")
-            .append("a client may configure this via '")
-            .append(DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY)
-            .append("' in its configuration.")
-            .toString());
-      }
-      for(int i = 0; i < nodes.length; i++) {
-        int j = 0;
-        for(; j < original.length && !nodes[i].equals(original[j]); j++);
-        if (j == original.length) {
-          return i;
-        }
-      }
-      throw new IOException("Failed: new datanode not found: nodes="
-          + Arrays.asList(nodes) + ", original=" + Arrays.asList(original));
-    }
-
-    private void addDatanode2ExistingPipeline() throws IOException {
-      if (DataTransferProtocol.LOG.isDebugEnabled()) {
-        DataTransferProtocol.LOG.debug("lastAckedSeqno = " + lastAckedSeqno);
-      }
-      /*
-       * Is data transfer necessary?  We have the following cases.
-       * 
-       * Case 1: Failure in Pipeline Setup
-       * - Append
-       *    + Transfer the stored replica, which may be a RBW or a finalized.
-       * - Create
-       *    + If no data, then no transfer is required.
-       *    + If there are data written, transfer RBW. This case may happens 
-       *      when there are streaming failure earlier in this pipeline.
-       *
-       * Case 2: Failure in Streaming
-       * - Append/Create:
-       *    + transfer RBW
-       * 
-       * Case 3: Failure in Close
-       * - Append/Create:
-       *    + no transfer, let NameNode replicates the block.
-       */
-      if (!isAppend && lastAckedSeqno < 0
-          && stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
-        //no data have been written
-        return;
-      } else if (stage == BlockConstructionStage.PIPELINE_CLOSE
-          || stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
-        //pipeline is closing
-        return;
-      }
-
-      //get a new datanode
-      final DatanodeInfo[] original = nodes;
-      final LocatedBlock lb = dfsClient.namenode.getAdditionalDatanode(
-          src, fileId, block, nodes, storageIDs,
-          failed.toArray(new DatanodeInfo[failed.size()]),
-          1, dfsClient.clientName);
-      setPipeline(lb);
-
-      //find the new datanode
-      final int d = findNewDatanode(original);
-
-      //transfer replica
-      final DatanodeInfo src = d == 0? nodes[1]: nodes[d - 1];
-      final DatanodeInfo[] targets = {nodes[d]};
-      final StorageType[] targetStorageTypes = {storageTypes[d]};
-      transfer(src, targets, targetStorageTypes, lb.getBlockToken());
-    }
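
The case analysis in the comment above, restated as a single predicate. The
enum and method names are illustrative stand-ins for the HDFS
BlockConstructionStage values, not the HDFS API.

public class TransferDecisionExample {
  enum Stage { PIPELINE_SETUP_CREATE, PIPELINE_SETUP_APPEND,
               DATA_STREAMING, PIPELINE_CLOSE, PIPELINE_CLOSE_RECOVERY }

  static boolean needsTransfer(boolean isAppend, long lastAckedSeqno,
                               Stage stage) {
    if (!isAppend && lastAckedSeqno < 0
        && stage == Stage.PIPELINE_SETUP_CREATE) {
      return false;        // Case 1, create with no data yet: nothing to copy
    }
    if (stage == Stage.PIPELINE_CLOSE
        || stage == Stage.PIPELINE_CLOSE_RECOVERY) {
      return false;        // Case 3, close: let the NameNode re-replicate
    }
    return true;           // Case 1 append and Case 2: transfer the replica
  }

  public static void main(String[] args) {
    System.out.println(needsTransfer(false, -1, Stage.PIPELINE_SETUP_CREATE));
    System.out.println(needsTransfer(true, 42, Stage.DATA_STREAMING));
  }
}
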
-
-    private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets,
-        final StorageType[] targetStorageTypes,
-        final Token<BlockTokenIdentifier> blockToken) throws IOException {
-      //transfer replica to the new datanode
-      Socket sock = null;
-      DataOutputStream out = null;
-      DataInputStream in = null;
-      try {
-        sock = createSocketForPipeline(src, 2, dfsClient);
-        final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2);
-        
-        OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
-        InputStream unbufIn = NetUtils.getInputStream(sock);
-        IOStreamPair saslStreams = dfsClient.saslClient.socketSend(sock,
-          unbufOut, unbufIn, dfsClient, blockToken, src);
-        unbufOut = saslStreams.out;
-        unbufIn = saslStreams.in;
-        out = new DataOutputStream(new BufferedOutputStream(unbufOut,
-            HdfsConstants.SMALL_BUFFER_SIZE));
-        in = new DataInputStream(unbufIn);
-
-        //send the TRANSFER_BLOCK request
-        new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
-            targets, targetStorageTypes);
-        out.flush();
-
-        //ack
-        BlockOpResponseProto response =
-          BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
-        if (SUCCESS != response.getStatus()) {
-          throw new IOException("Failed to add a datanode");
-        }
-      } finally {
-        IOUtils.closeStream(in);
-        IOUtils.closeStream(out);
-        IOUtils.closeSocket(sock);
-      }
-    }
-
-    /**
-     * Open a DataOutputStream to a DataNode pipeline so that 
-     * it can be written to.
-     * This happens when a file is appended or data streaming fails
-     * It keeps on trying until a pipeline is setup
-     */
-    private boolean setupPipelineForAppendOrRecovery() throws IOException {
-      // check number of datanodes
-      if (nodes == null || nodes.length == 0) {
-        String msg = "Could not get block locations. " + "Source file \""
-            + src + "\" - Aborting...";
-        DFSClient.LOG.warn(msg);
-        setLastException(new IOException(msg));
-        streamerClosed = true;
-        return false;
-      }
-      
-      boolean success = false;
-      long newGS = 0L;
-      while (!success && !streamerClosed && dfsClient.clientRunning) {
-        // Sleep before reconnect if a dn is restarting.
-        // This process will be repeated until the deadline or the datanode
-        // starts back up.
-        if (restartingNodeIndex.get() >= 0) {
-          // 4 seconds or the configured deadline period, whichever is shorter.
-          // This is the retry interval and recovery will be retried in this
-          // interval until timeout or success.
-          long delay = Math.min(dfsClient.getConf().datanodeRestartTimeout,
-              4000L);
-          try {
-            Thread.sleep(delay);
-          } catch (InterruptedException ie) {
-            lastException.set(new IOException("Interrupted while waiting for " +
-                "datanode to restart. " + nodes[restartingNodeIndex.get()]));
-            streamerClosed = true;
-            return false;
-          }
-        }
-        boolean isRecovery = hasError;
-        // remove bad datanode from list of datanodes.
-        // If errorIndex was not set (i.e. appends), then do not remove 
-        // any datanodes
-        // 
-        if (errorIndex >= 0) {
-          StringBuilder pipelineMsg = new StringBuilder();
-          for (int j = 0; j < nodes.length; j++) {
-            pipelineMsg.append(nodes[j]);
-            if (j < nodes.length - 1) {
-              pipelineMsg.append(", ");
-            }
-          }
-          if (nodes.length <= 1) {
-            lastException.set(new IOException("All datanodes " + pipelineMsg
-                + " are bad. Aborting..."));
-            streamerClosed = true;
-            return false;
-          }
-          DFSClient.LOG.warn("Error Recovery for block " + block +
-              " in pipeline " + pipelineMsg + 
-              ": bad datanode " + nodes[errorIndex]);
-          failed.add(nodes[errorIndex]);
-
-          DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1];
-          arraycopy(nodes, newnodes, errorIndex);
-
-          final StorageType[] newStorageTypes = new StorageType[newnodes.length];
-          arraycopy(storageTypes, newStorageTypes, errorIndex);
-
-          final String[] newStorageIDs = new String[newnodes.length];
-          arraycopy(storageIDs, newStorageIDs, errorIndex);
-          
-          setPipeline(newnodes, newStorageTypes, newStorageIDs);
-
-          // Just took care of a node error while waiting for a node restart
-          if (restartingNodeIndex.get() >= 0) {
-            // If the error came from a node further away than the restarting
-            // node, the restart must have been complete.
-            if (errorIndex > restartingNodeIndex.get()) {
-              restartingNodeIndex.set(-1);
-            } else if (errorIndex < restartingNodeIndex.get()) {
-              // the node index has shifted.
-              restartingNodeIndex.decrementAndGet();
-            } else {
-              // this shouldn't happen...
-              assert false;
-            }
-          }
-
-          if (restartingNodeIndex.get() == -1) {
-            hasError = false;
-          }
-          lastException.set(null);
-          errorIndex = -1;
-        }
-
-        // Check if replace-datanode policy is satisfied.
-        if (dfsClient.dtpReplaceDatanodeOnFailure.satisfy(blockReplication,
-            nodes, isAppend, isHflushed)) {
-          try {
-            addDatanode2ExistingPipeline();
-          } catch(IOException ioe) {
-            if (!dfsClient.dtpReplaceDatanodeOnFailure.isBestEffort()) {
-              throw ioe;
-            }
-            DFSClient.LOG.warn("Failed to replace datanode."
-                + " Continue with the remaining datanodes since "
-                + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_KEY
-                + " is set to true.", ioe);
-          }
-        }
-
-        // get a new generation stamp and an access token
-        LocatedBlock lb = dfsClient.namenode.updateBlockForPipeline(block, dfsClient.clientName);
-        newGS = lb.getBlock().getGenerationStamp();
-        accessToken = lb.getBlockToken();
-        
-        // set up the pipeline again with the remaining nodes
-        if (failPacket) { // for testing
-          success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
-          failPacket = false;
-          try {
-            // Give DNs time to send in bad reports. In real situations,
-            // good reports should follow bad ones, if client committed
-            // with those nodes.
-            Thread.sleep(2000);
-          } catch (InterruptedException ie) {}
-        } else {
-          success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
-        }
-
-        if (restartingNodeIndex.get() >= 0) {
-          assert hasError == true;
-          // check errorIndex set above
-          if (errorIndex == restartingNodeIndex.get()) {
-            // ignore, if came from the restarting node
-            errorIndex = -1;
-          }
-          // still within the deadline
-          if (Time.monotonicNow() < restartDeadline) {
-            continue; // with in the deadline
-          }
-          // expired. declare the restarting node dead
-          restartDeadline = 0;
-          int expiredNodeIndex = restartingNodeIndex.get();
-          restartingNodeIndex.set(-1);
-          DFSClient.LOG.warn("Datanode did not restart in time: " +
-              nodes[expiredNodeIndex]);
-          // Mark the restarting node as failed. If there is any other failed
-          // node during the last pipeline construction attempt, it will not be
-          // overwritten/dropped. In this case, the restarting node will get
-          // excluded in the following attempt, if it still does not come up.
-          if (errorIndex == -1) {
-            errorIndex = expiredNodeIndex;
-          }
-          // From this point on, normal pipeline recovery applies.
-        }
-      } // while
-
-      if (success) {
-        // update pipeline at the namenode
-        ExtendedBlock newBlock = new ExtendedBlock(
-            block.getBlockPoolId(), block.getBlockId(), block.getNumBytes(), newGS);
-        dfsClient.namenode.updatePipeline(dfsClient.clientName, block, newBlock,
-            nodes, storageIDs);
-        // update client side generation stamp
-        block = newBlock;
-      }
-      return false; // do not sleep, continue processing
-    }
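
The pipeline-shrinking step above relies on an arraycopy helper that, judging
from its call sites, keeps every element except the failed index. A generic
stand-in (not the HDFS helper) behaves like this.

import java.util.Arrays;

public class DropBadNodeExample {
  static <T> T[] dropIndex(T[] src, T[] dst, int badIndex) {
    System.arraycopy(src, 0, dst, 0, badIndex);
    System.arraycopy(src, badIndex + 1, dst, badIndex,
        src.length - badIndex - 1);
    return dst;
  }

  public static void main(String[] args) {
    String[] pipeline = {"dn1", "dn2", "dn3"};
    String[] shrunk = dropIndex(pipeline, new String[2], 1);
    System.out.println(Arrays.toString(shrunk));   // [dn1, dn3]
  }
}
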
-
-    /**
-     * Open a DataOutputStream to a DataNode so that it can be written to.
-     * This happens when a file is created and each time a new block is allocated.
-     * Must get block ID and the IDs of the destinations from the namenode.
-     * Returns the list of target datanodes.
-     */
-    private LocatedBlock nextBlockOutputStream() throws IOException {
-      LocatedBlock lb = null;
-      DatanodeInfo[] nodes = null;
-      StorageType[] storageTypes = null;
-      int count = dfsClient.getConf().nBlockWriteRetry;
-      boolean success = false;
-      ExtendedBlock oldBlock = block;
-      do {
-        hasError = false;
-        lastException.set(null);
-        errorIndex = -1;
-        success = false;
-
-        DatanodeInfo[] excluded =
-            excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
-            .keySet()
-            .toArray(new DatanodeInfo[0]);
-        block = oldBlock;
-        lb = locateFollowingBlock(excluded.length > 0 ? excluded : null);
-        block = lb.getBlock();
-        block.setNumBytes(0);
-        bytesSent = 0;
-        accessToken = lb.getBlockToken();
-        nodes = lb.getLocations();
-        storageTypes = lb.getStorageTypes();
-
-        //
-        // Connect to first DataNode in the list.
-        //
-        success = createBlockOutputStream(nodes, storageTypes, 0L, false);
-
-        if (!success) {
-          DFSClient.LOG.info("Abandoning " + block);
-          dfsClient.namenode.abandonBlock(block, fileId, src,
-              dfsClient.clientName);
-          block = null;
-          DFSClient.LOG.info("Excluding datanode " + nodes[errorIndex]);
-          excludedNodes.put(nodes[errorIndex], nodes[errorIndex]);
-        }
-      } while (!success && --count >= 0);
-
-      if (!success) {
-        throw new IOException("Unable to create new block.");
-      }
-      return lb;
-    }
-
-    // connects to the first datanode in the pipeline
-    // Returns true if success, otherwise return failure.
-    //
-    private boolean createBlockOutputStream(DatanodeInfo[] nodes,
-        StorageType[] nodeStorageTypes, long newGS, boolean recoveryFlag) {
-      if (nodes.length == 0) {
-        DFSClient.LOG.info("nodes are empty for write pipeline of block "
-            + block);
-        return false;
-      }
-      Status pipelineStatus = SUCCESS;
-      String firstBadLink = "";
-      boolean checkRestart = false;
-      if (DFSClient.LOG.isDebugEnabled()) {
-        for (int i = 0; i < nodes.length; i++) {
-          DFSClient.LOG.debug("pipeline = " + nodes[i]);
-        }
-      }
-
-      // persist blocks on namenode on next flush
-      persistBlocks.set(true);
-
-      int refetchEncryptionKey = 1;
-      while (true) {
-        boolean result = false;
-        DataOutputStream out = null;
-        try {
-          assert null == s : "Previous socket unclosed";
-          assert null == blockReplyStream : "Previous blockReplyStream unclosed";
-          s = createSocketForPipeline(nodes[0], nodes.length, dfsClient);
-          long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);
-          
-          OutputStream unbufOut = NetUtils.getOutputStream(s, writeTimeout);
-          InputStream unbufIn = NetUtils.getInputStream(s);
-          IOStreamPair saslStreams = dfsClient.saslClient.socketSend(s,
-            unbufOut, unbufIn, dfsClient, accessToken, nodes[0]);
-          unbufOut = saslStreams.out;
-          unbufIn = saslStreams.in;
-          out = new DataOutputStream(new BufferedOutputStream(unbufOut,
-              HdfsConstants.SMALL_BUFFER_SIZE));
-          blockReplyStream = new DataInputStream(unbufIn);
-  
-          //
-          // Xmit header info to datanode
-          //
-  
-          BlockConstructionStage bcs = recoveryFlag? stage.getRecoveryStage(): stage;
-
-          // We cannot change the block length in 'block' as it counts the number
-          // of bytes ack'ed.
-          ExtendedBlock blockCopy = new ExtendedBlock(block);
-          blockCopy.setNumBytes(blockSize);
-
-          boolean[] targetPinnings = getPinnings(nodes, true);
-          // send the request
-          new Sender(out).writeBlock(blockCopy, nodeStorageTypes[0], accessToken,
-              dfsClient.clientName, nodes, nodeStorageTypes, null, bcs, 
-              nodes.length, block.getNumBytes(), bytesSent, newGS,
-              checksum4WriteBlock, cachingStrategy.get(), isLazyPersistFile,
-            (targetPinnings == null ? false : targetPinnings[0]), targetPinnings);
-  
-          // receive ack for connect
-          BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
-              PBHelper.vintPrefixed(blockReplyStream));
-          pipelineStatus = resp.getStatus();
-          firstBadLink = resp.getFirstBadLink();
-          
-          // Got an restart OOB ack.
-          // If a node is already restarting, this status is not likely from
-          // the same node. If it is from a different node, it is not
-          // from the local datanode. Thus it is safe to treat this as a
-          // regular node error.
-          if (PipelineAck.isRestartOOBStatus(pipelineStatus) &&
-            restartingNodeIndex.get() == -1) {
-            checkRestart = true;
-            throw new IOException("A datanode is restarting.");
-          }
-
-          String logInfo = "ack with firstBadLink as " + firstBadLink;
-          DataTransferProtoUtil.checkBlockOpStatus(resp, logInfo);
-
-          assert null == blockStream : "Previous blockStream unclosed";
-          blockStream = out;
-          result =  true; // success
-          restartingNodeIndex.set(-1);
-          hasError = false;
-        } catch (IOException ie) {
-          if (restartingNodeIndex.get() == -1) {
-            DFSClient.LOG.info("Exception in createBlockOutputStream", ie);
-          }
-          if (ie instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
-            DFSClient.LOG.info("Will fetch a new encryption key and retry, " 
-                + "encryption key was invalid when connecting to "
-                + nodes[0] + " : " + ie);
-            // The encryption key used is invalid.
-            refetchEncryptionKey--;
-            dfsClient.clearDataEncryptionKey();
-            // Don't close the socket/exclude this node just yet. Try again with
-            // a new encryption key.
-            continue;
-          }
-  
-          // find the datanode that matches
-          if (firstBadLink.length() != 0) {
-            for (int i = 0; i < nodes.length; i++) {
-              // NB: Unconditionally using the xfer addr w/o hostname
-              if (firstBadLink.equals(nodes[i].getXferAddr())) {
-                errorIndex = i;
-                break;
-              }
-            }
-          } else {
-            assert checkRestart == false;
-            errorIndex = 0;
-          }
-          // Check whether there is a restart worth waiting for.
-          if (checkRestart && shouldWaitForRestart(errorIndex)) {
-            restartDeadline = dfsClient.getConf().datanodeRestartTimeout +
-                Time.monotonicNow();
-            restartingNodeIndex.set(errorIndex);
-            errorIndex = -1;
-            DFSClient.LOG.info("Waiting for the datanode to be restarted: " +
-                nodes[restartingNodeIndex.get()]);
-          }
-          hasError = true;
-          setLastException(ie);
-          result =  false;  // error
-        } finally {
-          if (!result) {
-            IOUtils.closeSocket(s);
-            s = null;
-            IOUtils.closeStream(out);
-            out = null;
-            IOUtils.closeStream(blockReplyStream);
-            blockReplyStream = null;
-          }
-        }
-        return result;
-      }
-    }
-
-    private boolean[] getPinnings(DatanodeInfo[] nodes, boolean shouldLog) {
-      if (favoredNodes == null) {
-        return null;
-      } else {
-        boolean[] pinnings = new boolean[nodes.length];
-        HashSet<String> favoredSet =
-            new HashSet<String>(Arrays.asList(favoredNodes));
-        for (int i = 0; i < nodes.length; i++) {
-          pinnings[i] = favoredSet.remove(nodes[i].getXferAddrWithHostname());
-          if (DFSClient.LOG.isDebugEnabled()) {
-            DFSClient.LOG.debug(nodes[i].getXferAddrWithHostname() +
-                " was chosen by name node (favored=" + pinnings[i] +
-                ").");
-          }
-        }
-        if (shouldLog && !favoredSet.isEmpty()) {
-          // There is one or more favored nodes that were not allocated.
-          DFSClient.LOG.warn(
-              "These favored nodes were specified but not chosen: " +
-              favoredSet +
-              " Specified favored nodes: " + Arrays.toString(favoredNodes));
-
-        }
-        return pinnings;
-      }
-    }
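
An illustrative version of the pinning computation above: each allocated node
is marked pinned if the caller listed it as favored, and whatever remains in
the favored set was requested but not chosen. Plain host:port strings stand in
for DatanodeInfo here.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class PinningsExample {
  static boolean[] pinnings(String[] allocated, String[] favored) {
    Set<String> favoredSet = new HashSet<>(Arrays.asList(favored));
    boolean[] pinned = new boolean[allocated.length];
    for (int i = 0; i < allocated.length; i++) {
      pinned[i] = favoredSet.remove(allocated[i]);  // true if it was favored
    }
    // Anything left in favoredSet would trigger the "not chosen" warning.
    return pinned;
  }

  public static void main(String[] args) {
    boolean[] p = pinnings(new String[] {"dn1:50010", "dn4:50010"},
                           new String[] {"dn1:50010", "dn2:50010"});
    System.out.println(Arrays.toString(p));   // [true, false]
  }
}
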
-
-    private LocatedBlock locateFollowingBlock(DatanodeInfo[] excludedNodes)  throws IOException {
-      int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
-      long sleeptime = dfsClient.getConf().
-          blockWriteLocateFollowingInitialDelayMs;
-      while (true) {
-        long localstart = Time.monotonicNow();
-        while (true) {
-          try {
-            return dfsClient.namenode.addBlock(src, dfsClient.clientName,
-                block, excludedNodes, fileId, favoredNodes);
-          } catch (RemoteException e) {
-            IOException ue = 
-              e.unwrapRemoteException(FileNotFoundException.class,
-                                      AccessControlException.class,
-                                      NSQuotaExceededException.class,
-                                      DSQuotaExceededException.class,
-                                      UnresolvedPathException.class);
-            if (ue != e) { 
-              throw ue; // no need to retry these exceptions
-            }
-            
-            
-            if (NotReplicatedYetException.class.getName().
-                equals(e.getClassName())) {
-              if (retries == 0) { 
-                throw e;
-              } else {
-                --retries;
-                DFSClient.LOG.info("Exception while adding a block", e);
-                long elapsed = Time.monotonicNow() - localstart;
-                if (elapsed > 5000) {
-                  DFSClient.LOG.info("Waiting for replication for "
-                      + (elapsed / 1000) + " seconds");
-                }
-                try {
-                  DFSClient.LOG.warn("NotReplicatedYetException sleeping " + src
-                      + " retries left " + retries);
-                  Thread.sleep(sleeptime);
-                  sleeptime *= 2;
-                } catch (InterruptedException ie) {
-                  DFSClient.LOG.warn("Caught exception ", ie);
-                }
-              }
-            } else {
-              throw e;
-            }
-
-          }
-        }
-      } 
-    }
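
A sketch of the retry schedule used above when the NameNode reports that the
previous block is not yet replicated: a fixed number of retries with a sleep
that doubles after each attempt. The Action interface and the values in main
are invented for the example; the real code retries only
NotReplicatedYetException.

public class BackoffExample {
  interface Action<T> { T run() throws Exception; }

  static <T> T withBackoff(Action<T> action, int retries, long initialDelayMs)
      throws Exception {
    long sleep = initialDelayMs;
    while (true) {
      try {
        return action.run();
      } catch (Exception e) {
        if (retries-- == 0) {
          throw e;               // out of retries: surface the failure
        }
        Thread.sleep(sleep);     // back off, then try again
        sleep *= 2;
      }
    }
  }

  public static void main(String[] args) throws Exception {
    final int[] attempts = {0};
    // Succeeds on the third attempt; sleeps 400ms, then 800ms, in between.
    String result = withBackoff(() -> {
      if (++attempts[0] < 3) {
        throw new RuntimeException("not ready yet");
      }
      return "block allocated";
    }, 5, 400);
    System.out.println(result);
  }
}
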
-
-    ExtendedBlock getBlock() {
-      return block;
-    }
-
-    DatanodeInfo[] getNodes() {
-      return nodes;
-    }
-
-    Token<BlockTokenIdentifier> getBlockToken() {
-      return accessToken;
-    }
-
-    private void setLastException(IOException e) {
-      lastException.compareAndSet(null, e);
-    }
-  }
-
-  /**
-   * Create a socket for a write pipeline
-   * @param first the first datanode 
-   * @param length the pipeline length
-   * @param client client
-   * @return the socket connected to the first datanode
-   */
-  static Socket createSocketForPipeline(final DatanodeInfo first,
-      final int length, final DFSClient client) throws IOException {
-    final String dnAddr = first.getXferAddr(
-        client.getConf().connectToDnViaHostname);
-    if (DFSClient.LOG.isDebugEnabled()) {
-      DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
-    }
-    final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
-    final Socket sock = client.socketFactory.createSocket();
-    final int timeout = client.getDatanodeReadTimeout(length);
-    NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(), client.getConf().socketTimeout);
-    sock.setSoTimeout(timeout);
-    sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
-    if(DFSClient.LOG.isDebugEnabled()) {
-      DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
-    }
-    return sock;
-  }
-
   @Override
   protected void checkClosed() throws IOException {
     if (isClosed()) {
-      IOException e = lastException.get();
+      IOException e = streamer.getLastException().get();
       throw e != null ? e : new ClosedChannelException();
     }
   }
@@ -1536,7 +148,7 @@ public class DFSOutputStream extends FSOutputSummer
   //
   @VisibleForTesting
   public synchronized DatanodeInfo[] getPipeline() {
-    if (streamer == null) {
+    if (streamer.streamerClosed()) {
       return null;
     }
     DatanodeInfo[] currentNodes = streamer.getNodes();
@@ -1556,7 +168,7 @@ public class DFSOutputStream extends FSOutputSummer
    */
   private static DataChecksum getChecksum4Compute(DataChecksum checksum,
       HdfsFileStatus stat) {
-    if (isLazyPersist(stat) && stat.getReplication() == 1) {
+    if (DataStreamer.isLazyPersist(stat) && stat.getReplication() == 1) {
       // do not compute checksum for writing to single replica to memory
       return DataChecksum.newDataChecksum(Type.NULL,
           checksum.getBytesPerChecksum());
@@ -1573,7 +185,6 @@ public class DFSOutputStream extends FSOutputSummer
     this.blockSize = stat.getBlockSize();
     this.blockReplication = stat.getReplication();
     this.fileEncryptionInfo = stat.getFileEncryptionInfo();
-    this.progress = progress;
     this.cachingStrategy = new AtomicReference<CachingStrategy>(
         dfsClient.getDefaultWriteCachingStrategy());
     if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
@@ -1591,10 +202,6 @@ public class DFSOutputStream extends FSOutputSummer
           + DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum
           + ") must divide block size (=" + blockSize + ").");
     }
-    this.checksum4WriteBlock = checksum;
-
-    this.dfsclientSlowLogThresholdMs =
-      dfsClient.getConf().dfsclientSlowIoWarningThresholdMs;
     this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager();
   }
 
@@ -1607,7 +214,8 @@ public class DFSOutputStream extends FSOutputSummer
 
     computePacketChunkSize(dfsClient.getConf().writePacketSize, bytesPerChecksum);
 
-    streamer = new DataStreamer(stat, null);
+    streamer = new DataStreamer(stat, null, dfsClient, src, progress, checksum,
+        cachingStrategy, byteArrayManager);
     if (favoredNodes != null && favoredNodes.length != 0) {
       streamer.setFavoredNodes(favoredNodes);
     }
@@ -1676,18 +284,57 @@ public class DFSOutputStream extends FSOutputSummer
     this(dfsClient, src, progress, stat, checksum);
     initialFileSize = stat.getLen(); // length of file when opened
 
+    this.fileEncryptionInfo = stat.getFileEncryptionInfo();
+
     // The last partial block of the file has to be filled.
     if (!toNewBlock && lastBlock != null) {
       // indicate that we are appending to an existing block
-      bytesCurBlock = lastBlock.getBlockSize();
-      streamer = new DataStreamer(lastBlock, stat, bytesPerChecksum);
+      streamer = new DataStreamer(lastBlock, stat, dfsClient, src, progress, checksum,
+          cachingStrategy, byteArrayManager);
+      streamer.setBytesCurBlock(lastBlock.getBlockSize());
+      adjustPacketChunkSize(stat);
+      streamer.setPipelineInConstruction(lastBlock);
     } else {
       computePacketChunkSize(dfsClient.getConf().writePacketSize,
           bytesPerChecksum);
-      streamer = new DataStreamer(stat,
-          lastBlock != null ? lastBlock.getBlock() : null);
+      streamer = new DataStreamer(stat, lastBlock != null ? lastBlock.getBlock() : null,
+          dfsClient, src, progress, checksum, cachingStrategy, byteArrayManager);
+    }
+  }
+
+  private void adjustPacketChunkSize(HdfsFileStatus stat) throws IOException{
+
+    long usedInLastBlock = stat.getLen() % blockSize;
+    int freeInLastBlock = (int)(blockSize - usedInLastBlock);
+
+    // calculate the amount of free space in the pre-existing
+    // last crc chunk
+    int usedInCksum = (int)(stat.getLen() % bytesPerChecksum);
+    int freeInCksum = bytesPerChecksum - usedInCksum;
+
+    // if there is space in the last block, then we have to
+    // append to that block
+    if (freeInLastBlock == blockSize) {
+      throw new IOException("The last block for file " +
+          src + " is full.");
+    }
+
+    if (usedInCksum > 0 && freeInCksum > 0) {
+      // if there is space in the last partial chunk, then
+      // setup in such a way that the next packet will have only
+      // one chunk that fills up the partial chunk.
+      //
+      computePacketChunkSize(0, freeInCksum);
+      setChecksumBufSize(freeInCksum);
+      streamer.setAppendChunk(true);
+    } else {
+      // if the remaining space in the block is smaller than
+      // the expected size of a packet, then create a
+      // smaller packet.
+      //
+      computePacketChunkSize(Math.min(dfsClient.getConf().writePacketSize, freeInLastBlock),
+          bytesPerChecksum);
     }
-    this.fileEncryptionInfo = stat.getFileEncryptionInfo();
   }
 
   static DFSOutputStream newStreamForAppend(DFSClient dfsClient, String src,
@@ -1708,12 +355,6 @@ public class DFSOutputStream extends FSOutputSummer
       scope.close();
     }
   }
-  
-  private static boolean isLazyPersist(HdfsFileStatus stat) {
-    final BlockStoragePolicy p = blockStoragePolicySuite.getPolicy(
-        HdfsConstants.MEMORY_STORAGE_POLICY_NAME);
-    return p != null && stat.getStoragePolicy() == p.getId();
-  }
 
   private void computePacketChunkSize(int psize, int csize) {
     final int bodySize = psize - PacketHeader.PKT_MAX_HEADER_LEN;
@@ -1728,62 +369,6 @@ public class DFSOutputStream extends FSOutputSummer
     }
   }
 
-  private void queueCurrentPacket() {
-    synchronized (dataQueue) {
-      if (currentPacket == null) return;
-      currentPacket.addTraceParent(Trace.currentSpan());
-      dataQueue.addLast(currentPacket);
-      lastQueuedSeqno = currentPacket.getSeqno();
-      if (DFSClient.LOG.isDebugEnabled()) {
-        DFSClient.LOG.debug("Queued packet " + currentPacket.getSeqno());
-      }
-      currentPacket = null;
-      dataQueue.notifyAll();
-    }
-  }
-
-  private void waitAndQueueCurrentPacket() throws IOException {
-    synchronized (dataQueue) {
-      try {
-      // If queue is full, then wait till we have enough space
-        boolean firstWait = true;
-        try {
-          while (!isClosed() && dataQueue.size() + ackQueue.size() >
-              dfsClient.getConf().writeMaxPackets) {
-            if (firstWait) {
-              Span span = Trace.currentSpan();
-              if (span != null) {
-                span.addTimelineAnnotation("dataQueue.wait");
-              }
-              firstWait = false;
-            }
-            try {
-              dataQueue.wait();
-            } catch (InterruptedException e) {
-              // If we get interrupted while waiting to queue data, we still need to get rid
-              // of the current packet. This is because we have an invariant that if
-              // currentPacket gets full, it will get queued before the next writeChunk.
-              //
-              // Rather than wait around for space in the queue, we should instead try to
-              // return to the caller as soon as possible, even though we slightly overrun
-              // the MAX_PACKETS length.
-              Thread.currentThread().interrupt();
-              break;
-            }
-          }
-        } finally {
-          Span span = Trace.currentSpan();
-          if ((span != null) && (!firstWait)) {
-            span.addTimelineAnnotation("end.wait");
-          }
-        }
-        checkClosed();
-        queueCurrentPacket();
-      } catch (ClosedChannelException e) {
-      }
-    }
-  }
-
   // @see FSOutputSummer#writeChunk()
   @Override
   protected synchronized void writeChunk(byte[] b, int offset, int len,
@@ -1814,57 +399,62 @@ public class DFSOutputStream extends FSOutputSummer
 
     if (currentPacket == null) {
       currentPacket = createPacket(packetSize, chunksPerPacket, 
-          bytesCurBlock, currentSeqno++, false);
+          streamer.getBytesCurBlock(), streamer.getAndIncCurrentSeqno(), false);
       if (DFSClient.LOG.isDebugEnabled()) {
         DFSClient.LOG.debug("DFSClient writeChunk allocating new packet seqno=" + 
             currentPacket.getSeqno() +
             ", src=" + src +
             ", packetSize=" + packetSize +
             ", chunksPerPacket=" + chunksPerPacket +
-            ", bytesCurBlock=" + bytesCurBlock);
+            ", bytesCurBlock=" + streamer.getBytesCurBlock());
       }
     }
 
     currentPacket.writeChecksum(checksum, ckoff, cklen);
     currentPacket.writeData(b, offset, len);
     currentPacket.incNumChunks();
-    bytesCurBlock += len;
+    streamer.incBytesCurBlock(len);
 
     // If packet is full, enqueue it for transmission
     //
     if (currentPacket.getNumChunks() == currentPacket.getMaxChunks() ||
-        bytesCurBlock == blockSize) {
+        streamer.getBytesCurBlock() == blockSize) {
       if (DFSClient.LOG.isDebugEnabled()) {
         DFSClient.LOG.debug("DFSClient writeChunk packet full seqno=" +
             currentPacket.getSeqno() +
             ", src=" + src +
-            ", bytesCurBlock=" + bytesCurBlock +
+            ", bytesCurBlock=" + streamer.getBytesCurBlock() +
             ", blockSize=" + blockSize +
-            ", appendChunk=" + appendChunk);
+            ", appendChunk=" + streamer.getAppendChunk());
       }
-      waitAndQueueCurrentPacket();
+      streamer.waitAndQueuePacket(currentPacket);
+      currentPacket = null;
 
       // If the reopened file did not end at a chunk boundary and the above
       // write filled up its partial chunk, tell the summer to generate full
       // crc chunks from now on.
-      if (appendChunk && bytesCurBlock%bytesPerChecksum == 0) {
-        appendChunk = false;
+      if (streamer.getAppendChunk() &&
+          streamer.getBytesCurBlock() % bytesPerChecksum == 0) {
+        streamer.setAppendChunk(false);
         resetChecksumBufSize();
       }
 
-      if (!appendChunk) {
-        int psize = Math.min((int)(blockSize-bytesCurBlock), dfsClient.getConf().writePacketSize);
+      if (!streamer.getAppendChunk()) {
+        int psize = Math.min((int)(blockSize-streamer.getBytesCurBlock()),
+            dfsClient.getConf().writePacketSize);
         computePacketChunkSize(psize, bytesPerChecksum);
       }
       //
       // if encountering a block boundary, send an empty packet to 
       // indicate the end of block and reset bytesCurBlock.
       //
-      if (bytesCurBlock == blockSize) {
-        currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++, true);
+      if (streamer.getBytesCurBlock() == blockSize) {
+        currentPacket = createPacket(0, 0, streamer.getBytesCurBlock(),
+            streamer.getAndIncCurrentSeqno(), true);
         currentPacket.setSyncBlock(shouldSyncBlock);
-        waitAndQueueCurrentPacket();
-        bytesCurBlock = 0;
+        streamer.waitAndQueuePacket(currentPacket);
+        currentPacket = null;
+        streamer.setBytesCurBlock(0);
         lastFlushOffset = 0;
       }
     }
@@ -1954,30 +544,30 @@ public class DFSOutputStream extends FSOutputSummer
 
         if (DFSClient.LOG.isDebugEnabled()) {
           DFSClient.LOG.debug("DFSClient flush(): "
-              + " bytesCurBlock=" + bytesCurBlock
+              + " bytesCurBlock=" + streamer.getBytesCurBlock()
               + " lastFlushOffset=" + lastFlushOffset
               + " createNewBlock=" + endBlock);
         }
         // Flush only if we haven't already flushed till this offset.
-        if (lastFlushOffset != bytesCurBlock) {
-          assert bytesCurBlock > lastFlushOffset;
+        if (lastFlushOffset != streamer.getBytesCurBlock()) {
+          assert streamer.getBytesCurBlock() > lastFlushOffset;
           // record the valid offset of this flush
-          lastFlushOffset = bytesCurBlock;
+          lastFlushOffset = streamer.getBytesCurBlock();
           if (isSync && currentPacket == null && !endBlock) {
             // Nothing to send right now,
             // but sync was requested.
             // Send an empty packet if we do not end the block right now
             currentPacket = createPacket(packetSize, chunksPerPacket,
-                bytesCurBlock, currentSeqno++, false);
+                streamer.getBytesCurBlock(), streamer.getAndIncCurrentSeqno(), false);
           }
         } else {
-          if (isSync && bytesCurBlock > 0 && !endBlock) {
+          if (isSync && streamer.getBytesCurBlock() > 0 && !endBlock) {
             // Nothing to send right now,
             // and the block was partially written,
             // and sync was requested.
             // So send an empty sync packet if we do not end the block right now
             currentPacket = createPacket(packetSize, chunksPerPacket,
-                bytesCurBlock, currentSeqno++, false);
+                streamer.getBytesCurBlock(), streamer.getAndIncCurrentSeqno(), false);
           } else if (currentPacket != null) {
             // just discard the current packet since it is already been sent.
             currentPacket.releaseBuffer(byteArrayManager);
@@ -1986,39 +576,42 @@ public class DFSOutputStream extends FSOutputSummer
         }
         if (currentPacket != null) {
           currentPacket.setSyncBlock(isSync);
-          waitAndQueueCurrentPacket();          
+          streamer.waitAndQueuePacket(currentPacket);
+          currentPacket = null;
         }
-        if (endBlock && bytesCurBlock > 0) {
+        if (endBlock && streamer.getBytesCurBlock() > 0) {
           // Need to end the current block, thus send an empty packet to
           // indicate this is the end of the block and reset bytesCurBlock
-          currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++, true);
+          currentPacket = createPacket(0, 0, streamer.getBytesCurBlock(),
+              streamer.getAndIncCurrentSeqno(), true);
           currentPacket.setSyncBlock(shouldSyncBlock || isSync);
-          waitAndQueueCurrentPacket();
-          bytesCurBlock = 0;
+          streamer.waitAndQueuePacket(currentPacket);
+          currentPacket = null;
+          streamer.setBytesCurBlock(0);
           lastFlushOffset = 0;
         } else {
           // Restore state of stream. Record the last flush offset
           // of the last full chunk that was flushed.
-          bytesCurBlock -= numKept;
+          streamer.setBytesCurBlock(streamer.getBytesCurBlock() - numKept);
         }
 
-        toWaitFor = lastQueuedSeqno;
+        toWaitFor = streamer.getLastQueuedSeqno();
       } // end synchronized
 
-      waitForAckedSeqno(toWaitFor);
+      streamer.waitForAckedSeqno(toWaitFor);
 
       // update the block length first time irrespective of flag
-      if (updateLength || persistBlocks.get()) {
+      if (updateLength || streamer.getPersistBlocks().get()) {
         synchronized (this) {
-          if (streamer != null && streamer.block != null) {
-            lastBlockLength = streamer.block.getNumBytes();
+          if (!streamer.streamerClosed() && streamer.getBlock() != null) {
+            lastBlockLength = streamer.getBlock().getNumBytes();
           }
         }
       }
       // If 1) any new blocks were allocated since the last flush, or 2) to
       // update length in NN is required, then persist block locations on
       // namenode.
-      if (persistBlocks.getAndSet(false) || updateLength) {
+      if (streamer.getPersistBlocks().getAndSet(false) || updateLength) {
         try {
           dfsClient.namenode.fsync(src, fileId, dfsClient.clientName,
               lastBlockLength);
@@ -2035,7 +628,7 @@ public class DFSOutputStream extends FSOutputSummer
       }
 
       synchronized(this) {
-        if (streamer != null) {
+        if (!streamer.streamerClosed()) {
           streamer.setHflush();
         }
       }
@@ -2048,7 +641,7 @@ public class DFSOutputStream extends FSOutputSummer
       DFSClient.LOG.warn("Error while syncing", e);
       synchronized (this) {
         if (!isClosed()) {
-          lastException.set(new IOException("IOException flush: " + e));
+          streamer.getLastException().set(new IOException("IOException flush: " + e));
           closeThreads(true);
         }
       }
@@ -2073,7 +666,7 @@ public class DFSOutputStream extends FSOutputSummer
   public synchronized int getCurrentBlockReplication() throws IOException {
     dfsClient.checkOpen();
     checkClosed();
-    if (streamer == null) {
+    if (streamer.streamerClosed()) {
       return blockReplication; // no pipeline, return repl factor of file
     }
     DatanodeInfo[] currentNodes = streamer.getNodes();
@@ -2095,47 +688,12 @@ public class DFSOutputStream extends FSOutputSummer
       //
       // If there is data in the current buffer, send it across
       //
-      queueCurrentPacket();
-      toWaitFor = lastQueuedSeqno;
+      streamer.queuePacket(currentPacket);
+      currentPacket = null;
+      toWaitFor = streamer.getLastQueuedSeqno();
     }
 
-    waitForAckedSeqno(toWaitFor);
-  }
-
-  private void waitForAckedSeqno(long seqno) throws IOException {
-    TraceScope scope = Trace.startSpan("waitForAckedSeqno", Sampler.NEVER);
-    try {
-      if (DFSClient.LOG.isDebugEnabled()) {
-        DFSClient.LOG.debug("Waiting for ack for: " + seqno);
-      }
-      long begin = Time.monotonicNow();
-      try {
-        synchronized (dataQueue) {
-          while (!isClosed()) {
-            checkClosed();
-            if (lastAckedSeqno >= seqno) {
-              break;
-            }
-            try {
-              dataQueue.wait(1000); // when we receive an ack, we notify on
-              // dataQueue
-            } catch (InterruptedException ie) {
-              throw new InterruptedIOException(
-                  "Interrupted while waiting for data to be acknowledged by pipeline");
-            }
-          }
-        }
-        checkClosed();
-      } catch (ClosedChannelException e) {
-      }
-      long duration = Time.monotonicNow() - begin;
-      if (duration > dfsclientSlowLogThresholdMs) {
-        DFSClient.LOG.warn("Slow waitForAckedSeqno took " + duration
-            + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms)");
-      }
-    } finally {
-      scope.close();
-    }
+    streamer.waitForAckedSeqno(toWaitFor);
   }
 
   private synchronized void start() {
@@ -2157,22 +715,12 @@ public class DFSOutputStream extends FSOutputSummer
   }
 
   boolean isClosed() {
-    return closed;
+    return closed || streamer.streamerClosed();
   }
 
   void setClosed() {
     closed = true;
-    synchronized (dataQueue) {
-      releaseBuffer(dataQueue, byteArrayManager);
-      releaseBuffer(ackQueue, byteArrayManager);
-    }
-  }
-  
-  private static void releaseBuffer(List<DFSPacket> packets, ByteArrayManager bam) {
-    for (DFSPacket p : packets) {
-      p.releaseBuffer(bam);
-    }
-    packets.clear();
+    streamer.release();
   }
 
   // shutdown datastreamer and responseprocessor threads.
@@ -2181,14 +729,11 @@ public class DFSOutputStream extends FSOutputSummer
     try {
       streamer.close(force);
       streamer.join();
-      if (s != null) {
-        s.close();
-      }
+      streamer.closeSocket();
     } catch (InterruptedException e) {
       throw new IOException("Failed to shutdown streamer");
     } finally {
-      streamer = null;
-      s = null;
+      streamer.setSocketToNull();
       setClosed();
     }
   }
@@ -2210,7 +755,7 @@ public class DFSOutputStream extends FSOutputSummer
 
   private synchronized void closeImpl() throws IOException {
     if (isClosed()) {
-      IOException e = lastException.getAndSet(null);
+      IOException e = streamer.getLastException().getAndSet(null);
       if (e == null)
         return;
       else
@@ -2221,12 +766,14 @@ public class DFSOutputStream extends FSOutputSummer
       flushBuffer();       // flush from all upper layers
 
       if (currentPacket != null) { 
-        waitAndQueueCurrentPacket();
+        streamer.waitAndQueuePacket(currentPacket);
+        currentPacket = null;
       }
 
-      if (bytesCurBlock != 0) {
+      if (streamer.getBytesCurBlock() != 0) {
         // send an empty packet to mark the end of the block
-        currentPacket = createPacket(0, 0, bytesCurBlock, currentSeqno++, true);
+        currentPacket = createPacket(0, 0, streamer.getBytesCurBlock(),
+            streamer.getAndIncCurrentSeqno(), true);
         currentPacket.setSyncBlock(shouldSyncBlock);
       }
 
@@ -2261,7 +808,7 @@ public class DFSOutputStream extends FSOutputSummer
       if (!fileComplete) {
         final int hdfsTimeout = dfsClient.getHdfsTimeout();
         if (!dfsClient.clientRunning
-            || (hdfsTimeout > 0 
+            || (hdfsTimeout > 0
                 && localstart + hdfsTimeout < Time.monotonicNow())) {
             String msg = "Unable to close file because dfsclient " +
                           " was unable to contact the HDFS servers." +
@@ -2290,7 +837,7 @@ public class DFSOutputStream extends FSOutputSummer
 
   @VisibleForTesting
   public void setArtificialSlowdown(long period) {
-    artificialSlowdown = period;
+    streamer.setArtificialSlowdown(period);
   }
 
   @VisibleForTesting
@@ -2299,10 +846,6 @@ public class DFSOutputStream extends FSOutputSummer
     packetSize = (bytesPerChecksum + getChecksumSize()) * chunksPerPacket;
   }
 
-  synchronized void setTestFilename(String newname) {
-    src = newname;
-  }
-
   /**
    * Returns the size of a file as it was when this stream was opened
    */
@@ -2345,9 +888,4 @@ public class DFSOutputStream extends FSOutputSummer
   public long getFileId() {
     return fileId;
   }
-
-  private static <T> void arraycopy(T[] srcs, T[] dsts, int skipIndex) {
-    System.arraycopy(srcs, 0, dsts, 0, skipIndex);
-    System.arraycopy(srcs, skipIndex+1, dsts, skipIndex, dsts.length-skipIndex);
-  }
 }
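
The hunks above replace DFSOutputStream's own bytesCurBlock / currentSeqno / lastException fields with calls through the DataStreamer. A minimal sketch of that delegation pattern, using simplified stand-in classes rather than the actual HDFS types, is below.

import java.io.IOException;
import java.nio.channels.ClosedChannelException;
import java.util.concurrent.atomic.AtomicReference;

// Simplified stand-ins (not the real HDFS classes): the stream no longer owns
// bytesCurBlock, currentSeqno, or lastException; it reads and updates them
// through its streamer, as in the hunks above.
class ToyStreamer {
  private final AtomicReference<IOException> lastException =
      new AtomicReference<IOException>();
  private long bytesCurBlock;
  private long currentSeqno;

  AtomicReference<IOException> getLastException() { return lastException; }
  long getBytesCurBlock() { return bytesCurBlock; }
  void setBytesCurBlock(long v) { bytesCurBlock = v; }
  void incBytesCurBlock(long len) { bytesCurBlock += len; }
  long getAndIncCurrentSeqno() { return currentSeqno++; }
}

public class ToyOutputStream {
  private final ToyStreamer streamer = new ToyStreamer();
  private boolean closed = false;

  // Mirrors the refactored checkClosed(): the last exception lives in the streamer.
  private void checkClosed() throws IOException {
    if (closed) {
      IOException e = streamer.getLastException().get();
      throw e != null ? e : new ClosedChannelException();
    }
  }

  // Mirrors the shape of writeChunk(): the per-block counters are kept by the streamer.
  void writeChunk(int len) throws IOException {
    checkClosed();
    long seqno = streamer.getAndIncCurrentSeqno();
    streamer.incBytesCurBlock(len);
    System.out.println("packet seqno=" + seqno
        + " bytesCurBlock=" + streamer.getBytesCurBlock());
  }

  public static void main(String[] args) throws IOException {
    ToyOutputStream out = new ToyOutputStream();
    out.writeChunk(512);
    out.writeChunk(512);
  }
}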


[14/50] [abbrv] hadoop git commit: HDFS-7960. The full block report should prune zombie storages even if they're not empty. Contributed by Colin McCabe and Eddy Xu.

Posted by zj...@apache.org.
HDFS-7960. The full block report should prune zombie storages even if they're not empty. Contributed by Colin McCabe and Eddy Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d360bbc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d360bbc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d360bbc

Branch: refs/heads/YARN-2928
Commit: 4d360bbcab2e0bbc054c394c7b50b54dbe6d3234
Parents: 524987c
Author: Andrew Wang <wa...@apache.org>
Authored: Mon Mar 23 22:00:34 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:44 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../DatanodeProtocolClientSideTranslatorPB.java |   5 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  15 +++
 .../server/blockmanagement/BlockManager.java    |  53 +++++++-
 .../blockmanagement/DatanodeDescriptor.java     |  51 ++++++-
 .../blockmanagement/DatanodeStorageInfo.java    |  13 +-
 .../hdfs/server/datanode/BPServiceActor.java    |  34 +++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  11 +-
 .../server/protocol/BlockReportContext.java     |  52 +++++++
 .../hdfs/server/protocol/DatanodeProtocol.java  |  10 +-
 .../src/main/proto/DatanodeProtocol.proto       |  14 ++
 .../hdfs/protocol/TestBlockListAsLongs.java     |   7 +-
 .../blockmanagement/TestBlockManager.java       |   8 +-
 .../TestNameNodePrunesMissingStorages.java      | 135 ++++++++++++++++++-
 .../server/datanode/BlockReportTestBase.java    |   4 +-
 .../server/datanode/TestBPOfferService.java     |  10 +-
 .../TestBlockHasMultipleReplicasOnSameDN.java   |   4 +-
 .../datanode/TestDataNodeVolumeFailure.java     |   3 +-
 .../TestDatanodeProtocolRetryPolicy.java        |   4 +-
 ...TestDnRespectsBlockReportSplitThreshold.java |   7 +-
 .../TestNNHandlesBlockReportPerStorage.java     |   7 +-
 .../TestNNHandlesCombinedBlockReport.java       |   4 +-
 .../server/datanode/TestTriggerBlockReport.java |   7 +-
 .../server/namenode/NNThroughputBenchmark.java  |   9 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   4 +-
 .../hdfs/server/namenode/ha/TestDNFencing.java  |   4 +-
 27 files changed, 433 insertions(+), 49 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d2891e3..3dd5fb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1241,6 +1241,9 @@ Release 2.7.0 - UNRELEASED
     provided by the client is larger than the one stored in the datanode.
     (Brahma Reddy Battula via szetszwo)
 
+    HDFS-7960. The full block report should prune zombie storages even if
+    they're not empty. (cmccabe and Eddy Xu via wang)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index c4003f1..825e835 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlo
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -169,7 +170,8 @@ public class DatanodeProtocolClientSideTranslatorPB implements
 
   @Override
   public DatanodeCommand blockReport(DatanodeRegistration registration,
-      String poolId, StorageBlockReport[] reports) throws IOException {
+      String poolId, StorageBlockReport[] reports, BlockReportContext context)
+        throws IOException {
     BlockReportRequestProto.Builder builder = BlockReportRequestProto
         .newBuilder().setRegistration(PBHelper.convert(registration))
         .setBlockPoolId(poolId);
@@ -191,6 +193,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
       }
       builder.addReports(reportBuilder.build());
     }
+    builder.setContext(PBHelper.convert(context));
     BlockReportResponseProto resp;
     try {
       resp = rpcProxy.blockReport(NULL_CONTROLLER, builder.build());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
index e18081f..873eb6d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
@@ -161,7 +161,9 @@ public class DatanodeProtocolServerSideTranslatorPB implements
     }
     try {
       cmd = impl.blockReport(PBHelper.convert(request.getRegistration()),
-          request.getBlockPoolId(), report);
+          request.getBlockPoolId(), report,
+          request.hasContext() ?
+              PBHelper.convert(request.getContext()) : null);
     } catch (IOException e) {
       throw new ServiceException(e);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index fad1d2c..b841850 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -111,6 +111,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rollin
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto;
@@ -123,6 +124,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHe
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.VolumeFailureSummaryProto;
+import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReportContextProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -194,6 +196,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockIdCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.CheckpointCommand;
@@ -3009,4 +3012,16 @@ public class PBHelper {
     return targetPinnings;
   }
 
+  public static BlockReportContext convert(BlockReportContextProto proto) {
+    return new BlockReportContext(proto.getTotalRpcs(),
+        proto.getCurRpc(), proto.getId());
+  }
+
+  public static BlockReportContextProto convert(BlockReportContext context) {
+    return BlockReportContextProto.newBuilder().
+        setTotalRpcs(context.getTotalRpcs()).
+        setCurRpc(context.getCurRpc()).
+        setId(context.getReportId()).
+        build();
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 674c0ea..91cfead 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -1770,7 +1771,8 @@ public class BlockManager {
    */
   public boolean processReport(final DatanodeID nodeID,
       final DatanodeStorage storage,
-      final BlockListAsLongs newReport) throws IOException {
+      final BlockListAsLongs newReport, BlockReportContext context,
+      boolean lastStorageInRpc) throws IOException {
     namesystem.writeLock();
     final long startTime = Time.monotonicNow(); //after acquiring write lock
     final long endTime;
@@ -1809,6 +1811,29 @@ public class BlockManager {
       }
       
       storageInfo.receivedBlockReport();
+      if (context != null) {
+        storageInfo.setLastBlockReportId(context.getReportId());
+        if (lastStorageInRpc) {
+          int rpcsSeen = node.updateBlockReportContext(context);
+          if (rpcsSeen >= context.getTotalRpcs()) {
+            List<DatanodeStorageInfo> zombies = node.removeZombieStorages();
+            if (zombies.isEmpty()) {
+              LOG.debug("processReport 0x{}: no zombie storages found.",
+                  Long.toHexString(context.getReportId()));
+            } else {
+              for (DatanodeStorageInfo zombie : zombies) {
+                removeZombieReplicas(context, zombie);
+              }
+            }
+            node.clearBlockReportContext();
+          } else {
+            LOG.debug("processReport 0x{}: {} more RPCs remaining in this " +
+                    "report.", Long.toHexString(context.getReportId()),
+                (context.getTotalRpcs() - rpcsSeen)
+            );
+          }
+        }
+      }
     } finally {
       endTime = Time.monotonicNow();
       namesystem.writeUnlock();
@@ -1833,6 +1858,32 @@ public class BlockManager {
     return !node.hasStaleStorages();
   }
 
+  private void removeZombieReplicas(BlockReportContext context,
+      DatanodeStorageInfo zombie) {
+    LOG.warn("processReport 0x{}: removing zombie storage {}, which no " +
+             "longer exists on the DataNode.",
+              Long.toHexString(context.getReportId()), zombie.getStorageID());
+    assert(namesystem.hasWriteLock());
+    Iterator<BlockInfoContiguous> iter = zombie.getBlockIterator();
+    int prevBlocks = zombie.numBlocks();
+    while (iter.hasNext()) {
+      BlockInfoContiguous block = iter.next();
+      // We assume that a block can be on only one storage in a DataNode.
+      // That's why we pass in the DatanodeDescriptor rather than the
+      // DatanodeStorageInfo.
+      // TODO: remove this assumption in case we want to put a block on
+      // more than one storage on a datanode (and because it's a difficult
+      // assumption to really enforce)
+      removeStoredBlock(block, zombie.getDatanodeDescriptor());
+      invalidateBlocks.remove(zombie.getDatanodeDescriptor(), block);
+    }
+    assert(zombie.numBlocks() == 0);
+    LOG.warn("processReport 0x{}: removed {} replicas from storage {}, " +
+            "which no longer exists on the DataNode.",
+            Long.toHexString(context.getReportId()), prevBlocks,
+            zombie.getStorageID());
+  }
+
   /**
    * Rescan the list of blocks which were previously postponed.
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 3f143e7..d0d7a72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.ArrayList;
+import java.util.BitSet;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
@@ -31,6 +32,7 @@ import java.util.Set;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import com.google.common.collect.ImmutableList;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -40,6 +42,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary;
@@ -62,7 +65,25 @@ public class DatanodeDescriptor extends DatanodeInfo {
   // Stores status of decommissioning.
   // If node is not decommissioning, do not use this object for anything.
   public final DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
-  
+
+  private long curBlockReportId = 0;
+
+  private BitSet curBlockReportRpcsSeen = null;
+
+  public int updateBlockReportContext(BlockReportContext context) {
+    if (curBlockReportId != context.getReportId()) {
+      curBlockReportId = context.getReportId();
+      curBlockReportRpcsSeen = new BitSet(context.getTotalRpcs());
+    }
+    curBlockReportRpcsSeen.set(context.getCurRpc());
+    return curBlockReportRpcsSeen.cardinality();
+  }
+
+  public void clearBlockReportContext() {
+    curBlockReportId = 0;
+    curBlockReportRpcsSeen = null;
+  }
+
   /** Block and targets pair */
   @InterfaceAudience.Private
   @InterfaceStability.Evolving
@@ -282,6 +303,34 @@ public class DatanodeDescriptor extends DatanodeInfo {
     }
   }
 
+  static final private List<DatanodeStorageInfo> EMPTY_STORAGE_INFO_LIST =
+      ImmutableList.of();
+
+  List<DatanodeStorageInfo> removeZombieStorages() {
+    List<DatanodeStorageInfo> zombies = null;
+    synchronized (storageMap) {
+      Iterator<Map.Entry<String, DatanodeStorageInfo>> iter =
+          storageMap.entrySet().iterator();
+      while (iter.hasNext()) {
+        Map.Entry<String, DatanodeStorageInfo> entry = iter.next();
+        DatanodeStorageInfo storageInfo = entry.getValue();
+        if (storageInfo.getLastBlockReportId() != curBlockReportId) {
+          LOG.info(storageInfo.getStorageID() + " had lastBlockReportId 0x" +
+              Long.toHexString(storageInfo.getLastBlockReportId()) +
+              ", but curBlockReportId = 0x" +
+              Long.toHexString(curBlockReportId));
+          iter.remove();
+          if (zombies == null) {
+            zombies = new LinkedList<DatanodeStorageInfo>();
+          }
+          zombies.add(storageInfo);
+        }
+        storageInfo.setLastBlockReportId(0);
+      }
+    }
+    return zombies == null ? EMPTY_STORAGE_INFO_LIST : zombies;
+  }
+
   /**
    * Remove block from the list of blocks belonging to the data-node. Remove
    * data-node from the block.
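
A minimal sketch of the NameNode-side bookkeeping these changes add, assuming simplified stand-in types rather than the real DatanodeDescriptor/DatanodeStorageInfo: the RPCs of one report are tracked in a BitSet, and once all of them have been seen, any storage not stamped with the current report ID is treated as a zombie and removed.

import java.util.BitSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class ZombiePruneSketch {

  // Stand-in for DatanodeStorageInfo: only the last report ID matters here.
  static final class Storage {
    long lastBlockReportId;
  }

  private final Map<String, Storage> storageMap = new HashMap<String, Storage>();
  private long curBlockReportId;
  private BitSet rpcsSeen;

  // Mirrors updateBlockReportContext(): track which RPCs of the current
  // report have arrived, keyed by the 64-bit report ID.
  int updateContext(long reportId, int curRpc, int totalRpcs) {
    if (curBlockReportId != reportId) {
      curBlockReportId = reportId;
      rpcsSeen = new BitSet(totalRpcs);
    }
    rpcsSeen.set(curRpc);
    return rpcsSeen.cardinality();
  }

  // Mirrors removeZombieStorages(): once every RPC of the report has been
  // processed, any storage not stamped with the current report ID was not
  // mentioned by the DataNode and is pruned.
  void pruneZombies() {
    Iterator<Map.Entry<String, Storage>> iter = storageMap.entrySet().iterator();
    while (iter.hasNext()) {
      Map.Entry<String, Storage> entry = iter.next();
      Storage s = entry.getValue();
      if (s.lastBlockReportId != curBlockReportId) {
        System.out.println("pruning zombie storage " + entry.getKey());
        iter.remove();
      }
      s.lastBlockReportId = 0;   // reset for the next full report
    }
  }

  public static void main(String[] args) {
    ZombiePruneSketch node = new ZombiePruneSketch();
    node.storageMap.put("DS-1", new Storage());
    node.storageMap.put("DS-2", new Storage());   // will become a zombie

    long reportId = 0xabcL;
    // A single-RPC report that only covers DS-1.
    node.storageMap.get("DS-1").lastBlockReportId = reportId;
    if (node.updateContext(reportId, 0, 1) >= 1) {
      node.pruneZombies();
    }
  }
}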

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index c4612a3..be16a87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -115,6 +115,9 @@ public class DatanodeStorageInfo {
   private volatile BlockInfoContiguous blockList = null;
   private int numBlocks = 0;
 
+  // The ID of the last full block report which updated this storage.
+  private long lastBlockReportId = 0;
+
   /** The number of block reports received */
   private int blockReportCount = 0;
 
@@ -178,7 +181,15 @@ public class DatanodeStorageInfo {
     this.remaining = remaining;
     this.blockPoolUsed = blockPoolUsed;
   }
-  
+
+  long getLastBlockReportId() {
+    return lastBlockReportId;
+  }
+
+  void setLastBlockReportId(long lastBlockReportId) {
+    this.lastBlockReportId = lastBlockReportId;
+  }
+
   State getState() {
     return this.state;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index 90f2fe6..10cce45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -434,6 +435,17 @@ class BPServiceActor implements Runnable {
     return sendImmediateIBR;
   }
 
+  private long prevBlockReportId = 0;
+
+  private long generateUniqueBlockReportId() {
+    long id = System.nanoTime();
+    if (id <= prevBlockReportId) {
+      id = prevBlockReportId + 1;
+    }
+    prevBlockReportId = id;
+    return id;
+  }
+
   /**
    * Report the list blocks to the Namenode
    * @return DatanodeCommands returned by the NN. May be null.
@@ -476,11 +488,13 @@ class BPServiceActor implements Runnable {
     int numRPCs = 0;
     boolean success = false;
     long brSendStartTime = monotonicNow();
+    long reportId = generateUniqueBlockReportId();
     try {
       if (totalBlockCount < dnConf.blockReportSplitThreshold) {
         // Below split threshold, send all reports in a single message.
         DatanodeCommand cmd = bpNamenode.blockReport(
-            bpRegistration, bpos.getBlockPoolId(), reports);
+            bpRegistration, bpos.getBlockPoolId(), reports,
+              new BlockReportContext(1, 0, reportId));
         numRPCs = 1;
         numReportsSent = reports.length;
         if (cmd != null) {
@@ -488,10 +502,11 @@ class BPServiceActor implements Runnable {
         }
       } else {
         // Send one block report per message.
-        for (StorageBlockReport report : reports) {
-          StorageBlockReport singleReport[] = { report };
+        for (int r = 0; r < reports.length; r++) {
+          StorageBlockReport singleReport[] = { reports[r] };
           DatanodeCommand cmd = bpNamenode.blockReport(
-              bpRegistration, bpos.getBlockPoolId(), singleReport);
+              bpRegistration, bpos.getBlockPoolId(), singleReport,
+              new BlockReportContext(reports.length, r, reportId));
           numReportsSent++;
           numRPCs++;
           if (cmd != null) {
@@ -507,11 +522,12 @@ class BPServiceActor implements Runnable {
       dn.getMetrics().addBlockReport(brSendCost);
       final int nCmds = cmds.size();
       LOG.info((success ? "S" : "Uns") +
-          "uccessfully sent " + numReportsSent +
-          " of " + reports.length +
-          " blockreports for " + totalBlockCount +
-          " total blocks using " + numRPCs +
-          " RPCs. This took " + brCreateCost +
+          "uccessfully sent block report 0x" +
+          Long.toHexString(reportId) + ",  containing " + reports.length +
+          " storage report(s), of which we sent " + numReportsSent + "." +
+          " The reports had " + totalBlockCount +
+          " total blocks and used " + numRPCs +
+          " RPC(s). This took " + brCreateCost +
           " msec to generate and " + brSendCost +
           " msecs for RPC and NN processing." +
           " Got back " +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 059bd28..1788335 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -121,6 +121,7 @@ import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -1292,7 +1293,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
 
   @Override // DatanodeProtocol
   public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
-      String poolId, StorageBlockReport[] reports) throws IOException {
+        String poolId, StorageBlockReport[] reports,
+        BlockReportContext context) throws IOException {
     checkNNStartup();
     verifyRequest(nodeReg);
     if(blockStateChangeLog.isDebugEnabled()) {
@@ -1301,14 +1303,15 @@ class NameNodeRpcServer implements NamenodeProtocols {
     }
     final BlockManager bm = namesystem.getBlockManager(); 
     boolean noStaleStorages = false;
-    for(StorageBlockReport r : reports) {
-      final BlockListAsLongs blocks = r.getBlocks();
+    for (int r = 0; r < reports.length; r++) {
+      final BlockListAsLongs blocks = reports[r].getBlocks();
       //
       // BlockManager.processReport accumulates information of prior calls
       // for the same node and storage, so the value returned by the last
       // call of this loop is the final updated value for noStaleStorage.
       //
-      noStaleStorages = bm.processReport(nodeReg, r.getStorage(), blocks);
+      noStaleStorages = bm.processReport(nodeReg, reports[r].getStorage(),
+          blocks, context, (r == reports.length - 1));
       metrics.incrStorageBlockReportOps();
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
new file mode 100644
index 0000000..a084a81
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockReportContext.java
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.protocol;
+
+/**
+ * The context of the block report.
+ *
+ * This is a set of fields that the Datanode sends to provide context about a
+ * block report RPC.  The context includes a unique 64-bit ID which
+ * identifies the block report as a whole.  It also includes the total number
+ * of RPCs which this block report is split into, and the index into that
+ * total for the current RPC.
+ */
+public class BlockReportContext {
+  private final int totalRpcs;
+  private final int curRpc;
+  private final long reportId;
+
+  public BlockReportContext(int totalRpcs, int curRpc, long reportId) {
+    this.totalRpcs = totalRpcs;
+    this.curRpc = curRpc;
+    this.reportId = reportId;
+  }
+
+  public int getTotalRpcs() {
+    return totalRpcs;
+  }
+
+  public int getCurRpc() {
+    return curRpc;
+  }
+
+  public long getReportId() {
+    return reportId;
+  }
+}
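
A minimal, self-contained sketch of how this context is intended to be used on the DataNode side, mirroring the BPServiceActor change above (simplified stand-in types, not the production code): one 64-bit ID covers the whole report, and (curRpc, totalRpcs) lets the NameNode tell when it has seen every RPC of a split report.

public class BlockReportContextSketch {

  // Mirrors the fields of the BlockReportContext class added above.
  static final class Context {
    final int totalRpcs;
    final int curRpc;
    final long reportId;
    Context(int totalRpcs, int curRpc, long reportId) {
      this.totalRpcs = totalRpcs;
      this.curRpc = curRpc;
      this.reportId = reportId;
    }
  }

  private static long prevReportId = 0;

  // Same idea as the generateUniqueBlockReportId() added to BPServiceActor
  // above: nanoTime-based, forced to be strictly increasing per DataNode.
  static long generateUniqueReportId() {
    long id = System.nanoTime();
    if (id <= prevReportId) {
      id = prevReportId + 1;
    }
    prevReportId = id;
    return id;
  }

  public static void main(String[] args) {
    // Stand-ins for the per-storage StorageBlockReport[] array.
    String[] storageReports = { "DS-1", "DS-2", "DS-3" };
    long reportId = generateUniqueReportId();

    // One RPC per storage, each tagged with its position within the report,
    // so the NameNode can prune zombie storages once all RPCs have arrived.
    for (int r = 0; r < storageReports.length; r++) {
      Context ctx = new Context(storageReports.length, r, reportId);
      System.out.printf("rpc for %s -> id=0x%x curRpc=%d totalRpcs=%d%n",
          storageReports[r], ctx.reportId, ctx.curRpc, ctx.totalRpcs);
    }
  }
}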

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 047de56..a3b6004 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -23,7 +23,6 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -128,20 +127,23 @@ public interface DatanodeProtocol {
    *     Each finalized block is represented as 3 longs. Each under-
    *     construction replica is represented as 4 longs.
    *     This is done instead of Block[] to reduce memory used by block reports.
-   *     
+   * @param reports report of blocks per storage
+   * @param context Context information for this block report.
+   *
    * @return - the next command for DN to process.
    * @throws IOException
    */
   @Idempotent
   public DatanodeCommand blockReport(DatanodeRegistration registration,
-      String poolId, StorageBlockReport[] reports) throws IOException;
+            String poolId, StorageBlockReport[] reports,
+            BlockReportContext context) throws IOException;
     
 
   /**
    * Communicates the complete list of locally cached blocks to the NameNode.
    * 
    * This method is similar to
-   * {@link #blockReport(DatanodeRegistration, String, StorageBlockReport[])},
+   * {@link #blockReport(DatanodeRegistration, String, StorageBlockReport[], BlockReportContext)},
    * which is used to communicated blocks stored on disk.
    *
    * @param            The datanode registration.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
index 7b3a4a9..3083dc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
@@ -224,11 +224,25 @@ message HeartbeatResponseProto {
  *                second long represents length
  *                third long represents gen stamp
  *                fourth long (if under construction) represents replica state
+ * context      - An optional field containing information about the context
+ *                of this block report.
  */
 message BlockReportRequestProto {
   required DatanodeRegistrationProto registration = 1;
   required string blockPoolId = 2;
   repeated StorageBlockReportProto reports = 3;
+  optional BlockReportContextProto context = 4;
+}
+
+message BlockReportContextProto  {
+  // The total number of RPCs this block report is broken into.
+  required int32 totalRpcs = 1;
+
+  // The index of the current RPC (zero-based)
+  required int32 curRpc = 2;
+
+  // The unique 64-bit ID of this block report
+  required int64 id = 3;
 }
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java
index bebde18..f0dab4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockListAsLongs.java
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.Replica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
@@ -219,7 +220,8 @@ public class TestBlockListAsLongs {
     // check DN sends new-style BR
     request.set(null);
     nsInfo.setCapabilities(Capability.STORAGE_BLOCK_REPORT_BUFFERS.getMask());
-    nn.blockReport(reg, "pool", sbr);
+    nn.blockReport(reg, "pool", sbr,
+        new BlockReportContext(1, 0, System.nanoTime()));
     BlockReportRequestProto proto = request.get();
     assertNotNull(proto);
     assertTrue(proto.getReports(0).getBlocksList().isEmpty());
@@ -228,7 +230,8 @@ public class TestBlockListAsLongs {
     // back up to prior version and check DN sends old-style BR
     request.set(null);
     nsInfo.setCapabilities(Capability.UNKNOWN.getMask());
-    nn.blockReport(reg, "pool", sbr);
+    nn.blockReport(reg, "pool", sbr,
+        new BlockReportContext(1, 0, System.nanoTime()));
     proto = request.get();
     assertNotNull(proto);
     assertFalse(proto.getReports(0).getBlocksList().isEmpty());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index d9ac9e5..707c780 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -555,12 +555,12 @@ public class TestBlockManager {
     reset(node);
     
     bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
-        BlockListAsLongs.EMPTY);
+        BlockListAsLongs.EMPTY, null, false);
     assertEquals(1, ds.getBlockReportCount());
     // send block report again, should NOT be processed
     reset(node);
     bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
-        BlockListAsLongs.EMPTY);
+        BlockListAsLongs.EMPTY, null, false);
     assertEquals(1, ds.getBlockReportCount());
 
     // re-register as if node restarted, should update existing node
@@ -571,7 +571,7 @@ public class TestBlockManager {
     // send block report, should be processed after restart
     reset(node);
     bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
-                     BlockListAsLongs.EMPTY);
+                     BlockListAsLongs.EMPTY, null, false);
     // Reinitialize as registration with empty storage list pruned
     // node.storageMap.
     ds = node.getStorageInfos()[0];
@@ -600,7 +600,7 @@ public class TestBlockManager {
     reset(node);
     doReturn(1).when(node).numBlocks();
     bm.processReport(node, new DatanodeStorage(ds.getStorageID()),
-        BlockListAsLongs.EMPTY);
+        BlockListAsLongs.EMPTY, null, false);
     assertEquals(1, ds.getBlockReportCount());
   }
   

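The processReport calls above gained two trailing arguments whose names are not visible in this diff. Judging only from the call sites, the first is the optional BlockReportContext (null here, meaning a report sent without a context) and the second is a boolean flag added by the same change; the reading below is a guess, not taken from the patch:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

class ProcessReportCallSketch {
  // Mirrors the test calls above; the comments on the last two arguments are
  // assumptions, since the parameter names do not appear in this diff.
  static void sendEmptyReport(BlockManager bm, DatanodeDescriptor node,
      DatanodeStorage storage) throws IOException {
    bm.processReport(node, storage, BlockListAsLongs.EMPTY,
        null,    // BlockReportContext, or null for a context-free report
        false);  // boolean flag introduced by this change (name assumed)
  }
}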
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index b67ae7a..4b97d01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -18,26 +18,40 @@
 
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import com.google.common.base.Supplier;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.commons.math3.stat.inference.TestUtils;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
 import org.junit.Test;
 
+import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
 
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertEquals;
 
 
 public class TestNameNodePrunesMissingStorages {
@@ -110,7 +124,9 @@ public class TestNameNodePrunesMissingStorages {
   }
 
   /**
-   * Verify that the NameNode does not prune storages with blocks.
+   * Verify that the NameNode does not prune storages with blocks
+   * simply as a result of a heartbeat that is missing that storage.
+   *
    * @throws IOException
    */
   @Test (timeout=300000)
@@ -118,4 +134,119 @@ public class TestNameNodePrunesMissingStorages {
     // Run the test with 1 storage; after the test, still expect 1 storage.
     runTest(GenericTestUtils.getMethodName(), true, 1, 1);
   }
+
+  /**
+   * Regression test for HDFS-7960.<p/>
+   *
+   * Shutting down a datanode, removing a storage directory, and restarting
+   * the DataNode should not produce zombie storages.
+   */
+  @Test(timeout=300000)
+  public void testRemovingStorageDoesNotProduceZombies() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
+    final int NUM_STORAGES_PER_DN = 2;
+    final MiniDFSCluster cluster = new MiniDFSCluster
+        .Builder(conf).numDataNodes(3)
+        .storagesPerDatanode(NUM_STORAGES_PER_DN)
+        .build();
+    try {
+      cluster.waitActive();
+      for (DataNode dn : cluster.getDataNodes()) {
+        assertEquals(NUM_STORAGES_PER_DN,
+          cluster.getNamesystem().getBlockManager().
+              getDatanodeManager().getDatanode(dn.getDatanodeId()).
+              getStorageInfos().length);
+      }
+      // Create a file which will end up on all 3 datanodes.
+      final Path TEST_PATH = new Path("/foo1");
+      DistributedFileSystem fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, TEST_PATH, 1024, (short) 3, 0xcafecafe);
+      for (DataNode dn : cluster.getDataNodes()) {
+        DataNodeTestUtils.triggerBlockReport(dn);
+      }
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/foo1"));
+      cluster.getNamesystem().writeLock();
+      final String storageIdToRemove;
+      String datanodeUuid;
+      // Find the first storage which this block is in.
+      try {
+        Iterator<DatanodeStorageInfo> storageInfoIter =
+            cluster.getNamesystem().getBlockManager().
+                getStorages(block.getLocalBlock()).iterator();
+        assertTrue(storageInfoIter.hasNext());
+        DatanodeStorageInfo info = storageInfoIter.next();
+        storageIdToRemove = info.getStorageID();
+        datanodeUuid = info.getDatanodeDescriptor().getDatanodeUuid();
+      } finally {
+        cluster.getNamesystem().writeUnlock();
+      }
+      // Find the DataNode which holds that first storage.
+      final DataNode datanodeToRemoveStorageFrom;
+      int datanodeToRemoveStorageFromIdx = 0;
+      while (true) {
+        if (datanodeToRemoveStorageFromIdx >= cluster.getDataNodes().size()) {
+          Assert.fail("failed to find datanode with uuid " + datanodeUuid);
+          datanodeToRemoveStorageFrom = null;
+          break;
+        }
+        DataNode dn = cluster.getDataNodes().
+            get(datanodeToRemoveStorageFromIdx);
+        if (dn.getDatanodeUuid().equals(datanodeUuid)) {
+          datanodeToRemoveStorageFrom = dn;
+          break;
+        }
+        datanodeToRemoveStorageFromIdx++;
+      }
+      // Find the volume within the datanode which holds that first storage.
+      List<? extends FsVolumeSpi> volumes =
+          datanodeToRemoveStorageFrom.getFSDataset().getVolumes();
+      assertEquals(NUM_STORAGES_PER_DN, volumes.size());
+      String volumeDirectoryToRemove = null;
+      for (FsVolumeSpi volume : volumes) {
+        if (volume.getStorageID().equals(storageIdToRemove)) {
+          volumeDirectoryToRemove = volume.getBasePath();
+        }
+      }
+      // Shut down the datanode and remove the volume.
+      // Replace the volume directory with a regular file, which will
+      // cause a volume failure.  (If we merely removed the directory,
+      // it would be re-initialized with a new storage ID.)
+      assertNotNull(volumeDirectoryToRemove);
+      datanodeToRemoveStorageFrom.shutdown();
+      FileUtil.fullyDelete(new File(volumeDirectoryToRemove));
+      FileOutputStream fos = new FileOutputStream(volumeDirectoryToRemove);
+      try {
+        fos.write(1);
+      } finally {
+        fos.close();
+      }
+      cluster.restartDataNode(datanodeToRemoveStorageFromIdx);
+      // Wait for the NameNode to remove the storage.
+      LOG.info("waiting for the datanode to remove " + storageIdToRemove);
+      GenericTestUtils.waitFor(new Supplier<Boolean>() {
+        @Override
+        public Boolean get() {
+          final DatanodeDescriptor dnDescriptor =
+              cluster.getNamesystem().getBlockManager().getDatanodeManager().
+                  getDatanode(datanodeToRemoveStorageFrom.getDatanodeUuid());
+          assertNotNull(dnDescriptor);
+          DatanodeStorageInfo[] infos = dnDescriptor.getStorageInfos();
+          for (DatanodeStorageInfo info : infos) {
+            if (info.getStorageID().equals(storageIdToRemove)) {
+              LOG.info("Still found storage " + storageIdToRemove + " on " +
+                  info + ".");
+              return false;
+            }
+          }
+          assertEquals(NUM_STORAGES_PER_DN - 1, infos.length);
+          return true;
+        }
+      }, 10, 30000);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

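The regression test above leans on GenericTestUtils.waitFor to poll until the NameNode drops the failed storage, checking every 10 ms and timing out after 30 s. A stripped-down sketch of that polling pattern, with the condition replaced by a placeholder flag:

import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

class WaitForSketch {
  // Re-evaluate the supplier every 10 ms; waitFor throws if it has not
  // returned true within 30 seconds, which fails the enclosing test.
  static void waitUntilSet(final AtomicBoolean condition) throws Exception {
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return condition.get();
      }
    }, 10, 30000);
  }
}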
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
index de66db5..c4a2d06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
@@ -613,7 +614,8 @@ public abstract class BlockReportTestBase {
         .when(spy).blockReport(
           Mockito.<DatanodeRegistration>anyObject(),
           Mockito.anyString(),
-          Mockito.<StorageBlockReport[]>anyObject());
+          Mockito.<StorageBlockReport[]>anyObject(),
+          Mockito.<BlockReportContext>anyObject());
 
       // Force a block report to be generated. The block report will have
       // an RBW replica in it. Wait for the RPC to be sent, but block

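Because blockReport now takes a fourth argument, every Mockito stub and verification in these tests has to supply a matcher for it as well. A minimal sketch of the updated stubbing shape; the real test installs a DelayAnswer on a spied client-side translator, and a plain doReturn is used here only to show the matcher list:

import static org.mockito.Mockito.doReturn;
import java.io.IOException;
import org.mockito.Mockito;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

class BlockReportStubSketch {
  // One matcher per parameter, including the new BlockReportContext.
  static void stubBlockReport(DatanodeProtocol spy) throws IOException {
    doReturn(null).when(spy).blockReport(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageBlockReport[]>anyObject(),
        Mockito.<BlockReportContext>anyObject());
  }
}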
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
index bc49793..3aa9a7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -216,7 +217,8 @@ public class TestBPOfferService {
         .when(mockNN2).blockReport(
             Mockito.<DatanodeRegistration>anyObject(),  
             Mockito.eq(FAKE_BPID),
-            Mockito.<StorageBlockReport[]>anyObject());
+            Mockito.<StorageBlockReport[]>anyObject(),
+            Mockito.<BlockReportContext>anyObject());
 
     bpos.start();
     try {
@@ -406,7 +408,8 @@ public class TestBPOfferService {
           Mockito.verify(mockNN).blockReport(
               Mockito.<DatanodeRegistration>anyObject(),  
               Mockito.eq(FAKE_BPID),
-              Mockito.<StorageBlockReport[]>anyObject());
+              Mockito.<StorageBlockReport[]>anyObject(),
+              Mockito.<BlockReportContext>anyObject());
           return true;
         } catch (Throwable t) {
           LOG.info("waiting on block report: " + t.getMessage());
@@ -431,7 +434,8 @@ public class TestBPOfferService {
           Mockito.verify(mockNN).blockReport(
                   Mockito.<DatanodeRegistration>anyObject(),
                   Mockito.eq(FAKE_BPID),
-                  Mockito.<StorageBlockReport[]>anyObject());
+                  Mockito.<StorageBlockReport[]>anyObject(),
+                  Mockito.<BlockReportContext>anyObject());
           return true;
         } catch (Throwable t) {
           LOG.info("waiting on block report: " + t.getMessage());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
index 3238d6a..c47209e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockHasMultipleReplicasOnSameDN.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.*;
 import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
@@ -122,7 +123,8 @@ public class TestBlockHasMultipleReplicasOnSameDN {
     }
 
     // Should not assert!
-    cluster.getNameNodeRpc().blockReport(dnReg, bpid, reports);
+    cluster.getNameNodeRpc().blockReport(dnReg, bpid, reports,
+        new BlockReportContext(1, 0, System.nanoTime()));
 
     // Get the block locations once again.
     locatedBlocks = client.getLocatedBlocks(filename, 0, BLOCK_SIZE * NUM_BLOCKS);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 0428b81..41e8d7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -185,7 +186,7 @@ public class TestDataNodeVolumeFailure {
             new StorageBlockReport(dnStorage, blockList);
     }
     
-    cluster.getNameNodeRpc().blockReport(dnR, bpid, reports);
+    cluster.getNameNodeRpc().blockReport(dnR, bpid, reports, null);
 
     // verify number of blocks and files...
     verify(filename, filesize);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
index ac7ebc0..cab50b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeProtocolRetryPolicy.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
@@ -136,7 +137,8 @@ public class TestDatanodeProtocolRetryPolicy {
           Mockito.verify(mockNN).blockReport(
               Mockito.eq(datanodeRegistration),
               Mockito.eq(POOL_ID),
-              Mockito.<StorageBlockReport[]>anyObject());
+              Mockito.<StorageBlockReport[]>anyObject(),
+              Mockito.<BlockReportContext>anyObject());
           return true;
         } catch (Throwable t) {
           LOG.info("waiting on block report: " + t.getMessage());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
index a5e4d4e..aadd9b2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDnRespectsBlockReportSplitThreshold.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.*;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCKREPORT_SPLIT_THRESHOLD_KEY;
@@ -133,7 +134,7 @@ public class TestDnRespectsBlockReportSplitThreshold {
     Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
         any(DatanodeRegistration.class),
         anyString(),
-        captor.capture());
+        captor.capture(),  Mockito.<BlockReportContext>anyObject());
 
     verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
   }
@@ -165,7 +166,7 @@ public class TestDnRespectsBlockReportSplitThreshold {
     Mockito.verify(nnSpy, times(1)).blockReport(
         any(DatanodeRegistration.class),
         anyString(),
-        captor.capture());
+        captor.capture(),  Mockito.<BlockReportContext>anyObject());
 
     verifyCapturedArguments(captor, cluster.getStoragesPerDatanode(), BLOCKS_IN_FILE);
   }
@@ -197,7 +198,7 @@ public class TestDnRespectsBlockReportSplitThreshold {
     Mockito.verify(nnSpy, times(cluster.getStoragesPerDatanode())).blockReport(
         any(DatanodeRegistration.class),
         anyString(),
-        captor.capture());
+        captor.capture(), Mockito.<BlockReportContext>anyObject());
 
     verifyCapturedArguments(captor, 1, BLOCKS_IN_FILE);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java
index 1b03786..b150b0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesBlockReportPerStorage.java
@@ -20,8 +20,10 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
+import org.apache.hadoop.util.Time;
 
 
 /**
@@ -33,10 +35,13 @@ public class TestNNHandlesBlockReportPerStorage extends BlockReportTestBase {
   @Override
   protected void sendBlockReports(DatanodeRegistration dnR, String poolId,
       StorageBlockReport[] reports) throws IOException {
+    int i = 0;
     for (StorageBlockReport report : reports) {
       LOG.info("Sending block report for storage " + report.getStorage().getStorageID());
       StorageBlockReport[] singletonReport = { report };
-      cluster.getNameNodeRpc().blockReport(dnR, poolId, singletonReport);
+      cluster.getNameNodeRpc().blockReport(dnR, poolId, singletonReport,
+          new BlockReportContext(reports.length, i, System.nanoTime()));
+      i++;
     }
   }
 }

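The per-storage variant above sends one RPC per storage and uses the context to tell the NameNode how many RPCs make up the report and which one it is looking at. A hedged sketch of that pattern follows; reusing a single id for all the RPCs is an assumption based on the proto comment ("the unique 64-bit ID of this block report"), whereas the test above happens to generate a fresh System.nanoTime() per RPC:

import java.io.IOException;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

class PerStorageBlockReportSketch {
  // Each storage's report travels in its own RPC; curRpc indexes into
  // totalRpcs, describing where this RPC falls within the split report.
  static void sendSplitReport(DatanodeProtocol nn, DatanodeRegistration reg,
      String bpId, StorageBlockReport[] reports) throws IOException {
    long reportId = System.nanoTime();  // assumed: one id for the whole report
    for (int i = 0; i < reports.length; i++) {
      StorageBlockReport[] singleton = { reports[i] };
      nn.blockReport(reg, bpId, singleton,
          new BlockReportContext(reports.length, i, reportId));
    }
  }
}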
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesCombinedBlockReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesCombinedBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesCombinedBlockReport.java
index 036b550..dca3c88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesCombinedBlockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestNNHandlesCombinedBlockReport.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.IOException;
 
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 
@@ -34,6 +35,7 @@ public class TestNNHandlesCombinedBlockReport extends BlockReportTestBase {
   protected void sendBlockReports(DatanodeRegistration dnR, String poolId,
                                   StorageBlockReport[] reports) throws IOException {
     LOG.info("Sending combined block reports for " + dnR);
-    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports);
+    cluster.getNameNodeRpc().blockReport(dnR, poolId, reports,
+        new BlockReportContext(1, 0, System.nanoTime()));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java
index efb9d98..3195d7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestTriggerBlockReport.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
@@ -76,7 +77,8 @@ public final class TestTriggerBlockReport {
       Mockito.verify(spy, times(0)).blockReport(
           any(DatanodeRegistration.class),
           anyString(),
-          any(StorageBlockReport[].class));
+          any(StorageBlockReport[].class),
+          Mockito.<BlockReportContext>anyObject());
       Mockito.verify(spy, times(1)).blockReceivedAndDeleted(
           any(DatanodeRegistration.class),
           anyString(),
@@ -113,7 +115,8 @@ public final class TestTriggerBlockReport {
       Mockito.verify(spy, timeout(60000)).blockReport(
           any(DatanodeRegistration.class),
           anyString(),
-          any(StorageBlockReport[].class));
+          any(StorageBlockReport[].class),
+          Mockito.<BlockReportContext>anyObject());
     }
 
     cluster.shutdown();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
index bc3c6b5..9e24f72 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -939,7 +940,8 @@ public class NNThroughputBenchmark implements Tool {
           new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
       };
       nameNodeProto.blockReport(dnRegistration, 
-          nameNode.getNamesystem().getBlockPoolId(), reports);
+          nameNode.getNamesystem().getBlockPoolId(), reports,
+              new BlockReportContext(1, 0, System.nanoTime()));
     }
 
     /**
@@ -1184,8 +1186,9 @@ public class NNThroughputBenchmark implements Tool {
       long start = Time.now();
       StorageBlockReport[] report = { new StorageBlockReport(
           dn.storage, dn.getBlockReportList()) };
-      nameNodeProto.blockReport(dn.dnRegistration, nameNode.getNamesystem()
-          .getBlockPoolId(), report);
+      nameNodeProto.blockReport(dn.dnRegistration,
+          nameNode.getNamesystem().getBlockPoolId(), report,
+          new BlockReportContext(1, 0, System.nanoTime()));
       long end = Time.now();
       return end-start;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index ee80b33..92c329e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -107,7 +108,8 @@ public class TestDeadDatanode {
         new DatanodeStorage(reg.getDatanodeUuid()),
         BlockListAsLongs.EMPTY) };
     try {
-      dnp.blockReport(reg, poolId, report);
+      dnp.blockReport(reg, poolId, report,
+          new BlockReportContext(1, 0, System.nanoTime()));
       fail("Expected IOException is not thrown");
     } catch (IOException ex) {
       // Expected

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d360bbc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index fa7a307..74358bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.io.IOUtils;
@@ -547,7 +548,8 @@ public class TestDNFencing {
         .when(spy).blockReport(
           Mockito.<DatanodeRegistration>anyObject(),
           Mockito.anyString(),
-          Mockito.<StorageBlockReport[]>anyObject());
+          Mockito.<StorageBlockReport[]>anyObject(),
+          Mockito.<BlockReportContext>anyObject());
       dn.scheduleAllBlockReport(0);
       delayer.waitForCall();
       


[28/50] [abbrv] hadoop git commit: HDFS-6826. Plugin interface to enable delegation of HDFS authorization assertions. Contributed by Arun Suresh.

Posted by zj...@apache.org.
HDFS-6826. Plugin interface to enable delegation of HDFS authorization assertions. Contributed by Arun Suresh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ec1a4a0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ec1a4a0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ec1a4a0

Branch: refs/heads/YARN-2928
Commit: 6ec1a4a02aa66d3719944d32133c17c0f5eb3e80
Parents: f8ec988
Author: Jitendra Pandey <Ji...@Jitendra-Pandeys-MacBook-Pro-4.local>
Authored: Tue Mar 24 15:43:03 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:46 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   1 +
 .../DefaultINodeAttributesProvider.java         |  45 ++++
 .../server/namenode/FSDirStatAndListingOp.java  |  51 +++--
 .../hdfs/server/namenode/FSDirectory.java       |  41 +++-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   6 +-
 .../hdfs/server/namenode/FSNamesystem.java      |  19 ++
 .../server/namenode/FSPermissionChecker.java    | 222 +++++++++++-------
 .../server/namenode/INodeAttributeProvider.java | 135 +++++++++++
 .../hdfs/server/namenode/INodeAttributes.java   |   3 +
 .../namenode/INodeDirectoryAttributes.java      |   4 +
 .../server/namenode/INodeFileAttributes.java    |   5 +
 .../hdfs/server/namenode/INodesInPath.java      |   6 +
 .../namenode/TestFSPermissionChecker.java       |   4 +-
 .../namenode/TestINodeAttributeProvider.java    | 229 +++++++++++++++++++
 15 files changed, 659 insertions(+), 115 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5ade5fb..4bed2ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -390,6 +390,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7838. Expose truncate API for libhdfs. (yliu)
 
+    HDFS-6826. Plugin interface to enable delegation of HDFS authorization 
+    assertions. (Arun Suresh via jitendra)
+
   IMPROVEMENTS
 
     HDFS-7752. Improve description for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9ecf242..b5bbe5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -477,6 +477,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_IPC_DEFAULT_PORT;
   public static final String  DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version";
   public static final String  DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
+  public static final String  DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY = "dfs.namenode.inode.attributes.provider.class";
 
   public static final String  DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable";
   public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;

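The new dfs.namenode.inode.attributes.provider.class key is how an external authorization plugin gets wired in. A hedged sketch of setting it programmatically; in a deployment it would normally live in hdfs-site.xml, and the provider class passed in here is whatever implementation the operator supplies:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;

class AttributeProviderConfigSketch {
  // Register a custom provider class under the new key; the NameNode loads it
  // reflectively at startup (see the FSNamesystem change later in this patch).
  static Configuration withProvider(Configuration conf,
      Class<? extends INodeAttributeProvider> providerClass) {
    conf.setClass(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
        providerClass, INodeAttributeProvider.class);
    return conf;
  }
}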
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
new file mode 100644
index 0000000..45aa1b5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DefaultINodeAttributesProvider.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+/**
+ * A default implementation of the INodeAttributeProvider
+ *
+ */
+public class DefaultINodeAttributesProvider extends INodeAttributeProvider {
+
+  public static INodeAttributeProvider DEFAULT_PROVIDER =
+      new DefaultINodeAttributesProvider();
+
+  @Override
+  public void start() {
+    // NO-OP
+  }
+
+  @Override
+  public void stop() {
+    // NO-OP
+  }
+
+  @Override
+  public INodeAttributes getAttributes(String[] pathElements,
+      INodeAttributes inode) {
+    return inode;
+  }
+
+}

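DefaultINodeAttributesProvider is deliberately a no-op, which also makes it a template for custom providers: extend INodeAttributeProvider, do any setup and teardown in start() and stop(), and return (or substitute) attributes in getAttributes(). A minimal pass-through sketch that only logs lookups; it assumes the abstract class exposes exactly the three methods overridden above and that the involved types are visible to plugin code:

import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider;
import org.apache.hadoop.hdfs.server.namenode.INodeAttributes;

public class LoggingINodeAttributeProvider extends INodeAttributeProvider {
  private static final Log LOG =
      LogFactory.getLog(LoggingINodeAttributeProvider.class);

  @Override
  public void start() {
    LOG.info("attribute provider started");
  }

  @Override
  public void stop() {
    LOG.info("attribute provider stopped");
  }

  @Override
  public INodeAttributes getAttributes(String[] pathElements,
      INodeAttributes inode) {
    // Observe the lookup, then hand back the stored attributes unchanged.
    // A real plugin could return a substitute INodeAttributes here.
    LOG.debug("attribute lookup for " + Arrays.toString(pathElements));
    return inode;
  }
}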
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index cb3da19..43c2de3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -181,7 +181,7 @@ class FSDirStatAndListingOp {
 
       if (!targetNode.isDirectory()) {
         return new DirectoryListing(
-            new HdfsFileStatus[]{createFileStatus(fsd,
+            new HdfsFileStatus[]{createFileStatus(fsd, src,
                 HdfsFileStatus.EMPTY_NAME, targetNode, needLocation,
                 parentStoragePolicy, snapshot, isRawPath, iip)}, 0);
       }
@@ -200,7 +200,7 @@ class FSDirStatAndListingOp {
         byte curPolicy = isSuperUser && !cur.isSymlink()?
             cur.getLocalStoragePolicyID():
             BlockStoragePolicySuite.ID_UNSPECIFIED;
-        listing[i] = createFileStatus(fsd, cur.getLocalNameBytes(), cur,
+        listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
             needLocation, getStoragePolicyID(curPolicy,
                 parentStoragePolicy), snapshot, isRawPath, iip);
         listingCnt++;
@@ -253,7 +253,7 @@ class FSDirStatAndListingOp {
     final HdfsFileStatus listing[] = new HdfsFileStatus[numOfListing];
     for (int i = 0; i < numOfListing; i++) {
       Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
-      listing[i] = createFileStatus(fsd, sRoot.getLocalNameBytes(), sRoot,
+      listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
           BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
           false, INodesInPath.fromINode(sRoot));
     }
@@ -270,7 +270,7 @@ class FSDirStatAndListingOp {
    *         or null if file not found
    */
   static HdfsFileStatus getFileInfo(
-      FSDirectory fsd, INodesInPath src, boolean isRawPath,
+      FSDirectory fsd, String path, INodesInPath src, boolean isRawPath,
       boolean includeStoragePolicy)
       throws IOException {
     fsd.readLock();
@@ -279,7 +279,7 @@ class FSDirStatAndListingOp {
       byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
           i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
       return i == null ? null : createFileStatus(
-          fsd, HdfsFileStatus.EMPTY_NAME, i, policyId,
+          fsd, path, HdfsFileStatus.EMPTY_NAME, i, policyId,
           src.getPathSnapshotId(), isRawPath, src);
     } finally {
       fsd.readUnlock();
@@ -303,7 +303,7 @@ class FSDirStatAndListingOp {
     fsd.readLock();
     try {
       final INodesInPath iip = fsd.getINodesInPath(srcs, resolveLink);
-      return getFileInfo(fsd, iip, isRawPath, includeStoragePolicy);
+      return getFileInfo(fsd, src, iip, isRawPath, includeStoragePolicy);
     } finally {
       fsd.readUnlock();
     }
@@ -340,14 +340,15 @@ class FSDirStatAndListingOp {
    * @throws java.io.IOException if any error occurs
    */
   static HdfsFileStatus createFileStatus(
-      FSDirectory fsd, byte[] path, INode node, boolean needLocation,
-      byte storagePolicy, int snapshot, boolean isRawPath, INodesInPath iip)
+      FSDirectory fsd, String fullPath, byte[] path, INode node,
+      boolean needLocation, byte storagePolicy, int snapshot, boolean isRawPath,
+      INodesInPath iip)
       throws IOException {
     if (needLocation) {
-      return createLocatedFileStatus(fsd, path, node, storagePolicy,
+      return createLocatedFileStatus(fsd, fullPath, path, node, storagePolicy,
           snapshot, isRawPath, iip);
     } else {
-      return createFileStatus(fsd, path, node, storagePolicy, snapshot,
+      return createFileStatus(fsd, fullPath, path, node, storagePolicy, snapshot,
           isRawPath, iip);
     }
   }
@@ -356,8 +357,9 @@ class FSDirStatAndListingOp {
    * Create FileStatus by file INode
    */
   static HdfsFileStatus createFileStatus(
-      FSDirectory fsd, byte[] path, INode node, byte storagePolicy,
-      int snapshot, boolean isRawPath, INodesInPath iip) throws IOException {
+      FSDirectory fsd, String fullPath, byte[] path, INode node,
+      byte storagePolicy, int snapshot, boolean isRawPath,
+      INodesInPath iip) throws IOException {
      long size = 0;     // length is zero for directories
      short replication = 0;
      long blocksize = 0;
@@ -380,6 +382,8 @@ class FSDirStatAndListingOp {
      int childrenNum = node.isDirectory() ?
          node.asDirectory().getChildrenNum(snapshot) : 0;
 
+     INodeAttributes nodeAttrs =
+         fsd.getAttributes(fullPath, path, node, snapshot);
      return new HdfsFileStatus(
         size,
         node.isDirectory(),
@@ -387,9 +391,9 @@ class FSDirStatAndListingOp {
         blocksize,
         node.getModificationTime(snapshot),
         node.getAccessTime(snapshot),
-        getPermissionForFileStatus(node, snapshot, isEncrypted),
-        node.getUserName(snapshot),
-        node.getGroupName(snapshot),
+        getPermissionForFileStatus(nodeAttrs, isEncrypted),
+        nodeAttrs.getUserName(),
+        nodeAttrs.getGroupName(),
         node.isSymlink() ? node.asSymlink().getSymlink() : null,
         path,
         node.getId(),
@@ -402,8 +406,9 @@ class FSDirStatAndListingOp {
    * Create FileStatus with location info by file INode
    */
   private static HdfsLocatedFileStatus createLocatedFileStatus(
-      FSDirectory fsd, byte[] path, INode node, byte storagePolicy,
-      int snapshot, boolean isRawPath, INodesInPath iip) throws IOException {
+      FSDirectory fsd, String fullPath, byte[] path, INode node,
+      byte storagePolicy, int snapshot, boolean isRawPath,
+      INodesInPath iip) throws IOException {
     assert fsd.hasReadLock();
     long size = 0; // length is zero for directories
     short replication = 0;
@@ -437,12 +442,14 @@ class FSDirStatAndListingOp {
     int childrenNum = node.isDirectory() ?
         node.asDirectory().getChildrenNum(snapshot) : 0;
 
+    INodeAttributes nodeAttrs =
+        fsd.getAttributes(fullPath, path, node, snapshot);
     HdfsLocatedFileStatus status =
         new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
           blocksize, node.getModificationTime(snapshot),
           node.getAccessTime(snapshot),
-          getPermissionForFileStatus(node, snapshot, isEncrypted),
-          node.getUserName(snapshot), node.getGroupName(snapshot),
+          getPermissionForFileStatus(nodeAttrs, isEncrypted),
+          nodeAttrs.getUserName(), nodeAttrs.getGroupName(),
           node.isSymlink() ? node.asSymlink().getSymlink() : null, path,
           node.getId(), loc, childrenNum, feInfo, storagePolicy);
     // Set caching information for the located blocks.
@@ -467,9 +474,9 @@ class FSDirStatAndListingOp {
    * and encrypted bit on if it represents an encrypted file/dir.
    */
   private static FsPermission getPermissionForFileStatus(
-      INode node, int snapshot, boolean isEncrypted) {
-    FsPermission perm = node.getFsPermission(snapshot);
-    boolean hasAcl = node.getAclFeature(snapshot) != null;
+      INodeAttributes node, boolean isEncrypted) {
+    FsPermission perm = node.getFsPermission();
+    boolean hasAcl = node.getAclFeature() != null;
     if (hasAcl || isEncrypted) {
       perm = new FsPermissionExtension(perm, hasAcl, isEncrypted);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 2f73627..7eea343 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -167,6 +167,12 @@ public class FSDirectory implements Closeable {
 
   private final FSEditLog editLog;
 
+  private INodeAttributeProvider attributeProvider;
+
+  public void setINodeAttributeProvider(INodeAttributeProvider provider) {
+    attributeProvider = provider;
+  }
+
   // utility methods to acquire and release read lock and write lock
   void readLock() {
     this.dirLock.readLock().lock();
@@ -1623,13 +1629,23 @@ public class FSDirectory implements Closeable {
   FSPermissionChecker getPermissionChecker()
     throws AccessControlException {
     try {
-      return new FSPermissionChecker(fsOwnerShortUserName, supergroup,
+      return getPermissionChecker(fsOwnerShortUserName, supergroup,
           NameNode.getRemoteUser());
-    } catch (IOException ioe) {
-      throw new AccessControlException(ioe);
+    } catch (IOException e) {
+      throw new AccessControlException(e);
     }
   }
 
+  @VisibleForTesting
+  FSPermissionChecker getPermissionChecker(String fsOwner, String superGroup,
+      UserGroupInformation ugi) throws AccessControlException {
+    return new FSPermissionChecker(
+        fsOwner, superGroup, ugi,
+        attributeProvider == null ?
+            DefaultINodeAttributesProvider.DEFAULT_PROVIDER
+            : attributeProvider);
+  }
+
   void checkOwner(FSPermissionChecker pc, INodesInPath iip)
       throws AccessControlException {
     checkPermission(pc, iip, true, null, null, null, null);
@@ -1690,7 +1706,8 @@ public class FSDirectory implements Closeable {
   HdfsFileStatus getAuditFileInfo(INodesInPath iip)
       throws IOException {
     return (namesystem.isAuditEnabled() && namesystem.isExternalInvocation())
-        ? FSDirStatAndListingOp.getFileInfo(this, iip, false, false) : null;
+        ? FSDirStatAndListingOp.getFileInfo(this, iip.getPath(), iip, false,
+            false) : null;
   }
 
   /**
@@ -1736,4 +1753,20 @@ public class FSDirectory implements Closeable {
   void resetLastInodeIdWithoutChecking(long newValue) {
     inodeId.setCurrentValue(newValue);
   }
+
+  INodeAttributes getAttributes(String fullPath, byte[] path,
+      INode node, int snapshot) {
+    INodeAttributes nodeAttrs = node;
+    if (attributeProvider != null) {
+      nodeAttrs = node.getSnapshotINode(snapshot);
+      fullPath = fullPath + (fullPath.endsWith(Path.SEPARATOR) ? ""
+                                                               : Path.SEPARATOR)
+          + DFSUtil.bytes2String(path);
+      nodeAttrs = attributeProvider.getAttributes(fullPath, nodeAttrs);
+    } else {
+      nodeAttrs = node.getSnapshotINode(snapshot);
+    }
+    return nodeAttrs;
+  }
+
 }

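A side note on FSDirectory.getAttributes above: the path handed to the provider is the enclosing path joined with the inode's local name using exactly one separator. A tiny illustration of that join, plain Java with hypothetical values and no HDFS types involved:

class ProviderPathJoinSketch {
  // Mirrors the concatenation in FSDirectory.getAttributes: append a
  // separator only when the parent path does not already end with one.
  static String join(String fullPath, String localName) {
    return fullPath + (fullPath.endsWith("/") ? "" : "/") + localName;
  }

  public static void main(String[] args) {
    System.out.println(join("/user/alice", "data.txt"));  // /user/alice/data.txt
    System.out.println(join("/", "tmp"));                 // /tmp
  }
}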
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index ad661ca..f50dc4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -378,7 +378,7 @@ public class FSEditLogLoader {
         // add the op into retry cache if necessary
         if (toAddRetryCache) {
           HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatus(
-              fsNamesys.dir, HdfsFileStatus.EMPTY_NAME, newFile,
+              fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, newFile,
               BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
               false, iip);
           fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
@@ -397,7 +397,7 @@ public class FSEditLogLoader {
           // add the op into retry cache if necessary
           if (toAddRetryCache) {
             HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatus(
-                fsNamesys.dir,
+                fsNamesys.dir, path,
                 HdfsFileStatus.EMPTY_NAME, newFile,
                 BlockStoragePolicySuite.ID_UNSPECIFIED,
                 Snapshot.CURRENT_STATE_ID, false, iip);
@@ -471,7 +471,7 @@ public class FSEditLogLoader {
         // add the op into retry cache if necessary
         if (toAddRetryCache) {
           HdfsFileStatus stat = FSDirStatAndListingOp.createFileStatus(
-              fsNamesys.dir, HdfsFileStatus.EMPTY_NAME, file,
+              fsNamesys.dir, path, HdfsFileStatus.EMPTY_NAME, file,
               BlockStoragePolicySuite.ID_UNSPECIFIED,
               Snapshot.CURRENT_STATE_ID, false, iip);
           fsNamesys.addCacheEntryWithPayload(appendOp.rpcClientId,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 34b5e95..9235425 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -62,6 +62,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CAC
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
@@ -277,6 +278,7 @@ import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.log4j.Appender;
@@ -536,6 +538,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private final TopConf topConf;
   private TopMetrics topMetrics;
 
+  private INodeAttributeProvider inodeAttributeProvider;
+
   /**
    * Notify that loading of this FSDirectory is complete, and
    * it is imageLoaded for use
@@ -841,6 +845,13 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       this.isDefaultAuditLogger = auditLoggers.size() == 1 &&
         auditLoggers.get(0) instanceof DefaultAuditLogger;
       this.retryCache = ignoreRetryCache ? null : initRetryCache(conf);
+      Class<? extends INodeAttributeProvider> klass = conf.getClass(
+          DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
+          null, INodeAttributeProvider.class);
+      if (klass != null) {
+        inodeAttributeProvider = ReflectionUtils.newInstance(klass, conf);
+        LOG.info("Using INode attribute provider: " + klass.getName());
+      }
     } catch(IOException e) {
       LOG.error(getClass().getSimpleName() + " initialization failed.", e);
       close();
@@ -1067,6 +1078,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     
     registerMXBean();
     DefaultMetricsSystem.instance().register(this);
+    if (inodeAttributeProvider != null) {
+      inodeAttributeProvider.start();
+      dir.setINodeAttributeProvider(inodeAttributeProvider);
+    }
     snapshotManager.registerMXBean();
   }
   
@@ -1075,6 +1090,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    */
   void stopCommonServices() {
     writeLock();
+    if (inodeAttributeProvider != null) {
+      dir.setINodeAttributeProvider(null);
+      inodeAttributeProvider.stop();
+    }
     try {
       if (blockManager != null) blockManager.close();
     } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index 0508484..e6570f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.List;
 import java.util.Set;
 import java.util.Stack;
 
@@ -30,6 +29,8 @@ import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -41,25 +42,25 @@ import org.apache.hadoop.security.UserGroupInformation;
  * 
  * Some of the helper methods are guarded by {@link FSNamesystem#readLock()}.
  */
-class FSPermissionChecker {
+class FSPermissionChecker implements AccessControlEnforcer {
   static final Log LOG = LogFactory.getLog(UserGroupInformation.class);
 
   /** @return a string for throwing {@link AccessControlException} */
-  private String toAccessControlString(INode inode, int snapshotId,
+  private String toAccessControlString(INodeAttributes inodeAttrib, String path,
       FsAction access, FsPermission mode) {
-    return toAccessControlString(inode, snapshotId, access, mode, false);
+    return toAccessControlString(inodeAttrib, path, access, mode, false);
   }
 
   /** @return a string for throwing {@link AccessControlException} */
-  private String toAccessControlString(INode inode, int snapshotId, FsAction access,
-      FsPermission mode, boolean deniedFromAcl) {
+  private String toAccessControlString(INodeAttributes inodeAttrib,
+      String path, FsAction access, FsPermission mode, boolean deniedFromAcl) {
     StringBuilder sb = new StringBuilder("Permission denied: ")
-      .append("user=").append(user).append(", ")
+      .append("user=").append(getUser()).append(", ")
       .append("access=").append(access).append(", ")
-      .append("inode=\"").append(inode.getFullPathName()).append("\":")
-      .append(inode.getUserName(snapshotId)).append(':')
-      .append(inode.getGroupName(snapshotId)).append(':')
-      .append(inode.isDirectory() ? 'd' : '-')
+      .append("inode=\"").append(path).append("\":")
+      .append(inodeAttrib.getUserName()).append(':')
+      .append(inodeAttrib.getGroupName()).append(':')
+      .append(inodeAttrib.isDirectory() ? 'd' : '-')
       .append(mode);
     if (deniedFromAcl) {
       sb.append("+");
@@ -67,42 +68,59 @@ class FSPermissionChecker {
     return sb.toString();
   }
 
+  private final String fsOwner;
+  private final String supergroup;
+  private final UserGroupInformation callerUgi;
+
   private final String user;
-  /** A set with group namess. Not synchronized since it is unmodifiable */
   private final Set<String> groups;
   private final boolean isSuper;
+  private final INodeAttributeProvider attributeProvider;
+
 
   FSPermissionChecker(String fsOwner, String supergroup,
-      UserGroupInformation callerUgi) {
-    HashSet<String> s = new HashSet<String>(Arrays.asList(callerUgi.getGroupNames()));
+      UserGroupInformation callerUgi,
+      INodeAttributeProvider attributeProvider) {
+    this.fsOwner = fsOwner;
+    this.supergroup = supergroup;
+    this.callerUgi = callerUgi;
+    HashSet<String> s =
+        new HashSet<String>(Arrays.asList(callerUgi.getGroupNames()));
     groups = Collections.unmodifiableSet(s);
     user = callerUgi.getShortUserName();
     isSuper = user.equals(fsOwner) || groups.contains(supergroup);
+    this.attributeProvider = attributeProvider;
   }
 
-  /**
-   * Check if the callers group contains the required values.
-   * @param group group to check
-   */
-  public boolean containsGroup(String group) {return groups.contains(group);}
+  public boolean containsGroup(String group) {
+    return groups.contains(group);
+  }
 
   public String getUser() {
     return user;
   }
-  
+
+  public Set<String> getGroups() {
+    return groups;
+  }
+
   public boolean isSuperUser() {
     return isSuper;
   }
-  
+
+  public INodeAttributeProvider getAttributesProvider() {
+    return attributeProvider;
+  }
+
   /**
    * Verify if the caller has the required permission. This will result into 
    * an exception if the caller is not allowed to access the resource.
    */
   public void checkSuperuserPrivilege()
       throws AccessControlException {
-    if (!isSuper) {
+    if (!isSuperUser()) {
       throw new AccessControlException("Access denied for user " 
-          + user + ". Superuser privilege is required");
+          + getUser() + ". Superuser privilege is required");
     }
   }
   
@@ -154,64 +172,98 @@ class FSPermissionChecker {
     // check if (parentAccess != null) && file exists, then check sb
     // If resolveLink, the check is performed on the link target.
     final int snapshotId = inodesInPath.getPathSnapshotId();
-    final int length = inodesInPath.length();
-    final INode last = length > 0 ? inodesInPath.getLastINode() : null;
-    final INode parent = length > 1 ? inodesInPath.getINode(-2) : null;
+    final INode[] inodes = inodesInPath.getINodesArray();
+    final INodeAttributes[] inodeAttrs = new INodeAttributes[inodes.length];
+    final byte[][] pathByNameArr = new byte[inodes.length][];
+    for (int i = 0; i < inodes.length && inodes[i] != null; i++) {
+      if (inodes[i] != null) {
+        pathByNameArr[i] = inodes[i].getLocalNameBytes();
+        inodeAttrs[i] = getINodeAttrs(pathByNameArr, i, inodes[i], snapshotId);
+      }
+    }
+
+    String path = inodesInPath.getPath();
+    int ancestorIndex = inodes.length - 2;
+
+    AccessControlEnforcer enforcer =
+        getAttributesProvider().getExternalAccessControlEnforcer(this);
+    enforcer.checkPermission(fsOwner, supergroup, callerUgi, inodeAttrs, inodes,
+        pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner,
+        ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir);
+  }
 
-    checkTraverse(inodesInPath, snapshotId);
+  @Override
+  public void checkPermission(String fsOwner, String supergroup,
+      UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
+      INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path,
+      int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
+      FsAction parentAccess, FsAction access, FsAction subAccess,
+      boolean ignoreEmptyDir)
+      throws AccessControlException {
+    for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null;
+        ancestorIndex--);
+    checkTraverse(inodeAttrs, path, ancestorIndex);
 
+    final INodeAttributes last = inodeAttrs[inodeAttrs.length - 1];
     if (parentAccess != null && parentAccess.implies(FsAction.WRITE)
-        && length > 1 && last != null) {
-      checkStickyBit(parent, last, snapshotId);
+        && inodeAttrs.length > 1 && last != null) {
+      checkStickyBit(inodeAttrs[inodeAttrs.length - 2], last);
     }
-    if (ancestorAccess != null && length > 1) {
-      List<INode> inodes = inodesInPath.getReadOnlyINodes();
-      INode ancestor = null;
-      for (int i = inodes.size() - 2; i >= 0 && (ancestor = inodes.get(i)) ==
-          null; i--);
-      check(ancestor, snapshotId, ancestorAccess);
+    if (ancestorAccess != null && inodeAttrs.length > 1) {
+      check(inodeAttrs, path, ancestorIndex, ancestorAccess);
     }
-    if (parentAccess != null && length > 1 && parent != null) {
-      check(parent, snapshotId, parentAccess);
+    if (parentAccess != null && inodeAttrs.length > 1) {
+      check(inodeAttrs, path, inodeAttrs.length - 2, parentAccess);
     }
     if (access != null) {
-      check(last, snapshotId, access);
+      check(last, path, access);
     }
     if (subAccess != null) {
-      checkSubAccess(last, snapshotId, subAccess, ignoreEmptyDir);
+      INode rawLast = inodes[inodeAttrs.length - 1];
+      checkSubAccess(pathByNameArr, inodeAttrs.length - 1, rawLast,
+          snapshotId, subAccess, ignoreEmptyDir);
     }
     if (doCheckOwner) {
-      checkOwner(last, snapshotId);
+      checkOwner(last);
     }
   }
 
+  private INodeAttributes getINodeAttrs(byte[][] pathByNameArr, int pathIdx,
+      INode inode, int snapshotId) {
+    INodeAttributes inodeAttrs = inode.getSnapshotINode(snapshotId);
+    if (getAttributesProvider() != null) {
+      String[] elements = new String[pathIdx + 1];
+      for (int i = 0; i < elements.length; i++) {
+        elements[i] = DFSUtil.bytes2String(pathByNameArr[i]);
+      }
+      inodeAttrs = getAttributesProvider().getAttributes(elements, inodeAttrs);
+    }
+    return inodeAttrs;
+  }
+
   /** Guarded by {@link FSNamesystem#readLock()} */
-  private void checkOwner(INode inode, int snapshotId
+  private void checkOwner(INodeAttributes inode
       ) throws AccessControlException {
-    if (inode != null && user.equals(inode.getUserName(snapshotId))) {
+    if (getUser().equals(inode.getUserName())) {
       return;
     }
     throw new AccessControlException(
             "Permission denied. user="
-            + user + " is not the owner of inode=" + inode);
+            + getUser() + " is not the owner of inode=" + inode);
   }
 
   /** Guarded by {@link FSNamesystem#readLock()} */
-  private void checkTraverse(INodesInPath iip, int snapshotId)
-      throws AccessControlException {
-    List<INode> inodes = iip.getReadOnlyINodes();
-    for (int i = 0; i < inodes.size() - 1; i++) {
-      INode inode = inodes.get(i);
-      if (inode == null) {
-        break;
-      }
-      check(inode, snapshotId, FsAction.EXECUTE);
+  private void checkTraverse(INodeAttributes[] inodes, String path, int last
+      ) throws AccessControlException {
+    for(int j = 0; j <= last; j++) {
+      check(inodes[j], path, FsAction.EXECUTE);
     }
   }
 
   /** Guarded by {@link FSNamesystem#readLock()} */
-  private void checkSubAccess(INode inode, int snapshotId, FsAction access,
-      boolean ignoreEmptyDir) throws AccessControlException {
+  private void checkSubAccess(byte[][] pathByNameArr, int pathIdx, INode inode,
+      int snapshotId, FsAction access, boolean ignoreEmptyDir)
+      throws AccessControlException {
     if (inode == null || !inode.isDirectory()) {
       return;
     }
@@ -221,7 +273,9 @@ class FSPermissionChecker {
       INodeDirectory d = directories.pop();
       ReadOnlyList<INode> cList = d.getChildrenList(snapshotId);
       if (!(cList.isEmpty() && ignoreEmptyDir)) {
-        check(d, snapshotId, access);
+        //TODO have to figure this out with inodeattribute provider
+        check(getINodeAttrs(pathByNameArr, pathIdx, d, snapshotId),
+            inode.getFullPathName(), access);
       }
 
       for(INode child : cList) {
@@ -233,37 +287,37 @@ class FSPermissionChecker {
   }
 
   /** Guarded by {@link FSNamesystem#readLock()} */
-  private void check(INode inode, int snapshotId, FsAction access)
-      throws AccessControlException {
+  private void check(INodeAttributes[] inodes, String path, int i, FsAction access
+      ) throws AccessControlException {
+    check(i >= 0 ? inodes[i] : null, path, access);
+  }
+
+  private void check(INodeAttributes inode, String path, FsAction access
+      ) throws AccessControlException {
     if (inode == null) {
       return;
     }
-    FsPermission mode = inode.getFsPermission(snapshotId);
-    AclFeature aclFeature = inode.getAclFeature(snapshotId);
+    final FsPermission mode = inode.getFsPermission();
+    final AclFeature aclFeature = inode.getAclFeature();
     if (aclFeature != null) {
       // It's possible that the inode has a default ACL but no access ACL.
       int firstEntry = aclFeature.getEntryAt(0);
       if (AclEntryStatusFormat.getScope(firstEntry) == AclEntryScope.ACCESS) {
-        checkAccessAcl(inode, snapshotId, access, mode, aclFeature);
+        checkAccessAcl(inode, path, access, mode, aclFeature);
         return;
       }
     }
-    checkFsPermission(inode, snapshotId, access, mode);
-  }
-
-  private void checkFsPermission(INode inode, int snapshotId, FsAction access,
-      FsPermission mode) throws AccessControlException {
-    if (user.equals(inode.getUserName(snapshotId))) { //user class
+    if (getUser().equals(inode.getUserName())) { //user class
       if (mode.getUserAction().implies(access)) { return; }
     }
-    else if (groups.contains(inode.getGroupName(snapshotId))) { //group class
+    else if (getGroups().contains(inode.getGroupName())) { //group class
       if (mode.getGroupAction().implies(access)) { return; }
     }
     else { //other class
       if (mode.getOtherAction().implies(access)) { return; }
     }
     throw new AccessControlException(
-      toAccessControlString(inode, snapshotId, access, mode));
+        toAccessControlString(inode, path, access, mode));
   }
 
   /**
@@ -282,20 +336,20 @@ class FSPermissionChecker {
    * - The other entry must not have a name.
    * - Default entries may be present, but they are ignored during enforcement.
    *
-   * @param inode INode accessed inode
+   * @param inode INodeAttributes accessed inode
    * @param snapshotId int snapshot ID
    * @param access FsAction requested permission
    * @param mode FsPermission mode from inode
    * @param aclFeature AclFeature of inode
    * @throws AccessControlException if the ACL denies permission
    */
-  private void checkAccessAcl(INode inode, int snapshotId, FsAction access,
-      FsPermission mode, AclFeature aclFeature)
+  private void checkAccessAcl(INodeAttributes inode, String path,
+      FsAction access, FsPermission mode, AclFeature aclFeature)
       throws AccessControlException {
     boolean foundMatch = false;
 
     // Use owner entry from permission bits if user is owner.
-    if (user.equals(inode.getUserName(snapshotId))) {
+    if (getUser().equals(inode.getUserName())) {
       if (mode.getUserAction().implies(access)) {
         return;
       }
@@ -314,7 +368,7 @@ class FSPermissionChecker {
         if (type == AclEntryType.USER) {
           // Use named user entry with mask from permission bits applied if user
           // matches name.
-          if (user.equals(name)) {
+          if (getUser().equals(name)) {
             FsAction masked = AclEntryStatusFormat.getPermission(entry).and(
                 mode.getGroupAction());
             if (masked.implies(access)) {
@@ -328,8 +382,8 @@ class FSPermissionChecker {
           // applied if user is a member and entry grants access.  If user is a
           // member of multiple groups that have entries that grant access, then
           // it doesn't matter which is chosen, so exit early after first match.
-          String group = name == null ? inode.getGroupName(snapshotId) : name;
-          if (groups.contains(group)) {
+          String group = name == null ? inode.getGroupName() : name;
+          if (getGroups().contains(group)) {
             FsAction masked = AclEntryStatusFormat.getPermission(entry).and(
                 mode.getGroupAction());
             if (masked.implies(access)) {
@@ -347,28 +401,28 @@ class FSPermissionChecker {
     }
 
     throw new AccessControlException(
-      toAccessControlString(inode, snapshotId, access, mode, true));
+        toAccessControlString(inode, path, access, mode));
   }
 
   /** Guarded by {@link FSNamesystem#readLock()} */
-  private void checkStickyBit(INode parent, INode inode, int snapshotId
+  private void checkStickyBit(INodeAttributes parent, INodeAttributes inode
       ) throws AccessControlException {
-    if(!parent.getFsPermission(snapshotId).getStickyBit()) {
+    if (!parent.getFsPermission().getStickyBit()) {
       return;
     }
 
     // If this user is the directory owner, return
-    if(parent.getUserName(snapshotId).equals(user)) {
+    if (parent.getUserName().equals(getUser())) {
       return;
     }
 
     // if this user is the file owner, return
-    if(inode.getUserName(snapshotId).equals(user)) {
+    if (inode.getUserName().equals(getUser())) {
       return;
     }
 
     throw new AccessControlException("Permission denied by sticky bit setting:" +
-      " user=" + user + ", inode=" + inode);
+      " user=" + getUser() + ", inode=" + inode);
   }
 
   /**
@@ -384,11 +438,11 @@ class FSPermissionChecker {
     if (isSuperUser()) {
       return;
     }
-    if (user.equals(pool.getOwnerName())
+    if (getUser().equals(pool.getOwnerName())
         && mode.getUserAction().implies(access)) {
       return;
     }
-    if (groups.contains(pool.getGroupName())
+    if (getGroups().contains(pool.getGroupName())
         && mode.getGroupAction().implies(access)) {
       return;
     }
@@ -396,7 +450,7 @@ class FSPermissionChecker {
       return;
     }
     throw new AccessControlException("Permission denied while accessing pool "
-        + pool.getPoolName() + ": user " + user + " does not have "
+        + pool.getPoolName() + ": user " + getUser() + " does not have "
         + access.toString() + " permissions.");
   }
 }
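
With this change, FSPermissionChecker builds the INodeAttributes/INode arrays and hands enforcement to whatever AccessControlEnforcer the configured INodeAttributeProvider returns, the checker itself being the default. A minimal illustrative sketch, not part of this commit, of a provider that only audits each check and then delegates the decision back to the default enforcer (the new INodeAttributeProvider base class is shown in full in the next file; AuditingAttributeProvider is a made-up class name):

  package org.apache.hadoop.hdfs.server.namenode;

  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;
  import org.apache.hadoop.fs.permission.FsAction;
  import org.apache.hadoop.security.AccessControlException;
  import org.apache.hadoop.security.UserGroupInformation;

  public class AuditingAttributeProvider extends INodeAttributeProvider {
    private static final Log LOG =
        LogFactory.getLog(AuditingAttributeProvider.class);

    @Override
    public void start() {
      // nothing to initialize for this sketch
    }

    @Override
    public void stop() {
      // nothing to shut down for this sketch
    }

    @Override
    public INodeAttributes getAttributes(String[] pathElements,
        INodeAttributes inode) {
      return inode; // leave the stored attributes untouched
    }

    @Override
    public AccessControlEnforcer getExternalAccessControlEnforcer(
        final AccessControlEnforcer defaultEnforcer) {
      return new AccessControlEnforcer() {
        @Override
        public void checkPermission(String fsOwner, String supergroup,
            UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
            INode[] inodes, byte[][] pathByNameArr, int snapshotId,
            String path, int ancestorIndex, boolean doCheckOwner,
            FsAction ancestorAccess, FsAction parentAccess, FsAction access,
            FsAction subAccess, boolean ignoreEmptyDir)
            throws AccessControlException {
          LOG.info("checkPermission user=" + callerUgi.getShortUserName()
              + " path=" + path + " access=" + access);
          // The actual decision is still made by the default enforcer,
          // i.e. the FSPermissionChecker shown in the diff above.
          defaultEnforcer.checkPermission(fsOwner, supergroup, callerUgi,
              inodeAttrs, inodes, pathByNameArr, snapshotId, path,
              ancestorIndex, doCheckOwner, ancestorAccess, parentAccess,
              access, subAccess, ignoreEmptyDir);
        }
      };
    }
  }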

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
new file mode 100644
index 0000000..b12e147
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributeProvider.java
@@ -0,0 +1,135 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Set;
+
+import com.google.common.annotations.VisibleForTesting;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public abstract class INodeAttributeProvider {
+
+  /**
+   * The AccessControlEnforcer allows implementations to override the
+   * default file system permission checking logic enforced on a file
+   * system object.
+   */
+  public interface AccessControlEnforcer {
+
+    /**
+     * Checks permission on a file system object. Implementations must throw
+     * an AccessControlException if the file system object is not accessible
+     * to the calling UGI.
+     * @param fsOwner Filesystem owner (The Namenode user)
+     * @param supergroup super user group
+     * @param callerUgi UserGroupInformation of the caller
+     * @param inodeAttrs Array of INode attributes for each path element in
+     *                   the path
+     * @param inodes Array of INodes for each path element in the path
+     * @param pathByNameArr Array of byte arrays of the LocalName
+     * @param snapshotId the snapshotId of the requested path
+     * @param path Path String
+     * @param ancestorIndex Index of ancestor
+     * @param doCheckOwner perform ownership check
+     * @param ancestorAccess The access required by the ancestor of the path.
+     * @param parentAccess The access required by the parent of the path.
+     * @param access The access required by the path.
+     * @param subAccess If path is a directory, the access required of the
+     *                  path and all its sub-directories; if path is not a
+     *                  directory, it should have no effect.
+     * @param ignoreEmptyDir Whether to skip permission checking for empty
+     *                       directories
+     * @throws AccessControlException
+     */
+    public abstract void checkPermission(String fsOwner, String supergroup,
+        UserGroupInformation callerUgi, INodeAttributes[] inodeAttrs,
+        INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path,
+        int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
+        FsAction parentAccess, FsAction access, FsAction subAccess,
+        boolean ignoreEmptyDir)
+            throws AccessControlException;
+
+  }
+  /**
+   * Initialize the provider. This method is called at NameNode startup
+   * time.
+   */
+  public abstract void start();
+
+  /**
+   * Shutdown the provider. This method is called at NameNode shutdown time.
+   */
+  public abstract void stop();
+
+  @VisibleForTesting
+  String[] getPathElements(String path) {
+    path = path.trim();
+    if (path.charAt(0) != Path.SEPARATOR_CHAR) {
+      throw new IllegalArgumentException("It must be an absolute path: " +
+          path);
+    }
+    int numOfElements = StringUtils.countMatches(path, Path.SEPARATOR);
+    if (path.length() > 1 && path.endsWith(Path.SEPARATOR)) {
+      numOfElements--;
+    }
+    String[] pathElements = new String[numOfElements];
+    int elementIdx = 0;
+    int idx = 0;
+    int found = path.indexOf(Path.SEPARATOR_CHAR, idx);
+    while (found > -1) {
+      if (found > idx) {
+        pathElements[elementIdx++] = path.substring(idx, found);
+      }
+      idx = found + 1;
+      found = path.indexOf(Path.SEPARATOR_CHAR, idx);
+    }
+    if (idx < path.length()) {
+      pathElements[elementIdx] = path.substring(idx);
+    }
+    return pathElements;
+  }
+
+  public INodeAttributes getAttributes(String fullPath, INodeAttributes inode) {
+    return getAttributes(getPathElements(fullPath), inode);
+  }
+
+  public abstract INodeAttributes getAttributes(String[] pathElements,
+      INodeAttributes inode);
+
+  /**
+   * Can be overridden by implementations to supply a custom
+   * AccessControlEnforcer that replaces the default permission checking
+   * logic.
+   * @param defaultEnforcer The Default AccessControlEnforcer
+   * @return The AccessControlEnforcer to use
+   */
+  public AccessControlEnforcer getExternalAccessControlEnforcer(
+      AccessControlEnforcer defaultEnforcer) {
+    return defaultEnforcer;
+  }
+}
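
The getPathElements() helper above turns an absolute path into the String[] that getAttributes(String[], INodeAttributes) receives, dropping the leading separator and any trailing one. The snippet below is an illustrative, standalone approximation of that behaviour for normalized paths (the real method is package-private and @VisibleForTesting); PathElementsDemo is a made-up class name. The TestINodeAttributeProvider added later in this commit shows a complete provider wired up through the new configuration key.

  import java.util.Arrays;

  public class PathElementsDemo {
    // Approximation of INodeAttributeProvider#getPathElements for normalized
    // absolute paths: strip the leading '/', strip a trailing '/', split on '/'.
    static String[] pathElements(String path) {
      if (path.isEmpty() || path.charAt(0) != '/') {
        throw new IllegalArgumentException("It must be an absolute path: " + path);
      }
      String trimmed = (path.length() > 1 && path.endsWith("/"))
          ? path.substring(1, path.length() - 1)
          : path.substring(1);
      return trimmed.isEmpty() ? new String[0] : trimmed.split("/");
    }

    public static void main(String[] args) {
      // "/user/authz/file" -> [user, authz, file]
      System.out.println(Arrays.toString(pathElements("/user/authz/file")));
      // Trailing separators are ignored: "/user/authz/" -> [user, authz]
      System.out.println(Arrays.toString(pathElements("/user/authz/")));
    }
  }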

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
index 0f76b68..7b780c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeAttributes.java
@@ -28,6 +28,9 @@ import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
  */
 @InterfaceAudience.Private
 public interface INodeAttributes {
+
+  public boolean isDirectory();
+
   /**
    * @return null if the local name is null;
    *         otherwise, return the local name byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
index 956deae..240aa15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
@@ -52,6 +52,10 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
           storageSpace(-1).typeSpaces(-1).build();
     }
 
+    public boolean isDirectory() {
+      return true;
+    }
+
     @Override
     public boolean metadataEquals(INodeDirectoryAttributes other) {
       return other != null

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
index 0f85bab..204c8ac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
@@ -60,6 +60,11 @@ public interface INodeFileAttributes extends INodeAttributes {
     }
 
     @Override
+    public boolean isDirectory() {
+      return false;
+    }
+
+    @Override
     public short getFileReplication() {
       return HeaderFormat.getReplication(header);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
index 389b62b..f1892c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java
@@ -376,6 +376,12 @@ public class INodesInPath {
     return Collections.unmodifiableList(Arrays.asList(inodes));
   }
 
+  public INode[] getINodesArray() {
+    INode[] retArr = new INode[inodes.length];
+    System.arraycopy(inodes, 0, retArr, 0, inodes.length);
+    return retArr;
+  }
+
   /**
    * @param length number of ancestral INodes in the returned INodesInPath
    *               instance

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
index 883029a..0154a03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
@@ -403,7 +403,7 @@ public class TestFSPermissionChecker {
   private void assertPermissionGranted(UserGroupInformation user, String path,
       FsAction access) throws IOException {
     INodesInPath iip = dir.getINodesInPath(path, true);
-    new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
+    dir.getPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
       false, null, null, access, null, false);
   }
 
@@ -411,7 +411,7 @@ public class TestFSPermissionChecker {
       FsAction access) throws IOException {
     try {
       INodesInPath iip = dir.getINodesInPath(path, true);
-      new FSPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
+      dir.getPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
         false, null, null, access, null, false);
       fail("expected AccessControlException for user + " + user + ", path = " +
         path + ", access = " + access);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ec1a4a0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
new file mode 100644
index 0000000..111c67c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeAttributeProvider.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
+import java.util.HashSet;
+import java.util.Set;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider.AccessControlEnforcer;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+
+public class TestINodeAttributeProvider {
+  private MiniDFSCluster miniDFS;
+  private static final Set<String> CALLED = new HashSet<String>();
+
+  public static class MyAuthorizationProvider extends INodeAttributeProvider {
+
+    public static class MyAccessControlEnforcer implements AccessControlEnforcer {
+
+      @Override
+      public void checkPermission(String fsOwner, String supergroup,
+          UserGroupInformation ugi, INodeAttributes[] inodeAttrs,
+          INode[] inodes, byte[][] pathByNameArr, int snapshotId, String path,
+          int ancestorIndex, boolean doCheckOwner, FsAction ancestorAccess,
+          FsAction parentAccess, FsAction access, FsAction subAccess,
+          boolean ignoreEmptyDir) throws AccessControlException {
+        CALLED.add("checkPermission|" + ancestorAccess + "|" + parentAccess + "|" + access);
+      }
+    }
+
+    @Override
+    public void start() {
+      CALLED.add("start");
+    }
+
+    @Override
+    public void stop() {
+      CALLED.add("stop");
+    }
+
+    @Override
+    public INodeAttributes getAttributes(String[] pathElements,
+        final INodeAttributes inode) {
+      CALLED.add("getAttributes");
+      final boolean useDefault = useDefault(pathElements);
+      return new INodeAttributes() {
+        @Override
+        public boolean isDirectory() {
+          return inode.isDirectory();
+        }
+
+        @Override
+        public byte[] getLocalNameBytes() {
+          return inode.getLocalNameBytes();
+        }
+
+        @Override
+        public String getUserName() {
+          return (useDefault) ? inode.getUserName() : "foo";
+        }
+
+        @Override
+        public String getGroupName() {
+          return (useDefault) ? inode.getGroupName() : "bar";
+        }
+
+        @Override
+        public FsPermission getFsPermission() {
+          return (useDefault) ? inode.getFsPermission()
+                              : new FsPermission(getFsPermissionShort());
+        }
+
+        @Override
+        public short getFsPermissionShort() {
+          return (useDefault) ? inode.getFsPermissionShort()
+                              : (short) getPermissionLong();
+        }
+
+        @Override
+        public long getPermissionLong() {
+          return (useDefault) ? inode.getPermissionLong() : 0770;
+        }
+
+        @Override
+        public AclFeature getAclFeature() {
+          AclFeature f;
+          if (useDefault) {
+            f = inode.getAclFeature();
+          } else {
+            AclEntry acl = new AclEntry.Builder().setType(AclEntryType.GROUP).
+                setPermission(FsAction.ALL).setName("xxx").build();
+            f = new AclFeature(AclEntryStatusFormat.toInt(
+                Lists.newArrayList(acl)));
+          }
+          return f;
+        }
+
+        @Override
+        public XAttrFeature getXAttrFeature() {
+          return (useDefault) ? inode.getXAttrFeature() : null;
+        }
+
+        @Override
+        public long getModificationTime() {
+          return (useDefault) ? inode.getModificationTime() : 0;
+        }
+
+        @Override
+        public long getAccessTime() {
+          return (useDefault) ? inode.getAccessTime() : 0;
+        }
+      };
+
+    }
+
+    @Override
+    public AccessControlEnforcer getExternalAccessControlEnforcer(
+        AccessControlEnforcer defaultEnforcer) {
+      return new MyAccessControlEnforcer();
+    }
+
+    private boolean useDefault(String[] pathElements) {
+      return (pathElements.length < 2) ||
+          !(pathElements[0].equals("user") && pathElements[1].equals("authz"));
+    }
+
+  }
+
+  @Before
+  public void setUp() throws IOException {
+    CALLED.clear();
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
+        MyAuthorizationProvider.class.getName());
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+    miniDFS = new MiniDFSCluster.Builder(conf).build();
+  }
+
+  @After
+  public void cleanUp() throws IOException {
+    CALLED.clear();
+    if (miniDFS != null) {
+      miniDFS.shutdown();
+    }
+    Assert.assertTrue(CALLED.contains("stop"));
+  }
+
+  @Test
+  public void testDelegationToProvider() throws Exception {
+    Assert.assertTrue(CALLED.contains("start"));
+    FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+    fs.mkdirs(new Path("/tmp"));
+    fs.setPermission(new Path("/tmp"), new FsPermission((short) 0777));
+    UserGroupInformation ugi = UserGroupInformation.createUserForTesting("u1",
+        new String[]{"g1"});
+    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+        CALLED.clear();
+        fs.mkdirs(new Path("/tmp/foo"));
+        Assert.assertTrue(CALLED.contains("getAttributes"));
+        Assert.assertTrue(CALLED.contains("checkPermission|null|null|null"));
+        Assert.assertTrue(CALLED.contains("checkPermission|WRITE|null|null"));
+        CALLED.clear();
+        fs.listStatus(new Path("/tmp/foo"));
+        Assert.assertTrue(CALLED.contains("getAttributes"));
+        Assert.assertTrue(
+            CALLED.contains("checkPermission|null|null|READ_EXECUTE"));
+        CALLED.clear();
+        fs.getAclStatus(new Path("/tmp/foo"));
+        Assert.assertTrue(CALLED.contains("getAttributes"));
+        Assert.assertTrue(CALLED.contains("checkPermission|null|null|null"));
+        return null;
+      }
+    });
+  }
+
+  @Test
+  public void testCustomProvider() throws Exception {
+    FileSystem fs = FileSystem.get(miniDFS.getConfiguration(0));
+    fs.mkdirs(new Path("/user/xxx"));
+    FileStatus status = fs.getFileStatus(new Path("/user/xxx"));
+    Assert.assertEquals(System.getProperty("user.name"), status.getOwner());
+    Assert.assertEquals("supergroup", status.getGroup());
+    Assert.assertEquals(new FsPermission((short)0755), status.getPermission());
+    fs.mkdirs(new Path("/user/authz"));
+    status = fs.getFileStatus(new Path("/user/authz"));
+    Assert.assertEquals("foo", status.getOwner());
+    Assert.assertEquals("bar", status.getGroup());
+    Assert.assertEquals(new FsPermission((short) 0770), status.getPermission());
+  }
+
+}


[26/50] [abbrv] hadoop git commit: HDFS-7854. Separate class DataStreamer out of DFSOutputStream. Contributed by Li Bo.

Posted by zj...@apache.org.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb2eb773/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
new file mode 100644
index 0000000..6047825
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -0,0 +1,1754 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status.SUCCESS;
+
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InterruptedIOException;
+import java.io.OutputStream;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.nio.channels.ClosedChannelException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
+import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
+import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
+import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
+import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
+import org.apache.hadoop.hdfs.util.ByteArrayManager;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.Time;
+import org.apache.htrace.NullScope;
+import org.apache.htrace.Sampler;
+import org.apache.htrace.Span;
+import org.apache.htrace.Trace;
+import org.apache.htrace.TraceInfo;
+import org.apache.htrace.TraceScope;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.CacheLoader;
+import com.google.common.cache.LoadingCache;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+
+/*********************************************************************
+ *
+ * The DataStreamer class is responsible for sending data packets to the
+ * datanodes in the pipeline. It retrieves a new blockid and block locations
+ * from the namenode, and starts streaming packets to the pipeline of
+ * Datanodes. Every packet has a sequence number associated with
+ * it. When all the packets for a block are sent out and acks for each
+ * of them are received, the DataStreamer closes the current block.
+ *
+ * The DataStreamer thread picks up packets from the dataQueue, sends each
+ * one to the first datanode in the pipeline, and moves it from the dataQueue
+ * to the ackQueue. The ResponseProcessor receives acks from the datanodes.
+ * When a successful ack for a packet has been received from all datanodes,
+ * the ResponseProcessor removes the corresponding packet from the ackQueue.
+ *
+ * In case of error, all outstanding packets are moved from the ackQueue back
+ * to the dataQueue. A new pipeline is set up by eliminating the bad datanode
+ * from the original pipeline. The DataStreamer then resumes sending packets
+ * from the dataQueue.
+ *
+ *********************************************************************/
+
+class DataStreamer extends Daemon {
+  /**
+   * Create a socket for a write pipeline
+   *
+   * @param first the first datanode
+   * @param length the pipeline length
+   * @param client client
+   * @return the socket connected to the first datanode
+   */
+  static Socket createSocketForPipeline(final DatanodeInfo first,
+      final int length, final DFSClient client) throws IOException {
+    final String dnAddr = first.getXferAddr(
+        client.getConf().connectToDnViaHostname);
+    if (DFSClient.LOG.isDebugEnabled()) {
+      DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
+    }
+    final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
+    final Socket sock = client.socketFactory.createSocket();
+    final int timeout = client.getDatanodeReadTimeout(length);
+    NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(), client.getConf().socketTimeout);
+    sock.setSoTimeout(timeout);
+    sock.setSendBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
+    if(DFSClient.LOG.isDebugEnabled()) {
+      DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
+    }
+    return sock;
+  }
+
+  /**
+   * Whether this file is stored with the lazy-persist storage policy.
+   *
+   * @param stat the HdfsFileStatus of a file
+   * @return true if this file uses the lazy-persist storage policy
+   */
+  static boolean isLazyPersist(HdfsFileStatus stat) {
+    final BlockStoragePolicy p = blockStoragePolicySuite.getPolicy(
+        HdfsConstants.MEMORY_STORAGE_POLICY_NAME);
+    return p != null && stat.getStoragePolicy() == p.getId();
+  }
+
+  /**
+   * Release a list of packets back to the ByteArrayManager.
+   *
+   * @param packets packets to be released
+   * @param bam ByteArrayManager
+   */
+  private static void releaseBuffer(List<DFSPacket> packets, ByteArrayManager bam) {
+    for(DFSPacket p : packets) {
+      p.releaseBuffer(bam);
+    }
+    packets.clear();
+  }
+
+  private volatile boolean streamerClosed = false;
+  private ExtendedBlock block; // its length is number of bytes acked
+  private Token<BlockTokenIdentifier> accessToken;
+  private DataOutputStream blockStream;
+  private DataInputStream blockReplyStream;
+  private ResponseProcessor response = null;
+  private volatile DatanodeInfo[] nodes = null; // list of targets for current block
+  private volatile StorageType[] storageTypes = null;
+  private volatile String[] storageIDs = null;
+  private String[] favoredNodes;
+  volatile boolean hasError = false;
+  volatile int errorIndex = -1;
+  // Restarting node index
+  AtomicInteger restartingNodeIndex = new AtomicInteger(-1);
+  private long restartDeadline = 0; // Deadline of DN restart
+  private BlockConstructionStage stage;  // block construction stage
+  private long bytesSent = 0; // number of bytes that've been sent
+  private final boolean isLazyPersistFile;
+
+  /** Nodes have been used in the pipeline before and have failed. */
+  private final List<DatanodeInfo> failed = new ArrayList<>();
+  /** The last ack sequence number before pipeline failure. */
+  private long lastAckedSeqnoBeforeFailure = -1;
+  private int pipelineRecoveryCount = 0;
+  /** Has the current block been hflushed? */
+  private boolean isHflushed = false;
+  /** Append on an existing block? */
+  private boolean isAppend;
+
+  private long currentSeqno = 0;
+  private long lastQueuedSeqno = -1;
+  private long lastAckedSeqno = -1;
+  private long bytesCurBlock = 0; // bytes written in current block
+  private final AtomicReference<IOException> lastException = new AtomicReference<>();
+  private Socket s;
+
+  private final DFSClient dfsClient;
+  private final String src;
+  /** Only for DataTransferProtocol.writeBlock(..) */
+  private final DataChecksum checksum4WriteBlock;
+  private final Progressable progress;
+  private final HdfsFileStatus stat;
+  // appending to existing partial block
+  private volatile boolean appendChunk = false;
+  // both dataQueue and ackQueue are protected by dataQueue lock
+  private final LinkedList<DFSPacket> dataQueue = new LinkedList<>();
+  private final LinkedList<DFSPacket> ackQueue = new LinkedList<>();
+  private final AtomicReference<CachingStrategy> cachingStrategy;
+  private final ByteArrayManager byteArrayManager;
+  private static final BlockStoragePolicySuite blockStoragePolicySuite =
+      BlockStoragePolicySuite.createDefaultSuite();
+  //persist blocks on namenode
+  private final AtomicBoolean persistBlocks = new AtomicBoolean(false);
+  private boolean failPacket = false;
+  private final long dfsclientSlowLogThresholdMs;
+  private long artificialSlowdown = 0;
+
+  private final LoadingCache<DatanodeInfo, DatanodeInfo> excludedNodes;
+
+  private DataStreamer(HdfsFileStatus stat, DFSClient dfsClient, String src,
+                       Progressable progress, DataChecksum checksum,
+                       AtomicReference<CachingStrategy> cachingStrategy,
+                       ByteArrayManager byteArrayManage){
+    this.dfsClient = dfsClient;
+    this.src = src;
+    this.progress = progress;
+    this.stat = stat;
+    this.checksum4WriteBlock = checksum;
+    this.cachingStrategy = cachingStrategy;
+    this.byteArrayManager = byteArrayManage;
+    isLazyPersistFile = isLazyPersist(stat);
+    this.dfsclientSlowLogThresholdMs =
+        dfsClient.getConf().dfsclientSlowIoWarningThresholdMs;
+    excludedNodes = initExcludedNodes();
+  }
+
+  /**
+   * construction with tracing info
+   */
+  DataStreamer(HdfsFileStatus stat, ExtendedBlock block, DFSClient dfsClient,
+               String src, Progressable progress, DataChecksum checksum,
+               AtomicReference<CachingStrategy> cachingStrategy,
+               ByteArrayManager byteArrayManage) {
+    this(stat, dfsClient, src, progress, checksum, cachingStrategy,
+        byteArrayManage);
+    isAppend = false;
+    this.block = block;
+    stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
+  }
+
+  /**
+   * Construct a data streamer for appending to the last partial block
+   * @param lastBlock last block of the file to be appended
+   * @param stat status of the file to be appended
+   * @throws IOException if error occurs
+   */
+  DataStreamer(LocatedBlock lastBlock, HdfsFileStatus stat, DFSClient dfsClient,
+               String src, Progressable progress, DataChecksum checksum,
+               AtomicReference<CachingStrategy> cachingStrategy,
+               ByteArrayManager byteArrayManage) throws IOException {
+    this(stat, dfsClient, src, progress, checksum, cachingStrategy,
+        byteArrayManage);
+    isAppend = true;
+    stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
+    block = lastBlock.getBlock();
+    bytesSent = block.getNumBytes();
+    accessToken = lastBlock.getBlockToken();
+  }
+
+  /**
+   * Set pipeline in construction
+   *
+   * @param lastBlock the last block of a file
+   * @throws IOException
+   */
+  void setPipelineInConstruction(LocatedBlock lastBlock) throws IOException{
+    // setup pipeline to append to the last block XXX retries??
+    setPipeline(lastBlock);
+    errorIndex = -1;   // no errors yet.
+    if (nodes.length < 1) {
+      throw new IOException("Unable to retrieve blocks locations " +
+          " for last block " + block +
+          "of file " + src);
+    }
+  }
+
+  private void setPipeline(LocatedBlock lb) {
+    setPipeline(lb.getLocations(), lb.getStorageTypes(), lb.getStorageIDs());
+  }
+
+  private void setPipeline(DatanodeInfo[] nodes, StorageType[] storageTypes,
+                           String[] storageIDs) {
+    this.nodes = nodes;
+    this.storageTypes = storageTypes;
+    this.storageIDs = storageIDs;
+  }
+
+  /**
+   * Set favored nodes
+   *
+   * @param favoredNodes favored nodes
+   */
+  void setFavoredNodes(String[] favoredNodes) {
+    this.favoredNodes = favoredNodes;
+  }
+
+  /**
+   * Initialize for data streaming
+   */
+  private void initDataStreaming() {
+    this.setName("DataStreamer for file " + src +
+        " block " + block);
+    response = new ResponseProcessor(nodes);
+    response.start();
+    stage = BlockConstructionStage.DATA_STREAMING;
+  }
+
+  private void endBlock() {
+    if(DFSClient.LOG.isDebugEnabled()) {
+      DFSClient.LOG.debug("Closing old block " + block);
+    }
+    this.setName("DataStreamer for file " + src);
+    closeResponder();
+    closeStream();
+    setPipeline(null, null, null);
+    stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
+  }
+
+  /*
+   * streamer thread is the only thread that opens streams to datanode,
+   * and closes them. Any error recovery is also done by this thread.
+   */
+  @Override
+  public void run() {
+    long lastPacket = Time.monotonicNow();
+    TraceScope scope = NullScope.INSTANCE;
+    while (!streamerClosed && dfsClient.clientRunning) {
+      // if the Responder encountered an error, shutdown Responder
+      if (hasError && response != null) {
+        try {
+          response.close();
+          response.join();
+          response = null;
+        } catch (InterruptedException  e) {
+          DFSClient.LOG.warn("Caught exception ", e);
+        }
+      }
+
+      DFSPacket one;
+      try {
+        // process datanode IO errors if any
+        boolean doSleep = false;
+        if (hasError && (errorIndex >= 0 || restartingNodeIndex.get() >= 0)) {
+          doSleep = processDatanodeError();
+        }
+
+        synchronized (dataQueue) {
+          // wait for a packet to be sent.
+          long now = Time.monotonicNow();
+          while ((!streamerClosed && !hasError && dfsClient.clientRunning
+              && dataQueue.size() == 0 &&
+              (stage != BlockConstructionStage.DATA_STREAMING ||
+                  stage == BlockConstructionStage.DATA_STREAMING &&
+                      now - lastPacket < dfsClient.getConf().socketTimeout/2)) || doSleep ) {
+            long timeout = dfsClient.getConf().socketTimeout/2 - (now-lastPacket);
+            timeout = timeout <= 0 ? 1000 : timeout;
+            timeout = (stage == BlockConstructionStage.DATA_STREAMING)?
+                timeout : 1000;
+            try {
+              dataQueue.wait(timeout);
+            } catch (InterruptedException  e) {
+              DFSClient.LOG.warn("Caught exception ", e);
+            }
+            doSleep = false;
+            now = Time.monotonicNow();
+          }
+          if (streamerClosed || hasError || !dfsClient.clientRunning) {
+            continue;
+          }
+          // get packet to be sent.
+          if (dataQueue.isEmpty()) {
+            one = createHeartbeatPacket();
+            assert one != null;
+          } else {
+            one = dataQueue.getFirst(); // regular data packet
+            long parents[] = one.getTraceParents();
+            if (parents.length > 0) {
+              scope = Trace.startSpan("dataStreamer", new TraceInfo(0, parents[0]));
+              // TODO: use setParents API once it's available from HTrace 3.2
+              // scope = Trace.startSpan("dataStreamer", Sampler.ALWAYS);
+              // scope.getSpan().setParents(parents);
+            }
+          }
+        }
+
+        // get new block from namenode.
+        if (stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
+          if(DFSClient.LOG.isDebugEnabled()) {
+            DFSClient.LOG.debug("Allocating new block");
+          }
+          setPipeline(nextBlockOutputStream());
+          initDataStreaming();
+        } else if (stage == BlockConstructionStage.PIPELINE_SETUP_APPEND) {
+          if(DFSClient.LOG.isDebugEnabled()) {
+            DFSClient.LOG.debug("Append to block " + block);
+          }
+          setupPipelineForAppendOrRecovery();
+          initDataStreaming();
+        }
+
+        long lastByteOffsetInBlock = one.getLastByteOffsetBlock();
+        if (lastByteOffsetInBlock > stat.getBlockSize()) {
+          throw new IOException("BlockSize " + stat.getBlockSize() +
+              " is smaller than data size. " +
+              " Offset of packet in block " +
+              lastByteOffsetInBlock +
+              " Aborting file " + src);
+        }
+
+        if (one.isLastPacketInBlock()) {
+          // wait for all data packets to be successfully acked
+          synchronized (dataQueue) {
+            while (!streamerClosed && !hasError &&
+                ackQueue.size() != 0 && dfsClient.clientRunning) {
+              try {
+                // wait for acks to arrive from datanodes
+                dataQueue.wait(1000);
+              } catch (InterruptedException  e) {
+                DFSClient.LOG.warn("Caught exception ", e);
+              }
+            }
+          }
+          if (streamerClosed || hasError || !dfsClient.clientRunning) {
+            continue;
+          }
+          stage = BlockConstructionStage.PIPELINE_CLOSE;
+        }
+
+        // send the packet
+        Span span = null;
+        synchronized (dataQueue) {
+          // move packet from dataQueue to ackQueue
+          if (!one.isHeartbeatPacket()) {
+            span = scope.detach();
+            one.setTraceSpan(span);
+            dataQueue.removeFirst();
+            ackQueue.addLast(one);
+            dataQueue.notifyAll();
+          }
+        }
+
+        if (DFSClient.LOG.isDebugEnabled()) {
+          DFSClient.LOG.debug("DataStreamer block " + block +
+              " sending packet " + one);
+        }
+
+        // write out data to remote datanode
+        TraceScope writeScope = Trace.startSpan("writeTo", span);
+        try {
+          one.writeTo(blockStream);
+          blockStream.flush();
+        } catch (IOException e) {
+          // HDFS-3398: treat the primary DN as down since the client is unable
+          // to write to it. If a failed or restarting node has already been
+          // recorded by the responder, the following call will have no effect.
+          // Pipeline recovery can handle only one node error at a time. If the
+          // primary node fails again during the recovery, it will be taken out
+          // then.
+          tryMarkPrimaryDatanodeFailed();
+          throw e;
+        } finally {
+          writeScope.close();
+        }
+        lastPacket = Time.monotonicNow();
+
+        // update bytesSent
+        long tmpBytesSent = one.getLastByteOffsetBlock();
+        if (bytesSent < tmpBytesSent) {
+          bytesSent = tmpBytesSent;
+        }
+
+        if (streamerClosed || hasError || !dfsClient.clientRunning) {
+          continue;
+        }
+
+        // Is this block full?
+        if (one.isLastPacketInBlock()) {
+          // wait until the close packet has been acked
+          synchronized (dataQueue) {
+            while (!streamerClosed && !hasError &&
+                ackQueue.size() != 0 && dfsClient.clientRunning) {
+              dataQueue.wait(1000);// wait for acks to arrive from datanodes
+            }
+          }
+          if (streamerClosed || hasError || !dfsClient.clientRunning) {
+            continue;
+          }
+
+          endBlock();
+        }
+        if (progress != null) { progress.progress(); }
+
+        // This is used by unit test to trigger race conditions.
+        if (artificialSlowdown != 0 && dfsClient.clientRunning) {
+          Thread.sleep(artificialSlowdown);
+        }
+      } catch (Throwable e) {
+        // Log warning if there was a real error.
+        if (restartingNodeIndex.get() == -1) {
+          // Since their messages are descriptive enough, do not always
+          // log a verbose stack-trace WARN for quota exceptions.
+          if (e instanceof QuotaExceededException) {
+            DFSClient.LOG.debug("DataStreamer Quota Exception", e);
+          } else {
+            DFSClient.LOG.warn("DataStreamer Exception", e);
+          }
+        }
+        if (e instanceof IOException) {
+          setLastException((IOException)e);
+        } else {
+          setLastException(new IOException("DataStreamer Exception: ",e));
+        }
+        hasError = true;
+        if (errorIndex == -1 && restartingNodeIndex.get() == -1) {
+          // Not a datanode issue
+          streamerClosed = true;
+        }
+      } finally {
+        scope.close();
+      }
+    }
+    closeInternal();
+  }
+
+  private void closeInternal() {
+    closeResponder();       // close and join
+    closeStream();
+    streamerClosed = true;
+    release();
+    synchronized (dataQueue) {
+      dataQueue.notifyAll();
+    }
+  }
+
+  /**
+   * release the DFSPackets in the two queues
+   *
+   */
+  void release() {
+    synchronized (dataQueue) {
+      releaseBuffer(dataQueue, byteArrayManager);
+      releaseBuffer(ackQueue, byteArrayManager);
+    }
+  }
+
+  /**
+   * wait for the ack of seqno
+   *
+   * @param seqno the sequence number to be acked
+   * @throws IOException
+   */
+  void waitForAckedSeqno(long seqno) throws IOException {
+    TraceScope scope = Trace.startSpan("waitForAckedSeqno", Sampler.NEVER);
+    try {
+      if (DFSClient.LOG.isDebugEnabled()) {
+        DFSClient.LOG.debug("Waiting for ack for: " + seqno);
+      }
+      long begin = Time.monotonicNow();
+      try {
+        synchronized (dataQueue) {
+          while (!streamerClosed) {
+            checkClosed();
+            if (lastAckedSeqno >= seqno) {
+              break;
+            }
+            try {
+              dataQueue.wait(1000); // when we receive an ack, we notify on
+              // dataQueue
+            } catch (InterruptedException ie) {
+              throw new InterruptedIOException(
+                  "Interrupted while waiting for data to be acknowledged by pipeline");
+            }
+          }
+        }
+        checkClosed();
+      } catch (ClosedChannelException e) {
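+        // The streamer was closed without a recorded error; there is nothing left to wait for.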
+      }
+      long duration = Time.monotonicNow() - begin;
+      if (duration > dfsclientSlowLogThresholdMs) {
+        DFSClient.LOG.warn("Slow waitForAckedSeqno took " + duration
+            + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms)");
+      }
+    } finally {
+      scope.close();
+    }
+  }
+
+  /**
+   * wait for space in the dataQueue and queue the packet
+   *
+   * @param packet  the DFSPacket to be queued
+   * @throws IOException
+   */
+  void waitAndQueuePacket(DFSPacket packet) throws IOException {
+    synchronized (dataQueue) {
+      try {
+        // If queue is full, then wait till we have enough space
+        boolean firstWait = true;
+        try {
+          while (!streamerClosed && dataQueue.size() + ackQueue.size() >
+              dfsClient.getConf().writeMaxPackets) {
+            if (firstWait) {
+              Span span = Trace.currentSpan();
+              if (span != null) {
+                span.addTimelineAnnotation("dataQueue.wait");
+              }
+              firstWait = false;
+            }
+            try {
+              dataQueue.wait();
+            } catch (InterruptedException e) {
+              // If we get interrupted while waiting to queue data, we still need to get rid
+              // of the current packet. This is because we have an invariant that if
+              // currentPacket gets full, it will get queued before the next writeChunk.
+              //
+              // Rather than wait around for space in the queue, we should instead try to
+              // return to the caller as soon as possible, even though we slightly overrun
+              // the MAX_PACKETS length.
+              Thread.currentThread().interrupt();
+              break;
+            }
+          }
+        } finally {
+          Span span = Trace.currentSpan();
+          if ((span != null) && (!firstWait)) {
+            span.addTimelineAnnotation("end.wait");
+          }
+        }
+        checkClosed();
+        queuePacket(packet);
+      } catch (ClosedChannelException e) {
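+        // The streamer was closed without a recorded error; the packet is silently dropped.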
+      }
+    }
+  }
+
+  /*
+   * close the streamer, should be called only by an external thread
+   * and only after all data to be sent has been flushed to datanode.
+   *
+   * Interrupt this data streamer if force is true
+   *
+   * @param force if this data stream is forced to be closed
+   */
+  void close(boolean force) {
+    streamerClosed = true;
+    synchronized (dataQueue) {
+      dataQueue.notifyAll();
+    }
+    if (force) {
+      this.interrupt();
+    }
+  }
+
+
+  private void checkClosed() throws IOException {
+    if (streamerClosed) {
+      IOException e = lastException.get();
+      throw e != null ? e : new ClosedChannelException();
+    }
+  }
+
+  private void closeResponder() {
+    if (response != null) {
+      try {
+        response.close();
+        response.join();
+      } catch (InterruptedException  e) {
+        DFSClient.LOG.warn("Caught exception ", e);
+      } finally {
+        response = null;
+      }
+    }
+  }
+
+  private void closeStream() {
+    if (blockStream != null) {
+      try {
+        blockStream.close();
+      } catch (IOException e) {
+        setLastException(e);
+      } finally {
+        blockStream = null;
+      }
+    }
+    if (blockReplyStream != null) {
+      try {
+        blockReplyStream.close();
+      } catch (IOException e) {
+        setLastException(e);
+      } finally {
+        blockReplyStream = null;
+      }
+    }
+    if (null != s) {
+      try {
+        s.close();
+      } catch (IOException e) {
+        setLastException(e);
+      } finally {
+        s = null;
+      }
+    }
+  }
+
+  // The following synchronized methods are used whenever
+  // errorIndex or restartingNodeIndex is set. This is because
+  // check & set needs to be atomic. Simply reading variables
+  // does not require a synchronization. When responder is
+  // not running (e.g. during pipeline recovery), there is no
+  // need to use these methods.
+
+  /** Set the error node index. Called by responder */
+  synchronized void setErrorIndex(int idx) {
+    errorIndex = idx;
+  }
+
+  /** Set the restarting node index. Called by responder */
+  synchronized void setRestartingNodeIndex(int idx) {
+    restartingNodeIndex.set(idx);
+    // If the data streamer has already marked the primary node as
+    // bad, clear it. It is likely that the write failed due to
+    // the DN shutdown. Even if it was a real failure, the pipeline
+    // recovery will take care of it.
+    errorIndex = -1;
+  }
+
+  /**
+   * This method is used when no explicit error report was received,
+   * but something failed. When the primary node is suspect, or the
+   * cause of the failure is unclear, the primary node is marked as failed.
+   */
+  synchronized void tryMarkPrimaryDatanodeFailed() {
+    // There should be no existing error and no ongoing restart.
+    if ((errorIndex == -1) && (restartingNodeIndex.get() == -1)) {
+      errorIndex = 0;
+    }
+  }
+
+  /**
+   * Examine whether it is worth waiting for a node to restart.
+   * @param index the node index
+   */
+  boolean shouldWaitForRestart(int index) {
+    // Only one node in the pipeline.
+    if (nodes.length == 1) {
+      return true;
+    }
+
+    // Is it a local node?
+    InetAddress addr = null;
+    try {
+      addr = InetAddress.getByName(nodes[index].getIpAddr());
+    } catch (java.net.UnknownHostException e) {
+      // we are passing an ip address. this should not happen.
+      assert false;
+    }
+
+    if (addr != null && NetUtils.isLocalAddress(addr)) {
+      return true;
+    }
+    return false;
+  }
+
+  //
+  // Processes responses from the datanodes.  A packet is removed
+  // from the ackQueue when its response arrives.
+  //
+  private class ResponseProcessor extends Daemon {
+
+    private volatile boolean responderClosed = false;
+    private DatanodeInfo[] targets = null;
+    private boolean isLastPacketInBlock = false;
+
+    ResponseProcessor (DatanodeInfo[] targets) {
+      this.targets = targets;
+    }
+
+    @Override
+    public void run() {
+
+      setName("ResponseProcessor for block " + block);
+      PipelineAck ack = new PipelineAck();
+
+      TraceScope scope = NullScope.INSTANCE;
+      while (!responderClosed && dfsClient.clientRunning && !isLastPacketInBlock) {
+        // process responses from datanodes.
+        try {
+          // read an ack from the pipeline
+          long begin = Time.monotonicNow();
+          ack.readFields(blockReplyStream);
+          long duration = Time.monotonicNow() - begin;
+          if (duration > dfsclientSlowLogThresholdMs
+              && ack.getSeqno() != DFSPacket.HEART_BEAT_SEQNO) {
+            DFSClient.LOG
+                .warn("Slow ReadProcessor read fields took " + duration
+                    + "ms (threshold=" + dfsclientSlowLogThresholdMs + "ms); ack: "
+                    + ack + ", targets: " + Arrays.asList(targets));
+          } else if (DFSClient.LOG.isDebugEnabled()) {
+            DFSClient.LOG.debug("DFSClient " + ack);
+          }
+
+          long seqno = ack.getSeqno();
+          // processes response status from datanodes.
+          for (int i = ack.getNumOfReplies()-1; i >=0  && dfsClient.clientRunning; i--) {
+            final Status reply = PipelineAck.getStatusFromHeader(ack
+                .getReply(i));
+            // Restart will not be treated differently unless it is
+            // the local node or the only one in the pipeline.
+            if (PipelineAck.isRestartOOBStatus(reply) &&
+                shouldWaitForRestart(i)) {
+              restartDeadline = dfsClient.getConf().datanodeRestartTimeout
+                  + Time.monotonicNow();
+              setRestartingNodeIndex(i);
+              String message = "A datanode is restarting: " + targets[i];
+              DFSClient.LOG.info(message);
+              throw new IOException(message);
+            }
+            // node error
+            if (reply != SUCCESS) {
+              setErrorIndex(i); // first bad datanode
+              throw new IOException("Bad response " + reply +
+                  " for block " + block +
+                  " from datanode " +
+                  targets[i]);
+            }
+          }
+
+          assert seqno != PipelineAck.UNKOWN_SEQNO :
+              "Ack for unknown seqno should be a failed ack: " + ack;
+          if (seqno == DFSPacket.HEART_BEAT_SEQNO) {  // a heartbeat ack
+            continue;
+          }
+
+          // a success ack for a data packet
+          DFSPacket one;
+          synchronized (dataQueue) {
+            one = ackQueue.getFirst();
+          }
+          if (one.getSeqno() != seqno) {
+            throw new IOException("ResponseProcessor: Expecting seqno " +
+                one.getSeqno() + " for block " + block +
+                " but received " + seqno);
+          }
+          isLastPacketInBlock = one.isLastPacketInBlock();
+
+          // Fail the packet write for testing in order to force a
+          // pipeline recovery.
+          if (DFSClientFaultInjector.get().failPacket() &&
+              isLastPacketInBlock) {
+            failPacket = true;
+            throw new IOException(
+                "Failing the last packet for testing.");
+          }
+
+          // update bytesAcked
+          block.setNumBytes(one.getLastByteOffsetBlock());
+
+          synchronized (dataQueue) {
+            scope = Trace.continueSpan(one.getTraceSpan());
+            one.setTraceSpan(null);
+            lastAckedSeqno = seqno;
+            ackQueue.removeFirst();
+            dataQueue.notifyAll();
+
+            one.releaseBuffer(byteArrayManager);
+          }
+        } catch (Exception e) {
+          if (!responderClosed) {
+            if (e instanceof IOException) {
+              setLastException((IOException)e);
+            }
+            hasError = true;
+            // If no explicit error report was received, mark the primary
+            // node as failed.
+            tryMarkPrimaryDatanodeFailed();
+            synchronized (dataQueue) {
+              dataQueue.notifyAll();
+            }
+            if (restartingNodeIndex.get() == -1) {
+              DFSClient.LOG.warn("DataStreamer ResponseProcessor exception "
+                  + "for block " + block, e);
+            }
+            responderClosed = true;
+          }
+        } finally {
+          scope.close();
+        }
+      }
+    }
+
+    void close() {
+      responderClosed = true;
+      this.interrupt();
+    }
+  }
+
+  // If this stream has encountered any errors so far, shutdown
+  // threads and mark stream as closed. Returns true if we should
+  // sleep for a while after returning from this call.
+  //
+  private boolean processDatanodeError() throws IOException {
+    if (response != null) {
+      DFSClient.LOG.info("Error Recovery for " + block +
+          " waiting for responder to exit. ");
+      return true;
+    }
+    closeStream();
+
+    // move packets from ack queue to front of the data queue
+    synchronized (dataQueue) {
+      dataQueue.addAll(0, ackQueue);
+      ackQueue.clear();
+    }
+
+    // Record the new pipeline failure recovery.
+    if (lastAckedSeqnoBeforeFailure != lastAckedSeqno) {
+      lastAckedSeqnoBeforeFailure = lastAckedSeqno;
+      pipelineRecoveryCount = 1;
+    } else {
+      // If we had to recover the pipeline five times in a row for the
+      // same packet, this client likely has corrupt data or the data is
+      // being corrupted during transmission.
+      if (++pipelineRecoveryCount > 5) {
+        DFSClient.LOG.warn("Error recovering pipeline for writing " +
+            block + ". Already retried 5 times for the same packet.");
+        lastException.set(new IOException("Failing write. Tried pipeline " +
+            "recovery 5 times without success."));
+        streamerClosed = true;
+        return false;
+      }
+    }
+    boolean doSleep = setupPipelineForAppendOrRecovery();
+
+    if (!streamerClosed && dfsClient.clientRunning) {
+      if (stage == BlockConstructionStage.PIPELINE_CLOSE) {
+
+        // If we had an error while closing the pipeline, we go through a fast-path
+        // where the BlockReceiver does not run. Instead, the DataNode just finalizes
+        // the block immediately during the 'connect ack' process. So, we want to pull
+        // the end-of-block packet from the dataQueue, since we don't actually have
+        // a true pipeline to send it over.
+        //
+        // We also need to set lastAckedSeqno to the end-of-block Packet's seqno, so that
+        // a client waiting on close() will be aware that the flush finished.
+        synchronized (dataQueue) {
+          DFSPacket endOfBlockPacket = dataQueue.remove();  // remove the end of block packet
+          Span span = endOfBlockPacket.getTraceSpan();
+          if (span != null) {
+            // Close any trace span associated with this Packet
+            TraceScope scope = Trace.continueSpan(span);
+            scope.close();
+          }
+          assert endOfBlockPacket.isLastPacketInBlock();
+          assert lastAckedSeqno == endOfBlockPacket.getSeqno() - 1;
+          lastAckedSeqno = endOfBlockPacket.getSeqno();
+          dataQueue.notifyAll();
+        }
+        endBlock();
+      } else {
+        initDataStreaming();
+      }
+    }
+
+    return doSleep;
+  }
+
+  void setHflush() {
+    isHflushed = true;
+  }
+
+  private int findNewDatanode(final DatanodeInfo[] original
+  ) throws IOException {
+    if (nodes.length != original.length + 1) {
+      throw new IOException(
+          new StringBuilder()
+              .append("Failed to replace a bad datanode on the existing pipeline ")
+              .append("due to no more good datanodes being available to try. ")
+              .append("(Nodes: current=").append(Arrays.asList(nodes))
+              .append(", original=").append(Arrays.asList(original)).append("). ")
+              .append("The current failed datanode replacement policy is ")
+              .append(dfsClient.dtpReplaceDatanodeOnFailure).append(", and ")
+              .append("a client may configure this via '")
+              .append(DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY)
+              .append("' in its configuration.")
+              .toString());
+    }
+    for(int i = 0; i < nodes.length; i++) {
+      int j = 0;
+      for(; j < original.length && !nodes[i].equals(original[j]); j++);
+      if (j == original.length) {
+        return i;
+      }
+    }
+    throw new IOException("Failed: new datanode not found: nodes="
+        + Arrays.asList(nodes) + ", original=" + Arrays.asList(original));
+  }
+
+  private void addDatanode2ExistingPipeline() throws IOException {
+    if (DataTransferProtocol.LOG.isDebugEnabled()) {
+      DataTransferProtocol.LOG.debug("lastAckedSeqno = " + lastAckedSeqno);
+    }
+      /*
+       * Is data transfer necessary?  We have the following cases.
+       *
+       * Case 1: Failure in Pipeline Setup
+       * - Append
+       *    + Transfer the stored replica, which may be an RBW or a finalized replica.
+       * - Create
+       *    + If no data has been written, no transfer is required.
+       *    + If data has been written, transfer the RBW. This case may happen
+       *      when there was a streaming failure earlier in this pipeline.
+       *
+       * Case 2: Failure in Streaming
+       * - Append/Create:
+       *    + transfer RBW
+       *
+       * Case 3: Failure in Close
+       * - Append/Create:
+       *    + no transfer; let the NameNode replicate the block.
+       */
+    if (!isAppend && lastAckedSeqno < 0
+        && stage == BlockConstructionStage.PIPELINE_SETUP_CREATE) {
+      //no data has been written
+      return;
+    } else if (stage == BlockConstructionStage.PIPELINE_CLOSE
+        || stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) {
+      //pipeline is closing
+      return;
+    }
+
+    //get a new datanode
+    final DatanodeInfo[] original = nodes;
+    final LocatedBlock lb = dfsClient.namenode.getAdditionalDatanode(
+        src, stat.getFileId(), block, nodes, storageIDs,
+        failed.toArray(new DatanodeInfo[failed.size()]),
+        1, dfsClient.clientName);
+    setPipeline(lb);
+
+    //find the new datanode
+    final int d = findNewDatanode(original);
+
+    //transfer replica
+    final DatanodeInfo src = d == 0? nodes[1]: nodes[d - 1];
+    final DatanodeInfo[] targets = {nodes[d]};
+    final StorageType[] targetStorageTypes = {storageTypes[d]};
+    transfer(src, targets, targetStorageTypes, lb.getBlockToken());
+  }
+
+  private void transfer(final DatanodeInfo src, final DatanodeInfo[] targets,
+                        final StorageType[] targetStorageTypes,
+                        final Token<BlockTokenIdentifier> blockToken) throws IOException {
+    //transfer replica to the new datanode
+    Socket sock = null;
+    DataOutputStream out = null;
+    DataInputStream in = null;
+    try {
+      sock = createSocketForPipeline(src, 2, dfsClient);
+      final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2);
+
+      OutputStream unbufOut = NetUtils.getOutputStream(sock, writeTimeout);
+      InputStream unbufIn = NetUtils.getInputStream(sock);
+      IOStreamPair saslStreams = dfsClient.saslClient.socketSend(sock,
+          unbufOut, unbufIn, dfsClient, blockToken, src);
+      unbufOut = saslStreams.out;
+      unbufIn = saslStreams.in;
+      out = new DataOutputStream(new BufferedOutputStream(unbufOut,
+          HdfsConstants.SMALL_BUFFER_SIZE));
+      in = new DataInputStream(unbufIn);
+
+      //send the TRANSFER_BLOCK request
+      new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
+          targets, targetStorageTypes);
+      out.flush();
+
+      //ack
+      BlockOpResponseProto response =
+          BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
+      if (SUCCESS != response.getStatus()) {
+        throw new IOException("Failed to add a datanode");
+      }
+    } finally {
+      IOUtils.closeStream(in);
+      IOUtils.closeStream(out);
+      IOUtils.closeSocket(sock);
+    }
+  }
+
+  /**
+   * Open a DataStreamer to a DataNode pipeline so that
+   * it can be written to.
+   * This happens when a file is appended or data streaming fails.
+   * It keeps on trying until a pipeline is set up.
+   */
+  private boolean setupPipelineForAppendOrRecovery() throws IOException {
+    // check number of datanodes
+    if (nodes == null || nodes.length == 0) {
+      String msg = "Could not get block locations. " + "Source file \""
+          + src + "\" - Aborting...";
+      DFSClient.LOG.warn(msg);
+      setLastException(new IOException(msg));
+      streamerClosed = true;
+      return false;
+    }
+
+    boolean success = false;
+    long newGS = 0L;
+    while (!success && !streamerClosed && dfsClient.clientRunning) {
+      // Sleep before reconnect if a dn is restarting.
+      // This process will be repeated until the deadline or the datanode
+      // starts back up.
+      if (restartingNodeIndex.get() >= 0) {
+        // 4 seconds or the configured deadline period, whichever is shorter.
+        // This is the retry interval and recovery will be retried in this
+        // interval until timeout or success.
+        long delay = Math.min(dfsClient.getConf().datanodeRestartTimeout,
+            4000L);
+        try {
+          Thread.sleep(delay);
+        } catch (InterruptedException ie) {
+          lastException.set(new IOException("Interrupted while waiting for " +
+              "datanode to restart. " + nodes[restartingNodeIndex.get()]));
+          streamerClosed = true;
+          return false;
+        }
+      }
+      boolean isRecovery = hasError;
+      // remove bad datanode from list of datanodes.
+      // If errorIndex was not set (i.e. appends), then do not remove
+      // any datanodes
+      //
+      if (errorIndex >= 0) {
+        StringBuilder pipelineMsg = new StringBuilder();
+        for (int j = 0; j < nodes.length; j++) {
+          pipelineMsg.append(nodes[j]);
+          if (j < nodes.length - 1) {
+            pipelineMsg.append(", ");
+          }
+        }
+        if (nodes.length <= 1) {
+          lastException.set(new IOException("All datanodes " + pipelineMsg
+              + " are bad. Aborting..."));
+          streamerClosed = true;
+          return false;
+        }
+        DFSClient.LOG.warn("Error Recovery for block " + block +
+            " in pipeline " + pipelineMsg +
+            ": bad datanode " + nodes[errorIndex]);
+        failed.add(nodes[errorIndex]);
+
+        DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1];
+        arraycopy(nodes, newnodes, errorIndex);
+
+        final StorageType[] newStorageTypes = new StorageType[newnodes.length];
+        arraycopy(storageTypes, newStorageTypes, errorIndex);
+
+        final String[] newStorageIDs = new String[newnodes.length];
+        arraycopy(storageIDs, newStorageIDs, errorIndex);
+
+        setPipeline(newnodes, newStorageTypes, newStorageIDs);
+
+        // Just took care of a node error while waiting for a node restart
+        if (restartingNodeIndex.get() >= 0) {
+          // If the error came from a node further away than the restarting
+          // node, the restart must have been complete.
+          if (errorIndex > restartingNodeIndex.get()) {
+            restartingNodeIndex.set(-1);
+          } else if (errorIndex < restartingNodeIndex.get()) {
+            // the node index has shifted.
+            restartingNodeIndex.decrementAndGet();
+          } else {
+            // this shouldn't happen...
+            assert false;
+          }
+        }
+
+        if (restartingNodeIndex.get() == -1) {
+          hasError = false;
+        }
+        lastException.set(null);
+        errorIndex = -1;
+      }
+
+      // Check if replace-datanode policy is satisfied.
+      if (dfsClient.dtpReplaceDatanodeOnFailure.satisfy(stat.getReplication(),
+          nodes, isAppend, isHflushed)) {
+        try {
+          addDatanode2ExistingPipeline();
+        } catch(IOException ioe) {
+          if (!dfsClient.dtpReplaceDatanodeOnFailure.isBestEffort()) {
+            throw ioe;
+          }
+          DFSClient.LOG.warn("Failed to replace datanode."
+              + " Continue with the remaining datanodes since "
+              + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_BEST_EFFORT_KEY
+              + " is set to true.", ioe);
+        }
+      }
+
+      // get a new generation stamp and an access token
+      LocatedBlock lb = dfsClient.namenode.updateBlockForPipeline(block, dfsClient.clientName);
+      newGS = lb.getBlock().getGenerationStamp();
+      accessToken = lb.getBlockToken();
+
+      // set up the pipeline again with the remaining nodes
+      if (failPacket) { // for testing
+        success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
+        failPacket = false;
+        try {
+          // Give DNs time to send in bad reports. In real situations,
+          // good reports should follow bad ones, if client committed
+          // with those nodes.
+          Thread.sleep(2000);
+        } catch (InterruptedException ie) {}
+      } else {
+        success = createBlockOutputStream(nodes, storageTypes, newGS, isRecovery);
+      }
+
+      if (restartingNodeIndex.get() >= 0) {
+        assert hasError == true;
+        // check errorIndex set above
+        if (errorIndex == restartingNodeIndex.get()) {
+          // ignore, if came from the restarting node
+          errorIndex = -1;
+        }
+        // still within the deadline
+        if (Time.monotonicNow() < restartDeadline) {
+          continue; // within the deadline
+        }
+        // expired. declare the restarting node dead
+        restartDeadline = 0;
+        int expiredNodeIndex = restartingNodeIndex.get();
+        restartingNodeIndex.set(-1);
+        DFSClient.LOG.warn("Datanode did not restart in time: " +
+            nodes[expiredNodeIndex]);
+        // Mark the restarting node as failed. If there is any other failed
+        // node during the last pipeline construction attempt, it will not be
+        // overwritten/dropped. In this case, the restarting node will get
+        // excluded in the following attempt, if it still does not come up.
+        if (errorIndex == -1) {
+          errorIndex = expiredNodeIndex;
+        }
+        // From this point on, normal pipeline recovery applies.
+      }
+    } // while
+
+    if (success) {
+      // update pipeline at the namenode
+      ExtendedBlock newBlock = new ExtendedBlock(
+          block.getBlockPoolId(), block.getBlockId(), block.getNumBytes(), newGS);
+      dfsClient.namenode.updatePipeline(dfsClient.clientName, block, newBlock,
+          nodes, storageIDs);
+      // update client side generation stamp
+      block = newBlock;
+    }
+    return false; // do not sleep, continue processing
+  }
+
+  /**
+   * Open a DataStreamer to a DataNode so that it can be written to.
+   * This happens when a file is created and each time a new block is allocated.
+   * Must get block ID and the IDs of the destinations from the namenode.
+   * Returns the list of target datanodes.
+   */
+  private LocatedBlock nextBlockOutputStream() throws IOException {
+    LocatedBlock lb = null;
+    DatanodeInfo[] nodes = null;
+    StorageType[] storageTypes = null;
+    int count = dfsClient.getConf().nBlockWriteRetry;
+    boolean success = false;
+    ExtendedBlock oldBlock = block;
+    do {
+      hasError = false;
+      lastException.set(null);
+      errorIndex = -1;
+      success = false;
+
+      DatanodeInfo[] excluded =
+          excludedNodes.getAllPresent(excludedNodes.asMap().keySet())
+              .keySet()
+              .toArray(new DatanodeInfo[0]);
+      block = oldBlock;
+      lb = locateFollowingBlock(excluded.length > 0 ? excluded : null);
+      block = lb.getBlock();
+      block.setNumBytes(0);
+      bytesSent = 0;
+      accessToken = lb.getBlockToken();
+      nodes = lb.getLocations();
+      storageTypes = lb.getStorageTypes();
+
+      //
+      // Connect to first DataNode in the list.
+      //
+      success = createBlockOutputStream(nodes, storageTypes, 0L, false);
+
+      if (!success) {
+        DFSClient.LOG.info("Abandoning " + block);
+        dfsClient.namenode.abandonBlock(block, stat.getFileId(), src,
+            dfsClient.clientName);
+        block = null;
+        DFSClient.LOG.info("Excluding datanode " + nodes[errorIndex]);
+        excludedNodes.put(nodes[errorIndex], nodes[errorIndex]);
+      }
+    } while (!success && --count >= 0);
+
+    if (!success) {
+      throw new IOException("Unable to create new block.");
+    }
+    return lb;
+  }
+
+  // connects to the first datanode in the pipeline
+  // Returns true on success, false otherwise.
+  //
+  private boolean createBlockOutputStream(DatanodeInfo[] nodes,
+      StorageType[] nodeStorageTypes, long newGS, boolean recoveryFlag) {
+    if (nodes.length == 0) {
+      DFSClient.LOG.info("nodes are empty for write pipeline of block "
+          + block);
+      return false;
+    }
+    Status pipelineStatus = SUCCESS;
+    String firstBadLink = "";
+    boolean checkRestart = false;
+    if (DFSClient.LOG.isDebugEnabled()) {
+      for (int i = 0; i < nodes.length; i++) {
+        DFSClient.LOG.debug("pipeline = " + nodes[i]);
+      }
+    }
+
+    // persist blocks on namenode on next flush
+    persistBlocks.set(true);
+
+    int refetchEncryptionKey = 1;
+    while (true) {
+      boolean result = false;
+      DataOutputStream out = null;
+      try {
+        assert null == s : "Previous socket unclosed";
+        assert null == blockReplyStream : "Previous blockReplyStream unclosed";
+        s = createSocketForPipeline(nodes[0], nodes.length, dfsClient);
+        long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);
+
+        OutputStream unbufOut = NetUtils.getOutputStream(s, writeTimeout);
+        InputStream unbufIn = NetUtils.getInputStream(s);
+        IOStreamPair saslStreams = dfsClient.saslClient.socketSend(s,
+            unbufOut, unbufIn, dfsClient, accessToken, nodes[0]);
+        unbufOut = saslStreams.out;
+        unbufIn = saslStreams.in;
+        out = new DataOutputStream(new BufferedOutputStream(unbufOut,
+            HdfsConstants.SMALL_BUFFER_SIZE));
+        blockReplyStream = new DataInputStream(unbufIn);
+
+        //
+        // Xmit header info to datanode
+        //
+
+        BlockConstructionStage bcs = recoveryFlag? stage.getRecoveryStage(): stage;
+
+        // We cannot change the block length in 'block' as it counts the number
+        // of bytes ack'ed.
+        ExtendedBlock blockCopy = new ExtendedBlock(block);
+        blockCopy.setNumBytes(stat.getBlockSize());
+
+        boolean[] targetPinnings = getPinnings(nodes, true);
+        // send the request
+        new Sender(out).writeBlock(blockCopy, nodeStorageTypes[0], accessToken,
+            dfsClient.clientName, nodes, nodeStorageTypes, null, bcs,
+            nodes.length, block.getNumBytes(), bytesSent, newGS,
+            checksum4WriteBlock, cachingStrategy.get(), isLazyPersistFile,
+            (targetPinnings == null ? false : targetPinnings[0]), targetPinnings);
+
+        // receive ack for connect
+        BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
+            PBHelper.vintPrefixed(blockReplyStream));
+        pipelineStatus = resp.getStatus();
+        firstBadLink = resp.getFirstBadLink();
+
+        // Got a restart OOB ack.
+        // If a node is already restarting, this status is not likely from
+        // the same node. If it is from a different node, it is not
+        // from the local datanode. Thus it is safe to treat this as a
+        // regular node error.
+        if (PipelineAck.isRestartOOBStatus(pipelineStatus) &&
+            restartingNodeIndex.get() == -1) {
+          checkRestart = true;
+          throw new IOException("A datanode is restarting.");
+        }
+
+        String logInfo = "ack with firstBadLink as " + firstBadLink;
+        DataTransferProtoUtil.checkBlockOpStatus(resp, logInfo);
+
+        assert null == blockStream : "Previous blockStream unclosed";
+        blockStream = out;
+        result =  true; // success
+        restartingNodeIndex.set(-1);
+        hasError = false;
+      } catch (IOException ie) {
+        if (restartingNodeIndex.get() == -1) {
+          DFSClient.LOG.info("Exception in createBlockOutputStream", ie);
+        }
+        if (ie instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
+          DFSClient.LOG.info("Will fetch a new encryption key and retry, "
+              + "encryption key was invalid when connecting to "
+              + nodes[0] + " : " + ie);
+          // The encryption key used is invalid.
+          refetchEncryptionKey--;
+          dfsClient.clearDataEncryptionKey();
+          // Don't close the socket/exclude this node just yet. Try again with
+          // a new encryption key.
+          continue;
+        }
+
+        // find the datanode that matches
+        if (firstBadLink.length() != 0) {
+          for (int i = 0; i < nodes.length; i++) {
+            // NB: Unconditionally using the xfer addr w/o hostname
+            if (firstBadLink.equals(nodes[i].getXferAddr())) {
+              errorIndex = i;
+              break;
+            }
+          }
+        } else {
+          assert checkRestart == false;
+          errorIndex = 0;
+        }
+        // Check whether there is a restart worth waiting for.
+        if (checkRestart && shouldWaitForRestart(errorIndex)) {
+          restartDeadline = dfsClient.getConf().datanodeRestartTimeout
+              + Time.monotonicNow();
+          restartingNodeIndex.set(errorIndex);
+          errorIndex = -1;
+          DFSClient.LOG.info("Waiting for the datanode to be restarted: " +
+              nodes[restartingNodeIndex.get()]);
+        }
+        hasError = true;
+        setLastException(ie);
+        result =  false;  // error
+      } finally {
+        if (!result) {
+          IOUtils.closeSocket(s);
+          s = null;
+          IOUtils.closeStream(out);
+          out = null;
+          IOUtils.closeStream(blockReplyStream);
+          blockReplyStream = null;
+        }
+      }
+      return result;
+    }
+  }
+
+  private boolean[] getPinnings(DatanodeInfo[] nodes, boolean shouldLog) {
+    if (favoredNodes == null) {
+      return null;
+    } else {
+      boolean[] pinnings = new boolean[nodes.length];
+      HashSet<String> favoredSet =
+          new HashSet<String>(Arrays.asList(favoredNodes));
+      for (int i = 0; i < nodes.length; i++) {
+        pinnings[i] = favoredSet.remove(nodes[i].getXferAddrWithHostname());
+        if (DFSClient.LOG.isDebugEnabled()) {
+          DFSClient.LOG.debug(nodes[i].getXferAddrWithHostname() +
+              " was chosen by name node (favored=" + pinnings[i] +
+              ").");
+        }
+      }
+      if (shouldLog && !favoredSet.isEmpty()) {
+        // There is one or more favored nodes that were not allocated.
+        DFSClient.LOG.warn(
+            "These favored nodes were specified but not chosen: " +
+                favoredSet +
+                " Specified favored nodes: " + Arrays.toString(favoredNodes));
+
+      }
+      return pinnings;
+    }
+  }
+
+  private LocatedBlock locateFollowingBlock(DatanodeInfo[] excludedNodes)
+      throws IOException {
+    int retries = dfsClient.getConf().nBlockWriteLocateFollowingRetry;
+    long sleeptime = dfsClient.getConf().
+        blockWriteLocateFollowingInitialDelayMs;
+    while (true) {
+      long localstart = Time.monotonicNow();
+      while (true) {
+        try {
+          return dfsClient.namenode.addBlock(src, dfsClient.clientName,
+              block, excludedNodes, stat.getFileId(), favoredNodes);
+        } catch (RemoteException e) {
+          IOException ue =
+              e.unwrapRemoteException(FileNotFoundException.class,
+                  AccessControlException.class,
+                  NSQuotaExceededException.class,
+                  DSQuotaExceededException.class,
+                  UnresolvedPathException.class);
+          if (ue != e) {
+            throw ue; // no need to retry these exceptions
+          }
+
+
+          if (NotReplicatedYetException.class.getName().
+              equals(e.getClassName())) {
+            if (retries == 0) {
+              throw e;
+            } else {
+              --retries;
+              DFSClient.LOG.info("Exception while adding a block", e);
+              long elapsed = Time.monotonicNow() - localstart;
+              if (elapsed > 5000) {
+                DFSClient.LOG.info("Waiting for replication for "
+                    + (elapsed / 1000) + " seconds");
+              }
+              try {
+                DFSClient.LOG.warn("NotReplicatedYetException sleeping " + src
+                    + " retries left " + retries);
+                Thread.sleep(sleeptime);
+                sleeptime *= 2;
+              } catch (InterruptedException ie) {
+                DFSClient.LOG.warn("Caught exception ", ie);
+              }
+            }
+          } else {
+            throw e;
+          }
+
+        }
+      }
+    }
+  }
+
+  /**
+   * get the block this streamer is writing to
+   *
+   * @return the block this streamer is writing to
+   */
+  ExtendedBlock getBlock() {
+    return block;
+  }
+
+  /**
+   * return the target datanodes in the pipeline
+   *
+   * @return the target datanodes in the pipeline
+   */
+  DatanodeInfo[] getNodes() {
+    return nodes;
+  }
+
+  /**
+   * return the token of the block
+   *
+   * @return the token of the block
+   */
+  Token<BlockTokenIdentifier> getBlockToken() {
+    return accessToken;
+  }
+
+  /**
+   * set last exception
+   *
+   * @param e an exception
+   */
+  void setLastException(IOException e) {
+    lastException.compareAndSet(null, e);
+  }
+
+  /**
+   * Put a packet into the data queue
+   *
+   * @param packet the packet to be put into the data queue
+   */
+  void queuePacket(DFSPacket packet) {
+    synchronized (dataQueue) {
+      if (packet == null) return;
+      packet.addTraceParent(Trace.currentSpan());
+      dataQueue.addLast(packet);
+      lastQueuedSeqno = packet.getSeqno();
+      if (DFSClient.LOG.isDebugEnabled()) {
+        DFSClient.LOG.debug("Queued packet " + packet.getSeqno());
+      }
+      dataQueue.notifyAll();
+    }
+  }
+
+  /**
+   * For heartbeat packets, create the buffer directly with new byte[]
+   * since heartbeats should not be blocked.
+   */
+  private DFSPacket createHeartbeatPacket() throws InterruptedIOException {
+    final byte[] buf = new byte[PacketHeader.PKT_MAX_HEADER_LEN];
+    return new DFSPacket(buf, 0, 0, DFSPacket.HEART_BEAT_SEQNO, 0, false);
+  }
+
+  private LoadingCache<DatanodeInfo, DatanodeInfo> initExcludedNodes() {
+    return CacheBuilder.newBuilder().expireAfterWrite(
+        dfsClient.getConf().excludedNodesCacheExpiry, TimeUnit.MILLISECONDS)
+        .removalListener(new RemovalListener<DatanodeInfo, DatanodeInfo>() {
+          @Override
+          public void onRemoval(
+              RemovalNotification<DatanodeInfo, DatanodeInfo> notification) {
+            DFSClient.LOG.info("Removing node " + notification.getKey()
+                + " from the excluded nodes list");
+          }
+        }).build(new CacheLoader<DatanodeInfo, DatanodeInfo>() {
+          @Override
+          public DatanodeInfo load(DatanodeInfo key) throws Exception {
+            return key;
+          }
+        });
+  }
+
+  private static <T> void arraycopy(T[] srcs, T[] dsts, int skipIndex) {
+    System.arraycopy(srcs, 0, dsts, 0, skipIndex);
+    System.arraycopy(srcs, skipIndex+1, dsts, skipIndex, dsts.length-skipIndex);
+  }
+
+  /**
+   * check whether to persist blocks on the namenode
+   *
+   * @return whether to persist blocks on the namenode
+   */
+  AtomicBoolean getPersistBlocks() {
+    return persistBlocks;
+  }
+
+  /**
+   * set whether to append a chunk
+   *
+   * @param appendChunk whether to append a chunk
+   */
+  void setAppendChunk(boolean appendChunk) {
+    this.appendChunk = appendChunk;
+  }
+
+  /**
+   * get whether to append a chunk
+   *
+   * @return whether to append a chunk
+   */
+  boolean getAppendChunk() {
+    return appendChunk;
+  }
+
+  /**
+   * get the last exception
+   *
+   * @return the last exception
+   */
+  AtomicReference<IOException> getLastException(){
+    return lastException;
+  }
+
+  /**
+   * get the socket connecting to the first datanode in pipeline
+   *
+   * @return socket connecting to the first datanode in pipeline
+   */
+  Socket getSocket() {
+    return s;
+  }
+
+  /**
+   * set socket to null
+   */
+  void setSocketToNull() {
+    this.s = null;
+  }
+
+  /**
+   * return current sequence number and then increase it by 1
+   *
+   * @return current sequence number before increasing
+   */
+  long getAndIncCurrentSeqno() {
+    long old = this.currentSeqno;
+    this.currentSeqno++;
+    return old;
+  }
+
+  /**
+   * get last queued sequence number
+   *
+   * @return last queued sequence number
+   */
+  long getLastQueuedSeqno() {
+    return lastQueuedSeqno;
+  }
+
+  /**
+   * get the number of bytes of current block
+   *
+   * @return the number of bytes of current block
+   */
+  long getBytesCurBlock() {
+    return bytesCurBlock;
+  }
+
+  /**
+   * set the bytes of current block that have been written
+   *
+   * @param bytesCurBlock bytes of current block that have been written
+   */
+  void setBytesCurBlock(long bytesCurBlock) {
+    this.bytesCurBlock = bytesCurBlock;
+  }
+
+  /**
+   * increase bytes of current block by len.
+   *
+   * @param len how many bytes to add to the current block
+   */
+  void incBytesCurBlock(long len) {
+    this.bytesCurBlock += len;
+  }
+
+  /**
+   * set artificial slow down for unit test
+   *
+   * @param period artificial slow down
+   */
+  void setArtificialSlowdown(long period) {
+    this.artificialSlowdown = period;
+  }
+
+  /**
+   * whether this streamer is to terminate
+   *
+   * @return true if this streamer is to terminate
+   */
+  boolean streamerClosed() {
+    return streamerClosed;
+  }
+
+  void closeSocket() throws IOException {
+    if (s != null) {
+      s.close();
+    }
+  }
+}
\ No newline at end of file
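
For readers following the refactor: the run() loop above is a two-queue producer/consumer handoff. The writer thread enqueues packets on dataQueue, the streamer moves each packet to ackQueue once it has been written to the pipeline, and the ResponseProcessor retires it when the downstream datanodes ack it; every transition notifies on dataQueue. The code below is a minimal standalone sketch of that protocol only. It is not part of the patch; the TwoQueueSketch and Packet names are invented for illustration, and all pipeline setup, error recovery, tracing and heartbeat logic is omitted.

import java.util.LinkedList;

public class TwoQueueSketch {
  // Hypothetical stand-in for DFSPacket: just a sequence number.
  static class Packet {
    final long seqno;
    Packet(long seqno) { this.seqno = seqno; }
  }

  private final LinkedList<Packet> dataQueue = new LinkedList<Packet>();
  private final LinkedList<Packet> ackQueue = new LinkedList<Packet>();
  private boolean closed = false;
  private long lastAckedSeqno = -1;

  // Writer side: block while the two queues together reach the cap
  // (compare waitAndQueuePacket above).
  synchronized void queuePacket(Packet p, int maxPackets)
      throws InterruptedException {
    while (!closed && dataQueue.size() + ackQueue.size() >= maxPackets) {
      wait();
    }
    dataQueue.addLast(p);
    notifyAll();
  }

  // Streamer side: take the next packet to send and park it on the ack queue.
  synchronized Packet dequeueForSend() throws InterruptedException {
    while (!closed && dataQueue.isEmpty()) {
      wait(1000);   // in the real streamer this timeout drives heartbeat packets
    }
    if (dataQueue.isEmpty()) {
      return null;  // closed, or nothing to send yet
    }
    Packet p = dataQueue.removeFirst();
    ackQueue.addLast(p);
    notifyAll();
    return p;
  }

  // Responder side: a successful ack retires the head of the ack queue
  // and wakes anyone blocked in queuePacket() or waitForAcked().
  synchronized void ackReceived(long seqno) {
    if (!ackQueue.isEmpty() && ackQueue.getFirst().seqno == seqno) {
      ackQueue.removeFirst();
      lastAckedSeqno = seqno;
      notifyAll();
    }
  }

  // Flush side: block until the given seqno has been acked
  // (compare waitForAckedSeqno above).
  synchronized void waitForAcked(long seqno) throws InterruptedException {
    while (!closed && lastAckedSeqno < seqno) {
      wait(1000);
    }
  }

  synchronized void close() {
    closed = true;
    notifyAll();
  }
}

DataStreamer layers block allocation, pipeline recovery, tracing and heartbeats on top of this skeleton, but the locking discipline (always synchronize on the data queue and notifyAll after every queue transition) is the same.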

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb2eb773/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 493351b..5fc78d1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -905,7 +905,7 @@ public class DFSTestUtil {
   public static BlockOpResponseProto transferRbw(final ExtendedBlock b, 
       final DFSClient dfsClient, final DatanodeInfo... datanodes) throws IOException {
     assertEquals(2, datanodes.length);
-    final Socket s = DFSOutputStream.createSocketForPipeline(datanodes[0],
+    final Socket s = DataStreamer.createSocketForPipeline(datanodes[0],
         datanodes.length, dfsClient);
     final long writeTimeout = dfsClient.getDatanodeWriteTimeout(datanodes.length);
     final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb2eb773/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
index 7269e39..b47e7f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
@@ -51,8 +51,11 @@ public class TestDFSOutputStream {
     DFSOutputStream dos = (DFSOutputStream) Whitebox.getInternalState(os,
         "wrappedStream");
     @SuppressWarnings("unchecked")
+    DataStreamer streamer = (DataStreamer) Whitebox
+        .getInternalState(dos, "streamer");
+    @SuppressWarnings("unchecked")
     AtomicReference<IOException> ex = (AtomicReference<IOException>) Whitebox
-        .getInternalState(dos, "lastException");
+        .getInternalState(streamer, "lastException");
     Assert.assertEquals(null, ex.get());
 
     dos.close();
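
The TestDFSOutputStream hunk above now reads internal state two hops deep: first the private streamer field of the DFSOutputStream, then lastException on that streamer. The Whitebox utility does the heavy lifting in the test; where it is not available, the same two-hop read can be done with plain reflection. A rough sketch, with an InternalState helper that is illustrative only and not an API used by the test:

import java.lang.reflect.Field;

public final class InternalState {
  private InternalState() {}

  /** Read a private field from an object, walking up the class hierarchy. */
  @SuppressWarnings("unchecked")
  public static <T> T read(Object target, String fieldName)
      throws ReflectiveOperationException {
    for (Class<?> c = target.getClass(); c != null; c = c.getSuperclass()) {
      try {
        Field f = c.getDeclaredField(fieldName);
        f.setAccessible(true);
        return (T) f.get(target);
      } catch (NoSuchFieldException e) {
        // not declared on this class; keep walking up
      }
    }
    throw new NoSuchFieldException(fieldName);
  }
}

Usage would mirror the test: first read "streamer" from the output stream, then read "lastException" from the returned streamer object.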

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb2eb773/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
index e1c547b..fd916a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
@@ -43,6 +43,8 @@ import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.FileReader;
 import java.io.IOException;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.UnknownHostException;
@@ -603,7 +605,8 @@ public class TestFileCreation {
    * Test that file leases are persisted across namenode restarts.
    */
   @Test
-  public void testFileCreationNamenodeRestart() throws IOException {
+  public void testFileCreationNamenodeRestart()
+      throws IOException, NoSuchFieldException, IllegalAccessException {
     Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
@@ -702,11 +705,18 @@ public class TestFileCreation {
       // new blocks for files that were renamed.
       DFSOutputStream dfstream = (DFSOutputStream)
                                                  (stm.getWrappedStream());
-      dfstream.setTestFilename(file1.toString());
+
+      Field f = DFSOutputStream.class.getDeclaredField("src");
+      Field modifiersField = Field.class.getDeclaredField("modifiers");
+      modifiersField.setAccessible(true);
+      modifiersField.setInt(f, f.getModifiers() & ~Modifier.FINAL);
+      f.setAccessible(true);
+
+      f.set(dfstream, file1.toString());
       dfstream = (DFSOutputStream) (stm3.getWrappedStream());
-      dfstream.setTestFilename(file3new.toString());
+      f.set(dfstream, file3new.toString());
       dfstream = (DFSOutputStream) (stm4.getWrappedStream());
-      dfstream.setTestFilename(file4new.toString());
+      f.set(dfstream, file4new.toString());
 
       // write 1 byte to file.  This should succeed because the 
       // namenode should have persisted leases.
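
Since setTestFilename() is gone, the test rewrites the now-private final src field directly, using the long-standing reflection trick of clearing the FINAL bit on Field.modifiers before the write. Packaged as a helper it looks roughly like the sketch below; this is a sketch only, and it assumes an instance field (not a static compile-time constant) and a JDK that still exposes Field.modifiers, which Java 12 and later no longer do.

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;

public final class FinalFields {
  private FinalFields() {}

  /** Overwrite a private final instance field on the JDK 7/8 era this patch targets. */
  public static void set(Object target, String fieldName, Object value)
      throws ReflectiveOperationException {
    Field f = target.getClass().getDeclaredField(fieldName);
    f.setAccessible(true);

    // Clear the FINAL modifier so Field.set() does not reject the write.
    Field modifiers = Field.class.getDeclaredField("modifiers");
    modifiers.setAccessible(true);
    modifiers.setInt(f, f.getModifiers() & ~Modifier.FINAL);

    f.set(target, value);
  }
}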


[02/50] [abbrv] hadoop git commit: HDFS-7917. Use file to replace data dirs in test to simulate a disk failure. Contributed by Lei (Eddy) Xu.

Posted by zj...@apache.org.
HDFS-7917. Use file to replace data dirs in test to simulate a disk failure. Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62d47c25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62d47c25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62d47c25

Branch: refs/heads/YARN-2928
Commit: 62d47c251e663b4d97f6164eaa8f3324131034f6
Parents: 2bf393b
Author: cnauroth <cn...@apache.org>
Authored: Mon Mar 23 16:29:51 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:43 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../hdfs/server/datanode/DataNodeTestUtils.java | 61 +++++++++++++++++++-
 .../datanode/TestDataNodeHotSwapVolumes.java    | 29 ++++------
 .../datanode/TestDataNodeVolumeFailure.java     | 11 +---
 .../TestDataNodeVolumeFailureReporting.java     | 46 ++++-----------
 .../TestDataNodeVolumeFailureToleration.java    |  8 +--
 6 files changed, 88 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d47c25/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8c99876..b88b7e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -774,6 +774,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7962. Remove duplicated logs in BlockManager. (yliu)
 
+    HDFS-7917. Use file to replace data dirs in test to simulate a disk failure.
+    (Lei (Eddy) Xu via cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d47c25/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index fd51e52..f9a2ba1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -40,7 +40,9 @@ import com.google.common.base.Preconditions;
  * Utility class for accessing package-private DataNode information during tests.
  *
  */
-public class DataNodeTestUtils {  
+public class DataNodeTestUtils {
+  private static final String DIR_FAILURE_SUFFIX = ".origin";
+
   public static DatanodeRegistration 
   getDNRegistrationForBP(DataNode dn, String bpid) throws IOException {
     return dn.getDNRegistrationForBP(bpid);
@@ -159,4 +161,61 @@ public class DataNodeTestUtils {
       final String bpid, final long blkId) {
     return FsDatasetTestUtil.fetchReplicaInfo(dn.getFSDataset(), bpid, blkId);
   }
+
+  /**
+   * It injects disk failures to data dirs by replacing these data dirs with
+   * regular files.
+   *
+   * @param dirs data directories.
+   * @throws IOException on I/O error.
+   */
+  public static void injectDataDirFailure(File... dirs) throws IOException {
+    for (File dir : dirs) {
+      File renamedTo = new File(dir.getPath() + DIR_FAILURE_SUFFIX);
+      if (renamedTo.exists()) {
+        throw new IOException(String.format(
+            "Can not inject failure to dir: %s because %s exists.",
+            dir, renamedTo));
+      }
+      if (!dir.renameTo(renamedTo)) {
+        throw new IOException(String.format("Failed to rename %s to %s.",
+            dir, renamedTo));
+      }
+      if (!dir.createNewFile()) {
+        throw new IOException(String.format(
+            "Failed to create file %s to inject disk failure.", dir));
+      }
+    }
+  }
+
+  /**
+   * Restore the injected data dir failures.
+   *
+   * @see {@link #injectDataDirFailures}.
+   * @param dirs data directories.
+   * @throws IOException
+   */
+  public static void restoreDataDirFromFailure(File... dirs)
+      throws IOException {
+    for (File dir : dirs) {
+      File renamedDir = new File(dir.getPath() + DIR_FAILURE_SUFFIX);
+      if (renamedDir.exists()) {
+        if (dir.exists()) {
+          if (!dir.isFile()) {
+            throw new IOException(
+                "Injected failure data dir is supposed to be file: " + dir);
+          }
+          if (!dir.delete()) {
+            throw new IOException(
+                "Failed to delete injected failure data dir: " + dir);
+          }
+        }
+        if (!renamedDir.renameTo(dir)) {
+          throw new IOException(String.format(
+              "Failed to recover injected failure data dir %s to %s.",
+              renamedDir, dir));
+        }
+      }
+    }
+  }
 }
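
These helpers fake a dead disk with nothing but renames: the data directory is moved aside to <dir>.origin and an ordinary file is created in its place, so any attempt to use the path as a directory fails; restoreDataDirFromFailure undoes both steps. A standalone illustration of the same two moves follows; the build/tmp path and DiskFailureDemo class are made up for the demo, and real tests should call the DataNodeTestUtils methods instead.

import java.io.File;
import java.io.IOException;

public class DiskFailureDemo {
  public static void main(String[] args) throws IOException {
    File dir = new File("build/tmp/data1");
    if (!dir.isDirectory() && !dir.mkdirs()) {
      throw new IOException("could not create " + dir);
    }

    // Inject: move the directory aside and drop a plain file in its place,
    // essentially what injectDataDirFailure does (minus its sanity checks).
    File aside = new File(dir.getPath() + ".origin");
    if (!dir.renameTo(aside) || !dir.createNewFile()) {
      throw new IOException("failed to inject failure on " + dir);
    }
    System.out.println(dir + " is now a regular file: " + dir.isFile());

    // Restore: delete the stand-in file and move the original back,
    // mirroring restoreDataDirFromFailure.
    if (!dir.delete() || !aside.renameTo(dir)) {
      throw new IOException("failed to restore " + dir);
    }
    System.out.println(dir + " is a directory again: " + dir.isDirectory());
  }
}

A practical advantage over the chmod-based approach it replaces is that the rename still "fails" the directory even when the tests run as root, for whom permission bits are not enforced.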

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d47c25/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 8ab3dd2..2f51d45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockMissingException;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -682,26 +681,18 @@ public class TestDataNodeHotSwapVolumes {
         failedVolume != null);
     long used = failedVolume.getDfsUsed();
 
-    try {
-      assertTrue("Couldn't chmod local vol: " + dirToFail,
-          FileUtil.setExecutable(dirToFail, false));
-      // Call and wait DataNode to detect disk failure.
-      long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
-      dn.checkDiskErrorAsync();
-      while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
-        Thread.sleep(100);
-      }
-
-      createFile(new Path("/test1"), 32, (short)2);
-      assertEquals(used, failedVolume.getDfsUsed());
-    } finally {
-      // Need to restore the mode on dirToFail. Otherwise, if an Exception
-      // is thrown above, the following tests can not delete this data directory
-      // and thus fail to start MiniDFSCluster.
-      assertTrue("Couldn't restore executable for: " + dirToFail,
-          FileUtil.setExecutable(dirToFail, true));
+    DataNodeTestUtils.injectDataDirFailure(dirToFail);
+    // Call and wait DataNode to detect disk failure.
+    long lastDiskErrorCheck = dn.getLastDiskErrorCheck();
+    dn.checkDiskErrorAsync();
+    while (dn.getLastDiskErrorCheck() == lastDiskErrorCheck) {
+      Thread.sleep(100);
     }
 
+    createFile(new Path("/test1"), 32, (short)2);
+    assertEquals(used, failedVolume.getDfsUsed());
+
+    DataNodeTestUtils.restoreDataDirFromFailure(dirToFail);
     dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, oldDataDir);
 
     createFile(new Path("/test2"), 32, (short)2);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d47c25/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 9cbad6d..0428b81 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -121,10 +121,6 @@ public class TestDataNodeVolumeFailure {
     if(cluster != null) {
       cluster.shutdown();
     }
-    for (int i = 0; i < 3; i++) {
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
-    }
   }
   
   /*
@@ -159,7 +155,7 @@ public class TestDataNodeVolumeFailure {
         !deteteBlocks(failedDir)
         ) {
       throw new IOException("Could not delete hdfs directory '" + failedDir + "'");
-    }    
+    }
     data_fail.setReadOnly();
     failedDir.setReadOnly();
     System.out.println("Deleteing " + failedDir.getPath() + "; exist=" + failedDir.exists());
@@ -217,7 +213,7 @@ public class TestDataNodeVolumeFailure {
     DFSTestUtil.waitReplication(fs, file1, (short) 2);
 
     File dn0Vol1 = new File(dataDir, "data" + (2 * 0 + 1));
-    assertTrue(FileUtil.setExecutable(dn0Vol1, false));
+    DataNodeTestUtils.injectDataDirFailure(dn0Vol1);
     DataNode dn0 = cluster.getDataNodes().get(0);
     long lastDiskErrorCheck = dn0.getLastDiskErrorCheck();
     dn0.checkDiskErrorAsync();
@@ -291,8 +287,7 @@ public class TestDataNodeVolumeFailure {
     // Fail the first volume on both datanodes
     File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
 
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d47c25/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
index 9842f25..aac288a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
@@ -34,7 +34,6 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -87,19 +86,6 @@ public class TestDataNodeVolumeFailureReporting {
 
   @After
   public void tearDown() throws Exception {
-    // Restore executable permission on all directories where a failure may have
-    // been simulated by denying execute access.  This is based on the maximum
-    // number of datanodes and the maximum number of storages per data node used
-    // throughout the tests in this suite.
-    assumeTrue(!Path.WINDOWS);
-    int maxDataNodes = 3;
-    int maxStoragesPerDataNode = 4;
-    for (int i = 0; i < maxDataNodes; i++) {
-      for (int j = 1; j <= maxStoragesPerDataNode; j++) {
-        String subDir = "data" + ((i * maxStoragesPerDataNode) + j);
-        FileUtil.setExecutable(new File(dataDir, subDir), true);
-      }
-    }
     IOUtils.cleanup(LOG, fs);
     if (cluster != null) {
       cluster.shutdown();
@@ -141,8 +127,7 @@ public class TestDataNodeVolumeFailureReporting {
      * fail. The client does not retry failed nodes even though
      * perhaps they could succeed because just a single volume failed.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
 
     /*
      * Create file1 and wait for 3 replicas (ie all DNs can still
@@ -179,7 +164,7 @@ public class TestDataNodeVolumeFailureReporting {
      * Now fail a volume on the third datanode. We should be able to get
      * three replicas since we've already identified the other failures.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, false));
+    DataNodeTestUtils.injectDataDirFailure(dn3Vol1);
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)3);
@@ -208,7 +193,7 @@ public class TestDataNodeVolumeFailureReporting {
      * and that it's no longer up. Only wait for two replicas since
      * we'll never get a third.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, false));
+    DataNodeTestUtils.injectDataDirFailure(dn3Vol2);
     Path file3 = new Path("/test3");
     DFSTestUtil.createFile(fs, file3, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file3, (short)2);
@@ -233,10 +218,8 @@ public class TestDataNodeVolumeFailureReporting {
      * restart, so file creation should be able to succeed after
      * restoring the data directories and restarting the datanodes.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, true));
+    DataNodeTestUtils.restoreDataDirFromFailure(
+        dn1Vol1, dn2Vol1, dn3Vol1, dn3Vol2);
     cluster.restartDataNodes();
     cluster.waitActive();
     Path file4 = new Path("/test4");
@@ -275,8 +258,7 @@ public class TestDataNodeVolumeFailureReporting {
     // third healthy so one node in the pipeline will not fail). 
     File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn2Vol1);
 
     Path file1 = new Path("/test1");
     DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);
@@ -323,14 +305,7 @@ public class TestDataNodeVolumeFailureReporting {
 
     // Make the first two volume directories on the first two datanodes
     // non-accessible.
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1,
-        false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol2,
-        false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1,
-        false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol2,
-        false));
+    DataNodeTestUtils.injectDataDirFailure(dn1Vol1, dn1Vol2, dn2Vol1, dn2Vol2);
 
     // Create file1 and wait for 3 replicas (ie all DNs can still store a block).
     // Then assert that all DNs are up, despite the volume failures.
@@ -380,8 +355,8 @@ public class TestDataNodeVolumeFailureReporting {
     File dn1Vol2 = new File(dataDir, "data"+(2*0+2));
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
     File dn2Vol2 = new File(dataDir, "data"+(2*1+2));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    DataNodeTestUtils.injectDataDirFailure(dn1Vol1);
+    DataNodeTestUtils.injectDataDirFailure(dn2Vol1);
 
     Path file1 = new Path("/test1");
     DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);
@@ -449,8 +424,7 @@ public class TestDataNodeVolumeFailureReporting {
 
     // Replace failed volume with healthy volume and run reconfigure DataNode.
     // The failed volume information should be cleared.
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
+    DataNodeTestUtils.restoreDataDirFromFailure(dn1Vol1, dn2Vol1);
     reconfigureDataNode(dns.get(0), dn1Vol1, dn1Vol2);
     reconfigureDataNode(dns.get(1), dn2Vol1, dn2Vol2);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d47c25/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
index 73dc77c..5b7ac30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
@@ -76,10 +76,6 @@ public class TestDataNodeVolumeFailureToleration {
 
   @After
   public void tearDown() throws Exception {
-    for (int i = 0; i < 3; i++) {
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
-    }
     cluster.shutdown();
   }
 
@@ -152,7 +148,7 @@ public class TestDataNodeVolumeFailureToleration {
 
     // Fail a volume on the 2nd DN
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    DataNodeTestUtils.injectDataDirFailure(dn2Vol1);
 
     // Should only get two replicas (the first DN and the 3rd)
     Path file1 = new Path("/test1");
@@ -165,7 +161,7 @@ public class TestDataNodeVolumeFailureToleration {
 
     // If we restore the volume we should still only be able to get
     // two replicas since the DN is still considered dead.
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
+    DataNodeTestUtils.restoreDataDirFromFailure(dn2Vol1);
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)2);


[37/50] [abbrv] hadoop git commit: Addendum for HADOOP-10670.

Posted by zj...@apache.org.
Addendum for HADOOP-10670.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fbc48533
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fbc48533
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fbc48533

Branch: refs/heads/YARN-2928
Commit: fbc485331468e9a9486a6939d1a42273bb873d01
Parents: 8e1416a
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Mar 25 12:29:44 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:48 2015 -0700

----------------------------------------------------------------------
 .../util/FileSignerSecretProvider.java          | 84 ++++++++++++++++++++
 .../util/TestFileSignerSecretProvider.java      | 51 ++++++++++++
 2 files changed, 135 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc48533/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/FileSignerSecretProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/FileSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/FileSignerSecretProvider.java
new file mode 100644
index 0000000..e8aa160
--- /dev/null
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/FileSignerSecretProvider.java
@@ -0,0 +1,84 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import com.google.common.base.Charsets;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
+
+import javax.servlet.ServletContext;
+import java.io.*;
+import java.nio.charset.Charset;
+import java.util.Properties;
+
+/**
+ * A SignerSecretProvider that simply loads a secret from a specified file.
+ */
+@InterfaceStability.Unstable
+@InterfaceAudience.Private
+public class FileSignerSecretProvider extends SignerSecretProvider {
+
+  private byte[] secret;
+  private byte[][] secrets;
+
+  public FileSignerSecretProvider() {}
+
+  @Override
+  public void init(Properties config, ServletContext servletContext,
+                   long tokenValidity) throws Exception {
+
+    String signatureSecretFile = config.getProperty(
+        AuthenticationFilter.SIGNATURE_SECRET_FILE, null);
+
+    Reader reader = null;
+    if (signatureSecretFile != null) {
+      try {
+        StringBuilder sb = new StringBuilder();
+        reader = new InputStreamReader(
+            new FileInputStream(signatureSecretFile), Charsets.UTF_8);
+        int c = reader.read();
+        while (c > -1) {
+          sb.append((char) c);
+          c = reader.read();
+        }
+        secret = sb.toString().getBytes(Charset.forName("UTF-8"));
+      } catch (IOException ex) {
+        throw new RuntimeException("Could not read signature secret file: " +
+            signatureSecretFile);
+      } finally {
+        if (reader != null) {
+          try {
+            reader.close();
+          } catch (IOException e) {
+            // nothing to do
+          }
+        }
+      }
+    }
+
+    secrets = new byte[][]{secret};
+  }
+
+  @Override
+  public byte[] getCurrentSecret() {
+    return secret;
+  }
+
+  @Override
+  public byte[][] getAllSecrets() {
+    return secrets;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fbc48533/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestFileSignerSecretProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestFileSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestFileSignerSecretProvider.java
new file mode 100644
index 0000000..1856410
--- /dev/null
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestFileSignerSecretProvider.java
@@ -0,0 +1,51 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.FileWriter;
+import java.io.Writer;
+import java.util.Properties;
+
+public class TestFileSignerSecretProvider {
+
+  @Test
+  public void testGetSecrets() throws Exception {
+    File testDir = new File(System.getProperty("test.build.data",
+        "target/test-dir"));
+    testDir.mkdirs();
+    String secretValue = "hadoop";
+    File secretFile = new File(testDir, "http-secret.txt");
+    Writer writer = new FileWriter(secretFile);
+    writer.write(secretValue);
+    writer.close();
+
+    FileSignerSecretProvider secretProvider
+            = new FileSignerSecretProvider();
+    Properties secretProviderProps = new Properties();
+    secretProviderProps.setProperty(
+            AuthenticationFilter.SIGNATURE_SECRET_FILE,
+        secretFile.getAbsolutePath());
+    secretProvider.init(secretProviderProps, null, -1);
+    Assert.assertArrayEquals(secretValue.getBytes(),
+        secretProvider.getCurrentSecret());
+    byte[][] allSecrets = secretProvider.getAllSecrets();
+    Assert.assertEquals(1, allSecrets.length);
+    Assert.assertArrayEquals(secretValue.getBytes(), allSecrets[0]);
+  }
+}


[19/50] [abbrv] hadoop git commit: MAPREDUCE-6285. ClientServiceDelegate should not retry upon AuthenticationException. Contributed by Jonathan Eagles.

Posted by zj...@apache.org.
MAPREDUCE-6285. ClientServiceDelegate should not retry upon AuthenticationException. Contributed by Jonathan Eagles.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5934949f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5934949f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5934949f

Branch: refs/heads/YARN-2928
Commit: 5934949f3343c387b9c58044b5b7517f8f3c6162
Parents: f070575
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Mar 25 00:56:26 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:45 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 ++
 .../hadoop/mapred/ClientServiceDelegate.java    |  6 +++
 .../mapred/TestClientServiceDelegate.java       | 44 ++++++++++++++++++++
 3 files changed, 53 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5934949f/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index b8a2a1c..2b16c30 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -496,6 +496,9 @@ Release 2.7.0 - UNRELEASED
     MAPREDUCE-6275. Race condition in FileOutputCommitter v2 for
     user-specified task output subdirs (Gera Shegalov and Siqi Li via jlowe)
 
+    MAPREDUCE-6285. ClientServiceDelegate should not retry upon
+    AuthenticationException. (Jonathan Eagles via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5934949f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index 686fa0c..8517c19 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -64,6 +64,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -328,6 +329,11 @@ public class ClientServiceDelegate {
         // Force reconnection by setting the proxy to null.
         realProxy = null;
         // HS/AMS shut down
+
+        if (e.getCause() instanceof AuthorizationException) {
+          throw new IOException(e.getTargetException());
+        }
+
         // if it's AM shut down, do not decrement maxClientRetry as we wait for
         // AM to be restarted.
         if (!usingAMProxy.get()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5934949f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
index 7d6b2f3..b85f18d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.Counters;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
@@ -183,6 +184,49 @@ public class TestClientServiceDelegate {
   }
 
   @Test
+  public void testNoRetryOnAMAuthorizationException() throws Exception {
+    if (!isAMReachableFromClient) {
+      return;
+    }
+
+    ResourceMgrDelegate rm = mock(ResourceMgrDelegate.class);
+    when(rm.getApplicationReport(TypeConverter.toYarn(oldJobId).getAppId()))
+      .thenReturn(getRunningApplicationReport("am1", 78));
+
+    // throw authorization exception on first invocation
+    final MRClientProtocol amProxy = mock(MRClientProtocol.class);
+    when(amProxy.getJobReport(any(GetJobReportRequest.class)))
+      .thenThrow(new AuthorizationException("Denied"));
+    Configuration conf = new YarnConfiguration();
+    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
+    conf.setBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED,
+      !isAMReachableFromClient);
+    ClientServiceDelegate clientServiceDelegate =
+        new ClientServiceDelegate(conf, rm, oldJobId, null) {
+          @Override
+          MRClientProtocol instantiateAMProxy(
+              final InetSocketAddress serviceAddr) throws IOException {
+            super.instantiateAMProxy(serviceAddr);
+            return amProxy;
+          }
+        };
+
+    try {
+      clientServiceDelegate.getJobStatus(oldJobId);
+      Assert.fail("Exception should be thrown upon AuthorizationException");
+    } catch (IOException e) {
+      Assert.assertEquals(AuthorizationException.class.getName() + ": Denied",
+          e.getMessage());
+    }
+
+    // assert maxClientRetry is not decremented.
+    Assert.assertEquals(conf.getInt(MRJobConfig.MR_CLIENT_MAX_RETRIES,
+      MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES), clientServiceDelegate
+      .getMaxClientRetry());
+    verify(amProxy, times(1)).getJobReport(any(GetJobReportRequest.class));
+  }
+
+  @Test
   public void testHistoryServerNotConfigured() throws Exception {
     //RM doesn't have app report and job History Server is not configured
     ClientServiceDelegate clientServiceDelegate = getClientServiceDelegate(


[38/50] [abbrv] hadoop git commit: HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes a lot of time if disks are busy. Contributed by Rushabh Shah.

Posted by zj...@apache.org.
HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes a lot of time if disks are busy. Contributed by Rushabh Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19bc19e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19bc19e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19bc19e8

Branch: refs/heads/YARN-2928
Commit: 19bc19e82b31403eccdd9232fba3b94df01eebdc
Parents: fbc4853
Author: Kihwal Lee <ki...@apache.org>
Authored: Wed Mar 25 14:42:28 2015 -0500
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:48 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../hadoop/hdfs/protocol/BlockListAsLongs.java  |  37 +++
 .../hadoop/hdfs/server/datanode/DataNode.java   |   7 +-
 .../datanode/fsdataset/impl/BlockPoolSlice.java | 268 ++++++++++++++-----
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   3 +-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |   8 +-
 .../datanode/fsdataset/impl/FsVolumeList.java   |   7 +-
 .../apache/hadoop/hdfs/UpgradeUtilities.java    |   5 +-
 .../hdfs/server/datanode/DataNodeTestUtils.java |   7 +
 .../fsdataset/impl/TestWriteToReplica.java      | 152 +++++++++++
 .../namenode/TestListCorruptFileBlocks.java     |   6 +
 .../namenode/TestProcessCorruptBlocks.java      |   3 +
 .../ha/TestPendingCorruptDnMessages.java        |   3 +
 13 files changed, 430 insertions(+), 79 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8f1d5fc..62c2f91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -339,6 +339,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-7713. Implement mkdirs in the HDFS Web UI. (Ravi Prakash via wheat9)
 
+    HDFS-7928. Scanning blocks from disk during rolling upgrade startup takes
+    a lot of time if disks are busy (Rushabh S Shah via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
index 1c89ee4..834546b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
@@ -33,6 +35,7 @@ import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
 import com.google.protobuf.CodedOutputStream;
+import com.google.protobuf.WireFormat;
 
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -108,6 +111,40 @@ public abstract class BlockListAsLongs implements Iterable<BlockReportReplica> {
     return builder.build();
   }
 
+  public static BlockListAsLongs readFrom(InputStream is) throws IOException {
+    CodedInputStream cis = CodedInputStream.newInstance(is);
+    int numBlocks = -1;
+    ByteString blocksBuf = null;
+    while (!cis.isAtEnd()) {
+      int tag = cis.readTag();
+      int field = WireFormat.getTagFieldNumber(tag);
+      switch(field) {
+        case 0:
+          break;
+        case 1:
+          numBlocks = (int)cis.readInt32();
+          break;
+        case 2:
+          blocksBuf = cis.readBytes();
+          break;
+        default:
+          cis.skipField(tag);
+          break;
+      }
+    }
+    if (numBlocks != -1 && blocksBuf != null) {
+      return decodeBuffer(numBlocks, blocksBuf);
+    }
+    return null;
+  }
+
+  public void writeTo(OutputStream os) throws IOException {
+    CodedOutputStream cos = CodedOutputStream.newInstance(os);
+    cos.writeInt32(1, getNumberOfBlocks());
+    cos.writeBytes(2, getBlocksBuffer());
+    cos.flush();
+  }
+  
   public static Builder builder() {
     return new BlockListAsLongs.Builder();
   }

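As a hedged illustration of the new serialization hooks (not from the patch), the following round trip uses only the methods shown above -- writeTo, readFrom and getNumberOfBlocks -- and assumes an existing BlockListAsLongs named blocks plus a target java.io.File named cacheFile, with the usual java.io imports in scope.

    // Persist a block report and read it back; readFrom returns null when the
    // stream does not contain both the block count and the packed block buffer.
    try (OutputStream os = new FileOutputStream(cacheFile)) {
      blocks.writeTo(os);
    }
    BlockListAsLongs restored;
    try (InputStream is = new FileInputStream(cacheFile)) {
      restored = BlockListAsLongs.readFrom(is);
    }
    if (restored == null
        || restored.getNumberOfBlocks() != blocks.getNumberOfBlocks()) {
      throw new IllegalStateException("Round trip lost the block report");
    }
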
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index d94375e..3368124 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -41,8 +41,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMOR
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY;
@@ -159,6 +157,7 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@@ -2500,6 +2499,10 @@ public class DataNode extends ReconfigurableBase
     return blockScanner;
   }
 
+  @VisibleForTesting
+  DirectoryScanner getDirectoryScanner() {
+    return directoryScanner;
+  }
 
   public static void secureMain(String args[], SecureResources resources) {
     int errorCode = 0;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index 5a69e1e..6daf039 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -23,12 +23,12 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
-import java.io.FileWriter;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStreamWriter;
 import java.io.RandomAccessFile;
 import java.io.Writer;
+import java.util.Iterator;
 import java.util.Scanner;
 
 import org.apache.commons.io.FileUtils;
@@ -39,6 +39,8 @@ import org.apache.hadoop.fs.DU;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportReplica;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
@@ -55,6 +57,7 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.Time;
 
+import com.google.common.io.Files;
 /**
  * A block pool slice represents a portion of a block pool stored on a volume.  
  * Taken together, all BlockPoolSlices sharing a block pool ID across a 
@@ -77,7 +80,9 @@ class BlockPoolSlice {
   private volatile boolean dfsUsedSaved = false;
   private static final int SHUTDOWN_HOOK_PRIORITY = 30;
   private final boolean deleteDuplicateReplicas;
-  
+  private static final String REPLICA_CACHE_FILE = "replicas";
+  private final long replicaCacheExpiry = 5*60*1000;
+
   // TODO:FEDERATION scalability issue - a thread per DU is needed
   private final DU dfsUsage;
 
@@ -310,11 +315,14 @@ class BlockPoolSlice {
       FsDatasetImpl.LOG.info(
           "Recovered " + numRecovered + " replicas from " + lazypersistDir);
     }
-
-    // add finalized replicas
-    addToReplicasMap(volumeMap, finalizedDir, lazyWriteReplicaMap, true);
-    // add rbw replicas
-    addToReplicasMap(volumeMap, rbwDir, lazyWriteReplicaMap, false);
+    
+    boolean  success = readReplicasFromCache(volumeMap, lazyWriteReplicaMap);
+    if (!success) {
+      // add finalized replicas
+      addToReplicasMap(volumeMap, finalizedDir, lazyWriteReplicaMap, true);
+      // add rbw replicas
+      addToReplicasMap(volumeMap, rbwDir, lazyWriteReplicaMap, false);
+    }
   }
 
   /**
@@ -401,6 +409,75 @@ class BlockPoolSlice {
     FileUtil.fullyDelete(source);
     return numRecovered;
   }
+  
+  private void addReplicaToReplicasMap(Block block, ReplicaMap volumeMap,
+      final RamDiskReplicaTracker lazyWriteReplicaMap, boolean isFinalized)
+      throws IOException {
+    ReplicaInfo newReplica = null;
+    long blockId = block.getBlockId();
+    long genStamp = block.getGenerationStamp();
+    if (isFinalized) {
+      newReplica = new FinalizedReplica(blockId, 
+          block.getNumBytes(), genStamp, volume, DatanodeUtil
+          .idToBlockDir(finalizedDir, blockId));
+    } else {
+      File file = new File(rbwDir, block.getBlockName());
+      boolean loadRwr = true;
+      File restartMeta = new File(file.getParent()  +
+          File.pathSeparator + "." + file.getName() + ".restart");
+      Scanner sc = null;
+      try {
+        sc = new Scanner(restartMeta, "UTF-8");
+        // The restart meta file exists
+        if (sc.hasNextLong() && (sc.nextLong() > Time.now())) {
+          // It didn't expire. Load the replica as a RBW.
+          // We don't know the expected block length, so just use 0
+          // and don't reserve any more space for writes.
+          newReplica = new ReplicaBeingWritten(blockId,
+              validateIntegrityAndSetLength(file, genStamp), 
+              genStamp, volume, file.getParentFile(), null, 0);
+          loadRwr = false;
+        }
+        sc.close();
+        if (!restartMeta.delete()) {
+          FsDatasetImpl.LOG.warn("Failed to delete restart meta file: " +
+              restartMeta.getPath());
+        }
+      } catch (FileNotFoundException fnfe) {
+        // nothing to do here
+      } finally {
+        if (sc != null) {
+          sc.close();
+        }
+      }
+      // Restart meta doesn't exist or expired.
+      if (loadRwr) {
+        newReplica = new ReplicaWaitingToBeRecovered(blockId,
+            validateIntegrityAndSetLength(file, genStamp),
+            genStamp, volume, file.getParentFile());
+      }
+    }
+
+    ReplicaInfo oldReplica = volumeMap.get(bpid, newReplica.getBlockId());
+    if (oldReplica == null) {
+      volumeMap.add(bpid, newReplica);
+    } else {
+      // We have multiple replicas of the same block so decide which one
+      // to keep.
+      newReplica = resolveDuplicateReplicas(newReplica, oldReplica, volumeMap);
+    }
+
+    // If we are retaining a replica on transient storage make sure
+    // it is in the lazyWriteReplicaMap so it can be persisted
+    // eventually.
+    if (newReplica.getVolume().isTransientStorage()) {
+      lazyWriteReplicaMap.addReplica(bpid, blockId,
+          (FsVolumeImpl) newReplica.getVolume());
+    } else {
+      lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
+    }
+  }
+  
 
   /**
    * Add replicas under the given directory to the volume map
@@ -434,66 +511,9 @@ class BlockPoolSlice {
       long genStamp = FsDatasetUtil.getGenerationStampFromFile(
           files, file);
       long blockId = Block.filename2id(file.getName());
-      ReplicaInfo newReplica = null;
-      if (isFinalized) {
-        newReplica = new FinalizedReplica(blockId, 
-            file.length(), genStamp, volume, file.getParentFile());
-      } else {
-
-        boolean loadRwr = true;
-        File restartMeta = new File(file.getParent()  +
-            File.pathSeparator + "." + file.getName() + ".restart");
-        Scanner sc = null;
-        try {
-          sc = new Scanner(restartMeta, "UTF-8");
-          // The restart meta file exists
-          if (sc.hasNextLong() && (sc.nextLong() > Time.now())) {
-            // It didn't expire. Load the replica as a RBW.
-            // We don't know the expected block length, so just use 0
-            // and don't reserve any more space for writes.
-            newReplica = new ReplicaBeingWritten(blockId,
-                validateIntegrityAndSetLength(file, genStamp),
-                genStamp, volume, file.getParentFile(), null, 0);
-            loadRwr = false;
-          }
-          sc.close();
-          if (!restartMeta.delete()) {
-            FsDatasetImpl.LOG.warn("Failed to delete restart meta file: " +
-              restartMeta.getPath());
-          }
-        } catch (FileNotFoundException fnfe) {
-          // nothing to do hereFile dir =
-        } finally {
-          if (sc != null) {
-            sc.close();
-          }
-        }
-        // Restart meta doesn't exist or expired.
-        if (loadRwr) {
-          newReplica = new ReplicaWaitingToBeRecovered(blockId,
-              validateIntegrityAndSetLength(file, genStamp),
-              genStamp, volume, file.getParentFile());
-        }
-      }
-
-      ReplicaInfo oldReplica = volumeMap.get(bpid, newReplica.getBlockId());
-      if (oldReplica == null) {
-        volumeMap.add(bpid, newReplica);
-      } else {
-        // We have multiple replicas of the same block so decide which one
-        // to keep.
-        newReplica = resolveDuplicateReplicas(newReplica, oldReplica, volumeMap);
-      }
-
-      // If we are retaining a replica on transient storage make sure
-      // it is in the lazyWriteReplicaMap so it can be persisted
-      // eventually.
-      if (newReplica.getVolume().isTransientStorage()) {
-        lazyWriteReplicaMap.addReplica(bpid, blockId,
-                                       (FsVolumeImpl) newReplica.getVolume());
-      } else {
-        lazyWriteReplicaMap.discardReplica(bpid, blockId, false);
-      }
+      Block block = new Block(blockId, file.length(), genStamp); 
+      addReplicaToReplicasMap(block, volumeMap, lazyWriteReplicaMap, 
+          isFinalized);
     }
   }
 
@@ -649,9 +669,121 @@ class BlockPoolSlice {
     return currentDir.getAbsolutePath();
   }
   
-  void shutdown() {
+  void shutdown(BlockListAsLongs blocksListToPersist) {
+    saveReplicas(blocksListToPersist);
     saveDfsUsed();
     dfsUsedSaved = true;
     dfsUsage.shutdown();
   }
+
+  private boolean readReplicasFromCache(ReplicaMap volumeMap,
+      final RamDiskReplicaTracker lazyWriteReplicaMap) {
+    ReplicaMap tmpReplicaMap = new ReplicaMap(this);
+    File replicaFile = new File(currentDir, REPLICA_CACHE_FILE);
+    // Check whether the file exists or not.
+    if (!replicaFile.exists()) {
+      LOG.info("Replica Cache file: "+  replicaFile.getPath() + 
+          " doesn't exist ");
+      return false;
+    }
+    long fileLastModifiedTime = replicaFile.lastModified();
+    if (System.currentTimeMillis() > fileLastModifiedTime + replicaCacheExpiry) {
+      LOG.info("Replica Cache file: " + replicaFile.getPath() + 
+          " has gone stale");
+      // Just to make findbugs happy
+      if (!replicaFile.delete()) {
+        LOG.info("Replica Cache file: " + replicaFile.getPath() + 
+            " cannot be deleted");
+      }
+      return false;
+    }
+    FileInputStream inputStream = null;
+    try {
+      inputStream = new FileInputStream(replicaFile);
+      BlockListAsLongs blocksList =  BlockListAsLongs.readFrom(inputStream);
+      Iterator<BlockReportReplica> iterator = blocksList.iterator();
+      while (iterator.hasNext()) {
+        BlockReportReplica replica = iterator.next();
+        switch (replica.getState()) {
+        case FINALIZED:
+          addReplicaToReplicasMap(replica, tmpReplicaMap, lazyWriteReplicaMap, true);
+          break;
+        case RUR:
+        case RBW:
+        case RWR:
+          addReplicaToReplicasMap(replica, tmpReplicaMap, lazyWriteReplicaMap, false);
+          break;
+        default:
+          break;
+        }
+      }
+      inputStream.close();
+      // Now it is safe to add the replica into volumeMap
+      // In case of any exception during parsing this cache file, fall back
+      // to scan all the files on disk.
+      for (ReplicaInfo info: tmpReplicaMap.replicas(bpid)) {
+        volumeMap.add(bpid, info);
+      }
+      LOG.info("Successfully read replica from cache file : " 
+          + replicaFile.getPath());
+      return true;
+    } catch (Exception e) {
+      // Any exception we need to revert back to read from disk
+      // Log the error and return false
+      LOG.info("Exception occured while reading the replicas cache file: "
+          + replicaFile.getPath(), e );
+      return false;
+    }
+    finally {
+      if (!replicaFile.delete()) {
+        LOG.info("Failed to delete replica cache file: " +
+            replicaFile.getPath());
+      }
+      // close the inputStream
+      IOUtils.closeStream(inputStream);
+    }
+  } 
+  
+  private void saveReplicas(BlockListAsLongs blocksListToPersist) {
+    if (blocksListToPersist == null || 
+        blocksListToPersist.getNumberOfBlocks() == 0) {
+      return;
+    }
+    File tmpFile = new File(currentDir, REPLICA_CACHE_FILE + ".tmp");
+    if (tmpFile.exists() && !tmpFile.delete()) {
+      LOG.warn("Failed to delete tmp replicas file in " +
+        tmpFile.getPath());
+      return;
+    }
+    File replicaCacheFile = new File(currentDir, REPLICA_CACHE_FILE);
+    if (replicaCacheFile.exists() && !replicaCacheFile.delete()) {
+      LOG.warn("Failed to delete replicas file in " +
+          replicaCacheFile.getPath());
+      return;
+    }
+    
+    FileOutputStream out = null;
+    try {
+      out = new FileOutputStream(tmpFile);
+      blocksListToPersist.writeTo(out);
+      out.close();
+      // Renaming the tmp file to replicas
+      Files.move(tmpFile, replicaCacheFile);
+    } catch (Exception e) {
+      // If write failed, the volume might be bad. Since the cache file is
+      // not critical, log the error, delete both the files (tmp and cache)
+      // and continue.
+      LOG.warn("Failed to write replicas to cache ", e);
+      if (replicaCacheFile.exists() && !replicaCacheFile.delete()) {
+        LOG.warn("Failed to delete replicas file: " + 
+            replicaCacheFile.getPath());
+      }
+    } finally {
+      IOUtils.closeStream(out);
+      if (tmpFile.exists() && !tmpFile.delete()) {
+        LOG.warn("Failed to delete tmp file in " +
+            tmpFile.getPath());
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 05c4871..cf471ca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2463,8 +2463,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   @Override
   public synchronized void shutdownBlockPool(String bpid) {
     LOG.info("Removing block pool " + bpid);
+    Map<DatanodeStorage, BlockListAsLongs> blocksPerVolume =  getBlockReports(bpid);
     volumeMap.cleanUpBlockPool(bpid);
-    volumes.removeBlockPool(bpid);
+    volumes.removeBlockPool(bpid, blocksPerVolume);
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 23efbdf..4dbc7f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
@@ -65,7 +66,6 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.util.Time;
 import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.ObjectReader;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -805,7 +805,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
     }
     Set<Entry<String, BlockPoolSlice>> set = bpSlices.entrySet();
     for (Entry<String, BlockPoolSlice> entry : set) {
-      entry.getValue().shutdown();
+      entry.getValue().shutdown(null);
     }
   }
 
@@ -815,10 +815,10 @@ public class FsVolumeImpl implements FsVolumeSpi {
     bpSlices.put(bpid, bp);
   }
   
-  void shutdownBlockPool(String bpid) {
+  void shutdownBlockPool(String bpid, BlockListAsLongs blocksListsAsLongs) {
     BlockPoolSlice bp = bpSlices.get(bpid);
     if (bp != null) {
-      bp.shutdown();
+      bp.shutdown(blocksListsAsLongs);
     }
     bpSlices.remove(bpid);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index a5611c5..4fddfb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -35,10 +35,12 @@ import java.util.concurrent.atomic.AtomicReference;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.Time;
 
@@ -428,9 +430,10 @@ class FsVolumeList {
         bpid + ": " + totalTimeTaken + "ms");
   }
   
-  void removeBlockPool(String bpid) {
+  void removeBlockPool(String bpid, Map<DatanodeStorage, BlockListAsLongs>
+      blocksPerVolume) {
     for (FsVolumeImpl v : volumes.get()) {
-      v.shutdownBlockPool(bpid);
+      v.shutdownBlockPool(bpid, blocksPerVolume.get(v.toDatanodeStorage()));
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index 2e5348e..e9891bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -304,10 +304,11 @@ public class UpgradeUtilities {
         continue;
       }
 
-      // skip VERSION and dfsUsed file for DataNodes
+      // skip VERSION and dfsUsed and replicas file for DataNodes
       if (nodeType == DATA_NODE &&
           (list[i].getName().equals("VERSION") ||
-              list[i].getName().equals("dfsUsed"))) {
+              list[i].getName().equals("dfsUsed") ||
+              list[i].getName().equals("replicas"))) {
         continue;
       }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
index f9a2ba1..9dee724 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
@@ -218,4 +218,11 @@ public class DataNodeTestUtils {
       }
     }
   }
+  
+  public static void runDirectoryScanner(DataNode dn) throws IOException {
+    DirectoryScanner directoryScanner = dn.getDirectoryScanner();
+    if (directoryScanner != null) {
+      dn.getDirectoryScanner().reconcile();
+    }
+  }
 }

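A rough usage sketch (assumption: a running MiniDFSCluster named cluster) of the new test hook; it simply forces the directory scanner to reconcile on-disk replicas with the in-memory map, and is a no-op when the scanner is disabled.

    // Hypothetical test fragment; runDirectoryScanner is declared to throw IOException.
    DataNode dn = cluster.getDataNodes().get(0);
    DataNodeTestUtils.runDirectoryScanner(dn);
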
http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
index 9325cdc..96a73c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestWriteToReplica.java
@@ -17,14 +17,25 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
+import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.FinalizedReplica;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaAlreadyExistsException;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
@@ -34,6 +45,7 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaUnderRecovery;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaWaitingToBeRecovered;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.junit.Assert;
 import org.junit.Test;
@@ -501,4 +513,144 @@ public class TestWriteToReplica {
           + "genstamp and replaced it with the newer one: " + blocks[NON_EXISTENT]);
     }
   }
+  
+  /**
+   * Test that the replica map is preserved across a quick datanode restart
+   * (less than 5 minutes).
+   * @throws Exception
+   */
+  @Test
+  public void testReplicaMapAfterDatanodeRestart() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
+        .build();
+    try {
+      cluster.waitActive();
+      NameNode nn1 = cluster.getNameNode(0);
+      NameNode nn2 = cluster.getNameNode(1);
+      assertNotNull("cannot create nn1", nn1);
+      assertNotNull("cannot create nn2", nn2);
+      
+      // check number of volumes in fsdataset
+      DataNode dn = cluster.getDataNodes().get(0);
+      FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.
+          getFSDataset(dn);
+      ReplicaMap replicaMap = dataSet.volumeMap;
+      
+      List<FsVolumeImpl> volumes = dataSet.getVolumes();
+      // number of volumes should be 2 - [data1, data2]
+      assertEquals("number of volumes is wrong", 2, volumes.size());
+      ArrayList<String> bpList = new ArrayList<String>(Arrays.asList(
+          cluster.getNamesystem(0).getBlockPoolId(), 
+          cluster.getNamesystem(1).getBlockPoolId()));
+      
+      Assert.assertTrue("Cluster should have 2 block pools", 
+          bpList.size() == 2);
+      
+      createReplicas(bpList, volumes, replicaMap);
+      ReplicaMap oldReplicaMap = new ReplicaMap(this);
+      oldReplicaMap.addAll(replicaMap);
+      
+      cluster.restartDataNode(0);
+      cluster.waitActive();
+      dn = cluster.getDataNodes().get(0);
+      dataSet = (FsDatasetImpl) dn.getFSDataset();
+      testEqualityOfReplicaMap(oldReplicaMap, dataSet.volumeMap, bpList);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  
+  /**
+   * Compare the replica map before and after the restart
+   **/
+  private void testEqualityOfReplicaMap(ReplicaMap oldReplicaMap, ReplicaMap 
+      newReplicaMap, List<String> bpidList) {
+    // Traverse the new replica map and remove the corresponding
+    // replicaInfo from oldReplicaMap.
+    for (String bpid: bpidList) {
+      for (ReplicaInfo info: newReplicaMap.replicas(bpid)) {
+        assertNotNull("Volume map before restart didn't contain the "
+            + "blockpool: " + bpid, oldReplicaMap.replicas(bpid));
+        
+        ReplicaInfo oldReplicaInfo = oldReplicaMap.get(bpid, 
+            info.getBlockId());
+        // Every replica present in the volume map after the restart must
+        // also have been present in the volume map before the restart.
+        assertNotNull("Old Replica Map didn't contain block with blockId: " +
+            info.getBlockId(), oldReplicaInfo);
+        
+        ReplicaState oldState = oldReplicaInfo.getState();
+        // After a restart, all RWR, RBW and RUR replicas get converted
+        // to RWR.
+        if (info.getState() == ReplicaState.RWR) {
+           if (oldState == ReplicaState.RWR || oldState == ReplicaState.RBW 
+               || oldState == ReplicaState.RUR) {
+             oldReplicaMap.remove(bpid, oldReplicaInfo);
+           }
+        } else if (info.getState() == ReplicaState.FINALIZED && 
+            oldState == ReplicaState.FINALIZED) {
+          oldReplicaMap.remove(bpid, oldReplicaInfo);
+        }
+      }
+    }
+    
+    // ReplicaInPipeline (TEMPORARY) replicas are not persisted across restarts,
+    // so any non-TEMPORARY replica left in the old map at this point was lost
+    // by the restart.
+    for (String bpid: bpidList) {
+      for (ReplicaInfo replicaInfo: oldReplicaMap.replicas(bpid)) {
+        if (replicaInfo.getState() != ReplicaState.TEMPORARY) {
+          Assert.fail("After datanode restart we lost the block with blockId: "
+              +  replicaInfo.getBlockId());
+        }
+      }
+    }
+  }
+
+  private void createReplicas(List<String> bpList, List<FsVolumeImpl> volumes,
+      ReplicaMap volumeMap) throws IOException {
+    Assert.assertTrue("Volume map can't be null", volumeMap != null);
+    
+    // Create one replica of every type and add it to the volume map,
+    // for each block pool on each volume.
+    long id = 1; // This variable is used as both blockId and genStamp
+    for (String bpId: bpList) {
+      for (FsVolumeImpl volume: volumes) {
+        ReplicaInfo finalizedReplica = new FinalizedReplica(id, 1, id, volume,
+            DatanodeUtil.idToBlockDir(volume.getFinalizedDir(bpId), id));
+        volumeMap.add(bpId, finalizedReplica);
+        id++;
+        
+        ReplicaInfo rbwReplica = new ReplicaBeingWritten(id, 1, id, volume, 
+            volume.getRbwDir(bpId), null, 100);
+        volumeMap.add(bpId, rbwReplica);
+        id++;
+
+        ReplicaInfo rwrReplica = new ReplicaWaitingToBeRecovered(id, 1, id, 
+            volume, volume.getRbwDir(bpId));
+        volumeMap.add(bpId, rwrReplica);
+        id++;
+        
+        ReplicaInfo ripReplica = new ReplicaInPipeline(id, id, volume, 
+            volume.getTmpDir(bpId), 0);
+        volumeMap.add(bpId, ripReplica);
+        id++;
+      }
+    }
+    
+    for (String bpId: bpList) {
+      for (ReplicaInfo replicaInfo: volumeMap.replicas(bpId)) {
+        File parentFile = replicaInfo.getBlockFile().getParentFile();
+        if (!parentFile.exists()) {
+          if (!parentFile.mkdirs()) {
+            throw new IOException("Failed to mkdirs " + parentFile);
+          }
+        }
+        replicaInfo.getBlockFile().createNewFile();
+        replicaInfo.getMetaFile().createNewFile();
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index 92ea111..5d319b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 import org.slf4j.Logger;
@@ -483,6 +485,10 @@ public class TestListCorruptFileBlocks {
         }
       }
 
+      // Run the DirectoryScanner to update the DataNode's volumeMap
+      DataNode dn = cluster.getDataNodes().get(0);
+      DataNodeTestUtils.runDirectoryScanner(dn);
+
       // Occasionally the BlockPoolSliceScanner can run before we have removed
       // the blocks. Restart the Datanode to trigger the scanner into running
       // once more.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
index 168ebb9..37abc5b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.junit.Test;
 
 public class TestProcessCorruptBlocks {
@@ -269,6 +270,8 @@ public class TestProcessCorruptBlocks {
     // But the datadirectory will not change
     assertTrue(cluster.corruptReplica(dnIndex, block));
 
+    // Run directory scanner to update the DN's volume map  
+    DataNodeTestUtils.runDirectoryScanner(cluster.getDataNodes().get(0));
     DataNodeProperties dnProps = cluster.stopDataNode(0);
 
     // Each datanode has multiple data dirs, check each

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19bc19e8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
index 4d4fed6..443500c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPendingCorruptDnMessages.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.util.ThreadUtil;
 import org.junit.Test;
 
@@ -69,6 +70,8 @@ public class TestPendingCorruptDnMessages {
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
       assertTrue(cluster.changeGenStampOfBlock(0, block, 900));
       
+      // Run the directory scanner to update the DataNode's volumeMap
+      DataNodeTestUtils.runDirectoryScanner(cluster.getDataNodes().get(0));
       // Stop the DN so the replica with the changed gen stamp will be reported
       // when this DN starts up.
       DataNodeProperties dnProps = cluster.stopDataNode(0);


[43/50] [abbrv] hadoop git commit: HDFS-7824. GetContentSummary API and its namenode implementation for Storage Type Quota/Usage. (Contributed by Xiaoyu Yao)

Posted by zj...@apache.org.
HDFS-7824. GetContentSummary API and its namenode implementation for Storage Type Quota/Usage. (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd1081b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd1081b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd1081b3

Branch: refs/heads/YARN-2928
Commit: bd1081b3ca08692c67c533c2c190c8dd6cfaf400
Parents: b1ab3f2
Author: Arpit Agarwal <ar...@apache.org>
Authored: Thu Mar 26 10:24:11 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:49 2015 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/fs/ContentSummary.java    | 155 ++++++++++++++++++-
 .../java/org/apache/hadoop/fs/FileContext.java  |  15 +-
 .../java/org/apache/hadoop/fs/FileSystem.java   |  12 +-
 .../apache/hadoop/fs/TestContentSummary.java    |  52 ++++---
 .../org/apache/hadoop/fs/shell/TestCount.java   |   2 +
 .../hadoop/fs/http/client/HttpFSFileSystem.java |  14 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   3 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  42 ++++-
 .../server/blockmanagement/BlockCollection.java |   2 +-
 .../server/blockmanagement/BlockManager.java    |   2 +-
 .../hdfs/server/namenode/ContentCounts.java     | 146 +++++++++++++++++
 .../ContentSummaryComputationContext.java       |  25 +--
 .../namenode/DirectoryWithQuotaFeature.java     |   6 +-
 .../hadoop/hdfs/server/namenode/INode.java      |  23 +--
 .../hdfs/server/namenode/INodeDirectory.java    |   2 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  28 +++-
 .../hdfs/server/namenode/INodeReference.java    |   4 +-
 .../hdfs/server/namenode/INodeSymlink.java      |   2 +-
 .../snapshot/DirectorySnapshottableFeature.java |   4 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |   9 +-
 .../apache/hadoop/hdfs/util/EnumCounters.java   |   6 +
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    |   5 +-
 .../hadoop-hdfs/src/main/proto/hdfs.proto       |  14 ++
 .../blockmanagement/TestReplicationPolicy.java  |   2 +-
 .../server/namenode/TestQuotaByStorageType.java |  80 ++++++++++
 25 files changed, 563 insertions(+), 92 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index 6276dda..66137d0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -21,6 +21,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
@@ -36,17 +37,106 @@ public class ContentSummary implements Writable{
   private long quota;
   private long spaceConsumed;
   private long spaceQuota;
-  
+  private long typeConsumed[];
+  private long typeQuota[];
+
+  public static class Builder{
+    public Builder() {
+      this.quota = -1;
+      this.spaceQuota = -1;
+
+      typeConsumed = new long[StorageType.values().length];
+      typeQuota = new long[StorageType.values().length];
+      for (int i = 0; i < typeQuota.length; i++) {
+        typeQuota[i] = -1;
+      }
+    }
+
+    public Builder length(long length) {
+      this.length = length;
+      return this;
+    }
+
+    public Builder fileCount(long fileCount) {
+      this.fileCount = fileCount;
+      return this;
+    }
+
+    public Builder directoryCount(long directoryCount) {
+      this.directoryCount = directoryCount;
+      return this;
+    }
+
+    public Builder quota(long quota){
+      this.quota = quota;
+      return this;
+    }
+
+    public Builder spaceConsumed(long spaceConsumed) {
+      this.spaceConsumed = spaceConsumed;
+      return this;
+    }
+
+    public Builder spaceQuota(long spaceQuota) {
+      this.spaceQuota = spaceQuota;
+      return this;
+    }
+
+    public Builder typeConsumed(long typeConsumed[]) {
+      for (int i = 0; i < typeConsumed.length; i++) {
+        this.typeConsumed[i] = typeConsumed[i];
+      }
+      return this;
+    }
+
+    public Builder typeQuota(StorageType type, long quota) {
+      this.typeQuota[type.ordinal()] = quota;
+      return this;
+    }
+
+    public Builder typeConsumed(StorageType type, long consumed) {
+      this.typeConsumed[type.ordinal()] = consumed;
+      return this;
+    }
+
+    public Builder typeQuota(long typeQuota[]) {
+      for (int i = 0; i < typeQuota.length; i++) {
+        this.typeQuota[i] = typeQuota[i];
+      }
+      return this;
+    }
+
+    public ContentSummary build() {
+      return new ContentSummary(length, fileCount, directoryCount, quota,
+          spaceConsumed, spaceQuota, typeConsumed, typeQuota);
+    }
+
+    private long length;
+    private long fileCount;
+    private long directoryCount;
+    private long quota;
+    private long spaceConsumed;
+    private long spaceQuota;
+    private long typeConsumed[];
+    private long typeQuota[];
+  }
 
-  /** Constructor */
+  /** Constructor deprecated by ContentSummary.Builder. */
+  @Deprecated
   public ContentSummary() {}
   
-  /** Constructor */
+  /** Constructor, deprecated by ContentSummary.Builder.
+   *  This constructor implicitly sets spaceConsumed to the same value as
+   *  length; with ContentSummary.Builder, spaceConsumed and length must be
+   *  set explicitly.
+   */
+  @Deprecated
   public ContentSummary(long length, long fileCount, long directoryCount) {
     this(length, fileCount, directoryCount, -1L, length, -1L);
   }
 
-  /** Constructor */
+  /** Constructor, deprecated by ContentSummary.Builder */
+  @Deprecated
   public ContentSummary(
       long length, long fileCount, long directoryCount, long quota,
       long spaceConsumed, long spaceQuota) {
@@ -58,6 +148,21 @@ public class ContentSummary implements Writable{
     this.spaceQuota = spaceQuota;
   }
 
+  /** Constructor for ContentSummary.Builder*/
+  private ContentSummary(
+      long length, long fileCount, long directoryCount, long quota,
+      long spaceConsumed, long spaceQuota, long typeConsumed[],
+      long typeQuota[]) {
+    this.length = length;
+    this.fileCount = fileCount;
+    this.directoryCount = directoryCount;
+    this.quota = quota;
+    this.spaceConsumed = spaceConsumed;
+    this.spaceQuota = spaceQuota;
+    this.typeConsumed = typeConsumed;
+    this.typeQuota = typeQuota;
+  }
+
   /** @return the length */
   public long getLength() {return length;}
 
@@ -70,12 +175,48 @@ public class ContentSummary implements Writable{
   /** Return the directory quota */
   public long getQuota() {return quota;}
   
-  /** Retuns (disk) space consumed */ 
+  /** Returns storage space consumed */
   public long getSpaceConsumed() {return spaceConsumed;}
 
-  /** Returns (disk) space quota */
+  /** Returns storage space quota */
   public long getSpaceQuota() {return spaceQuota;}
-  
+
+  /** Returns storage type quota */
+  public long getTypeQuota(StorageType type) {
+    return (typeQuota != null) ? typeQuota[type.ordinal()] : -1;
+  }
+
+  /** Returns storage type consumed */
+  public long getTypeConsumed(StorageType type) {
+    return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0;
+  }
+
+  /** Returns true if any storage type quota has been set */
+  public boolean isTypeQuotaSet() {
+    if (typeQuota == null) {
+      return false;
+    }
+    for (StorageType t : StorageType.getTypesSupportingQuota()) {
+      if (typeQuota[t.ordinal()] > 0) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /** Returns true if any storage type consumption information is available */
+  public boolean isTypeConsumedAvailable() {
+    if (typeConsumed == null) {
+      return false;
+    }
+    for (StorageType t : StorageType.getTypesSupportingQuota()) {
+      if (typeConsumed[t.ordinal()] > 0) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   @Override
   @InterfaceAudience.Private
   public void write(DataOutput out) throws IOException {

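A minimal, self-contained sketch of the Builder-style construction that replaces
the deprecated constructors above; the figures are illustrative only.

  import org.apache.hadoop.fs.ContentSummary;
  import org.apache.hadoop.fs.StorageType;

  public class ContentSummaryBuilderSketch {
    public static void main(String[] args) {
      ContentSummary cs = new ContentSummary.Builder()
          .length(1024)                          // total file length in bytes
          .fileCount(3)
          .directoryCount(1)
          .spaceConsumed(3072)                   // e.g. 3x replication
          .typeQuota(StorageType.SSD, 4096)      // per-storage-type quota
          .typeConsumed(StorageType.SSD, 1024)   // per-storage-type usage
          .build();                              // quota/spaceQuota default to -1

      System.out.println(cs.isTypeQuotaSet());                  // true
      System.out.println(cs.getTypeConsumed(StorageType.SSD));  // 1024
    }
  }
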
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 2713144..aad8be9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -1644,20 +1644,27 @@ public class FileContext {
         UnsupportedFileSystemException, IOException {
       FileStatus status = FileContext.this.getFileStatus(f);
       if (status.isFile()) {
-        return new ContentSummary(status.getLen(), 1, 0);
+        long length = status.getLen();
+        return new ContentSummary.Builder().length(length).
+            fileCount(1).directoryCount(0).spaceConsumed(length).
+            build();
       }
       long[] summary = {0, 0, 1};
-      RemoteIterator<FileStatus> statusIterator = 
+      RemoteIterator<FileStatus> statusIterator =
         FileContext.this.listStatus(f);
       while(statusIterator.hasNext()) {
         FileStatus s = statusIterator.next();
+        long length = s.getLen();
         ContentSummary c = s.isDirectory() ? getContentSummary(s.getPath()) :
-                                       new ContentSummary(s.getLen(), 1, 0);
+            new ContentSummary.Builder().length(length).fileCount(1).
+            directoryCount(0).spaceConsumed(length).build();
         summary[0] += c.getLength();
         summary[1] += c.getFileCount();
         summary[2] += c.getDirectoryCount();
       }
-      return new ContentSummary(summary[0], summary[1], summary[2]);
+      return new ContentSummary.Builder().length(summary[0]).
+          fileCount(summary[1]).directoryCount(summary[2]).
+          spaceConsumed(summary[0]).build();
     }
     
     /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 2ca8813..305fef2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1467,18 +1467,24 @@ public abstract class FileSystem extends Configured implements Closeable {
     FileStatus status = getFileStatus(f);
     if (status.isFile()) {
       // f is a file
-      return new ContentSummary(status.getLen(), 1, 0);
+      long length = status.getLen();
+      return new ContentSummary.Builder().length(length).
+          fileCount(1).directoryCount(0).spaceConsumed(length).build();
     }
     // f is a directory
     long[] summary = {0, 0, 1};
     for(FileStatus s : listStatus(f)) {
+      long length = s.getLen();
       ContentSummary c = s.isDirectory() ? getContentSummary(s.getPath()) :
-                                     new ContentSummary(s.getLen(), 1, 0);
+          new ContentSummary.Builder().length(length).
+          fileCount(1).directoryCount(0).spaceConsumed(length).build();
       summary[0] += c.getLength();
       summary[1] += c.getFileCount();
       summary[2] += c.getDirectoryCount();
     }
-    return new ContentSummary(summary[0], summary[1], summary[2]);
+    return new ContentSummary.Builder().length(summary[0]).
+        fileCount(summary[1]).directoryCount(summary[2]).
+        spaceConsumed(summary[0]).build();
   }
 
   final private static PathFilter DEFAULT_FILTER = new PathFilter() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java
index 5db0de3..7cc7ae4 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestContentSummary.java
@@ -32,13 +32,13 @@ public class TestContentSummary {
   // check the empty constructor correctly initialises the object
   @Test
   public void testConstructorEmpty() {
-    ContentSummary contentSummary = new ContentSummary();
+    ContentSummary contentSummary = new ContentSummary.Builder().build();
     assertEquals("getLength", 0, contentSummary.getLength());
     assertEquals("getFileCount", 0, contentSummary.getFileCount());
     assertEquals("getDirectoryCount", 0, contentSummary.getDirectoryCount());
-    assertEquals("getQuota", 0, contentSummary.getQuota());
+    assertEquals("getQuota", -1, contentSummary.getQuota());
     assertEquals("getSpaceConsumed", 0, contentSummary.getSpaceConsumed());
-    assertEquals("getSpaceQuota", 0, contentSummary.getSpaceQuota());
+    assertEquals("getSpaceQuota", -1, contentSummary.getSpaceQuota());
   }
 
   // check the full constructor with quota information
@@ -51,8 +51,9 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = 66666;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
     assertEquals("getLength", length, contentSummary.getLength());
     assertEquals("getFileCount", fileCount, contentSummary.getFileCount());
     assertEquals("getDirectoryCount", directoryCount,
@@ -70,8 +71,9 @@ public class TestContentSummary {
     long fileCount = 22222;
     long directoryCount = 33333;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).
+        spaceConsumed(length).build();
     assertEquals("getLength", length, contentSummary.getLength());
     assertEquals("getFileCount", fileCount, contentSummary.getFileCount());
     assertEquals("getDirectoryCount", directoryCount,
@@ -91,8 +93,9 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = 66666;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
 
     DataOutput out = mock(DataOutput.class);
     InOrder inOrder = inOrder(out);
@@ -116,7 +119,7 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = 66666;
 
-    ContentSummary contentSummary = new ContentSummary();
+    ContentSummary contentSummary = new ContentSummary.Builder().build();
 
     DataInput in = mock(DataInput.class);
     when(in.readLong()).thenReturn(length).thenReturn(fileCount)
@@ -159,8 +162,9 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = 66665;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
     String expected = "       44444          -11111           66665           11110"
         + "        33333        22222              11111 ";
     assertEquals(expected, contentSummary.toString(true));
@@ -173,8 +177,8 @@ public class TestContentSummary {
     long fileCount = 22222;
     long directoryCount = 33333;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).build();
     String expected = "        none             inf            none"
         + "             inf        33333        22222              11111 ";
     assertEquals(expected, contentSummary.toString(true));
@@ -190,8 +194,9 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = 66665;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
     String expected = "       33333        22222              11111 ";
     assertEquals(expected, contentSummary.toString(false));
   }
@@ -206,8 +211,9 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = 66665;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
     String expected = "       44444          -11111           66665"
         + "           11110        33333        22222              11111 ";
     assertEquals(expected, contentSummary.toString());
@@ -223,8 +229,9 @@ public class TestContentSummary {
     long spaceConsumed = 1073741825;
     long spaceQuota = 1;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
     String expected = "     212.0 M            1023               1 "
         + "           -1 G       32.6 K      211.9 M              8.0 E ";
     assertEquals(expected, contentSummary.toString(true, true));
@@ -240,8 +247,9 @@ public class TestContentSummary {
     long spaceConsumed = 55555;
     long spaceQuota = Long.MAX_VALUE;
 
-    ContentSummary contentSummary = new ContentSummary(length, fileCount,
-        directoryCount, quota, spaceConsumed, spaceQuota);
+    ContentSummary contentSummary = new ContentSummary.Builder().length(length).
+        fileCount(fileCount).directoryCount(directoryCount).quota(quota).
+        spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
     String expected = "      32.6 K      211.9 M              8.0 E ";
     assertEquals(expected, contentSummary.toString(false, true));
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
index 1f2f2d4..d5f097d 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestCount.java
@@ -315,6 +315,8 @@ public class TestCount {
   // mock content system
   static class MockContentSummary extends ContentSummary {
 
+    @SuppressWarnings("deprecation")
+    // suppress warning on the usage of deprecated ContentSummary constructor
     public MockContentSummary() {
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 20b212e..e797d12 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -1013,13 +1013,13 @@ public class HttpFSFileSystem extends FileSystem
     HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
     JSONObject json = (JSONObject) ((JSONObject)
       HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
-    return new ContentSummary((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON),
-                              (Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON),
-                              (Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON),
-                              (Long) json.get(CONTENT_SUMMARY_QUOTA_JSON),
-                              (Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON),
-                              (Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON)
-    );
+    return new ContentSummary.Builder().
+        length((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON)).
+        fileCount((Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON)).
+        directoryCount((Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON)).
+        quota((Long) json.get(CONTENT_SUMMARY_QUOTA_JSON)).
+        spaceConsumed((Long) json.get(CONTENT_SUMMARY_SPACE_CONSUMED_JSON)).
+        spaceQuota((Long) json.get(CONTENT_SUMMARY_SPACE_QUOTA_JSON)).build();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 51842ff..e16348a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1300,6 +1300,9 @@ Release 2.7.0 - UNRELEASED
       HDFS-7806. Refactor: move StorageType from hadoop-hdfs to
       hadoop-common. (Xiaoyu Yao via Arpit Agarwal)
 
+      HDFS-7824. GetContentSummary API and its namenode implementation for
+      Storage Type Quota/Usage. (Xiaoyu Yao via Arpit Agarwal)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index b841850..9446b70 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -1728,21 +1728,49 @@ public class PBHelper {
   
   public static ContentSummary convert(ContentSummaryProto cs) {
     if (cs == null) return null;
-    return new ContentSummary(
-      cs.getLength(), cs.getFileCount(), cs.getDirectoryCount(), cs.getQuota(),
-      cs.getSpaceConsumed(), cs.getSpaceQuota());
+    ContentSummary.Builder builder = new ContentSummary.Builder();
+    builder.length(cs.getLength()).
+        fileCount(cs.getFileCount()).
+        directoryCount(cs.getDirectoryCount()).
+        quota(cs.getQuota()).
+        spaceConsumed(cs.getSpaceConsumed()).
+        spaceQuota(cs.getSpaceQuota());
+    if (cs.hasTypeQuotaInfos()) {
+      for (HdfsProtos.StorageTypeQuotaInfoProto info :
+          cs.getTypeQuotaInfos().getTypeQuotaInfoList()) {
+        StorageType type = PBHelper.convertStorageType(info.getType());
+        builder.typeConsumed(type, info.getConsumed());
+        builder.typeQuota(type, info.getQuota());
+      }
+    }
+    return builder.build();
   }
   
   public static ContentSummaryProto convert(ContentSummary cs) {
     if (cs == null) return null;
-    return ContentSummaryProto.newBuilder().
-        setLength(cs.getLength()).
+    ContentSummaryProto.Builder builder = ContentSummaryProto.newBuilder();
+        builder.setLength(cs.getLength()).
         setFileCount(cs.getFileCount()).
         setDirectoryCount(cs.getDirectoryCount()).
         setQuota(cs.getQuota()).
         setSpaceConsumed(cs.getSpaceConsumed()).
-        setSpaceQuota(cs.getSpaceQuota()).
-        build();
+        setSpaceQuota(cs.getSpaceQuota());
+
+    if (cs.isTypeQuotaSet() || cs.isTypeConsumedAvailable()) {
+      HdfsProtos.StorageTypeQuotaInfosProto.Builder isb =
+          HdfsProtos.StorageTypeQuotaInfosProto.newBuilder();
+      for (StorageType t: StorageType.getTypesSupportingQuota()) {
+        HdfsProtos.StorageTypeQuotaInfoProto info =
+            HdfsProtos.StorageTypeQuotaInfoProto.newBuilder().
+                setType(convertStorageType(t)).
+                setConsumed(cs.getTypeConsumed(t)).
+                setQuota(cs.getTypeQuota(t)).
+                build();
+        isb.addTypeQuotaInfo(info);
+      }
+      builder.setTypeQuotaInfos(isb);
+    }
+    return builder.build();
   }
 
   public static NNHAStatusHeartbeat convert(NNHAStatusHeartbeatProto s) {

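A hedged sketch of the wire round-trip enabled by the new fields: storage type
quota/usage set on a ContentSummary should survive conversion to
ContentSummaryProto and back. The proto class location (HdfsProtos) is the
generated one assumed here; values are illustrative.

  import org.apache.hadoop.fs.ContentSummary;
  import org.apache.hadoop.fs.StorageType;
  import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
  import org.apache.hadoop.hdfs.protocolPB.PBHelper;

  public class ContentSummaryPbRoundTrip {
    public static void main(String[] args) {
      ContentSummary cs = new ContentSummary.Builder()
          .length(1024).fileCount(1).directoryCount(0).spaceConsumed(3072)
          .typeQuota(StorageType.SSD, 4096)
          .typeConsumed(StorageType.SSD, 1024)
          .build();
      // Convert to protobuf and back; the per-type values are carried in
      // StorageTypeQuotaInfosProto when a type quota or usage is present.
      ContentSummaryProto proto = PBHelper.convert(cs);
      ContentSummary back = PBHelper.convert(proto);
      System.out.println(back.getTypeQuota(StorageType.SSD));     // 4096
      System.out.println(back.getTypeConsumed(StorageType.SSD));  // 1024
    }
  }
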
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 1547611..e9baf85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -36,7 +36,7 @@ public interface BlockCollection {
   /** 
    * Get content summary.
    */
-  public ContentSummary computeContentSummary();
+  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
 
   /**
    * @return the number of blocks

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 91cfead..ad40782 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -745,7 +745,7 @@ public class BlockManager {
         // always decrement total blocks
         -1);
 
-    final long fileLength = bc.computeContentSummary().getLength();
+    final long fileLength = bc.computeContentSummary(getStoragePolicySuite()).getLength();
     final long pos = fileLength - ucBlock.getNumBytes();
     return createLocatedBlock(ucBlock, pos, AccessMode.WRITE);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentCounts.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentCounts.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentCounts.java
new file mode 100644
index 0000000..16f0771
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentCounts.java
@@ -0,0 +1,146 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.util.EnumCounters;
+
+/**
+ * Counters for content types such as file, directory and symlink,
+ * and for storage type usage such as SSD, DISK, ARCHIVE.
+ */
+public class ContentCounts {
+  private EnumCounters<Content> contents;
+  private EnumCounters<StorageType> types;
+
+  public static class Builder {
+    private EnumCounters<Content> contents;
+    // storage spaces used by corresponding storage types
+    private EnumCounters<StorageType> types;
+
+    public Builder() {
+      contents = new EnumCounters<Content>(Content.class);
+      types = new EnumCounters<StorageType>(StorageType.class);
+    }
+
+    public Builder file(long file) {
+      contents.set(Content.FILE, file);
+      return this;
+    }
+
+    public Builder directory(long directory) {
+      contents.set(Content.DIRECTORY, directory);
+      return this;
+    }
+
+    public Builder symlink(long symlink) {
+      contents.set(Content.SYMLINK, symlink);
+      return this;
+    }
+
+    public Builder length(long length) {
+      contents.set(Content.LENGTH, length);
+      return this;
+    }
+
+    public Builder storagespace(long storagespace) {
+      contents.set(Content.DISKSPACE, storagespace);
+      return this;
+    }
+
+    public Builder snapshot(long snapshot) {
+      contents.set(Content.SNAPSHOT, snapshot);
+      return this;
+    }
+
+    public Builder snapshotable_directory(long snapshotable_directory) {
+      contents.set(Content.SNAPSHOTTABLE_DIRECTORY, snapshotable_directory);
+      return this;
+    }
+
+    public ContentCounts build() {
+      return new ContentCounts(contents, types);
+    }
+  }
+
+  private ContentCounts(EnumCounters<Content> contents,
+      EnumCounters<StorageType> types) {
+    this.contents = contents;
+    this.types = types;
+  }
+
+  // Get the number of files.
+  public long getFileCount() {
+    return contents.get(Content.FILE);
+  }
+
+  // Get the number of directories.
+  public long getDirectoryCount() {
+    return contents.get(Content.DIRECTORY);
+  }
+
+  // Get the number of symlinks.
+  public long getSymlinkCount() {
+    return contents.get(Content.SYMLINK);
+  }
+
+  // Get the total file length in bytes.
+  public long getLength() {
+    return contents.get(Content.LENGTH);
+  }
+
+  // Get the total storage space usage in bytes, including replication.
+  public long getStoragespace() {
+    return contents.get(Content.DISKSPACE);
+  }
+
+  // Get the number of snapshots
+  public long getSnapshotCount() {
+    return contents.get(Content.SNAPSHOT);
+  }
+
+  // Get the number of snapshottable directories.
+  public long getSnapshotableDirectoryCount() {
+    return contents.get(Content.SNAPSHOTTABLE_DIRECTORY);
+  }
+
+  public long[] getTypeSpaces() {
+    return types.asArray();
+  }
+
+  public long getTypeSpace(StorageType t) {
+    return types.get(t);
+  }
+
+  public void addContent(Content c, long val) {
+    contents.add(c, val);
+  }
+
+  public void addContents(ContentCounts that) {
+    contents.add(that.contents);
+    types.add(that.types);
+  }
+
+  public void addTypeSpace(StorageType t, long val) {
+    types.add(t, val);
+  }
+
+  public void addTypeSpaces(EnumCounters<StorageType> that) {
+    this.types.add(that);
+  }
+}

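A short sketch of how the new accumulator can be driven; only the public
Builder and accessor methods defined above are used, and the numbers are
illustrative.

  import org.apache.hadoop.fs.StorageType;
  import org.apache.hadoop.hdfs.server.namenode.ContentCounts;

  public class ContentCountsSketch {
    public static void main(String[] args) {
      ContentCounts counts = new ContentCounts.Builder()
          .file(2).directory(1)
          .length(2048)            // total file bytes
          .storagespace(6144)      // e.g. 3x replication
          .build();
      counts.addTypeSpace(StorageType.SSD, 2048);  // per-storage-type usage

      System.out.println(counts.getFileCount());                 // 2
      System.out.println(counts.getStoragespace());              // 6144
      System.out.println(counts.getTypeSpace(StorageType.SSD));  // 2048
    }
  }
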
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 63fa8c1..31f34b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -26,7 +27,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 public class ContentSummaryComputationContext {
   private FSDirectory dir = null;
   private FSNamesystem fsn = null;
-  private Content.Counts counts = null;
+  private BlockStoragePolicySuite bsps = null;
+  private ContentCounts counts = null;
   private long nextCountLimit = 0;
   private long limitPerRun = 0;
   private long yieldCount = 0;
@@ -46,12 +48,13 @@ public class ContentSummaryComputationContext {
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
     this.nextCountLimit = limitPerRun;
-    this.counts = Content.Counts.newInstance();
+    this.counts = new ContentCounts.Builder().build();
   }
 
   /** Constructor for blocking computation. */
-  public ContentSummaryComputationContext() {
+  public ContentSummaryComputationContext(BlockStoragePolicySuite bsps) {
     this(null, null, 0);
+    this.bsps = bsps;
   }
 
   /** Return current yield count */
@@ -73,10 +76,10 @@ public class ContentSummaryComputationContext {
     }
 
     // Have we reached the limit?
-    long currentCount = counts.get(Content.FILE) +
-        counts.get(Content.SYMLINK) +
-        counts.get(Content.DIRECTORY) +
-        counts.get(Content.SNAPSHOTTABLE_DIRECTORY);
+    long currentCount = counts.getFileCount() +
+        counts.getSymlinkCount() +
+        counts.getDirectoryCount() +
+        counts.getSnapshotableDirectoryCount();
     if (currentCount <= nextCountLimit) {
       return false;
     }
@@ -114,11 +117,15 @@ public class ContentSummaryComputationContext {
   }
 
   /** Get the content counts */
-  public Content.Counts getCounts() {
+  public ContentCounts getCounts() {
     return counts;
   }
 
   public BlockStoragePolicySuite getBlockStoragePolicySuite() {
-      return fsn.getBlockManager().getStoragePolicySuite();
+    Preconditions.checkState((bsps != null || fsn != null),
+        "BlockStoragePolicySuite must be either initialized or available via" +
+            " FSNamesystem");
+    return (bsps != null) ? bsps:
+        fsn.getBlockManager().getStoragePolicySuite();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
index 01eb22f..31b45ad 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/DirectoryWithQuotaFeature.java
@@ -126,12 +126,12 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
 
   ContentSummaryComputationContext computeContentSummary(final INodeDirectory dir,
       final ContentSummaryComputationContext summary) {
-    final long original = summary.getCounts().get(Content.DISKSPACE);
+    final long original = summary.getCounts().getStoragespace();
     long oldYieldCount = summary.getYieldCount();
     dir.computeDirectoryContentSummary(summary, Snapshot.CURRENT_STATE_ID);
     // Check only when the content has not changed in the middle.
     if (oldYieldCount == summary.getYieldCount()) {
-      checkStoragespace(dir, summary.getCounts().get(Content.DISKSPACE) - original);
+      checkStoragespace(dir, summary.getCounts().getStoragespace() - original);
     }
     return summary;
   }
@@ -277,4 +277,4 @@ public final class DirectoryWithQuotaFeature implements INode.Feature {
     return "Quota[" + namespaceString() + ", " + storagespaceString() +
         ", " + typeSpaceString() + "]";
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 8c4e466..586cce4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -432,9 +432,9 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
       BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes);
 
   /** Compute {@link ContentSummary}. Blocking call */
-  public final ContentSummary computeContentSummary() {
+  public final ContentSummary computeContentSummary(BlockStoragePolicySuite bsps) {
     return computeAndConvertContentSummary(
-        new ContentSummaryComputationContext());
+        new ContentSummaryComputationContext(bsps));
   }
 
   /**
@@ -442,17 +442,22 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
    */
   public final ContentSummary computeAndConvertContentSummary(
       ContentSummaryComputationContext summary) {
-    Content.Counts counts = computeContentSummary(summary).getCounts();
+    ContentCounts counts = computeContentSummary(summary).getCounts();
     final QuotaCounts q = getQuotaCounts();
-    return new ContentSummary(counts.get(Content.LENGTH),
-        counts.get(Content.FILE) + counts.get(Content.SYMLINK),
-        counts.get(Content.DIRECTORY), q.getNameSpace(),
-        counts.get(Content.DISKSPACE), q.getStorageSpace());
-    // TODO: storage type quota reporting HDFS-7701.
+    return new ContentSummary.Builder().
+        length(counts.getLength()).
+        fileCount(counts.getFileCount() + counts.getSymlinkCount()).
+        directoryCount(counts.getDirectoryCount()).
+        quota(q.getNameSpace()).
+        spaceConsumed(counts.getStoragespace()).
+        spaceQuota(q.getStorageSpace()).
+        typeConsumed(counts.getTypeSpaces()).
+        typeQuota(q.getTypeSpaces().asArray()).
+        build();
   }
 
   /**
-   * Count subtree content summary with a {@link Content.Counts}.
+   * Count subtree content summary with a {@link ContentCounts}.
    *
    * @param summary the context object holding counts for the subtree.
    * @return The same objects as summary.

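A hedged sketch of the updated blocking call path, written as a free-standing
helper: it uses only the signatures shown in this hunk plus ContentSummary's
new per-type accessor, and assumes the caller holds whatever namesystem locks
the real call sites hold.

  import org.apache.hadoop.fs.ContentSummary;
  import org.apache.hadoop.fs.StorageType;
  import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
  import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
  import org.apache.hadoop.hdfs.server.namenode.INode;

  class BlockingSummarySketch {
    // The policy suite now has to be passed in explicitly for the blocking path.
    static long ssdConsumed(FSNamesystem fsn, INode inode) {
      BlockStoragePolicySuite bsps = fsn.getBlockManager().getStoragePolicySuite();
      ContentSummary summary = inode.computeContentSummary(bsps);  // blocking call
      return summary.getTypeConsumed(StorageType.SSD);
    }
  }
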
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index ebb8ae4..dadb8c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -664,7 +664,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
     }
 
     // Increment the directory count for this directory.
-    summary.getCounts().add(Content.DIRECTORY, 1);
+    summary.getCounts().addContent(Content.DIRECTORY, 1);
     // Relinquish and reacquire locks if necessary.
     summary.yield();
     return summary;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index ae554fe..a6f07f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -599,22 +599,36 @@ public class INodeFile extends INodeWithAdditionalFields
   @Override
   public final ContentSummaryComputationContext computeContentSummary(
       final ContentSummaryComputationContext summary) {
-    final Content.Counts counts = summary.getCounts();
+    final ContentCounts counts = summary.getCounts();
     FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
+    long fileLen = 0;
     if (sf == null) {
-      counts.add(Content.LENGTH, computeFileSize());
-      counts.add(Content.FILE, 1);
+      fileLen = computeFileSize();
+      counts.addContent(Content.FILE, 1);
     } else {
       final FileDiffList diffs = sf.getDiffs();
       final int n = diffs.asList().size();
-      counts.add(Content.FILE, n);
+      counts.addContent(Content.FILE, n);
       if (n > 0 && sf.isCurrentFileDeleted()) {
-        counts.add(Content.LENGTH, diffs.getLast().getFileSize());
+        fileLen = diffs.getLast().getFileSize();
       } else {
-        counts.add(Content.LENGTH, computeFileSize());
+        fileLen = computeFileSize();
+      }
+    }
+    counts.addContent(Content.LENGTH, fileLen);
+    counts.addContent(Content.DISKSPACE, storagespaceConsumed());
+
+    if (getStoragePolicyID() != BlockStoragePolicySuite.ID_UNSPECIFIED) {
+      BlockStoragePolicy bsp = summary.getBlockStoragePolicySuite().
+          getPolicy(getStoragePolicyID());
+      List<StorageType> storageTypes = bsp.chooseStorageTypes(getFileReplication());
+      for (StorageType t : storageTypes) {
+        if (!t.supportTypeQuota()) {
+          continue;
+        }
+        counts.addTypeSpace(t, fileLen);
       }
     }
-    counts.add(Content.DISKSPACE, storagespaceConsumed());
     return summary;
   }
 

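For illustration, a standalone sketch of the per-type accounting the new loop above performs. The one-SSD-plus-two-DISK placement is assumed here (a ONESSD-like policy at replication 3) rather than computed from a BlockStoragePolicySuite; it matches the SSD = fileLen, DISK = 2 * fileLen expectations asserted in TestQuotaByStorageType further down in this commit.

    import java.util.Arrays;
    import java.util.EnumMap;
    import java.util.List;
    import java.util.Map;

    import org.apache.hadoop.fs.StorageType;

    public class TypeSpaceAccountingSketch {
      public static void main(String[] args) {
        long fileLen = 2L * 1024 * 1024;   // arbitrary file length
        // Assumed replica placement under a ONESSD-like policy.
        List<StorageType> chosen = Arrays.asList(
            StorageType.SSD, StorageType.DISK, StorageType.DISK);

        Map<StorageType, Long> typeConsumed =
            new EnumMap<StorageType, Long>(StorageType.class);
        for (StorageType t : chosen) {
          if (!t.supportTypeQuota()) {     // same guard as the loop above
            continue;
          }
          Long prev = typeConsumed.get(t);
          typeConsumed.put(t, (prev == null ? 0L : prev) + fileLen);
        }
        // Expected: SSD -> fileLen, DISK -> 2 * fileLen
        System.out.println(typeConsumed);
      }
    }
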
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index 911279a..eee50a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -23,6 +23,7 @@ import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -512,7 +513,8 @@ public abstract class INodeReference extends INode {
       //only count storagespace for WithName
       final QuotaCounts q = new QuotaCounts.Builder().build();
       computeQuotaUsage(summary.getBlockStoragePolicySuite(), q, false, lastSnapshotId);
-      summary.getCounts().add(Content.DISKSPACE, q.getStorageSpace());
+      summary.getCounts().addContent(Content.DISKSPACE, q.getStorageSpace());
+      summary.getCounts().addTypeSpaces(q.getTypeSpaces());
       return summary;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
index fe75687..120d0dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
@@ -102,7 +102,7 @@ public class INodeSymlink extends INodeWithAdditionalFields {
   @Override
   public ContentSummaryComputationContext computeContentSummary(
       final ContentSummaryComputationContext summary) {
-    summary.getCounts().add(Content.SYMLINK, 1);
+    summary.getCounts().addContent(Content.SYMLINK, 1);
     return summary;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
index 5168f0b..fa1bf94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java
@@ -237,8 +237,8 @@ public class DirectorySnapshottableFeature extends DirectoryWithSnapshotFeature
       final INodeDirectory snapshotRoot,
       final ContentSummaryComputationContext summary) {
     snapshotRoot.computeContentSummary(summary);
-    summary.getCounts().add(Content.SNAPSHOT, snapshotsByNames.size());
-    summary.getCounts().add(Content.SNAPSHOTTABLE_DIRECTORY, 1);
+    summary.getCounts().addContent(Content.SNAPSHOT, snapshotsByNames.size());
+    summary.getCounts().addContent(Content.SNAPSHOTTABLE_DIRECTORY, 1);
     return summary;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 07ff744..d55332f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.AclStorage;
 import org.apache.hadoop.hdfs.server.namenode.Content;
+import org.apache.hadoop.hdfs.server.namenode.ContentCounts;
 import org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
@@ -650,19 +651,19 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
   }
 
   public void computeContentSummary4Snapshot(final BlockStoragePolicySuite bsps,
-      final Content.Counts counts) {
+      final ContentCounts counts) {
     // Create a new blank summary context for blocking processing of subtree.
     ContentSummaryComputationContext summary = 
-        new ContentSummaryComputationContext();
+        new ContentSummaryComputationContext(bsps);
     for(DirectoryDiff d : diffs) {
       for(INode deleted : d.getChildrenDiff().getList(ListType.DELETED)) {
         deleted.computeContentSummary(summary);
       }
     }
     // Add the counts from deleted trees.
-    counts.add(summary.getCounts());
+    counts.addContents(summary.getCounts());
     // Add the deleted directory count.
-    counts.add(Content.DIRECTORY, diffs.asList().size());
+    counts.addContent(Content.DIRECTORY, diffs.asList().size());
   }
   
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
index 18f4bd6..86ba341 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
@@ -21,6 +21,7 @@ import java.util.Arrays;
 import java.util.HashMap;
 
 import com.google.common.base.Preconditions;
+import org.apache.commons.lang.ArrayUtils;
 
 /**
  * Counters for an enum type.
@@ -64,6 +65,11 @@ public class EnumCounters<E extends Enum<E>> {
     return counters[e.ordinal()];
   }
 
+  /** @return the values of the counters as a shallow copy of the array. */
+  public long[] asArray() {
+    return ArrayUtils.clone(counters);
+  }
+
   /** Negate all counters. */
   public final void negation() {
     for(int i = 0; i < counters.length; i++) {

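A small sketch of what the new asArray() accessor relies on: ArrayUtils.clone returns an independent copy of the primitive array, so callers (for example ContentSummary.Builder#typeQuota above) cannot mutate the internal counters. Only ArrayUtils itself is taken from the hunk; the rest is illustrative.

    import org.apache.commons.lang.ArrayUtils;

    public class ArrayCloneSketch {
      public static void main(String[] args) {
        long[] counters = {1L, 2L, 3L};
        long[] copy = ArrayUtils.clone(counters);  // defensive copy
        copy[0] = 99L;
        // The original array is untouched: prints "1 vs 99".
        System.out.println(counters[0] + " vs " + copy[0]);
      }
    }
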
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index ae9612f..d53bc31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -553,8 +553,9 @@ public class JsonUtil {
     final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
     final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
 
-    return new ContentSummary(length, fileCount, directoryCount,
-        quota, spaceConsumed, spaceQuota);
+    return new ContentSummary.Builder().length(length).fileCount(fileCount).
+        directoryCount(directoryCount).quota(quota).spaceConsumed(spaceConsumed).
+        spaceQuota(spaceQuota).build();
   }
 
   /** Convert a MD5MD5CRC32FileChecksum to a Json string. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index 2966e51..7d94f04 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -134,6 +134,20 @@ message ContentSummaryProto {
   required uint64 quota = 4;
   required uint64 spaceConsumed = 5;
   required uint64 spaceQuota = 6;
+  optional StorageTypeQuotaInfosProto typeQuotaInfos = 7;
+}
+
+/**
+ * Storage type quota and usage information of a file or directory
+ */
+message StorageTypeQuotaInfosProto {
+  repeated StorageTypeQuotaInfoProto typeQuotaInfo = 1;
+}
+
+message StorageTypeQuotaInfoProto {
+  required StorageTypeProto type = 1;
+  required uint64 quota = 2;
+  required uint64 consumed = 3;
 }
 
 /**

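Assuming the standard protoc Java codegen for hdfs.proto (an HdfsProtos outer class in org.apache.hadoop.hdfs.protocol.proto, which this diff does not show), the new messages would be populated roughly as follows; the quota and consumed values are made up.

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfoProto;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeQuotaInfosProto;

    public class TypeQuotaProtoSketch {
      public static void main(String[] args) {
        StorageTypeQuotaInfoProto ssd = StorageTypeQuotaInfoProto.newBuilder()
            .setType(StorageTypeProto.SSD)
            .setQuota(4L * 1024 * 1024)
            .setConsumed(2L * 1024 * 1024)
            .build();
        StorageTypeQuotaInfosProto infos = StorageTypeQuotaInfosProto.newBuilder()
            .addTypeQuotaInfo(ssd)       // repeated typeQuotaInfo field
            .build();
        System.out.println(infos);
      }
    }
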
http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 485cb9b..32fae45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1221,7 +1221,7 @@ public class TestReplicationPolicy {
     when(mbc.isUnderConstruction()).thenReturn(true);
     ContentSummary cs = mock(ContentSummary.class);
     when(cs.getLength()).thenReturn((long)1);
-    when(mbc.computeContentSummary()).thenReturn(cs);
+    when(mbc.computeContentSummary(bm.getStoragePolicySuite())).thenReturn(cs);
     info.setBlockCollection(mbc);
     bm.addBlockCollection(info, mbc);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd1081b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
index aee756f..6d38937 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
@@ -24,6 +24,7 @@ package org.apache.hadoop.hdfs.server.namenode;
   import org.apache.commons.logging.Log;
   import org.apache.commons.logging.LogFactory;
   import org.apache.hadoop.conf.Configuration;
+  import org.apache.hadoop.fs.ContentSummary;
   import org.apache.hadoop.fs.Path;
   import org.apache.hadoop.fs.StorageType;
   import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -156,6 +157,11 @@ public class TestQuotaByStorageType {
     ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
         .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
     assertEquals(file1Len, ssdConsumed);
+
+    ContentSummary cs = dfs.getContentSummary(foo);
+    assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
+    assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
+    assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
   }
 
   @Test(timeout = 60000)
@@ -192,6 +198,11 @@ public class TestQuotaByStorageType {
     fnode.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts, true);
     assertEquals(fnode.dumpTreeRecursively().toString(), 0,
         counts.getTypeSpaces().get(StorageType.SSD));
+
+    ContentSummary cs = dfs.getContentSummary(foo);
+    assertEquals(cs.getSpaceConsumed(), 0);
+    assertEquals(cs.getTypeConsumed(StorageType.SSD), 0);
+    assertEquals(cs.getTypeConsumed(StorageType.DISK), 0);
   }
 
   @Test(timeout = 60000)
@@ -233,6 +244,11 @@ public class TestQuotaByStorageType {
     } catch (Throwable t) {
       LOG.info("Got expected exception ", t);
     }
+
+    ContentSummary cs = dfs.getContentSummary(foo);
+    assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
+    assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
+    assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
   }
 
   /**
@@ -554,6 +570,11 @@ public class TestQuotaByStorageType {
     assertEquals(sub1Node.dumpTreeRecursively().toString(), file1Len,
         counts1.getTypeSpaces().get(StorageType.SSD));
 
+    ContentSummary cs1 = dfs.getContentSummary(sub1);
+    assertEquals(cs1.getSpaceConsumed(), file1Len * REPLICATION);
+    assertEquals(cs1.getTypeConsumed(StorageType.SSD), file1Len);
+    assertEquals(cs1.getTypeConsumed(StorageType.DISK), file1Len * 2);
+
     // Delete the snapshot s1
     dfs.deleteSnapshot(sub1, "s1");
 
@@ -566,6 +587,11 @@ public class TestQuotaByStorageType {
     sub1Node.computeQuotaUsage(fsn.getBlockManager().getStoragePolicySuite(), counts2, true);
     assertEquals(sub1Node.dumpTreeRecursively().toString(), 0,
         counts2.getTypeSpaces().get(StorageType.SSD));
+
+    ContentSummary cs2 = dfs.getContentSummary(sub1);
+    assertEquals(cs2.getSpaceConsumed(), 0);
+    assertEquals(cs2.getTypeConsumed(StorageType.SSD), 0);
+    assertEquals(cs2.getTypeConsumed(StorageType.DISK), 0);
   }
 
   @Test(timeout = 60000)
@@ -601,6 +627,11 @@ public class TestQuotaByStorageType {
     ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
         .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
     assertEquals(newFile1Len, ssdConsumed);
+
+    ContentSummary cs = dfs.getContentSummary(foo);
+    assertEquals(cs.getSpaceConsumed(), newFile1Len * REPLICATION);
+    assertEquals(cs.getTypeConsumed(StorageType.SSD), newFile1Len);
+    assertEquals(cs.getTypeConsumed(StorageType.DISK), newFile1Len * 2);
   }
 
   @Test
@@ -701,6 +732,55 @@ public class TestQuotaByStorageType {
         .getDirectoryWithQuotaFeature()
         .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
     assertEquals(file1Len, ssdConsumedAfterNNRestart);
+  }
+
+  @Test(timeout = 60000)
+  public void testContentSummaryWithoutQuotaByStorageType() throws Exception {
+    final Path foo = new Path(dir, "foo");
+    Path createdFile1 = new Path(foo, "created_file1.data");
+    dfs.mkdirs(foo);
+
+    // set storage policy on directory "foo" to ONESSD
+    dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
 
+    INode fnode = fsdir.getINode4Write(foo.toString());
+    assertTrue(fnode.isDirectory());
+    assertTrue(!fnode.isQuotaSet());
+
+    // Create file of size 2 * BLOCKSIZE under directory "foo"
+    long file1Len = BLOCKSIZE * 2;
+    int bufLen = BLOCKSIZE / 16;
+    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
+
+    // Verify getContentSummary without any quota set
+    ContentSummary cs = dfs.getContentSummary(foo);
+    assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
+    assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
+    assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
+  }
+
+  @Test(timeout = 60000)
+  public void testContentSummaryWithoutStoragePolicy() throws Exception {
+    final Path foo = new Path(dir, "foo");
+    Path createdFile1 = new Path(foo, "created_file1.data");
+    dfs.mkdirs(foo);
+
+    INode fnode = fsdir.getINode4Write(foo.toString());
+    assertTrue(fnode.isDirectory());
+    assertTrue(!fnode.isQuotaSet());
+
+    // Create file of size 2 * BLOCKSIZE under directory "foo"
+    long file1Len = BLOCKSIZE * 2;
+    int bufLen = BLOCKSIZE / 16;
+    DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
+
+    // Verify getContentSummary without any quota set
+    // Expect no type quota and usage information available
+    ContentSummary cs = dfs.getContentSummary(foo);
+    assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
+    for (StorageType t : StorageType.values()) {
+      assertEquals(cs.getTypeConsumed(t), 0);
+      assertEquals(cs.getTypeQuota(t), -1);
+    }
   }
 }
\ No newline at end of file

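Seen from the client side, the new assertions above boil down to the sketch below: fetch a ContentSummary and inspect per-type usage and quota. The path and configuration are placeholders; the only new APIs used are getTypeConsumed and getTypeQuota, which the tests above exercise, with getTypeQuota returning -1 when no quota of that type is set.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StorageType;

    public class TypeUsageSketch {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS points at an HDFS cluster; "/foo" is a placeholder.
        FileSystem fs = FileSystem.get(new Configuration());
        ContentSummary cs = fs.getContentSummary(new Path("/foo"));
        System.out.println("space consumed: " + cs.getSpaceConsumed());
        for (StorageType t : StorageType.values()) {
          System.out.println(t + " consumed=" + cs.getTypeConsumed(t)
              + " quota=" + cs.getTypeQuota(t));   // -1 means no type quota set
        }
      }
    }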

[17/50] [abbrv] hadoop git commit: YARN-3383. AdminService should use warn instead of info to log exception when operation fails. (Li Lu via wangda)

Posted by zj...@apache.org.
YARN-3383. AdminService should use warn instead of info to log exception when operation fails. (Li Lu via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb394518
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb394518
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb394518

Branch: refs/heads/YARN-2928
Commit: bb394518efb9e41a27b1b07360833ec648ead1bf
Parents: 7f43b8c
Author: Wangda Tan <wa...@apache.org>
Authored: Tue Mar 24 10:33:09 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:45 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                                   | 3 +++
 .../apache/hadoop/yarn/server/resourcemanager/AdminService.java   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb394518/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0605477..e8c36a4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -153,6 +153,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3241. FairScheduler handles "invalid" queue names inconsistently. 
     (Zhihai Xu via kasha)
 
+    YARN-3383. AdminService should use "warn" instead of "info" to log exception 
+    when operation fails. (Li Lu via wangda)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb394518/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
index 22b92c2..12714de 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
@@ -699,7 +699,7 @@ public class AdminService extends CompositeService implements
 
   private YarnException logAndWrapException(Exception exception, String user,
       String argName, String msg) throws YarnException {
-    LOG.info("Exception " + msg, exception);
+    LOG.warn("Exception " + msg, exception);
     RMAuditLogger.logFailure(user, argName, "", 
         "AdminService", "Exception " + msg);
     return RPCUtil.getRemoteException(exception);


[44/50] [abbrv] hadoop git commit: HDFS-7963. Fix expected tracing spans in TestTracing. Contributed by Masatake Iwasaki.

Posted by zj...@apache.org.
HDFS-7963. Fix expected tracing spans in TestTracing. Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1ab3f28
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1ab3f28
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1ab3f28

Branch: refs/heads/YARN-2928
Commit: b1ab3f284aa99dcb3100285cbd99ebd74dfa2806
Parents: 0a9ce4a
Author: Kihwal Lee <ki...@apache.org>
Authored: Thu Mar 26 08:42:45 2015 -0500
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:49 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../org/apache/hadoop/tracing/TestTracing.java  | 28 ++++++++++++++++----
 2 files changed, 26 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1ab3f28/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 62c2f91..51842ff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1277,6 +1277,9 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7977. NFS couldn't take percentile intervals (brandonli)
 
+    HDFS-7963. Fix expected tracing spans in TestTracing along with HDFS-7054.
+    (Masatake Iwasaki via kihwal)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1ab3f28/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
index 0bbd5b4..3720abe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -88,7 +88,10 @@ public class TestTracing {
       "ClientNamenodeProtocol#fsync",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
       "ClientNamenodeProtocol#complete",
-      "DFSOutputStream",
+      "newStreamForCreate",
+      "DFSOutputStream#writeChunk",
+      "DFSOutputStream#close",
+      "dataStreamer",
       "OpWriteBlockProto",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock",
       "ClientNamenodeProtocol#addBlock"
@@ -102,10 +105,25 @@ public class TestTracing {
     long spanStart = s.getStartTimeMillis();
     long spanEnd = s.getStopTimeMillis();
 
-    // There should only be one trace id as it should all be homed in the
-    // top trace.
-    for (Span span : SetSpanReceiver.SetHolder.spans.values()) {
-      Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
+    // Spans homed in the top trace should have the same trace id.
+    // Spans having multiple parents (e.g. "dataStreamer" added by HDFS-7054)
+    // and their children are the exception.
+    String[] spansInTopTrace = {
+      "testWriteTraceHooks",
+      "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+      "ClientNamenodeProtocol#create",
+      "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+      "ClientNamenodeProtocol#fsync",
+      "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+      "ClientNamenodeProtocol#complete",
+      "newStreamForCreate",
+      "DFSOutputStream#writeChunk",
+      "DFSOutputStream#close",
+    };
+    for (String desc : spansInTopTrace) {
+      for (Span span : map.get(desc)) {
+        Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
+      }
     }
   }
 


[12/50] [abbrv] hadoop git commit: YARN-3393. Getting application(s) goes wrong when app finishes before starting the attempt. Contributed by Zhijie Shen

Posted by zj...@apache.org.
YARN-3393. Getting application(s) goes wrong when app finishes before
starting the attempt. Contributed by Zhijie Shen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4294b6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4294b6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4294b6a

Branch: refs/heads/YARN-2928
Commit: b4294b6a9c4c8b2296293a2444a420a77787a64c
Parents: 62d47c2
Author: Xuan <xg...@apache.org>
Authored: Mon Mar 23 20:33:16 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:44 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 ++
 ...pplicationHistoryManagerOnTimelineStore.java | 13 +++----
 ...pplicationHistoryManagerOnTimelineStore.java | 39 +++++++++++++++++---
 3 files changed, 42 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4294b6a/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b5f57b8..70b81d4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -878,6 +878,9 @@ Release 2.7.0 - UNRELEASED
     YARN-3336. FileSystem memory leak in DelegationTokenRenewer.
     (Zhihai Xu via cnauroth)
 
+    YARN-3393. Getting application(s) goes wrong when app finishes before
+    starting the attempt. (Zhijie Shen via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4294b6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 1010f62..49041c7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -517,15 +517,14 @@ public class ApplicationHistoryManagerOnTimelineStore extends AbstractService
       if (app.appReport.getCurrentApplicationAttemptId() != null) {
         ApplicationAttemptReport appAttempt =
             getApplicationAttempt(app.appReport.getCurrentApplicationAttemptId());
-        if (appAttempt != null) {
-          app.appReport.setHost(appAttempt.getHost());
-          app.appReport.setRpcPort(appAttempt.getRpcPort());
-          app.appReport.setTrackingUrl(appAttempt.getTrackingUrl());
-          app.appReport.setOriginalTrackingUrl(appAttempt.getOriginalTrackingUrl());
-        }
+        app.appReport.setHost(appAttempt.getHost());
+        app.appReport.setRpcPort(appAttempt.getRpcPort());
+        app.appReport.setTrackingUrl(appAttempt.getTrackingUrl());
+        app.appReport.setOriginalTrackingUrl(appAttempt.getOriginalTrackingUrl());
       }
-    } catch (AuthorizationException e) {
+    } catch (AuthorizationException | ApplicationAttemptNotFoundException e) {
       // AuthorizationException is thrown because the user doesn't have access
+      // It's possible that the app is finished before the first attempt is created.
       app.appReport.setDiagnostics(null);
       app.appReport.setCurrentApplicationAttemptId(null);
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4294b6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
index 50a15f1..8cf1240 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java
@@ -76,6 +76,10 @@ public class TestApplicationHistoryManagerOnTimelineStore {
   @BeforeClass
   public static void prepareStore() throws Exception {
     store = createStore(SCALE);
+    TimelineEntities entities = new TimelineEntities();
+    entities.addEntity(createApplicationTimelineEntity(
+        ApplicationId.newInstance(0, SCALE + 1), true, false));
+    store.put(entities);
   }
 
   public static TimelineStore createStore(int scale) throws Exception {
@@ -129,9 +133,9 @@ public class TestApplicationHistoryManagerOnTimelineStore {
       TimelineEntities entities = new TimelineEntities();
       ApplicationId appId = ApplicationId.newInstance(0, i);
       if (i == 2) {
-        entities.addEntity(createApplicationTimelineEntity(appId, true));
+        entities.addEntity(createApplicationTimelineEntity(appId, true, true));
       } else {
-        entities.addEntity(createApplicationTimelineEntity(appId, false));
+        entities.addEntity(createApplicationTimelineEntity(appId, false, true));
       }
       store.put(entities);
       for (int j = 1; j <= scale; ++j) {
@@ -216,6 +220,27 @@ public class TestApplicationHistoryManagerOnTimelineStore {
   }
 
   @Test
+  public void testGetApplicationReportWithNotAttempt() throws Exception {
+    final ApplicationId appId = ApplicationId.newInstance(0, SCALE + 1);
+    ApplicationReport app;
+    if (callerUGI == null) {
+      app = historyManager.getApplication(appId);
+    } else {
+      app =
+          callerUGI.doAs(new PrivilegedExceptionAction<ApplicationReport> () {
+            @Override
+            public ApplicationReport run() throws Exception {
+              return historyManager.getApplication(appId);
+            }
+          });
+    }
+    Assert.assertNotNull(app);
+    Assert.assertEquals(appId, app.getApplicationId());
+    Assert.assertEquals(ApplicationAttemptId.newInstance(appId, -1),
+        app.getCurrentApplicationAttemptId());
+  }
+
+  @Test
   public void testGetApplicationAttemptReport() throws Exception {
     final ApplicationAttemptId appAttemptId =
         ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1);
@@ -308,7 +333,7 @@ public class TestApplicationHistoryManagerOnTimelineStore {
     Collection<ApplicationReport> apps =
         historyManager.getAllApplications().values();
     Assert.assertNotNull(apps);
-    Assert.assertEquals(SCALE, apps.size());
+    Assert.assertEquals(SCALE + 1, apps.size());
   }
 
   @Test
@@ -408,7 +433,7 @@ public class TestApplicationHistoryManagerOnTimelineStore {
   }
 
   private static TimelineEntity createApplicationTimelineEntity(
-      ApplicationId appId, boolean emptyACLs) {
+      ApplicationId appId, boolean emptyACLs, boolean noAttempt) {
     TimelineEntity entity = new TimelineEntity();
     entity.setEntityType(ApplicationMetricsConstants.ENTITY_TYPE);
     entity.setEntityId(appId.toString());
@@ -447,8 +472,10 @@ public class TestApplicationHistoryManagerOnTimelineStore {
         FinalApplicationStatus.UNDEFINED.toString());
     eventInfo.put(ApplicationMetricsConstants.STATE_EVENT_INFO,
         YarnApplicationState.FINISHED.toString());
-    eventInfo.put(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO,
-        ApplicationAttemptId.newInstance(appId, 1));
+    if (noAttempt) {
+      eventInfo.put(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO,
+          ApplicationAttemptId.newInstance(appId, 1));
+    }
     tEvent.setEventInfo(eventInfo);
     entity.addEvent(tEvent);
     return entity;


[18/50] [abbrv] hadoop git commit: HDFS-7961. Trigger full block report after hot swapping disk. Contributed by Eddy Xu.

Posted by zj...@apache.org.
HDFS-7961. Trigger full block report after hot swapping disk. Contributed by Eddy Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7eaa30c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7eaa30c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7eaa30c6

Branch: refs/heads/YARN-2928
Commit: 7eaa30c637ddfff94a84ea2771eacd9c69018521
Parents: 5934949
Author: Andrew Wang <wa...@apache.org>
Authored: Tue Mar 24 09:07:02 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:45 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |  4 ++
 .../datanode/TestDataNodeHotSwapVolumes.java    | 42 ++++++++++++++++++++
 3 files changed, 48 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7eaa30c6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ee9a5db..70be18a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1251,6 +1251,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-7960. The full block report should prune zombie storages even if
     they're not empty. (cmccabe and Eddy Xu via wang)
 
+    HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via wang)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7eaa30c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index e9befb4..d94375e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -634,6 +634,10 @@ public class DataNode extends ReconfigurableBase
       conf.set(DFS_DATANODE_DATA_DIR_KEY,
           Joiner.on(",").join(effectiveVolumes));
       dataDirs = getStorageLocations(conf);
+
+      // Send a full block report to let NN acknowledge the volume changes.
+      triggerBlockReport(new BlockReportOptions.Factory()
+          .setIncremental(false).build());
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7eaa30c6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 2f51d45..f5772e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -34,12 +34,16 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Test;
@@ -59,6 +63,7 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.mockito.Mockito;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.hamcrest.CoreMatchers.anyOf;
@@ -70,6 +75,9 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.timeout;
 
 public class TestDataNodeHotSwapVolumes {
   private static final Log LOG = LogFactory.getLog(
@@ -702,4 +710,38 @@ public class TestDataNodeHotSwapVolumes {
     // More data has been written to this volume.
     assertTrue(restoredVolume.getDfsUsed() > used);
   }
+
+  /** Test that a full block report is sent after hot swapping volumes */
+  @Test(timeout=100000)
+  public void testFullBlockReportAfterRemovingVolumes()
+      throws IOException, ReconfigurationException {
+
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+
+    // Similar to TestTriggerBlockReport, set a really long value for
+    // dfs.heartbeat.interval, so that incremental block reports and heartbeats
+    // won't be sent during this test unless they're triggered
+    // manually.
+    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10800000L);
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1080L);
+
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    cluster.waitActive();
+
+    final DataNode dn = cluster.getDataNodes().get(0);
+    DatanodeProtocolClientSideTranslatorPB spy =
+        DataNodeTestUtils.spyOnBposToNN(dn, cluster.getNameNode());
+
+    // Remove a data dir from datanode
+    File dataDirToKeep = new File(cluster.getDataDirectory(), "data1");
+    dn.reconfigurePropertyImpl(DFS_DATANODE_DATA_DIR_KEY, dataDirToKeep.toString());
+
+    // We should get 1 full report
+    Mockito.verify(spy, timeout(60000).times(1)).blockReport(
+        any(DatanodeRegistration.class),
+        anyString(),
+        any(StorageBlockReport[].class),
+        any(BlockReportContext.class));
+  }
 }

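For reference, the options object the DataNode now builds after a volume swap can be constructed on its own as below. Only the Factory construction appears in the hunk above; the import path (org.apache.hadoop.hdfs.client.BlockReportOptions) and the isIncremental() accessor are assumptions made for the sake of a self-contained sketch.

    import org.apache.hadoop.hdfs.client.BlockReportOptions;

    public class FullBlockReportOptionsSketch {
      public static void main(String[] args) {
        // incremental=false asks for a full block report, as the DataNode
        // does above to let the NN acknowledge the volume changes.
        BlockReportOptions options = new BlockReportOptions.Factory()
            .setIncremental(false)
            .build();
        System.out.println("incremental=" + options.isIncremental());
      }
    }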

[39/50] [abbrv] hadoop git commit: HADOOP-10670. Allow AuthenticationFilters to load secret from signature secret files. Contributed by Kai Zheng.

Posted by zj...@apache.org.
HADOOP-10670. Allow AuthenticationFilters to load secret from signature secret files. Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88494bea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88494bea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88494bea

Branch: refs/heads/YARN-2928
Commit: 88494bea17f7fce2cced3326554258d7c2c9dabe
Parents: e8cdede
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Mar 25 11:12:27 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:48 2015 -0700

----------------------------------------------------------------------
 .../server/AuthenticationFilter.java            | 18 ++++----
 .../server/TestAuthenticationFilter.java        | 38 ++++++++++++++++-
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../AuthenticationFilterInitializer.java        | 26 ------------
 .../security/TestAuthenticationFilter.java      | 12 ------
 ...TimelineAuthenticationFilterInitializer.java | 43 ++------------------
 .../http/RMAuthenticationFilterInitializer.java | 31 --------------
 7 files changed, 55 insertions(+), 116 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88494bea/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index e891ed2..43bb4b0 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -18,12 +18,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
-import org.apache.hadoop.security.authentication.util.Signer;
-import org.apache.hadoop.security.authentication.util.SignerException;
-import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;
-import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
-import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
-import org.apache.hadoop.security.authentication.util.ZKSignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -147,6 +142,8 @@ public class AuthenticationFilter implements Filter {
    */
   public static final String SIGNATURE_SECRET = "signature.secret";
 
+  public static final String SIGNATURE_SECRET_FILE = SIGNATURE_SECRET + ".file";
+
   /**
    * Constant for the configuration property that indicates the validity of the generated token.
    */
@@ -283,7 +280,12 @@ public class AuthenticationFilter implements Filter {
     // fallback to old behavior
     if (signerSecretProviderName == null) {
       String signatureSecret = config.getProperty(SIGNATURE_SECRET, null);
-      if (signatureSecret != null) {
+      String signatureSecretFile = config.getProperty(
+          SIGNATURE_SECRET_FILE, null);
+      // The precedence from high to low : file, inline string, random
+      if (signatureSecretFile != null) {
+        providerClassName = FileSignerSecretProvider.class.getName();
+      } else if (signatureSecret != null) {
         providerClassName = StringSignerSecretProvider.class.getName();
       } else {
         providerClassName = RandomSignerSecretProvider.class.getName();
@@ -295,6 +297,8 @@ public class AuthenticationFilter implements Filter {
         randomSecret = true;
       } else if ("string".equals(signerSecretProviderName)) {
         providerClassName = StringSignerSecretProvider.class.getName();
+      } else if ("file".equals(signerSecretProviderName)) {
+        providerClassName = FileSignerSecretProvider.class.getName();
       } else if ("zookeeper".equals(signerSecretProviderName)) {
         providerClassName = ZKSignerSecretProvider.class.getName();
       } else {

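Restating the fallback order introduced above as a standalone helper (a secret file beats an inline secret, which beats a random one). The three provider classes are the ones referenced in the hunk; FileSignerSecretProvider is assumed to live in the same ...authentication.util package that the wildcard import pulls in.

    import org.apache.hadoop.security.authentication.util.FileSignerSecretProvider;
    import org.apache.hadoop.security.authentication.util.RandomSignerSecretProvider;
    import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;

    public class SecretProviderPrecedenceSketch {
      // Mirrors the fallback branch in AuthenticationFilter#init above.
      static String chooseProvider(String signatureSecret, String signatureSecretFile) {
        if (signatureSecretFile != null) {
          return FileSignerSecretProvider.class.getName();
        } else if (signatureSecret != null) {
          return StringSignerSecretProvider.class.getName();
        }
        return RandomSignerSecretProvider.class.getName();
      }

      public static void main(String[] args) {
        System.out.println(chooseProvider(null, "/etc/hadoop/http-secret.txt"));
        System.out.println(chooseProvider("hadoop", null));
        System.out.println(chooseProvider(null, null));
      }
    }
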
http://git-wip-us.apache.org/repos/asf/hadoop/blob/88494bea/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
index c01c182..a03894b 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
@@ -13,7 +13,10 @@
  */
 package org.apache.hadoop.security.authentication.server;
 
+import java.io.File;
+import java.io.FileWriter;
 import java.io.IOException;
+import java.io.Writer;
 import java.net.HttpCookie;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -197,7 +200,7 @@ public class TestAuthenticationFilter {
       filter.destroy();
     }
 
-    // custom secret
+    // custom secret as inline
     filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
@@ -231,6 +234,39 @@ public class TestAuthenticationFilter {
       filter.destroy();
     }
 
+    // custom secret by file
+    File testDir = new File(System.getProperty("test.build.data",
+        "target/test-dir"));
+    testDir.mkdirs();
+    String secretValue = "hadoop";
+    File secretFile = new File(testDir, "http-secret.txt");
+    Writer writer = new FileWriter(secretFile);
+    writer.write(secretValue);
+    writer.close();
+
+    filter = new AuthenticationFilter();
+    try {
+      FilterConfig config = Mockito.mock(FilterConfig.class);
+      Mockito.when(config.getInitParameter(
+          AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
+      Mockito.when(config.getInitParameter(
+          AuthenticationFilter.SIGNATURE_SECRET_FILE))
+          .thenReturn(secretFile.getAbsolutePath());
+      Mockito.when(config.getInitParameterNames()).thenReturn(
+          new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+              AuthenticationFilter.SIGNATURE_SECRET_FILE)).elements());
+      ServletContext context = Mockito.mock(ServletContext.class);
+      Mockito.when(context.getAttribute(
+          AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+          .thenReturn(null);
+      Mockito.when(config.getServletContext()).thenReturn(context);
+      filter.init(config);
+      Assert.assertFalse(filter.isRandomSecret());
+      Assert.assertFalse(filter.isCustomSignerSecretProvider());
+    } finally {
+      filter.destroy();
+    }
+
     // custom cookie domain and cookie path
     filter = new AuthenticationFilter();
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88494bea/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a01a201..46dfee4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -710,6 +710,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-9329. document native build dependencies in BUILDING.txt (Vijay Bhat
     via Colin P. McCabe)
 
+    HADOOP-10670. Allow AuthenticationFilters to load secret from signature
+    secret files. (Kai Zheng via wheat9)
+
   OPTIMIZATIONS
 
     HADOOP-11323. WritableComparator#compare keeps reference to byte array.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88494bea/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
index 43d1b66..cb3830d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.security;
 
-import com.google.common.base.Charsets;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.conf.Configuration;
@@ -25,11 +24,7 @@ import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 
-import java.io.FileInputStream;
-import java.io.FileReader;
 import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.Reader;
 import java.util.HashMap;
 import java.util.Map;
 
@@ -50,8 +45,6 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
 
   static final String PREFIX = "hadoop.http.authentication.";
 
-  static final String SIGNATURE_SECRET_FILE = AuthenticationFilter.SIGNATURE_SECRET + ".file";
-
   /**
    * Initializes hadoop-auth AuthenticationFilter.
    * <p/>
@@ -77,25 +70,6 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
       }
     }
 
-    String signatureSecretFile = filterConfig.get(SIGNATURE_SECRET_FILE);
-    if (signatureSecretFile == null) {
-      throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);      
-    }
-
-    StringBuilder secret = new StringBuilder();
-    try (Reader reader = new InputStreamReader(
-        new FileInputStream(signatureSecretFile), Charsets.UTF_8)) {
-      int c = reader.read();
-      while (c > -1) {
-        secret.append((char)c);
-        c = reader.read();
-      }
-      reader.close();
-      filterConfig.put(AuthenticationFilter.SIGNATURE_SECRET, secret.toString());
-    } catch (IOException ex) {
-      throw new RuntimeException("Could not read HTTP signature secret file: " + signatureSecretFile);            
-    }
-
     //Resolve _HOST into bind address
     String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
     String principal = filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88494bea/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
index b6aae0e..c8179e2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationFilter.java
@@ -37,17 +37,6 @@ public class TestAuthenticationFilter extends TestCase {
   public void testConfiguration() throws Exception {
     Configuration conf = new Configuration();
     conf.set("hadoop.http.authentication.foo", "bar");
-    
-    File testDir = new File(System.getProperty("test.build.data", 
-                                               "target/test-dir"));
-    testDir.mkdirs();
-    File secretFile = new File(testDir, "http-secret.txt");
-    Writer writer = new FileWriter(new File(testDir, "http-secret.txt"));
-    writer.write("hadoop");
-    writer.close();
-    conf.set(AuthenticationFilterInitializer.PREFIX + 
-             AuthenticationFilterInitializer.SIGNATURE_SECRET_FILE, 
-             secretFile.getAbsolutePath());
 
     conf.set(HttpServer2.BIND_ADDRESS, "barhost");
     
@@ -68,7 +57,6 @@ public class TestAuthenticationFilter extends TestCase {
 
           assertEquals("simple", conf.get("type"));
           assertEquals("36000", conf.get("token.validity"));
-          assertEquals("hadoop", conf.get("signature.secret"));
           assertNull(conf.get("cookie.domain"));
           assertEquals("true", conf.get("simple.anonymous.allowed"));
           assertEquals("HTTP/barhost@LOCALHOST",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88494bea/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
index 1ee8181..a3c136c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/security/TimelineAuthenticationFilterInitializer.java
@@ -18,20 +18,11 @@
 
 package org.apache.hadoop.yarn.server.timeline.security;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.Reader;
-import java.nio.charset.Charset;
-import java.util.HashMap;
-import java.util.Map;
-
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;
 import org.apache.hadoop.http.HttpServer2;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
@@ -42,7 +33,9 @@ import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAu
 import org.apache.hadoop.security.token.delegation.web.PseudoDelegationTokenAuthenticationHandler;
 import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 
-import com.google.common.annotations.VisibleForTesting;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
 
 /**
  * Initializes {@link TimelineAuthenticationFilter} which provides support for
@@ -62,9 +55,6 @@ public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
    */
   public static final String PREFIX = "yarn.timeline-service.http-authentication.";
 
-  private static final String SIGNATURE_SECRET_FILE =
-      TimelineAuthenticationFilter.SIGNATURE_SECRET + ".file";
-
   @VisibleForTesting
   Map<String, String> filterConfig;
 
@@ -106,31 +96,6 @@ public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
       }
     }
 
-    String signatureSecretFile = filterConfig.get(SIGNATURE_SECRET_FILE);
-    if (signatureSecretFile != null) {
-      Reader reader = null;
-      try {
-        StringBuilder secret = new StringBuilder();
-        reader = new InputStreamReader(new FileInputStream(new File(signatureSecretFile)),
-                                      Charset.forName("UTF-8"));
-
-        int c = reader.read();
-        while (c > -1) {
-          secret.append((char) c);
-          c = reader.read();
-        }
-        filterConfig
-            .put(TimelineAuthenticationFilter.SIGNATURE_SECRET,
-                secret.toString());
-      } catch (IOException ex) {
-        throw new RuntimeException(
-            "Could not read HTTP signature secret file: "
-                + signatureSecretFile);
-      } finally {
-        IOUtils.closeStream(reader);
-      }
-    }
-
     String authType = filterConfig.get(AuthenticationFilter.AUTH_TYPE);
     if (authType.equals(PseudoAuthenticationHandler.TYPE)) {
       filterConfig.put(AuthenticationFilter.AUTH_TYPE,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88494bea/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
index a62cda3..9fc1334 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/http/RMAuthenticationFilterInitializer.java
@@ -43,14 +43,11 @@ import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
 public class RMAuthenticationFilterInitializer extends FilterInitializer {
 
   String configPrefix;
-  String signatureSecretFileProperty;
   String kerberosPrincipalProperty;
   String cookiePath;
 
   public RMAuthenticationFilterInitializer() {
     this.configPrefix = "hadoop.http.authentication.";
-    this.signatureSecretFileProperty =
-        AuthenticationFilter.SIGNATURE_SECRET + ".file";
     this.kerberosPrincipalProperty = KerberosAuthenticationHandler.PRINCIPAL;
     this.cookiePath = "/";
   }
@@ -77,34 +74,6 @@ public class RMAuthenticationFilterInitializer extends FilterInitializer {
       }
     }
 
-    String signatureSecretFile = filterConfig.get(signatureSecretFileProperty);
-    if (signatureSecretFile != null) {
-      Reader reader = null;
-      try {
-        StringBuilder secret = new StringBuilder();
-        reader =
-            new InputStreamReader(new FileInputStream(signatureSecretFile),
-              "UTF-8");
-        int c = reader.read();
-        while (c > -1) {
-          secret.append((char) c);
-          c = reader.read();
-        }
-        filterConfig.put(AuthenticationFilter.SIGNATURE_SECRET,
-          secret.toString());
-      } catch (IOException ex) {
-        // if running in non-secure mode, this filter only gets added
-        // because the user has not setup his own filter so just generate
-        // a random secret. in secure mode, the user needs to setup security
-        if (UserGroupInformation.isSecurityEnabled()) {
-          throw new RuntimeException(
-            "Could not read HTTP signature secret file: " + signatureSecretFile);
-        }
-      } finally {
-        IOUtils.closeQuietly(reader);
-      }
-    }
-
     // Resolve _HOST into bind address
     String bindAddress = conf.get(HttpServer2.BIND_ADDRESS);
     String principal = filterConfig.get(kerberosPrincipalProperty);
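
All three initializers above drop the same hand-rolled block that read the signature-secret file into the filter config in clear text; that responsibility now sits with the signer secret provider inside AuthenticationFilter (see the HADOOP-11748 commit later in this digest). For reference, a minimal sketch of the file read those removed blocks performed; the class and method names are illustrative and not part of the patch:

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Paths;

    final class SecretFileReaderSketch {
      // Read the whole signature-secret file as UTF-8 text -- the same thing the
      // removed InputStreamReader loops did character by character.
      static String readSecret(String signatureSecretFile) throws IOException {
        return new String(Files.readAllBytes(Paths.get(signatureSecretFile)),
            StandardCharsets.UTF_8);
      }
    }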


[08/50] [abbrv] hadoop git commit: HDFS-7942. NFS: support regexp grouping in nfs.exports.allowed.hosts. Contributed by Brandon Li

Posted by zj...@apache.org.
HDFS-7942. NFS: support regexp grouping in nfs.exports.allowed.hosts. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b44e8df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b44e8df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b44e8df

Branch: refs/heads/YARN-2928
Commit: 5b44e8df159736e1a276134ada9241b12ffcbe9f
Parents: 3ecc7dc
Author: Brandon Li <br...@apache.org>
Authored: Mon Mar 23 10:06:47 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:43 2015 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/nfs/NfsExports.java  |  2 +-
 .../org/apache/hadoop/nfs/TestNfsExports.java   | 22 ++++++++++++++++++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 ++
 .../src/site/markdown/HdfsNfsGateway.md         |  8 ++++---
 4 files changed, 28 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b44e8df/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
index 8b6b46a..af96565 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
@@ -391,7 +391,7 @@ public class NfsExports {
       return new CIDRMatch(privilege,
           new SubnetUtils(pair[0], pair[1]).getInfo());
     } else if (host.contains("*") || host.contains("?") || host.contains("[")
-        || host.contains("]")) {
+        || host.contains("]") || host.contains("(") || host.contains(")")) {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Using Regex match for '" + host + "' and " + privilege);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b44e8df/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
index 349e82a..542975d 100644
--- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
+++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java
@@ -23,8 +23,8 @@ import org.junit.Test;
 
 public class TestNfsExports {
 
-  private final String address1 = "192.168.0.1";
-  private final String address2 = "10.0.0.1";
+  private final String address1 = "192.168.0.12";
+  private final String address2 = "10.0.0.12";
   private final String hostname1 = "a.b.com";
   private final String hostname2 = "a.b.org";
   
@@ -165,6 +165,24 @@ public class TestNfsExports {
   }
   
   @Test
+  public void testRegexGrouping() {
+    NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod,
+        "192.168.0.(12|34)");
+    Assert.assertEquals(AccessPrivilege.READ_ONLY,
+        matcher.getAccessPrivilege(address1, hostname1));
+    // address1 will hit the cache
+    Assert.assertEquals(AccessPrivilege.READ_ONLY,
+        matcher.getAccessPrivilege(address1, hostname2));
+
+    matcher = new NfsExports(CacheSize, ExpirationPeriod, "\\w*.a.b.com");
+    Assert.assertEquals(AccessPrivilege.READ_ONLY,
+        matcher.getAccessPrivilege("1.2.3.4", "web.a.b.com"));
+    // address "1.2.3.4" will hit the cache
+    Assert.assertEquals(AccessPrivilege.READ_ONLY,
+        matcher.getAccessPrivilege("1.2.3.4", "email.a.b.org"));
+  }
+  
+  @Test
   public void testMultiMatchers() throws Exception {
     long shortExpirationPeriod = 1 * 1000 * 1000 * 1000; // 1s
     NfsExports matcher = new NfsExports(CacheSize, shortExpirationPeriod, 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b44e8df/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e82c4c4..8c99876 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1232,6 +1232,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now()
     (Vinayakumar B via kihwal)
 
+    HDFS-7942. NFS: support regexp grouping in nfs.exports.allowed.hosts (brandonli)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b44e8df/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
index e6666d4..b7e1733 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsNfsGateway.md
@@ -144,10 +144,12 @@ It's strongly recommended for the users to update a few configuration properties
 *   By default, the export can be mounted by any client. To better control the access,
     users can update the following property. The value string contains machine name and
     access privilege, separated by whitespace
-    characters. The machine name format can be a single host, a Java regular expression, or an IPv4 address. The access
+    characters. The machine name format can be a single host, a "*", a Java regular expression, or an IPv4 address. The access
     privilege uses rw or ro to specify read/write or read-only access of the machines to exports. If the access privilege is not provided, the default is read-only. Entries are separated by ";".
-    For example: "192.168.0.0/22 rw ; host.\*\\.example\\.com ; host1.test.org ro;". Only the NFS gateway needs to restart after
-    this property is updated.
+    For example: "192.168.0.0/22 rw ; \\\\w\*\\\\.example\\\\.com ; host1.test.org ro;". Only the NFS gateway needs to restart after
+    this property is updated. Note that the Java regular expression here is different from the regular expression used in the
+    Linux NFS export table; for example, use "\\\\w\*\\\\.example\\\\.com" instead of "\*.example.com", and "192\\\\.168\\\\.0\\\\.(11|22)"
+    instead of "192.168.0.[11|22]".
 
         <property>
           <name>nfs.exports.allowed.hosts</name>
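
A quick usage sketch of the new grouping support, modeled on the TestNfsExports case above. It assumes the same package as NfsExports (the test calls the three-argument constructor directly), and the cache-size and expiration values are illustrative rather than Hadoop defaults:

    // Assumed to live in org.apache.hadoop.nfs, like the test above.
    package org.apache.hadoop.nfs;

    public class NfsExportsGroupingSketch {
      public static void main(String[] args) {
        // Hosts spec with a regex group: matches 192.168.0.12 and 192.168.0.34 only.
        NfsExports matcher = new NfsExports(1024, 15L * 60 * 1000 * 1000 * 1000,
            "192.168.0.(12|34)");
        // No privilege is given in the spec, so a match defaults to read-only.
        System.out.println(matcher.getAccessPrivilege("192.168.0.12", "a.b.com"));
        System.out.println(matcher.getAccessPrivilege("192.168.0.34", "a.b.org"));
      }
    }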


[46/50] [abbrv] hadoop git commit: YARN-3400. [JDK 8] Build Failure due to unreported exceptions in RPCUtil (rkanter)

Posted by zj...@apache.org.
YARN-3400. [JDK 8] Build Failure due to unreported exceptions in RPCUtil (rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/555cd967
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/555cd967
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/555cd967

Branch: refs/heads/YARN-2928
Commit: 555cd96772dcb4bed5357f858db17e4ca95acf91
Parents: bd1081b
Author: Robert Kanter <rk...@apache.org>
Authored: Thu Mar 26 11:00:20 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:49 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +++
 .../org/apache/hadoop/yarn/ipc/RPCUtil.java     | 23 ++++++++++++++++----
 2 files changed, 22 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/555cd967/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bbeb0d8..cd39b1a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -158,6 +158,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3383. AdminService should use "warn" instead of "info" to log exception 
     when operation fails. (Li Lu via wangda)
 
+    YARN-3400. [JDK 8] Build Failure due to unreported exceptions in
+    RPCUtil (rkanter)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/555cd967/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/RPCUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/RPCUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/RPCUtil.java
index ada0669..1086ff0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/RPCUtil.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/RPCUtil.java
@@ -70,6 +70,21 @@ public class RPCUtil {
     }
   }
 
+  private static <T extends YarnException> T instantiateYarnException(
+      Class<? extends T> cls, RemoteException re) throws RemoteException {
+    return instantiateException(cls, re);
+  }
+
+  private static <T extends IOException> T instantiateIOException(
+      Class<? extends T> cls, RemoteException re) throws RemoteException {
+    return instantiateException(cls, re);
+  }
+
+  private static <T extends RuntimeException> T instantiateRuntimeException(
+      Class<? extends T> cls, RemoteException re) throws RemoteException {
+    return instantiateException(cls, re);
+  }
+
   /**
    * Utility method that unwraps and returns appropriate exceptions.
    * 
@@ -94,17 +109,17 @@ public class RPCUtil {
           // Assume this to be a new exception type added to YARN. This isn't
           // absolutely correct since the RPC layer could add an exception as
           // well.
-          throw instantiateException(YarnException.class, re);
+          throw instantiateYarnException(YarnException.class, re);
         }
 
         if (YarnException.class.isAssignableFrom(realClass)) {
-          throw instantiateException(
+          throw instantiateYarnException(
               realClass.asSubclass(YarnException.class), re);
         } else if (IOException.class.isAssignableFrom(realClass)) {
-          throw instantiateException(realClass.asSubclass(IOException.class),
+          throw instantiateIOException(realClass.asSubclass(IOException.class),
               re);
         } else if (RuntimeException.class.isAssignableFrom(realClass)) {
-          throw instantiateException(
+          throw instantiateRuntimeException(
               realClass.asSubclass(RuntimeException.class), re);
         } else {
           throw re;
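
The bounded wrapper methods above exist only to pin the inferred type parameter at the throw sites, so the JDK 8 compiler can see that the thrown value is a YarnException, IOException, or RuntimeException rather than an unreported checked type. A small self-contained sketch of the same pattern, with illustrative names (this is not the actual RPCUtil code):

    import java.io.IOException;

    public final class ThrowHelperSketch {
      // Generic factory: builds an exception of the requested class via reflection.
      private static <T extends Exception> T instantiate(Class<? extends T> cls, String msg)
          throws IOException {
        try {
          return cls.getConstructor(String.class).newInstance(msg);
        } catch (ReflectiveOperationException e) {
          throw new IOException("could not instantiate " + cls.getName(), e);
        }
      }

      // Bounded wrapper: narrows T to RuntimeException, so the throw site below
      // needs no extra throws clause for an inferred checked exception type.
      private static <T extends RuntimeException> T instantiateRuntime(
          Class<? extends T> cls, String msg) throws IOException {
        return instantiate(cls, msg);
      }

      public static void rethrow(Class<? extends RuntimeException> cls, String msg)
          throws IOException {
        throw instantiateRuntime(cls, msg);
      }

      public static void main(String[] args) throws IOException {
        try {
          rethrow(IllegalStateException.class, "demo");
        } catch (IllegalStateException e) {
          System.out.println("caught: " + e.getMessage());
        }
      }
    }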


[30/50] [abbrv] hadoop git commit: HADOOP-11014. Potential resource leak in JavaKeyStoreProvider due to unclosed stream. (ozawa)

Posted by zj...@apache.org.
HADOOP-11014. Potential resource leak in JavaKeyStoreProvider due to unclosed stream. (ozawa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7971742d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7971742d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7971742d

Branch: refs/heads/YARN-2928
Commit: 7971742d6bad5521dcbd6f4cded14400cdd63068
Parents: 86682fb
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Mar 25 16:59:40 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:47 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt      |  3 +++
 .../hadoop/crypto/key/JavaKeyStoreProvider.java      | 15 ++++++++-------
 .../hadoop/security/alias/JavaKeyStoreProvider.java  | 15 +++++++--------
 3 files changed, 18 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7971742d/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index e3cadf5..0c311df 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1142,6 +1142,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11609. Correct credential commands info in
     CommandsManual.html#credential. (Varun Saxena via ozawa)
 
+    HADOOP-11014. Potential resource leak in JavaKeyStoreProvider due to
+    unclosed stream. (ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7971742d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
index 091cab5..c6d60a3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java
@@ -22,6 +22,7 @@ import com.google.common.base.Preconditions;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -303,9 +304,11 @@ public class JavaKeyStoreProvider extends KeyProvider {
 
   private FsPermission loadFromPath(Path p, char[] password)
       throws IOException, NoSuchAlgorithmException, CertificateException {
-    FileStatus s = fs.getFileStatus(p);
-    keyStore.load(fs.open(p), password);
-    return s.getPermission();
+    try (FSDataInputStream in = fs.open(p)) {
+      FileStatus s = fs.getFileStatus(p);
+      keyStore.load(in, password);
+      return s.getPermission();
+    }
   }
 
   private Path constructNewPath(Path path) {
@@ -599,9 +602,8 @@ public class JavaKeyStoreProvider extends KeyProvider {
   }
 
   protected void writeToNew(Path newPath) throws IOException {
-    FSDataOutputStream out =
-        FileSystem.create(fs, newPath, permissions);
-    try {
+    try (FSDataOutputStream out =
+        FileSystem.create(fs, newPath, permissions);) {
       keyStore.store(out, password);
     } catch (KeyStoreException e) {
       throw new IOException("Can't store keystore " + this, e);
@@ -612,7 +614,6 @@ public class JavaKeyStoreProvider extends KeyProvider {
       throw new IOException(
           "Certificate exception storing keystore " + this, e);
     }
-    out.close();
   }
 
   protected boolean backupToOld(Path oldPath)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7971742d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
index 05958a0..5e5cebb 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
@@ -22,6 +22,7 @@ import org.apache.commons.io.Charsets;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -98,11 +99,8 @@ public class JavaKeyStoreProvider extends CredentialProvider {
         ClassLoader cl = Thread.currentThread().getContextClassLoader();
         URL pwdFile = cl.getResource(pwFile);
         if (pwdFile != null) {
-          InputStream is = pwdFile.openStream();
-          try {
+          try (InputStream is = pwdFile.openStream()) {
             password = IOUtils.toString(is).trim().toCharArray();
-          } finally {
-            is.close();
           }
         }
       }
@@ -110,6 +108,7 @@ public class JavaKeyStoreProvider extends CredentialProvider {
     if (password == null) {
       password = KEYSTORE_PASSWORD_DEFAULT.toCharArray();
     }
+
     try {
       keyStore = KeyStore.getInstance(SCHEME_NAME);
       if (fs.exists(path)) {
@@ -118,7 +117,9 @@ public class JavaKeyStoreProvider extends CredentialProvider {
         FileStatus s = fs.getFileStatus(path);
         permissions = s.getPermission();
 
-        keyStore.load(fs.open(path), password);
+        try (FSDataInputStream in = fs.open(path)) {
+          keyStore.load(in, password);
+        }
       } else {
         permissions = new FsPermission("700");
         // required to create an empty keystore. *sigh*
@@ -257,8 +258,7 @@ public class JavaKeyStoreProvider extends CredentialProvider {
         return;
       }
       // write out the keystore
-      FSDataOutputStream out = FileSystem.create(fs, path, permissions);
-      try {
+      try (FSDataOutputStream out = FileSystem.create(fs, path, permissions)) {
         keyStore.store(out, password);
       } catch (KeyStoreException e) {
         throw new IOException("Can't store keystore " + this, e);
@@ -268,7 +268,6 @@ public class JavaKeyStoreProvider extends CredentialProvider {
         throw new IOException("Certificate exception storing keystore " + this,
             e);
       }
-      out.close();
       changed = false;
     }
     finally {
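
Both providers now rely on try-with-resources so the keystore streams are closed even when load() or store() throws, which the manual open/close missed. A minimal standalone sketch of the same pattern against the plain java.security.KeyStore API (illustrative class, not the Hadoop provider):

    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.security.KeyStore;

    public class KeystoreIoSketch {
      public static KeyStore load(Path p, char[] password) throws Exception {
        KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType());
        // The stream is closed on every exit path, including when load() throws.
        try (InputStream in = Files.newInputStream(p)) {
          ks.load(in, password);
        }
        return ks;
      }

      public static void store(KeyStore ks, Path p, char[] password) throws Exception {
        // No explicit out.close() needed; a failure in store() no longer leaks the stream.
        try (OutputStream out = Files.newOutputStream(p)) {
          ks.store(out, password);
        }
      }
    }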


[04/50] [abbrv] hadoop git commit: YARN-2868. FairScheduler: Metric for latency to allocate first container for an application. (Ray Chiang via kasha)

Posted by zj...@apache.org.
YARN-2868. FairScheduler: Metric for latency to allocate first container for an application. (Ray Chiang via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bf393bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bf393bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bf393bc

Branch: refs/heads/YARN-2928
Commit: 2bf393bc770dfdf9e424cce031c4d9a59c93ad56
Parents: bc9adb6
Author: Karthik Kambatla <ka...@apache.org>
Authored: Mon Mar 23 14:07:05 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:43 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                    |  3 +++
 .../resourcemanager/scheduler/QueueMetrics.java    |  8 +++++++-
 .../scheduler/SchedulerApplicationAttempt.java     | 17 +++++++++++++++++
 .../scheduler/fair/FairScheduler.java              | 11 ++++++++++-
 4 files changed, 37 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bf393bc/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8ea93d3..b5f57b8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -123,6 +123,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3350. YARN RackResolver spams logs with messages at info level. 
     (Wilfred Spiegelenburg via junping_du)
 
+    YARN-2868. FairScheduler: Metric for latency to allocate first container 
+    for an application. (Ray Chiang via kasha)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bf393bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
index 507b798..58b1ed1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterInt;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
+import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -74,6 +75,7 @@ public class QueueMetrics implements MetricsSource {
   @Metric("# of reserved containers") MutableGaugeInt reservedContainers;
   @Metric("# of active users") MutableGaugeInt activeUsers;
   @Metric("# of active applications") MutableGaugeInt activeApplications;
+  @Metric("App Attempt First Container Allocation Delay") MutableRate appAttemptFirstContainerAllocationDelay;
   private final MutableGaugeInt[] runningTime;
   private TimeBucketMetrics<ApplicationId> runBuckets;
 
@@ -462,7 +464,11 @@ public class QueueMetrics implements MetricsSource {
       parent.deactivateApp(user);
     }
   }
-  
+
+  public void addAppAttemptFirstContainerAllocationDelay(long latency) {
+    appAttemptFirstContainerAllocationDelay.add(latency);
+  }
+
   public int getAppsSubmitted() {
     return appsSubmitted.value();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bf393bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
index 799a5c1..bf5641d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
@@ -25,6 +25,7 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.lang.time.DateUtils;
 import org.apache.commons.logging.Log;
@@ -93,6 +94,8 @@ public class SchedulerApplicationAttempt {
   private LogAggregationContext logAggregationContext;
   
   protected ResourceUsage attemptResourceUsage = new ResourceUsage();
+  private AtomicLong firstAllocationRequestSentTime = new AtomicLong(0);
+  private AtomicLong firstContainerAllocatedTime = new AtomicLong(0);
 
   protected List<RMContainer> newlyAllocatedContainers = 
       new ArrayList<RMContainer>();
@@ -648,4 +651,18 @@ public class SchedulerApplicationAttempt {
           Resources.clone(headroom));
     }
   }
+
+  public void recordContainerRequestTime(long value) {
+    firstAllocationRequestSentTime.compareAndSet(0, value);
+  }
+
+  public void recordContainerAllocationTime(long value) {
+    if (firstContainerAllocatedTime.compareAndSet(0, value)) {
+      long timediff = firstContainerAllocatedTime.longValue() -
+          firstAllocationRequestSentTime.longValue();
+      if (timediff > 0) {
+        queue.getMetrics().addAppAttemptFirstContainerAllocationDelay(timediff);
+      }
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bf393bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 98a8de2..04c7f70 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -898,6 +898,9 @@ public class FairScheduler extends
         clusterResource, minimumAllocation, getMaximumResourceCapability(),
         incrAllocation);
 
+    // Record container allocation start time
+    application.recordContainerRequestTime(getClock().getTime());
+
     // Set amResource for this app
     if (!application.getUnmanagedAM() && ask.size() == 1
         && application.getLiveContainers().isEmpty()) {
@@ -931,7 +934,7 @@ public class FairScheduler extends
         LOG.debug("Preempting " + application.getPreemptionContainers().size()
             + " container(s)");
       }
-      
+
       Set<ContainerId> preemptionContainerIds = new HashSet<ContainerId>();
       for (RMContainer container : application.getPreemptionContainers()) {
         preemptionContainerIds.add(container.getContainerId());
@@ -940,6 +943,12 @@ public class FairScheduler extends
       application.updateBlacklist(blacklistAdditions, blacklistRemovals);
       ContainersAndNMTokensAllocation allocation =
           application.pullNewlyAllocatedContainersAndNMTokens();
+
+      // Record container allocation time
+      if (!(allocation.getContainerList().isEmpty())) {
+        application.recordContainerAllocationTime(getClock().getTime());
+      }
+
       Resource headroom = application.getHeadroom();
       application.setApplicationHeadroomForMetrics(headroom);
       return new Allocation(allocation.getContainerList(), headroom,
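
The latency metric hinges on AtomicLong.compareAndSet(0, now): only the first request time and the first allocation time are ever recorded, so the delay is computed exactly once per application attempt. A stripped-down sketch of that pattern outside the scheduler (illustrative class name):

    import java.util.concurrent.atomic.AtomicLong;

    public class FirstAllocationLatencySketch {
      private final AtomicLong firstRequestTime = new AtomicLong(0);
      private final AtomicLong firstAllocationTime = new AtomicLong(0);

      // Only the first call records a timestamp; later calls are no-ops.
      public void onContainerRequest(long now) {
        firstRequestTime.compareAndSet(0, now);
      }

      // Returns the request-to-allocation delay exactly once, -1 afterwards.
      public long onContainerAllocated(long now) {
        if (firstAllocationTime.compareAndSet(0, now)) {
          long delay = firstAllocationTime.get() - firstRequestTime.get();
          return delay > 0 ? delay : -1;
        }
        return -1;
      }

      public static void main(String[] args) {
        FirstAllocationLatencySketch m = new FirstAllocationLatencySketch();
        m.onContainerRequest(1000L);
        m.onContainerRequest(1500L);                        // ignored, first timestamp kept
        System.out.println(m.onContainerAllocated(1800L));  // 800
        System.out.println(m.onContainerAllocated(2500L));  // -1, already recorded
      }
    }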


[50/50] [abbrv] hadoop git commit: HADOOP-11748. The secrets of auth cookies should not be specified in configuration in clear text. Contributed by Li Lu and Haohui Mai.

Posted by zj...@apache.org.
HADOOP-11748. The secrets of auth cookies should not be specified in configuration in clear text. Contributed by Li Lu and Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/526c90e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/526c90e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/526c90e0

Branch: refs/heads/YARN-2928
Commit: 526c90e0dcc3427f295693d3b4f277042c61218e
Parents: fee5961
Author: Haohui Mai <wh...@apache.org>
Authored: Thu Mar 26 16:29:36 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:50 2015 -0700

----------------------------------------------------------------------
 .../server/AuthenticationFilter.java            |   7 +-
 .../util/StringSignerSecretProvider.java        |  53 ------
 .../server/TestAuthenticationFilter.java        | 173 ++++++-------------
 .../util/StringSignerSecretProvider.java        |  55 ++++++
 .../util/StringSignerSecretProviderCreator.java |  33 ++++
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml  |   6 +
 .../hadoop/fs/http/server/TestHttpFSServer.java |   6 +-
 8 files changed, 157 insertions(+), 179 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/526c90e0/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index 43bb4b0..5c22fce 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -279,14 +279,11 @@ public class AuthenticationFilter implements Filter {
             = config.getProperty(SIGNER_SECRET_PROVIDER, null);
     // fallback to old behavior
     if (signerSecretProviderName == null) {
-      String signatureSecret = config.getProperty(SIGNATURE_SECRET, null);
       String signatureSecretFile = config.getProperty(
           SIGNATURE_SECRET_FILE, null);
-      // The precedence from high to low : file, inline string, random
+      // The precedence from high to low : file, random
       if (signatureSecretFile != null) {
         providerClassName = FileSignerSecretProvider.class.getName();
-      } else if (signatureSecret != null) {
-        providerClassName = StringSignerSecretProvider.class.getName();
       } else {
         providerClassName = RandomSignerSecretProvider.class.getName();
         randomSecret = true;
@@ -295,8 +292,6 @@ public class AuthenticationFilter implements Filter {
       if ("random".equals(signerSecretProviderName)) {
         providerClassName = RandomSignerSecretProvider.class.getName();
         randomSecret = true;
-      } else if ("string".equals(signerSecretProviderName)) {
-        providerClassName = StringSignerSecretProvider.class.getName();
       } else if ("file".equals(signerSecretProviderName)) {
         providerClassName = FileSignerSecretProvider.class.getName();
       } else if ("zookeeper".equals(signerSecretProviderName)) {
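
With the inline string secret gone, the precedence among the built-in providers is now file first, then random, so a fixed secret has to come from a secret file (or a custom signer.secret.provider). A small sketch of wiring that up through Configuration; the hadoop.http.authentication.* keys follow the initializer prefix used elsewhere in this patch series and should be treated as an assumption here:

    import org.apache.hadoop.conf.Configuration;

    public class HttpAuthSecretConfigSketch {
      // Assumed property keys; the filter initializer strips its prefix when
      // copying them into the AuthenticationFilter config.
      public static Configuration secretFileConfig(String secretFilePath) {
        Configuration conf = new Configuration();
        conf.set("hadoop.http.authentication.type", "simple");
        conf.set("hadoop.http.authentication.signature.secret.file", secretFilePath);
        return conf;
      }
    }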

http://git-wip-us.apache.org/repos/asf/hadoop/blob/526c90e0/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
deleted file mode 100644
index 57ddd37..0000000
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License. See accompanying LICENSE file.
- */
-package org.apache.hadoop.security.authentication.util;
-
-import java.nio.charset.Charset;
-import java.util.Properties;
-import javax.servlet.ServletContext;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-
-/**
- * A SignerSecretProvider that simply creates a secret based on a given String.
- */
-@InterfaceStability.Unstable
-@InterfaceAudience.Private
-public class StringSignerSecretProvider extends SignerSecretProvider {
-
-  private byte[] secret;
-  private byte[][] secrets;
-
-  public StringSignerSecretProvider() {}
-
-  @Override
-  public void init(Properties config, ServletContext servletContext,
-          long tokenValidity) throws Exception {
-    String signatureSecret = config.getProperty(
-            AuthenticationFilter.SIGNATURE_SECRET, null);
-    secret = signatureSecret.getBytes(Charset.forName("UTF-8"));
-    secrets = new byte[][]{secret};
-  }
-
-  @Override
-  public byte[] getCurrentSecret() {
-    return secret;
-  }
-
-  @Override
-  public byte[][] getAllSecrets() {
-    return secrets;
-  }
-}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/526c90e0/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
index a03894b..26c10a9 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.util.Signer;
 import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
-import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -158,15 +158,15 @@ public class TestAuthenticationFilter {
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
-      Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn(
+      Mockito.when(config.getInitParameter(
+          AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn(
           (new Long(TOKEN_VALIDITY_SEC)).toString());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
-                                 AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
+          new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+                                           AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
       ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
+      Mockito.when(context.getAttribute(AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+          .thenReturn(null);
       Mockito.when(config.getServletContext()).thenReturn(context);
       filter.init(config);
       Assert.assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
@@ -179,27 +179,6 @@ public class TestAuthenticationFilter {
       filter.destroy();
     }
 
-    // string secret
-    filter = new AuthenticationFilter();
-    try {
-      FilterConfig config = Mockito.mock(FilterConfig.class);
-      Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
-      Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
-      Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
-                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
-      filter.init(config);
-      Assert.assertFalse(filter.isRandomSecret());
-      Assert.assertFalse(filter.isCustomSignerSecretProvider());
-    } finally {
-      filter.destroy();
-    }
-
     // custom secret as inline
     filter = new AuthenticationFilter();
     try {
@@ -278,11 +257,7 @@ public class TestAuthenticationFilter {
         new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                  AuthenticationFilter.COOKIE_DOMAIN,
                                  AuthenticationFilter.COOKIE_PATH)).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      getMockedServletContextWithStringSigner(config);
       filter.init(config);
       Assert.assertEquals(".foo.com", filter.getCookieDomain());
       Assert.assertEquals("/bar", filter.getCookiePath());
@@ -303,11 +278,7 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      getMockedServletContextWithStringSigner(config);
       filter.init(config);
       Assert.assertTrue(DummyAuthenticationHandler.init);
     } finally {
@@ -345,11 +316,7 @@ public class TestAuthenticationFilter {
       Mockito.when(config.getInitParameterNames()).thenReturn(
           new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
               AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      getMockedServletContextWithStringSigner(config);
 
       filter.init(config);
       Assert.assertEquals(PseudoAuthenticationHandler.class, 
@@ -372,11 +339,7 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      getMockedServletContextWithStringSigner(config);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -392,6 +355,7 @@ public class TestAuthenticationFilter {
   @Test
   public void testGetToken() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
+
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
       Mockito.when(config.getInitParameter("management.operation.return")).
@@ -404,21 +368,13 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      SignerSecretProvider secretProvider =
+          getMockedServletContextWithStringSigner(config);
       filter.init(config);
 
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      StringSignerSecretProvider secretProvider
-              = new StringSignerSecretProvider();
-      Properties secretProviderProps = new Properties();
-      secretProviderProps.setProperty(
-              AuthenticationFilter.SIGNATURE_SECRET, "secret");
-      secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
+
       Signer signer = new Signer(secretProvider);
       String tokenSigned = signer.sign(token.toString());
 
@@ -448,18 +404,14 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      getMockedServletContextWithStringSigner(config);
       filter.init(config);
 
       AuthenticationToken token =
           new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
-      StringSignerSecretProvider secretProvider
-              = new StringSignerSecretProvider();
+      SignerSecretProvider secretProvider =
+          StringSignerSecretProviderCreator.newStringSignerSecretProvider();
       Properties secretProviderProps = new Properties();
       secretProviderProps.setProperty(
               AuthenticationFilter.SIGNATURE_SECRET, "secret");
@@ -500,17 +452,13 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      getMockedServletContextWithStringSigner(config);
       filter.init(config);
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      StringSignerSecretProvider secretProvider
-              = new StringSignerSecretProvider();
+      SignerSecretProvider secretProvider =
+          StringSignerSecretProviderCreator.newStringSignerSecretProvider();
       Properties secretProviderProps = new Properties();
       secretProviderProps.setProperty(
               AuthenticationFilter.SIGNATURE_SECRET, "secret");
@@ -536,6 +484,23 @@ public class TestAuthenticationFilter {
     }
   }
 
+  private static SignerSecretProvider getMockedServletContextWithStringSigner(
+      FilterConfig config) throws Exception {
+    Properties secretProviderProps = new Properties();
+    secretProviderProps.setProperty(AuthenticationFilter.SIGNATURE_SECRET,
+                                    "secret");
+    SignerSecretProvider secretProvider =
+        StringSignerSecretProviderCreator.newStringSignerSecretProvider();
+    secretProvider.init(secretProviderProps, null, TOKEN_VALIDITY_SEC);
+
+    ServletContext context = Mockito.mock(ServletContext.class);
+    Mockito.when(context.getAttribute(
+            AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
+            .thenReturn(secretProvider);
+    Mockito.when(config.getServletContext()).thenReturn(context);
+    return secretProvider;
+  }
+
   @Test
   public void testDoFilterNotAuthenticated() throws Exception {
     AuthenticationFilter filter = new AuthenticationFilter();
@@ -549,11 +514,7 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      getMockedServletContextWithStringSigner(config);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -603,11 +564,7 @@ public class TestAuthenticationFilter {
             AuthenticationFilter.AUTH_TOKEN_VALIDITY,
             AuthenticationFilter.SIGNATURE_SECRET, "management.operation" +
             ".return", "expired.token")).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+    getMockedServletContextWithStringSigner(config);
 
     if (withDomainPath) {
       Mockito.when(config.getInitParameter(AuthenticationFilter
@@ -661,8 +618,8 @@ public class TestAuthenticationFilter {
         Mockito.verify(chain).doFilter(Mockito.any(ServletRequest.class),
                 Mockito.any(ServletResponse.class));
 
-        StringSignerSecretProvider secretProvider
-                = new StringSignerSecretProvider();
+        SignerSecretProvider secretProvider =
+            StringSignerSecretProviderCreator.newStringSignerSecretProvider();
         Properties secretProviderProps = new Properties();
         secretProviderProps.setProperty(
                 AuthenticationFilter.SIGNATURE_SECRET, "secret");
@@ -734,11 +691,7 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      getMockedServletContextWithStringSigner(config);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -746,8 +699,8 @@ public class TestAuthenticationFilter {
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "t");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      StringSignerSecretProvider secretProvider
-              = new StringSignerSecretProvider();
+      SignerSecretProvider secretProvider =
+          StringSignerSecretProviderCreator.newStringSignerSecretProvider();
       Properties secretProviderProps = new Properties();
       secretProviderProps.setProperty(
               AuthenticationFilter.SIGNATURE_SECRET, "secret");
@@ -795,11 +748,7 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      getMockedServletContextWithStringSigner(config);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -863,11 +812,7 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      getMockedServletContextWithStringSigner(config);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -875,8 +820,8 @@ public class TestAuthenticationFilter {
 
       AuthenticationToken token = new AuthenticationToken("u", "p", DummyAuthenticationHandler.TYPE);
       token.setExpires(System.currentTimeMillis() - TOKEN_VALIDITY_SEC);
-      StringSignerSecretProvider secretProvider
-              = new StringSignerSecretProvider();
+      SignerSecretProvider secretProvider =
+          StringSignerSecretProviderCreator.newStringSignerSecretProvider();
       Properties secretProviderProps = new Properties();
       secretProviderProps.setProperty(
               AuthenticationFilter.SIGNATURE_SECRET, secret);
@@ -942,11 +887,7 @@ public class TestAuthenticationFilter {
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         AuthenticationFilter.SIGNATURE_SECRET,
                         "management.operation.return")).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      getMockedServletContextWithStringSigner(config);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -954,8 +895,8 @@ public class TestAuthenticationFilter {
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "invalidtype");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      StringSignerSecretProvider secretProvider
-              = new StringSignerSecretProvider();
+      SignerSecretProvider secretProvider =
+          StringSignerSecretProviderCreator.newStringSignerSecretProvider();
       Properties secretProviderProps = new Properties();
       secretProviderProps.setProperty(
               AuthenticationFilter.SIGNATURE_SECRET, secret);
@@ -989,11 +930,7 @@ public class TestAuthenticationFilter {
         new Vector<String>(
           Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                         "management.operation.return")).elements());
-      ServletContext context = Mockito.mock(ServletContext.class);
-      Mockito.when(context.getAttribute(
-              AuthenticationFilter.SIGNER_SECRET_PROVIDER_ATTRIBUTE))
-              .thenReturn(null);
-      Mockito.when(config.getServletContext()).thenReturn(context);
+      getMockedServletContextWithStringSigner(config);
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -1013,8 +950,8 @@ public class TestAuthenticationFilter {
 
       AuthenticationToken token = new AuthenticationToken("u", "p", "t");
       token.setExpires(System.currentTimeMillis() + TOKEN_VALIDITY_SEC);
-      StringSignerSecretProvider secretProvider
-              = new StringSignerSecretProvider();
+      SignerSecretProvider secretProvider =
+          StringSignerSecretProviderCreator.newStringSignerSecretProvider();
       Properties secretProviderProps = new Properties();
       secretProviderProps.setProperty(
               AuthenticationFilter.SIGNATURE_SECRET, "secret");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/526c90e0/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
new file mode 100644
index 0000000..7e5b10e
--- /dev/null
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java
@@ -0,0 +1,55 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import java.nio.charset.Charset;
+import java.util.Properties;
+import javax.servlet.ServletContext;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
+
+/**
+ * A SignerSecretProvider that simply creates a secret based on a given String.
+ */
+@InterfaceStability.Unstable
+@VisibleForTesting
+class StringSignerSecretProvider extends SignerSecretProvider {
+
+  private byte[] secret;
+  private byte[][] secrets;
+
+  public StringSignerSecretProvider() {}
+
+  @Override
+  public void init(Properties config, ServletContext servletContext,
+          long tokenValidity) throws Exception {
+    String signatureSecret = config.getProperty(
+            AuthenticationFilter.SIGNATURE_SECRET, null);
+    secret = signatureSecret.getBytes(Charset.forName("UTF-8"));
+    secrets = new byte[][]{secret};
+  }
+
+  @Override
+  public byte[] getCurrentSecret() {
+    return secret;
+  }
+
+  @Override
+  public byte[][] getAllSecrets() {
+    return secrets;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/526c90e0/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProviderCreator.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProviderCreator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProviderCreator.java
new file mode 100644
index 0000000..e567e7b
--- /dev/null
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProviderCreator.java
@@ -0,0 +1,33 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.util;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Helper class for creating StringSignerSecretProviders in unit tests
+ */
+@InterfaceStability.Unstable
+@VisibleForTesting
+public class StringSignerSecretProviderCreator {
+  /**
+   * @return a new StringSignerSecretProvider
+   * @throws Exception
+   */
+  public static StringSignerSecretProvider newStringSignerSecretProvider()
+      throws Exception {
+    return new StringSignerSecretProvider();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/526c90e0/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 40b4f84..e739a8f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1163,6 +1163,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11738. Fix a link of Protocol Buffers 2.5 for download in BUILDING.txt.
     (ozawa)
 
+    HADOOP-11748. The secrets of auth cookies should not be specified in
+    configuration in clear text. (Li Lu and Haohui Mai via wheat9)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/526c90e0/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index ddc6033..520e30f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -196,6 +196,12 @@
       <type>test-jar</type>
     </dependency>
     <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-auth</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
       <groupId>log4j</groupId>
       <artifactId>log4j</artifactId>
       <scope>compile</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/526c90e0/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
index 763d168..14b7a43 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
@@ -18,6 +18,8 @@
 package org.apache.hadoop.fs.http.server;
 
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
+import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;
 import org.apache.hadoop.security.token.delegation.web.DelegationTokenAuthenticator;
 import org.apache.hadoop.security.token.delegation.web.KerberosDelegationTokenAuthenticationHandler;
 import org.json.simple.JSONArray;
@@ -68,7 +70,6 @@ import org.mortbay.jetty.webapp.WebAppContext;
 import com.google.common.collect.Maps;
 import java.util.Properties;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
-import org.apache.hadoop.security.authentication.util.StringSignerSecretProvider;
 
 public class TestHttpFSServer extends HFSTestCase {
 
@@ -687,7 +688,8 @@ public class TestHttpFSServer extends HFSTestCase {
       new AuthenticationToken("u", "p",
           new KerberosDelegationTokenAuthenticationHandler().getType());
     token.setExpires(System.currentTimeMillis() + 100000000);
-    StringSignerSecretProvider secretProvider = new StringSignerSecretProvider();
+    SignerSecretProvider secretProvider =
+        StringSignerSecretProviderCreator.newStringSignerSecretProvider();
     Properties secretProviderProps = new Properties();
     secretProviderProps.setProperty(AuthenticationFilter.SIGNATURE_SECRET, "secret");
     secretProvider.init(secretProviderProps, null, -1);
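
For readers following the HADOOP-11748 hunks above: tests no longer construct StringSignerSecretProvider directly; they obtain a SignerSecretProvider through the new StringSignerSecretProviderCreator test helper and initialize it with the signature secret. A minimal stand-alone sketch of that pattern is below. The class name SignedCookieSketch and the final Signer step are illustrative assumptions (the Signer usage mirrors how the hadoop-auth filter tests typically consume the provider), not lines taken from the patch.

    import java.util.Properties;

    import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
    import org.apache.hadoop.security.authentication.server.AuthenticationToken;
    import org.apache.hadoop.security.authentication.util.Signer;
    import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
    import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;

    public class SignedCookieSketch {
      public static void main(String[] args) throws Exception {
        // Obtain the test-only provider through the creator, as the updated tests do,
        // and feed it the secret via the standard filter property.
        SignerSecretProvider secretProvider =
            StringSignerSecretProviderCreator.newStringSignerSecretProvider();
        Properties props = new Properties();
        props.setProperty(AuthenticationFilter.SIGNATURE_SECRET, "secret");
        secretProvider.init(props, null, -1);

        // Build a token and sign it the way the filter tests build their auth cookies.
        // The Signer step is an assumption about surrounding test code, not part of this diff.
        AuthenticationToken token = new AuthenticationToken("u", "p", "t");
        token.setExpires(System.currentTimeMillis() + 1000);
        Signer signer = new Signer(secretProvider);
        String signedCookieValue = signer.sign(token.toString());
        System.out.println(signedCookieValue);
      }
    }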


[32/50] [abbrv] hadoop git commit: YARN-3397. yarn rmadmin should skip -failover. (J.Andreina via kasha)

Posted by zj...@apache.org.
YARN-3397. yarn rmadmin should skip -failover. (J.Andreina via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13502621
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13502621
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13502621

Branch: refs/heads/YARN-2928
Commit: 1350262180bc4884905078e2ee0aa27c886495d3
Parents: 28b129a
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Mar 25 07:42:27 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:47 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                  |  2 ++
 .../hadoop/yarn/client/cli/RMAdminCLI.java       |  5 +++--
 .../hadoop/yarn/client/cli/TestRMAdminCLI.java   | 19 +++++++++----------
 3 files changed, 14 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/13502621/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index d84948d..bbeb0d8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -129,6 +129,8 @@ Release 2.8.0 - UNRELEASED
     YARN-2868. FairScheduler: Metric for latency to allocate first container 
     for an application. (Ray Chiang via kasha)
 
+    YARN-3397. yarn rmadmin should skip -failover. (J.Andreina via kasha)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13502621/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
index 4642add..420eeb0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java
@@ -134,7 +134,8 @@ public class RMAdminCLI extends HAAdmin {
 
   private static void appendHAUsage(final StringBuilder usageBuilder) {
     for (Map.Entry<String,UsageInfo> cmdEntry : USAGE.entrySet()) {
-      if (cmdEntry.getKey().equals("-help")) {
+      if (cmdEntry.getKey().equals("-help")
+          || cmdEntry.getKey().equals("-failover")) {
         continue;
       }
       UsageInfo usageInfo = cmdEntry.getValue();
@@ -225,7 +226,7 @@ public class RMAdminCLI extends HAAdmin {
     }
     if (isHAEnabled) {
       for (String cmdKey : USAGE.keySet()) {
-        if (!cmdKey.equals("-help")) {
+        if (!cmdKey.equals("-help") && !cmdKey.equals("-failover")) {
           buildHelpMsg(cmdKey, helpBuilder);
           helpBuilder.append("\n");
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/13502621/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
index c22494c..6067110 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
@@ -364,16 +364,15 @@ public class TestRMAdminCLI {
       assertEquals(0, rmAdminCLIWithHAEnabled.run(args));
       oldOutPrintStream.println(dataOut);
       String expectedHelpMsg = 
-          "yarn rmadmin [-refreshQueues] [-refreshNodes] [-refreshSuper" +
-              "UserGroupsConfiguration] [-refreshUserToGroupsMappings] " +
-              "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup" +
-              " [username]] [[-addToClusterNodeLabels [label1,label2,label3]]" +
-              " [-removeFromClusterNodeLabels [label1,label2,label3]] [-replaceLabelsOnNode " +
-              "[node1[:port]=label1,label2 node2[:port]=label1] [-directlyAccessNodeLabelStore]] " +
-              "[-transitionToActive [--forceactive] <serviceId>] " + 
-              "[-transitionToStandby <serviceId>] [-failover" +
-              " [--forcefence] [--forceactive] <serviceId> <serviceId>] " +
-              "[-getServiceState <serviceId>] [-checkHealth <serviceId>] [-help [cmd]]";
+          "yarn rmadmin [-refreshQueues] [-refreshNodes] [-refreshSuper"
+              + "UserGroupsConfiguration] [-refreshUserToGroupsMappings] "
+              + "[-refreshAdminAcls] [-refreshServiceAcl] [-getGroup"
+              + " [username]] [[-addToClusterNodeLabels [label1,label2,label3]]"
+              + " [-removeFromClusterNodeLabels [label1,label2,label3]] [-replaceLabelsOnNode "
+              + "[node1[:port]=label1,label2 node2[:port]=label1] [-directlyAccessNodeLabelStore]] "
+              + "[-transitionToActive [--forceactive] <serviceId>] "
+              + "[-transitionToStandby <serviceId>] "
+              + "[-getServiceState <serviceId>] [-checkHealth <serviceId>] [-help [cmd]]";
       String actualHelpMsg = dataOut.toString();
       assertTrue(String.format("Help messages: %n " + actualHelpMsg + " %n doesn't include expected " +
           "messages: %n" + expectedHelpMsg), actualHelpMsg.contains(expectedHelpMsg


[05/50] [abbrv] hadoop git commit: YARN-3241. FairScheduler handles invalid queue names inconsistently. (Zhihai Xu via kasha)

Posted by zj...@apache.org.
YARN-3241. FairScheduler handles invalid queue names inconsistently. (Zhihai Xu via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc9adb6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc9adb6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc9adb6b

Branch: refs/heads/YARN-2928
Commit: bc9adb6baa2fffa1fdbd0b66ea7008b4f49d0e01
Parents: 9a879a9
Author: Karthik Kambatla <ka...@apache.org>
Authored: Mon Mar 23 13:22:03 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:43 2015 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../fair/AllocationFileLoaderService.java       |  8 +-
 .../scheduler/fair/FairScheduler.java           |  2 +
 .../fair/InvalidQueueNameException.java         | 39 ++++++++++
 .../scheduler/fair/QueueManager.java            | 16 ++++
 .../fair/TestAllocationFileLoaderService.java   | 25 ++++++-
 .../scheduler/fair/TestFairScheduler.java       | 78 ++++++++++++++++++++
 .../scheduler/fair/TestQueueManager.java        | 13 +++-
 8 files changed, 181 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9adb6b/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 90d906b..8ea93d3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -144,6 +144,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3269. Yarn.nodemanager.remote-app-log-dir could not be configured to 
     fully qualified path. (Xuan Gong via junping_du)
 
+    YARN-3241. FairScheduler handles "invalid" queue names inconsistently. 
+    (Zhihai Xu via kasha)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9adb6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
index 76fa588..dab6d9f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java
@@ -426,13 +426,19 @@ public class AllocationFileLoaderService extends AbstractService {
       Map<FSQueueType, Set<String>> configuredQueues,
       Set<String> reservableQueues)
       throws AllocationConfigurationException {
-    String queueName = element.getAttribute("name");
+    String queueName = element.getAttribute("name").trim();
 
     if (queueName.contains(".")) {
       throw new AllocationConfigurationException("Bad fair scheduler config "
           + "file: queue name (" + queueName + ") shouldn't contain period.");
     }
 
+    if (queueName.isEmpty()) {
+      throw new AllocationConfigurationException("Bad fair scheduler config "
+          + "file: queue name shouldn't be empty or "
+          + "consist only of whitespace.");
+    }
+
     if (parentName != null) {
       queueName = parentName + "." + queueName;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9adb6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 1d97983..98a8de2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -701,6 +701,8 @@ public class FairScheduler extends
           appRejectMsg = queueName + " is not a leaf queue";
         }
       }
+    } catch (InvalidQueueNameException qne) {
+      appRejectMsg = qne.getMessage();
     } catch (IOException ioe) {
       appRejectMsg = "Error assigning app to queue " + queueName;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9adb6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/InvalidQueueNameException.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/InvalidQueueNameException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/InvalidQueueNameException.java
new file mode 100644
index 0000000..fc5ba16
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/InvalidQueueNameException.java
@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * Thrown when Queue Name is malformed.
+ */
+@Private
+@Unstable
+public class InvalidQueueNameException extends IllegalArgumentException {
+  private static final long serialVersionUID = -7306320927804540011L;
+
+  public InvalidQueueNameException(String message) {
+    super(message);
+  }
+
+  public InvalidQueueNameException(String message, Throwable t) {
+    super(message, t);
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9adb6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
index 27e571e..64442ab 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/QueueManager.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.xml.sax.SAXException;
 
+import com.google.common.annotations.VisibleForTesting;
 /**
  * Maintains a list of queues as well as scheduling parameters for each queue,
  * such as guaranteed share allocations, from the fair scheduler config file.
@@ -155,7 +156,13 @@ public class QueueManager {
 
     // Move up the queue tree until we reach one that exists.
     while (sepIndex != -1) {
+      int prevSepIndex = sepIndex;
       sepIndex = name.lastIndexOf('.', sepIndex-1);
+      String node = name.substring(sepIndex+1, prevSepIndex);
+      if (!isQueueNameValid(node)) {
+        throw new InvalidQueueNameException("Illegal node name at offset " +
+            (sepIndex+1) + " for queue name " + name);
+      }
       FSQueue queue;
       String curName = null;
       curName = name.substring(0, sepIndex);
@@ -401,4 +408,13 @@ public class QueueManager {
     // recursively
     rootQueue.updatePreemptionVariables();
   }
+
+  /**
+   * Check whether queue name is valid,
+   * return true if it is valid, otherwise return false.
+   */
+  @VisibleForTesting
+  boolean isQueueNameValid(String node) {
+    return !node.isEmpty() && node.equals(node.trim());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9adb6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
index 3c166a5..b09573c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestAllocationFileLoaderService.java
@@ -550,7 +550,30 @@ public class TestAllocationFileLoaderService {
     allocLoader.setReloadListener(confHolder);
     allocLoader.reloadAllocations();
   }
-  
+
+  /**
+   * Verify that you can't have the queue name with whitespace only in the
+   * allocations file.
+   */
+  @Test (expected = AllocationConfigurationException.class)
+  public void testQueueNameContainingOnlyWhitespace() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+
+    PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
+    out.println("<?xml version=\"1.0\"?>");
+    out.println("<allocations>");
+    out.println("<queue name=\"      \">");
+    out.println("</queue>");
+    out.println("</allocations>");
+    out.close();
+
+    AllocationFileLoaderService allocLoader = new AllocationFileLoaderService();
+    allocLoader.init(conf);
+    ReloadListener confHolder = new ReloadListener();
+    allocLoader.setReloadListener(confHolder);
+    allocLoader.reloadAllocations();
+  }
 
   @Test
   public void testReservableQueue() throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9adb6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 267fbc2..7600a35 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -4444,4 +4444,82 @@ public class TestFairScheduler extends FairSchedulerTestBase {
     assertEquals("Incorrect number of perf metrics", 1,
         collector.getRecords().size());
   }
+
+  @Test
+  public void testQueueNameWithTrailingSpace() throws Exception {
+    scheduler.init(conf);
+    scheduler.start();
+    scheduler.reinitialize(conf, resourceManager.getRMContext());
+
+    // only default queue
+    assertEquals(1, scheduler.getQueueManager().getLeafQueues().size());
+
+    // submit app with queue name "A"
+    ApplicationAttemptId appAttemptId1 = createAppAttemptId(1, 1);
+    AppAddedSchedulerEvent appAddedEvent1 = new AppAddedSchedulerEvent(
+        appAttemptId1.getApplicationId(), "A", "user1");
+    scheduler.handle(appAddedEvent1);
+    // submission accepted
+    assertEquals(2, scheduler.getQueueManager().getLeafQueues().size());
+    assertNotNull(scheduler.getSchedulerApplications().get(appAttemptId1.
+        getApplicationId()));
+
+    AppAttemptAddedSchedulerEvent attempAddedEvent =
+        new AppAttemptAddedSchedulerEvent(appAttemptId1, false);
+    scheduler.handle(attempAddedEvent);
+    // That queue should have one app
+    assertEquals(1, scheduler.getQueueManager().getLeafQueue("A", true)
+        .getNumRunnableApps());
+    assertNotNull(scheduler.getSchedulerApp(appAttemptId1));
+
+    // submit app with queue name "A "
+    ApplicationAttemptId appAttemptId2 = createAppAttemptId(2, 1);
+    AppAddedSchedulerEvent appAddedEvent2 = new AppAddedSchedulerEvent(
+        appAttemptId2.getApplicationId(), "A ", "user1");
+    scheduler.handle(appAddedEvent2);
+    // submission rejected
+    assertEquals(2, scheduler.getQueueManager().getLeafQueues().size());
+    assertNull(scheduler.getSchedulerApplications().get(appAttemptId2.
+        getApplicationId()));
+    assertNull(scheduler.getSchedulerApp(appAttemptId2));
+
+    // submit app with queue name "B.C"
+    ApplicationAttemptId appAttemptId3 = createAppAttemptId(3, 1);
+    AppAddedSchedulerEvent appAddedEvent3 = new AppAddedSchedulerEvent(
+        appAttemptId3.getApplicationId(), "B.C", "user1");
+    scheduler.handle(appAddedEvent3);
+    // submission accepted
+    assertEquals(3, scheduler.getQueueManager().getLeafQueues().size());
+    assertNotNull(scheduler.getSchedulerApplications().get(appAttemptId3.
+        getApplicationId()));
+
+    attempAddedEvent =
+        new AppAttemptAddedSchedulerEvent(appAttemptId3, false);
+    scheduler.handle(attempAddedEvent);
+    // That queue should have one app
+    assertEquals(1, scheduler.getQueueManager().getLeafQueue("B.C", true)
+        .getNumRunnableApps());
+    assertNotNull(scheduler.getSchedulerApp(appAttemptId3));
+  }
+
+  @Test
+  public void testEmptyQueueNameInConfigFile() throws IOException {
+    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+    // set empty queue name
+    PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
+    out.println("<?xml version=\"1.0\"?>");
+    out.println("<allocations>");
+    out.println("<queue name=\"\">");
+    out.println("</queue>");
+    out.println("</allocations>");
+    out.close();
+    try {
+      scheduler.init(conf);
+      Assert.fail("scheduler init should fail because" +
+          " empty queue name.");
+    } catch (Exception e) {
+      Assert.assertTrue(e.getMessage().contains(
+          "Failed to initialize FairScheduler"));
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc9adb6b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java
index ef0ec7e..b3ed542 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestQueueManager.java
@@ -123,7 +123,18 @@ public class TestQueueManager {
     assertTrue(queueManager.getParentQueue("root.queue1", false)
         .getChildQueues().isEmpty());
   }
-  
+
+  @Test
+  public void testCheckQueueNodeName() {
+    assertFalse(queueManager.isQueueNameValid(""));
+    assertFalse(queueManager.isQueueNameValid("  "));
+    assertFalse(queueManager.isQueueNameValid(" a"));
+    assertFalse(queueManager.isQueueNameValid("a "));
+    assertFalse(queueManager.isQueueNameValid(" a "));
+    assertTrue(queueManager.isQueueNameValid("a b"));
+    assertTrue(queueManager.isQueueNameValid("a"));
+  }
+
   private void updateConfiguredLeafQueues(QueueManager queueMgr, String... confLeafQueues) {
     AllocationConfiguration allocConf = new AllocationConfiguration(conf);
     allocConf.configuredQueues.get(FSQueueType.LEAF).addAll(Sets.newHashSet(confLeafQueues));


[31/50] [abbrv] hadoop git commit: MAPREDUCE-579. Streaming slowmatch documentation.

Posted by zj...@apache.org.
MAPREDUCE-579. Streaming slowmatch documentation.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b51b3662
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b51b3662
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b51b3662

Branch: refs/heads/YARN-2928
Commit: b51b36626316b1cba2bf5c991473b2e26c29685c
Parents: 89760de
Author: Harsh J <ha...@cloudera.com>
Authored: Wed Mar 25 14:38:12 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:47 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt                          | 2 ++
 .../hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm  | 7 +++++++
 2 files changed, 9 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b51b3662/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 2b16c30..f81a13f 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -256,6 +256,8 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    MAPREDUCE-579. Streaming "slowmatch" documentation. (harsh)
+
     MAPREDUCE-6287. Deprecated methods in org.apache.hadoop.examples.Sort
     (Chao Zhang via harsh)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b51b3662/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
----------------------------------------------------------------------
diff --git a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
index b4c5e38..7f2412e 100644
--- a/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
+++ b/hadoop-tools/hadoop-streaming/src/site/markdown/HadoopStreaming.md.vm
@@ -546,6 +546,13 @@ You can use the record reader StreamXmlRecordReader to process XML documents.
 
 Anything found between BEGIN\_STRING and END\_STRING would be treated as one record for map tasks.
 
+The name-value properties that StreamXmlRecordReader understands are:
+
+*   (strings) 'begin' - Characters marking beginning of record, and 'end' - Characters marking end of record.
+*   (boolean) 'slowmatch' - Toggle to look for begin and end characters, but within CDATA instead of regular tags. Defaults to false.
+*   (integer) 'lookahead' - Maximum lookahead bytes to sync CDATA when using 'slowmatch', should be larger than 'maxrec'. Defaults to 2*'maxrec'.
+*   (integer) 'maxrec' - Maximum record size to read between each match during 'slowmatch'. Defaults to 50000 bytes.
+
 $H3 How do I update counters in streaming applications?
 
 A streaming process can use the stderr to emit counter information. `reporter:counter:<group>,<counter>,<amount>` should be sent to stderr to update the counter.
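
As a rough illustration of the name-value properties documented in the hunk above, the sketch below sets the equivalent keys on a job configuration. The stream.recordreader.* key names are an assumption about how the -inputreader "StreamXmlRecordReader,name=value,..." pairs are mapped onto the configuration; the streaming command line remains the authoritative way to pass them, and the begin/end markers shown are placeholders.

    import org.apache.hadoop.conf.Configuration;

    public class StreamXmlReaderConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("stream.recordreader.begin", "<page>");      // characters marking record start
        conf.set("stream.recordreader.end", "</page>");       // characters marking record end
        conf.set("stream.recordreader.slowmatch", "true");    // match inside CDATA, not regular tags
        conf.set("stream.recordreader.maxrec", "50000");      // max record size per match (documented default)
        conf.set("stream.recordreader.lookahead", "100000");  // CDATA sync lookahead, ~2 * maxrec
      }
    }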


[36/50] [abbrv] hadoop git commit: Move HDFS-6353 to the trunk section in CHANGES.txt

Posted by zj...@apache.org.
Move HDFS-6353 to the trunk section in CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e1416a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e1416a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e1416a7

Branch: refs/heads/YARN-2928
Commit: 8e1416a7be024849952243498994627e32215578
Parents: 88494be
Author: Jing Zhao <ji...@apache.org>
Authored: Wed Mar 25 11:15:33 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:48 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e1416a7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cd2ca4c..8f1d5fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -24,8 +24,6 @@ Trunk (Unreleased)
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
 
-    HDFS-3689. Add support for variable length block. (jing9)
-
   IMPROVEMENTS
 
     HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.
@@ -149,6 +147,8 @@ Trunk (Unreleased)
 
     HDFS-7460. Rewrite httpfs to use new shell framework (John Smith via aw)
 
+    HDFS-6353. Check and make checkpoint before stopping the NameNode. (jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -339,8 +339,6 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-7713. Implement mkdirs in the HDFS Web UI. (Ravi Prakash via wheat9)
 
-    HDFS-6353. Check and make checkpoint before stopping the NameNode. (jing9)
-
   OPTIMIZATIONS
 
   BUG FIXES
@@ -385,6 +383,8 @@ Release 2.7.0 - UNRELEASED
     HDFS-6133. Add a feature for replica pinning so that a pinned replica
     will not be moved by Balancer/Mover.  (zhaoyunjiong via szetszwo)
 
+    HDFS-3689. Add support for variable length block. (jing9)
+
     HDFS-7584. Enable Quota Support for Storage Types (See breakdown of
     tasks below)
 


[16/50] [abbrv] hadoop git commit: HDFS-7875. Improve log message when wrong value configured for dfs.datanode.failed.volumes.tolerated. Contributed by Nijel.

Posted by zj...@apache.org.
HDFS-7875. Improve log message when wrong value configured for dfs.datanode.failed.volumes.tolerated. Contributed by Nijel.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7f43b8ca
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7f43b8ca
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7f43b8ca

Branch: refs/heads/YARN-2928
Commit: 7f43b8cafa28fd779d17554015bdfc9979349bc0
Parents: 1141b15
Author: Harsh J <ha...@cloudera.com>
Authored: Tue Mar 24 23:03:30 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:45 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                    | 4 ++++
 .../hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java     | 6 ++++--
 2 files changed, 8 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f43b8ca/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4f3937a..3725a03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -321,6 +321,10 @@ Release 2.8.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    HDFS-7875. Improve log message when wrong value configured for
+    dfs.datanode.failed.volumes.tolerated.
+    (nijel via harsh)
+
     HDFS-2360. Ugly stacktrace when quota exceeds. (harsh)
 
     HDFS-7835. make initial sleeptime in locateFollowingBlock configurable for

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7f43b8ca/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index d42c00c..05c4871 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -276,8 +276,10 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     this.validVolsRequired = volsConfigured - volFailuresTolerated;
 
     if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
-      throw new DiskErrorException("Invalid volume failure "
-          + " config value: " + volFailuresTolerated);
+      throw new DiskErrorException("Invalid value configured for "
+          + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated
+          + ". Value configured is either less than 0 or >= "
+          + "to the number of configured volumes (" + volsConfigured + ").");
     }
     if (volsFailed > volFailuresTolerated) {
       throw new DiskErrorException("Too many failed volumes - "


[25/50] [abbrv] hadoop git commit: HDFS-7713. Implement mkdirs in the HDFS Web UI. Contributed by Ravi Prakash.

Posted by zj...@apache.org.
HDFS-7713. Implement mkdirs in the HDFS Web UI. Contributed by Ravi Prakash.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8ec988c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8ec988c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8ec988c

Branch: refs/heads/YARN-2928
Commit: f8ec988c93e50c8830d351c2d037a9be1f0c02f8
Parents: cb2eb77
Author: Haohui Mai <wh...@apache.org>
Authored: Tue Mar 24 15:48:52 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:46 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 +
 .../src/main/webapps/hdfs/explorer.html         | 53 ++++++++++++++++++--
 .../src/main/webapps/hdfs/explorer.js           | 22 ++++++++
 3 files changed, 72 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8ec988c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4ec0891..5ade5fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -335,6 +335,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-7854. Separate class DataStreamer out of DFSOutputStream. (Li Bo via
     jing9)
 
+    HDFS-7713. Implement mkdirs in the HDFS Web UI. (Ravi Prakash via wheat9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8ec988c/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
index 7b34044..cd6623c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
@@ -87,13 +87,56 @@
 	<button type="button" class="close" onclick="$('#alert-panel').hide();">&times;</button>
 	<div class="alert-body" id="alert-panel-body"></div>
       </div>
+
+    <div class="modal" id="btn-create-directory" tabindex="-1" role="dialog"
+      aria-hidden="true">
+      <div class="modal-dialog">
+        <div class="modal-content">
+          <div class="modal-header">
+            <button type="button" class="close"
+              data-dismiss="modal" aria-hidden="true">&times;</button>
+            <h4 class="modal-title">Create Directory</h4>
+          </div>
+          <div class="modal-body">
+            <div class="form-group">
+              <div class="input-group">
+                <span class="input-group-addon" id="new_directory_pwd"></span>
+                <input type="text" class="form-control" id="new_directory"
+                  placeholder="New Directory Name" />
+              </div>
+            </div>
+          </div>
+          <div class="modal-footer">
+            <button type="button" class="btn" data-dismiss="modal">Cancel</button>
+            <button type="button" class="btn btn-success"
+              id="btn-create-directory-send" data-complete-text="Creating...">
+              Create
+            </button>
+          </div>
+        </div>
+      </div>
+    </div>
+
       <div class="row">
-	<form onsubmit="return false;">
-	  <div class="input-group"><input type="text" class="form-control" id=
-					  "directory" /> <span class="input-group-btn"><button class="btn btn-default"
-											       type="submit" id="btn-nav-directory"><span class="input-group-btn">Go!</span></button></span></div>
-	</form>
+      <div class="col-xs-11">
+        <form onsubmit="return false;">
+          <div class="input-group">
+            <input type="text" class="form-control" id="directory"/>
+            <span class="input-group-btn">
+              <button class="btn btn-default" type="button" id="btn-nav-directory">Go!</button>
+            </span>
+          </div>
+        </form>
+      </div>
+      <div class="col-xs-1">
+        <button type="button" class="btn btn-default" data-toggle="modal"
+          aria-label="New Directory" data-target="#btn-create-directory"
+          title="Create Directory">
+            <span class="glyphicon glyphicon-folder-open"></span>
+        </button>
       </div>
+    </div>
+
       <br />
       <div id="panel"></div>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8ec988c/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
index 131b2aa..5572880 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
@@ -193,5 +193,27 @@
     }
   }
 
+  $('#btn-create-directory').on('show.bs.modal', function(event) {
+    var modal = $(this)
+    $('#new_directory_pwd').html(current_directory);
+  });
+
+  $('#btn-create-directory-send').click(function () {
+    $(this).prop('disabled', true);
+    $(this).button('complete');
+
+    var url = '/webhdfs/v1' + encode_path(append_path(current_directory,
+      $('#new_directory').val())) + '?op=MKDIRS';
+
+    $.ajax(url, { type: 'PUT' }
+    ).done(function(data) {
+      browse_directory(current_directory);
+    }).error(network_error_handler(url)
+     ).complete(function() {
+       $('#btn-create-directory').modal('hide');
+       $('#btn-create-directory-send').button('reset');
+    });
+  })
+
   init();
 })();
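
The new create-directory handler above boils down to a single WebHDFS call: a PUT to /webhdfs/v1<path>?op=MKDIRS against the NameNode web UI. For reference, a minimal stand-alone sketch of that same request follows; the host, port (50070 is the usual 2.x NameNode HTTP default) and target path are illustrative assumptions, not values taken from the patch.

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class WebHdfsMkdirsSketch {
      public static void main(String[] args) throws Exception {
        // Same REST call the "Create Directory" button issues from explorer.js.
        // On an unsecured cluster you may need to append &user.name=<user> so the
        // request does not run as the anonymous "dr.who" user.
        URL url = new URL("http://localhost:50070/webhdfs/v1/tmp/newdir?op=MKDIRS");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("PUT");               // MKDIRS takes no request body
        int status = conn.getResponseCode();        // 200 with {"boolean":true} on success
        System.out.println("HTTP " + status);
        conn.disconnect();
      }
    }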


[33/50] [abbrv] hadoop git commit: HADOOP-11738. Fix a link of Protocol Buffers 2.5 for download in BUILDING.txt. (ozawa)

Posted by zj...@apache.org.
HADOOP-11738. Fix a link of Protocol Buffers 2.5 for download in BUILDING.txt. (ozawa)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89760de1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89760de1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89760de1

Branch: refs/heads/YARN-2928
Commit: 89760de1297dfa1f21c9a6179fd43b793365929c
Parents: 7971742
Author: Tsuyoshi Ozawa <oz...@apache.org>
Authored: Wed Mar 25 18:02:32 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:47 2015 -0700

----------------------------------------------------------------------
 BUILDING.txt                                    | 4 ++--
 hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++
 2 files changed, 5 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89760de1/BUILDING.txt
----------------------------------------------------------------------
diff --git a/BUILDING.txt b/BUILDING.txt
index c126c5e..02b8610 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -29,7 +29,7 @@ Installing required packages for clean install of Ubuntu 14.04 LTS Desktop:
 * Native libraries
   $ sudo apt-get -y install build-essential autoconf automake libtool cmake zlib1g-dev pkg-config libssl-dev
 * ProtocolBuffer 2.5.0
-  $ wget https://protobuf.googlecode.com/svn/rc/protobuf-2.5.0.tar.gz
+  $ wget https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz
   $ tar -zxvf protobuf-2.5.0.tar.gz
   $ cd protobuf-2.5.0.tar.gz
   $ ./configure
@@ -312,4 +312,4 @@ http://www.zlib.net/
 ----------------------------------------------------------------------------------
 Building distributions:
 
- * Build distribution with native code    : mvn package [-Pdist][-Pdocs][-Psrc][-Dtar]
\ No newline at end of file
+ * Build distribution with native code    : mvn package [-Pdist][-Pdocs][-Psrc][-Dtar]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89760de1/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0c311df..a01a201 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1145,6 +1145,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11014. Potential resource leak in JavaKeyStoreProvider due to
     unclosed stream. (ozawa)
 
+    HADOOP-11738. Fix a link of Protocol Buffers 2.5 for download in BUILDING.txt.
+    (ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[42/50] [abbrv] hadoop git commit: HADOOP-11524. hadoop_do_classpath_subcommand throws a shellcheck warning. Contributed by Chris Nauroth.

Posted by zj...@apache.org.
HADOOP-11524. hadoop_do_classpath_subcommand throws a shellcheck warning. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/56b24ba0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/56b24ba0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/56b24ba0

Branch: refs/heads/YARN-2928
Commit: 56b24ba0a05d93502284b80682e8df0e0f9c203c
Parents: 8aec0d1
Author: cnauroth <cn...@apache.org>
Authored: Wed Mar 25 22:36:09 2015 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:48 2015 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt                  | 3 +++
 hadoop-common-project/hadoop-common/src/main/bin/hadoop          | 2 +-
 .../hadoop-common/src/main/bin/hadoop-functions.sh               | 4 ++--
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs                | 2 +-
 hadoop-mapreduce-project/bin/mapred                              | 2 +-
 hadoop-yarn-project/hadoop-yarn/bin/yarn                         | 2 +-
 6 files changed, 9 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b24ba0/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2e26b0a..667a010 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -187,6 +187,9 @@ Trunk (Unreleased)
     HADOOP-10115. Exclude duplicate jars in hadoop package under different
     component's lib (Vinayakumar B via aw)
 
+    HADOOP-11524. hadoop_do_classpath_subcommand throws a shellcheck warning.
+    (cnauroth)
+
   BUG FIXES
 
     HADOOP-11473. test-patch says "-1 overall" even when all checks are +1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b24ba0/hadoop-common-project/hadoop-common/src/main/bin/hadoop
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
index 6003927..64c3c13 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop
@@ -125,7 +125,7 @@ case ${COMMAND} in
     CLASS=org.apache.hadoop.util.NativeLibraryChecker
   ;;
   classpath)
-    hadoop_do_classpath_subcommand "$@"
+    hadoop_do_classpath_subcommand CLASS "$@"
   ;;
   credential)
     CLASS=org.apache.hadoop.security.alias.CredentialShell

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b24ba0/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index 8129c5c..616e706 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -1305,8 +1305,8 @@ function hadoop_verify_user
 
 function hadoop_do_classpath_subcommand
 {
-  if [[ "$#" -gt 0 ]]; then
-    CLASS=org.apache.hadoop.util.Classpath
+  if [[ "$#" -gt 1 ]]; then
+    eval "$1"=org.apache.hadoop.util.Classpath
   else
     hadoop_finalize
     echo "${CLASSPATH}"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b24ba0/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index ececbb4..f464261 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -94,7 +94,7 @@ case ${COMMAND} in
     CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
   ;;
   classpath)
-    hadoop_do_classpath_subcommand "$@"
+    hadoop_do_classpath_subcommand CLASS "$@"
   ;;
   crypto)
     CLASS=org.apache.hadoop.hdfs.tools.CryptoAdmin

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b24ba0/hadoop-mapreduce-project/bin/mapred
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index d199128..5afe02e 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -78,7 +78,7 @@ case ${COMMAND} in
     HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
   ;;
   classpath)
-    hadoop_do_classpath_subcommand "$@" 
+    hadoop_do_classpath_subcommand CLASS "$@"
   ;;
   distcp)
     CLASS=org.apache.hadoop.tools.DistCp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/56b24ba0/hadoop-yarn-project/hadoop-yarn/bin/yarn
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn
index e6af4ae..fddee46 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn
@@ -82,7 +82,7 @@ case "${COMMAND}" in
     set -- "${COMMAND}" "$@"
   ;;
   classpath)
-    hadoop_do_classpath_subcommand "$@"
+    hadoop_do_classpath_subcommand CLASS "$@"
   ;;
   cluster)
     CLASS=org.apache.hadoop.yarn.client.cli.ClusterCLI


[11/50] [abbrv] hadoop git commit: HDFS-7884. Fix NullPointerException in BlockSender when the generation stamp provided by the client is larger than the one stored in the datanode. Contributed by Brahma Reddy Battula

Posted by zj...@apache.org.
HDFS-7884. Fix NullPointerException in BlockSender when the generation stamp provided by the client is larger than the one stored in the datanode.  Contributed by Brahma Reddy Battula


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/524987c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/524987c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/524987c1

Branch: refs/heads/YARN-2928
Commit: 524987c1d7595842bf81ee70ba12f1f17185acc3
Parents: b4294b6
Author: Tsz-Wo Nicholas Sze <sz...@hortonworks.com>
Authored: Tue Mar 24 13:49:17 2015 +0900
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:44 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                   | 4 ++++
 .../org/apache/hadoop/hdfs/server/datanode/BlockSender.java   | 7 +++++++
 2 files changed, 11 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/524987c1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b88b7e3..d2891e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1237,6 +1237,10 @@ Release 2.7.0 - UNRELEASED
 
     HDFS-7942. NFS: support regexp grouping in nfs.exports.allowed.hosts (brandonli)
 
+    HDFS-7884. Fix NullPointerException in BlockSender when the generation stamp
+    provided by the client is larger than the one stored in the datanode.
+    (Brahma Reddy Battula via szetszwo)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/524987c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index f4cde11..e76b93a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -246,6 +246,13 @@ class BlockSender implements java.io.Closeable {
       if (replica.getGenerationStamp() < block.getGenerationStamp()) {
         throw new IOException("Replica gen stamp < block genstamp, block="
             + block + ", replica=" + replica);
+      } else if (replica.getGenerationStamp() > block.getGenerationStamp()) {
+        if (DataNode.LOG.isDebugEnabled()) {
+          DataNode.LOG.debug("Bumping up the client provided"
+              + " block's genstamp to latest " + replica.getGenerationStamp()
+              + " for block " + block);
+        }
+        block.setGenerationStamp(replica.getGenerationStamp());
       }
       if (replicaVisibleLength < 0) {
         throw new IOException("Replica is not readable, block="


[07/50] [abbrv] hadoop git commit: MAPREDUCE-6242. Progress report log is incredibly excessive in application master. Contributed by Varun Saxena.

Posted by zj...@apache.org.
MAPREDUCE-6242. Progress report log is incredibly excessive in application
master. Contributed by Varun Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/77e82eb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/77e82eb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/77e82eb8

Branch: refs/heads/YARN-2928
Commit: 77e82eb847bf45106979b869565e1b39d6eb4ec2
Parents: 5b44e8d
Author: Devaraj K <de...@apache.org>
Authored: Mon Mar 23 22:51:20 2015 +0530
Committer: Zhijie Shen <zj...@apache.org>
Committed: Thu Mar 26 23:29:43 2015 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |   3 +
 .../java/org/apache/hadoop/mapred/Task.java     |  13 +-
 .../apache/hadoop/mapreduce/MRJobConfig.java    |   5 +
 .../hadoop/mapred/TestTaskProgressReporter.java | 160 +++++++++++++++++++
 4 files changed, 177 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/77e82eb8/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 20505b6..b8a2a1c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -300,6 +300,9 @@ Release 2.8.0 - UNRELEASED
 
     MAPREDUCE-6281. Fix javadoc in Terasort. (Albert Chu via ozawa)
 
+    MAPREDUCE-6242. Progress report log is incredibly excessive in 
+    application master. (Varun Saxena via devaraj)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77e82eb8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
index 7fa5d02..bf5ca22 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
@@ -229,6 +229,11 @@ abstract public class Task implements Writable, Configurable {
     gcUpdater = new GcTimeUpdater();
   }
 
+  @VisibleForTesting
+  void setTaskDone() {
+    taskDone.set(true);
+  }
+
   ////////////////////////////////////////////
   // Accessors
   ////////////////////////////////////////////
@@ -536,9 +541,6 @@ abstract public class Task implements Writable, Configurable {
   public abstract void run(JobConf job, TaskUmbilicalProtocol umbilical)
     throws IOException, ClassNotFoundException, InterruptedException;
 
-  /** The number of milliseconds between progress reports. */
-  public static final int PROGRESS_INTERVAL = 3000;
-
   private transient Progress taskProgress = new Progress();
 
   // Current counters
@@ -714,6 +716,9 @@ abstract public class Task implements Writable, Configurable {
       int remainingRetries = MAX_RETRIES;
       // get current flag value and reset it as well
       boolean sendProgress = resetProgressFlag();
+      long taskProgressInterval =
+          conf.getLong(MRJobConfig.TASK_PROGRESS_REPORT_INTERVAL,
+                       MRJobConfig.DEFAULT_TASK_PROGRESS_REPORT_INTERVAL);
       while (!taskDone.get()) {
         synchronized (lock) {
           done = false;
@@ -726,7 +731,7 @@ abstract public class Task implements Writable, Configurable {
             if (taskDone.get()) {
               break;
             }
-            lock.wait(PROGRESS_INTERVAL);
+            lock.wait(taskProgressInterval);
           }
           if (taskDone.get()) {
             break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77e82eb8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index f0a6ddf..947c814 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -49,6 +49,11 @@ public interface MRJobConfig {
 
   public static final String TASK_CLEANUP_NEEDED = "mapreduce.job.committer.task.cleanup.needed";
 
+  public static final String TASK_PROGRESS_REPORT_INTERVAL =
+      "mapreduce.task.progress-report.interval";
+  /** The number of milliseconds between progress reports. */
+  public static final int DEFAULT_TASK_PROGRESS_REPORT_INTERVAL = 3000;
+
   public static final String JAR = "mapreduce.job.jar";
 
   public static final String ID = "mapreduce.job.id";
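
A hedged usage sketch: for a job whose driver goes through GenericOptionsParser (as the bundled examples do), the new interval can be raised from the command line with no code change; the jar name and paths below are illustrative only:

  $ hadoop jar hadoop-mapreduce-examples.jar wordcount \
      -D mapreduce.task.progress-report.interval=10000 \
      /user/input /user/output

Jobs that leave the property unset keep reporting every 3000 ms, per DEFAULT_TASK_PROGRESS_REPORT_INTERVAL above.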

http://git-wip-us.apache.org/repos/asf/hadoop/blob/77e82eb8/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java
new file mode 100644
index 0000000..0bceb87
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestTaskProgressReporter.java
@@ -0,0 +1,160 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapred;
+
+import java.io.IOException;
+
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.mapred.SortedRanges.Range;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.checkpoint.TaskCheckpointID;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestTaskProgressReporter {
+  private static int statusUpdateTimes = 0;
+  private FakeUmbilical fakeUmbilical = new FakeUmbilical();
+
+  private static class DummyTask extends Task {
+    @Override
+    public void run(JobConf job, TaskUmbilicalProtocol umbilical)
+        throws IOException, ClassNotFoundException, InterruptedException {
+    }
+
+    @Override
+    public boolean isMapTask() {
+      return true;
+    }
+  }
+
+  private static class FakeUmbilical implements TaskUmbilicalProtocol {
+    @Override
+    public long getProtocolVersion(String protocol, long clientVersion)
+        throws IOException {
+      return 0;
+    }
+
+    @Override
+    public ProtocolSignature getProtocolSignature(String protocol,
+        long clientVersion, int clientMethodsHash) throws IOException {
+      return null;
+    }
+
+    @Override
+    public JvmTask getTask(JvmContext context) throws IOException {
+      return null;
+    }
+
+    @Override
+    public AMFeedback statusUpdate(TaskAttemptID taskId,
+        TaskStatus taskStatus) throws IOException, InterruptedException {
+      statusUpdateTimes++;
+      AMFeedback feedback = new AMFeedback();
+      feedback.setTaskFound(true);
+      feedback.setPreemption(true);
+      return feedback;
+    }
+
+    @Override
+    public void reportDiagnosticInfo(TaskAttemptID taskid, String trace)
+        throws IOException {
+    }
+
+    @Override
+    public void reportNextRecordRange(TaskAttemptID taskid, Range range)
+        throws IOException {
+    }
+
+    @Override
+    public void done(TaskAttemptID taskid) throws IOException {
+    }
+
+    @Override
+    public void commitPending(TaskAttemptID taskId, TaskStatus taskStatus)
+        throws IOException, InterruptedException {
+    }
+
+    @Override
+    public boolean canCommit(TaskAttemptID taskid) throws IOException {
+      return false;
+    }
+
+    @Override
+    public void shuffleError(TaskAttemptID taskId, String message)
+        throws IOException {
+    }
+
+    @Override
+    public void fsError(TaskAttemptID taskId, String message)
+        throws IOException {
+    }
+
+    @Override
+    public void fatalError(TaskAttemptID taskId, String message)
+        throws IOException {
+    }
+
+    @Override
+    public MapTaskCompletionEventsUpdate getMapCompletionEvents(
+        JobID jobId, int fromIndex, int maxLocs, TaskAttemptID id)
+        throws IOException {
+      return null;
+    }
+
+    @Override
+    public void preempted(TaskAttemptID taskId, TaskStatus taskStatus)
+        throws IOException, InterruptedException {
+    }
+
+    @Override
+    public TaskCheckpointID getCheckpointID(TaskID taskID) {
+      return null;
+    }
+
+    @Override
+    public void setCheckpointID(TaskID tid, TaskCheckpointID cid) {
+    }
+  }
+
+  private class DummyTaskReporter extends Task.TaskReporter {
+    public DummyTaskReporter(Task task) {
+      task.super(task.getProgress(), fakeUmbilical);
+    }
+    @Override
+    public void setProgress(float progress) {
+      super.setProgress(progress);
+    }
+  }
+
+  @Test (timeout=10000)
+  public void testTaskProgress() throws Exception {
+    JobConf job = new JobConf();
+    job.setLong(MRJobConfig.TASK_PROGRESS_REPORT_INTERVAL, 1000);
+    Task task = new DummyTask();
+    task.setConf(job);
+    DummyTaskReporter reporter = new DummyTaskReporter(task);
+    Thread t = new Thread(reporter);
+    t.start();
+    Thread.sleep(2100);
+    task.setTaskDone();
+    reporter.resetDoneFlag();
+    t.join();
+    Assert.assertEquals(statusUpdateTimes, 2);
+  }
+}
\ No newline at end of file