Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2014/08/28 00:23:28 UTC

[01/22] git commit: Add a section for 2.5.1 in CHANGES.txt

Repository: hadoop
Updated Branches:
  refs/heads/HDFS-6581 5ce8b6065 -> 8612590d8 (forced update)


Add a section for 2.5.1 in CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc154ab8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc154ab8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc154ab8

Branch: refs/heads/HDFS-6581
Commit: dc154ab86d694e68f54c635043eb55d501d0e242
Parents: e871955
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Aug 27 00:55:19 2014 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Wed Aug 27 00:55:19 2014 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt | 12 ++++++++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     | 12 ++++++++++++
 hadoop-mapreduce-project/CHANGES.txt            | 12 ++++++++++++
 hadoop-yarn-project/CHANGES.txt                 | 12 ++++++++++++
 4 files changed, 48 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc154ab8/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0291c75..9242ca4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -695,6 +695,18 @@ Release 2.6.0 - UNRELEASED
     HADOOP-10989. Work around buggy getgrouplist() implementations on Linux that
     return 0 on failure. (cnauroth)
 
+Release 2.5.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.5.0 - 2014-08-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc154ab8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 31ed15c..4e60c46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -638,6 +638,18 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-4852. libhdfs documentation is out of date. (cnauroth)
 
+Release 2.5.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.5.0 - 2014-08-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc154ab8/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index ddf21ed..4cd71c0 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -258,6 +258,18 @@ Release 2.6.0 - UNRELEASED
     MAPREDUCE-6044. Fully qualified intermediate done dir path breaks per-user dir
     creation on Windows. (zjshen)
 
+Release 2.5.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.5.0 - 2014-08-11
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc154ab8/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 5b61b41..916816e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -237,6 +237,18 @@ Release 2.6.0 - UNRELEASED
     YARN-2434. RM should not recover containers from previously failed attempt
     when AM restart is not enabled (Jian He via jlowe)
 
+Release 2.5.1 - UNRELEASED
+
+  INCOMPATIBLE CHANGES
+
+  NEW FEATURES
+
+  IMPROVEMENTS
+
+  OPTIMIZATIONS
+
+  BUG FIXES
+
 Release 2.5.0 - 2014-08-11
 
   INCOMPATIBLE CHANGES


[04/22] git commit: YARN-2035. FileSystemApplicationHistoryStore should not make working dir when it already exists. Contributed by Jonathan Eagles.

Posted by ar...@apache.org.
YARN-2035. FileSystemApplicationHistoryStore should not make working dir when it already exists. Contributed by Jonathan Eagles.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d778abf0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d778abf0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d778abf0

Branch: refs/heads/HDFS-6581
Commit: d778abf022b415c64223153814d4188c2b3dd797
Parents: d16bfd1
Author: Zhijie Shen <zj...@apache.org>
Authored: Wed Aug 27 02:01:00 2014 -0700
Committer: Zhijie Shen <zj...@apache.org>
Committed: Wed Aug 27 02:01:00 2014 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                 |  3 +
 .../FileSystemApplicationHistoryStore.java      | 14 ++++-
 .../TestFileSystemApplicationHistoryStore.java  | 62 +++++++++++++++++++-
 3 files changed, 75 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d778abf0/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index eefa547..36d304c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -240,6 +240,9 @@ Release 2.6.0 - UNRELEASED
     YARN-2434. RM should not recover containers from previously failed attempt
     when AM restart is not enabled (Jian He via jlowe)
 
+    YARN-2035. FileSystemApplicationHistoryStore should not make working dir
+    when it already exists. (Jonathan Eagles via zjshen)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d778abf0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
index a5725eb..a2d9140 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java
@@ -110,15 +110,23 @@ public class FileSystemApplicationHistoryStore extends AbstractService
     super(FileSystemApplicationHistoryStore.class.getName());
   }
 
+  protected FileSystem getFileSystem(Path path, Configuration conf) throws Exception {
+    return path.getFileSystem(conf);
+  }
+
   @Override
   public void serviceInit(Configuration conf) throws Exception {
     Path fsWorkingPath =
         new Path(conf.get(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI));
     rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME);
     try {
-      fs = fsWorkingPath.getFileSystem(conf);
-      fs.mkdirs(rootDirPath);
-      fs.setPermission(rootDirPath, ROOT_DIR_UMASK);
+      fs = getFileSystem(fsWorkingPath, conf);
+
+      if (!fs.isDirectory(rootDirPath)) {
+        fs.mkdirs(rootDirPath);
+        fs.setPermission(rootDirPath, ROOT_DIR_UMASK);
+      }
+
     } catch (IOException e) {
       LOG.error("Error when initializing FileSystemHistoryStorage", e);
       throw e;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d778abf0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
index d31018c..552a5e5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestFileSystemApplicationHistoryStore.java
@@ -20,9 +20,17 @@ package org.apache.hadoop.yarn.server.applicationhistoryservice;
 
 import java.io.IOException;
 import java.net.URI;
+import java.net.URISyntaxException;
 
 import org.junit.Assert;
 
+import static org.mockito.Mockito.any;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -53,6 +61,11 @@ public class TestFileSystemApplicationHistoryStore extends
   @Before
   public void setup() throws Exception {
     fs = new RawLocalFileSystem();
+    initStore(fs);
+  }
+
+  private void initStore(final FileSystem fs) throws IOException,
+      URISyntaxException {
     Configuration conf = new Configuration();
     fs.initialize(new URI("/"), conf);
     fsWorkingPath =
@@ -61,7 +74,12 @@ public class TestFileSystemApplicationHistoryStore extends
     fs.delete(fsWorkingPath, true);
     conf.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
       fsWorkingPath.toString());
-    store = new FileSystemApplicationHistoryStore();
+    store = new FileSystemApplicationHistoryStore() {
+      @Override
+      protected FileSystem getFileSystem(Path path, Configuration conf) {
+        return fs;
+      }
+    };
     store.init(conf);
     store.start();
   }
@@ -243,4 +261,46 @@ public class TestFileSystemApplicationHistoryStore extends
     testWriteHistoryData(3, false, true);
     testReadHistoryData(3, false, true);
   }
+
+  @Test
+  public void testInitExistingWorkingDirectoryInSafeMode() throws Exception {
+    LOG.info("Starting testInitExistingWorkingDirectoryInSafeMode");
+    tearDown();
+
+    // Setup file system to inject startup conditions
+    FileSystem fs = spy(new RawLocalFileSystem());
+    doReturn(true).when(fs).isDirectory(any(Path.class));
+
+    try {
+      initStore(fs);
+    } catch (Exception e) {
+      Assert.fail("Exception should not be thrown: " + e);
+    }
+
+    // Make sure that directory creation was not attempted
+    verify(fs, times(1)).isDirectory(any(Path.class));
+    verify(fs, times(0)).mkdirs(any(Path.class));
+  }
+
+  @Test
+  public void testInitNonExistingWorkingDirectoryInSafeMode() throws Exception {
+    LOG.info("Starting testInitNonExistingWorkingDirectoryInSafeMode");
+    tearDown();
+
+    // Setup file system to inject startup conditions
+    FileSystem fs = spy(new RawLocalFileSystem());
+    doReturn(false).when(fs).isDirectory(any(Path.class));
+    doThrow(new IOException()).when(fs).mkdirs(any(Path.class));
+
+    try {
+      initStore(fs);
+      Assert.fail("Exception should have been thrown");
+    } catch (Exception e) {
+      // Expected failure
+    }
+
+    // Make sure that directory creation was attempted
+    verify(fs, times(1)).isDirectory(any(Path.class));
+    verify(fs, times(1)).mkdirs(any(Path.class));
+  }
 }
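
For reference, the pattern above in isolation: guard mkdirs behind an
isDirectory check so that an already-provisioned working directory (for
example, when HDFS is still in safe mode at startup) never triggers a failing
write, and route the FileSystem lookup through a protected factory method so a
test can substitute a Mockito spy. A minimal sketch, assuming only the Hadoop
FileSystem API; the class name is hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class GuardedDirService {
      private FileSystem fs;

      // Test seam: a unit test overrides this to return a spy or stub.
      protected FileSystem getFileSystem(Path path, Configuration conf)
          throws IOException {
        return path.getFileSystem(conf);
      }

      public void init(Path rootDirPath, Configuration conf)
          throws IOException {
        fs = getFileSystem(rootDirPath, conf);
        // Only mutate the file system when the directory is missing.
        if (!fs.isDirectory(rootDirPath)) {
          fs.mkdirs(rootDirPath);
        }
      }
    }

The two new tests then fall out naturally: a spy that returns true from
isDirectory() must never see mkdirs() (times(0)), while a spy that returns
false and throws from mkdirs() must still propagate the failure.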


[13/22] git commit: HADOOP-10748. HttpServer2 should not load JspServlet. Contributed by Haohui Mai.

Posted by ar...@apache.org.
HADOOP-10748. HttpServer2 should not load JspServlet. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdd3bc5f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdd3bc5f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdd3bc5f

Branch: refs/heads/HDFS-6581
Commit: fdd3bc5f45da615db4fd51cc07cb7d44c211150d
Parents: 26ebdd8
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Aug 27 13:26:25 2014 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Aug 27 13:26:25 2014 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt         |  2 ++
 .../main/java/org/apache/hadoop/http/HttpServer2.java   | 12 ++++++++++++
 2 files changed, 14 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdd3bc5f/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 45e38d3..b13cd79 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -323,6 +323,8 @@ Trunk (Unreleased)
 
     HADOOP-10996. Stop violence in the *_HOME (aw)
 
+    HADOOP-10748. HttpServer2 should not load JspServlet. (wheat9)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdd3bc5f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index f84ade0..8aa777b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -44,6 +44,7 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
 
+import com.google.common.collect.ImmutableMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -415,6 +416,17 @@ public final class HttpServer2 implements FilterContainer {
   private static WebAppContext createWebAppContext(String name,
       Configuration conf, AccessControlList adminsAcl, final String appDir) {
     WebAppContext ctx = new WebAppContext();
+    ctx.setDefaultsDescriptor(null);
+    ServletHolder holder = new ServletHolder(new DefaultServlet());
+    Map<String, String> params = ImmutableMap. <String, String> builder()
+            .put("acceptRanges", "true")
+            .put("dirAllowed", "false")
+            .put("gzip", "true")
+            .put("useFileMappedBuffer", "true")
+            .build();
+    holder.setInitParameters(params);
+    ctx.setWelcomeFiles(new String[] {"index.html"});
+    ctx.addServlet(holder, "/");
     ctx.setDisplayName(name);
     ctx.setContextPath("/");
     ctx.setWar(appDir + "/" + name);
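
The JspServlet was being pulled in through Jetty's default descriptor
(webdefault.xml); passing null to setDefaultsDescriptor() keeps it out, at the
cost of re-registering a DefaultServlet for static content by hand, which is
what the init parameters above restore. A condensed sketch of the same setup,
assuming the Jetty 6 packaging (org.mortbay.*) bundled with Hadoop 2.x and a
plain HashMap in place of Guava's ImmutableMap:

    import java.util.HashMap;
    import java.util.Map;
    import org.mortbay.jetty.servlet.DefaultServlet;
    import org.mortbay.jetty.servlet.ServletHolder;
    import org.mortbay.jetty.webapp.WebAppContext;

    public class NoJspWebAppContext {
      public static WebAppContext create(String name, String appDir) {
        WebAppContext ctx = new WebAppContext();
        // A null defaults descriptor stops Jetty from reading
        // webdefault.xml, where JspServlet would otherwise be registered.
        ctx.setDefaultsDescriptor(null);
        ServletHolder holder = new ServletHolder(new DefaultServlet());
        Map<String, String> params = new HashMap<String, String>();
        params.put("acceptRanges", "true");
        params.put("dirAllowed", "false");
        params.put("gzip", "true");
        params.put("useFileMappedBuffer", "true");
        holder.setInitParameters(params);
        ctx.setWelcomeFiles(new String[] {"index.html"});
        ctx.addServlet(holder, "/");
        ctx.setDisplayName(name);
        ctx.setContextPath("/");
        ctx.setWar(appDir + "/" + name);
        return ctx;
      }
    }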


[12/22] git commit: HDFS-6938. Cleanup javac warnings in FSNamesystem. Contributed by Charles Lamb.

Posted by ar...@apache.org.
HDFS-6938. Cleanup javac warnings in FSNamesystem. Contributed by Charles Lamb.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/26ebdd84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/26ebdd84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/26ebdd84

Branch: refs/heads/HDFS-6581
Commit: 26ebdd849b23243b31e58c44d0d363e11b42fc52
Parents: cd9182d
Author: Haohui Mai <wh...@apache.org>
Authored: Wed Aug 27 11:10:30 2014 -0700
Committer: Haohui Mai <wh...@apache.org>
Committed: Wed Aug 27 11:14:39 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt               |  2 ++
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 10 ----------
 2 files changed, 2 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/26ebdd84/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8dd3ebe..99d5c01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -647,6 +647,8 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6892. Add XDR packaging method for each NFS request (brandonli)
 
+    HDFS-6938. Cleanup javac warnings in FSNamesystem (Charles Lamb via wheat9)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/26ebdd84/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 81d5a22..6d750bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -106,7 +106,6 @@ import java.lang.management.ManagementFactory;
 import java.net.InetAddress;
 import java.net.URI;
 import java.security.GeneralSecurityException;
-import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -120,7 +119,6 @@ import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Condition;
 import java.util.concurrent.locks.ReentrantLock;
@@ -137,8 +135,6 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CipherSuite;
-import org.apache.hadoop.crypto.CryptoCodec;
-import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.CacheFlag;
@@ -182,7 +178,6 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZoneWithId;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -543,9 +538,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   private final NNConf nnConf;
 
   private KeyProviderCryptoExtension provider = null;
-  private KeyProvider.Options providerOptions = null;
-
-  private final CryptoCodec codec;
 
   private volatile boolean imageLoaded = false;
   private final Condition cond;
@@ -772,8 +764,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     } else {
       LOG.info("Found KeyProvider: " + provider.toString());
     }
-    providerOptions = KeyProvider.options(conf);
-    this.codec = CryptoCodec.getInstance(conf);
     if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY,
                         DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) {
       LOG.info("Enabling async auditlog");


[02/22] git commit: Fix CHANGES.txt entry for MAPREDUCE-6033.

Posted by ar...@apache.org.
Fix CHANGES.txt entry for MAPREDUCE-6033.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d9b77d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d9b77d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d9b77d6

Branch: refs/heads/HDFS-6581
Commit: 2d9b77d6170fe38757f7f48a4492f17ac669cbc2
Parents: dc154ab
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Aug 27 01:16:52 2014 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Wed Aug 27 01:16:52 2014 -0700

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d9b77d6/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 4cd71c0..a6d2981 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -270,6 +270,9 @@ Release 2.5.1 - UNRELEASED
 
   BUG FIXES
 
+    MAPREDUCE-6033. Updated access check for displaying job information 
+    (Yu Gao via Eric Yang)
+
 Release 2.5.0 - 2014-08-11
 
   INCOMPATIBLE CHANGES
@@ -351,9 +354,6 @@ Release 2.5.0 - 2014-08-11
 
   BUG FIXES 
 
-    MAPREDUCE-6033. Updated access check for displaying job information 
-    (Yu Gao via Eric Yang)
-
     MAPREDUCE-5759. Remove unnecessary conf load in Limits (Sandy Ryza)
 
     MAPREDUCE-5014. Extend Distcp to accept a custom CopyListing.


[05/22] git commit: HADOOP-11002. shell escapes are incompatible with previous releases (aw)

Posted by ar...@apache.org.
HADOOP-11002. shell escapes are incompatible with previous releases (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1618a2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1618a2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1618a2a

Branch: refs/heads/HDFS-6581
Commit: a1618a2a77ef241b23058809037f93ea00da9329
Parents: d778abf
Author: Allen Wittenauer <aw...@apache.org>
Authored: Tue Aug 26 14:40:46 2014 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 27 06:56:59 2014 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../src/main/bin/hadoop-functions.sh            | 31 ++++++--------------
 2 files changed, 11 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1618a2a/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 9242ca4..2270df3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -319,6 +319,8 @@ Trunk (Unreleased)
     HADOOP-10925. Compilation fails in native link0 function on Windows.
     (cnauroth)
 
+    HADOOP-11002. shell escapes are incompatible with previous releases (aw)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1618a2a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index f2437fa..ab61b84 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -59,8 +59,7 @@ function hadoop_bootstrap_init
   TOOL_PATH=${TOOL_PATH:-${HADOOP_PREFIX}/share/hadoop/tools/lib/*}
 
   export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
-
-  
+ 
   # defaults
   export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
 }
@@ -94,7 +93,6 @@ function hadoop_exec_hadoopenv
   fi
 }
 
-
 function hadoop_basic_init
 {
   # Some of these are also set in hadoop-env.sh.
@@ -446,7 +444,6 @@ function hadoop_add_to_classpath_mapred
   hadoop_add_classpath "${HADOOP_MAPRED_HOME}/${MAPRED_DIR}"'/*'
 }
 
-
 function hadoop_add_to_classpath_userpath
 {
   # Add the user-specified HADOOP_CLASSPATH to the
@@ -551,7 +548,6 @@ function hadoop_java_setup
   fi
 }
 
-
 function hadoop_finalize_libpaths
 {
   if [[ -n "${JAVA_LIBRARY_PATH}" ]]; then
@@ -564,17 +560,14 @@ function hadoop_finalize_libpaths
 #
 # fill in any last minute options that might not have been defined yet
 #
-# Note that we are replacing ' ' with '\ ' so that directories with
-# spaces work correctly when run exec blah
-#
 function hadoop_finalize_hadoop_opts
 {
-  hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR/ /\ }"
-  hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE/ /\ }"
-  hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_PREFIX/ /\ }"
-  hadoop_add_param HADOOP_OPTS hadoop.id.str "-Dhadoop.id.str=${HADOOP_IDENT_STRING/ /\ }"
+  hadoop_add_param HADOOP_OPTS hadoop.log.dir "-Dhadoop.log.dir=${HADOOP_LOG_DIR}"
+  hadoop_add_param HADOOP_OPTS hadoop.log.file "-Dhadoop.log.file=${HADOOP_LOGFILE}"
+  hadoop_add_param HADOOP_OPTS hadoop.home.dir "-Dhadoop.home.dir=${HADOOP_PREFIX}"
+  hadoop_add_param HADOOP_OPTS hadoop.id.str "-Dhadoop.id.str=${HADOOP_IDENT_STRING}"
   hadoop_add_param HADOOP_OPTS hadoop.root.logger "-Dhadoop.root.logger=${HADOOP_ROOT_LOGGER}"
-  hadoop_add_param HADOOP_OPTS hadoop.policy.file "-Dhadoop.policy.file=${HADOOP_POLICYFILE/ /\ }"
+  hadoop_add_param HADOOP_OPTS hadoop.policy.file "-Dhadoop.policy.file=${HADOOP_POLICYFILE}"
   hadoop_add_param HADOOP_OPTS hadoop.security.logger "-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER}"
 }
 
@@ -724,10 +717,8 @@ function hadoop_java_exec
   local command=$1
   local class=$2
   shift 2
-  # we eval this so that paths with spaces work
   #shellcheck disable=SC2086
-  eval exec "$JAVA" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
-
+  exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
 }
 
 function hadoop_start_daemon
@@ -739,7 +730,7 @@ function hadoop_start_daemon
   local class=$2
   shift 2
   #shellcheck disable=SC2086
-  eval exec "$JAVA" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
+  exec "${JAVA}" "-Dproc_${command}" ${HADOOP_OPTS} "${class}" "$@"
 }
 
 function hadoop_start_daemon_wrapper
@@ -802,9 +793,7 @@ function hadoop_start_secure_daemon
   # where to send stderr.  same thing, except &2 = stderr
   local daemonerrfile=$5
   shift 5
-  
-  
-  
+ 
   hadoop_rotate_log "${daemonoutfile}"
   hadoop_rotate_log "${daemonerrfile}"
   
@@ -925,7 +914,6 @@ function hadoop_stop_daemon
   fi
 }
 
-
 function hadoop_stop_secure_daemon
 {
   local command=$1
@@ -984,7 +972,6 @@ function hadoop_daemon_handler
   esac
 }
 
-
 function hadoop_secure_daemon_handler
 {
   local daemonmode=$1
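
The hazard being removed is double evaluation: once a command line is
flattened into a single string and re-parsed (eval, or a shell -c), any
embedded space splits an argument in two, while discrete argv entries pass
through untouched. A small Java illustration of the same contrast using
ProcessBuilder; the path is an example:

    import java.io.IOException;

    public class ArgvVsShellReparse {
      public static void main(String[] args)
          throws IOException, InterruptedException {
        String dirWithSpace = "/tmp/log dir";  // example path with a space
        // Discrete argv entries, analogous to the plain exec above: the
        // argument reaches ls intact, space and all.
        new ProcessBuilder("ls", "-d", dirWithSpace)
            .inheritIO().start().waitFor();
        // Flattened and re-parsed by a shell, analogous to the removed
        // eval exec: the space now splits the path into two arguments.
        new ProcessBuilder("sh", "-c", "ls -d " + dirWithSpace)
            .inheritIO().start().waitFor();
      }
    }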


[20/22] git commit: HDFS-6924. Add new RAM_DISK storage type. (Arpit Agarwal)

Posted by ar...@apache.org.
HDFS-6924. Add new RAM_DISK storage type. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e81c4fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e81c4fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e81c4fa

Branch: refs/heads/HDFS-6581
Commit: 5e81c4fa57bf785a0d1b1eb75b50d4b2dfa40f50
Parents: 6d12536
Author: arp <ar...@apache.org>
Authored: Wed Aug 27 09:03:45 2014 -0700
Committer: arp <ar...@apache.org>
Committed: Wed Aug 27 15:23:02 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt       | 4 ++++
 .../src/main/java/org/apache/hadoop/hdfs/StorageType.java   | 3 ++-
 .../java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java    | 4 ++++
 hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto   | 2 +-
 .../org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java     | 9 ++++++---
 .../apache/hadoop/hdfs/server/datanode/TestDataDirs.java    | 5 ++++-
 6 files changed, 21 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e81c4fa/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
index 706c03a..fc6e0e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
@@ -2,3 +2,7 @@
 
     HDFS-6921. Add LazyPersist flag to FileStatus. (Arpit Agarwal)
 
+    HDFS-6924. Add new RAM_DISK storage type. (Arpit Agarwal)
+
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e81c4fa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
index 3d8133c..51724f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
@@ -32,7 +32,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceStability.Unstable
 public enum StorageType {
   DISK,
-  SSD;
+  SSD,
+  RAM_DISK;
 
   public static final StorageType DEFAULT = DISK;
   public static final StorageType[] EMPTY_ARRAY = {};

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e81c4fa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 5efede7..5167597 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -1703,6 +1703,8 @@ public class PBHelper {
       return StorageTypeProto.DISK;
     case SSD:
       return StorageTypeProto.SSD;
+    case RAM_DISK:
+      return StorageTypeProto.RAM_DISK;
     default:
       throw new IllegalStateException(
           "BUG: StorageType not found, type=" + type);
@@ -1731,6 +1733,8 @@ public class PBHelper {
         return StorageType.DISK;
       case SSD:
         return StorageType.SSD;
+      case RAM_DISK:
+        return StorageType.RAM_DISK;
       default:
         throw new IllegalStateException(
             "BUG: StorageTypeProto not found, type=" + type);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e81c4fa/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index cbb51f9..b54638e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -158,6 +158,7 @@ message FsPermissionProto {
 enum StorageTypeProto {
   DISK = 1;
   SSD = 2;
+  RAM_DISK = 3;
 }
 
 /**
@@ -260,7 +261,6 @@ message HdfsFileStatusProto {
   // Optional field for fileId
   optional uint64 fileId = 13 [default = 0]; // default as an invalid id
   optional int32 childrenNum = 14 [default = -1];
-
   // Optional field for file encryption
   optional FileEncryptionInfoProto fileEncryptionInfo = 15;
   optional bool isLazyPersist = 16 [default = false];

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e81c4fa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index cb85c7d..98fd59a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -448,13 +448,16 @@ public class TestPBHelper {
         DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2",
             AdminStates.DECOMMISSIONED),
         DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", 
-            AdminStates.NORMAL)
+            AdminStates.NORMAL),
+        DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h4",
+            AdminStates.NORMAL),
     };
-    String[] storageIDs = {"s1", "s2", "s3"};
+    String[] storageIDs = {"s1", "s2", "s3", "s4"};
     StorageType[] media = {
         StorageType.DISK,
         StorageType.SSD,
-        StorageType.DISK
+        StorageType.DISK,
+        StorageType.RAM_DISK
     };
     LocatedBlock lb = new LocatedBlock(
         new ExtendedBlock("bp12", 12345, 10, 53),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e81c4fa/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
index 53babb4..c0b4f9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataDirs.java
@@ -44,10 +44,11 @@ public class TestDataDirs {
     File dir1 = new File("/dir1");
     File dir2 = new File("/dir2");
     File dir3 = new File("/dir3");
+    File dir4 = new File("/dir4");
 
     // Verify that a valid string is correctly parsed, and that storage
     // type is not case-sensitive
-    String locations1 = "[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3";
+    String locations1 = "[disk]/dir0,[DISK]/dir1,[sSd]/dir2,[disK]/dir3,[ram_disk]/dir4";
     conf.set(DFS_DATANODE_DATA_DIR_KEY, locations1);
     locations = DataNode.getStorageLocations(conf);
     assertThat(locations.size(), is(4));
@@ -59,6 +60,8 @@ public class TestDataDirs {
     assertThat(locations.get(2).getUri(), is(dir2.toURI()));
     assertThat(locations.get(3).getStorageType(), is(StorageType.DISK));
     assertThat(locations.get(3).getUri(), is(dir3.toURI()));
+    assertThat(locations.get(4).getStorageType(), is(StorageType.RAM_DISK));
+    assertThat(locations.get(4).getUri(), is(dir4.toURI()));
 
     // Verify that an unrecognized storage type result in an exception.
     String locations2 = "[BadMediaType]/dir0,[ssd]/dir1,[disk]/dir2";
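
The bracketed prefix exercised by this test ([disk]/dir0 through
[ram_disk]/dir4, matched case-insensitively, with untagged paths defaulting
to DISK) maps each configured directory to a StorageType. A simplified,
self-contained sketch of that parsing; the parser itself is hypothetical, but
it reproduces the behaviour the assertions check, including the exception for
an unrecognized tag like [BadMediaType]:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Locale;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class StorageLocationParser {
      enum StorageType { DISK, SSD, RAM_DISK }

      private static final Pattern TAGGED =
          Pattern.compile("^\\[(\\w+)\\](.+)$");

      static List<String> parse(String confValue) {
        List<String> out = new ArrayList<String>();
        for (String entry : confValue.split(",")) {
          Matcher m = TAGGED.matcher(entry.trim());
          StorageType type = StorageType.DISK;  // untagged paths default to DISK
          String path = entry.trim();
          if (m.matches()) {
            // valueOf throws IllegalArgumentException for unknown tags.
            type = StorageType.valueOf(m.group(1).toUpperCase(Locale.ROOT));
            path = m.group(2);
          }
          out.add(type + " -> " + path);
        }
        return out;
      }

      public static void main(String[] args) {
        System.out.println(parse("[disk]/dir0,[sSd]/dir2,[ram_disk]/dir4"));
      }
    }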


[17/22] git commit: HDFS-6879. Adding tracing to Hadoop RPC. Contributed by Masatake Iwasaki.

Posted by ar...@apache.org.
HDFS-6879. Adding tracing to Hadoop RPC. Contributed by Masatake Iwasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6962510f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6962510f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6962510f

Branch: refs/heads/HDFS-6581
Commit: 6962510f729717f776929708813f99a28e582f34
Parents: b6b95ff
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Wed Aug 27 14:12:05 2014 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Wed Aug 27 14:12:05 2014 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/pom.xml     |   4 +
 .../main/java/org/apache/hadoop/ipc/Client.java |   8 +
 .../apache/hadoop/ipc/ProtobufRpcEngine.java    |  20 +-
 .../main/java/org/apache/hadoop/ipc/Server.java |  43 ++-
 .../apache/hadoop/ipc/WritableRpcEngine.java    |  18 +-
 .../apache/hadoop/tracing/SpanReceiverHost.java | 153 ++++++++++
 .../java/org/apache/hadoop/util/ProtoUtil.java  |  11 +
 .../src/main/proto/RpcHeader.proto              |  13 +
 .../hadoop-common/src/site/apt/Tracing.apt.vm   | 169 +++++++++++
 hadoop-hdfs-project/hadoop-hdfs/pom.xml         |   4 +
 .../hadoop/hdfs/server/datanode/DataNode.java   |   7 +
 .../hadoop/hdfs/server/namenode/NameNode.java   |   8 +
 .../org/apache/hadoop/tracing/TestTracing.java  | 280 +++++++++++++++++++
 hadoop-project/pom.xml                          |   5 +
 hadoop-project/src/site/site.xml                |   1 +
 15 files changed, 738 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-common-project/hadoop-common/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 09f1c5a..ae495be 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -225,6 +225,10 @@
     </dependency>
 
     <dependency>
+      <groupId>org.htrace</groupId>
+      <artifactId>htrace-core</artifactId>
+    </dependency>
+    <dependency>
       <groupId>org.apache.zookeeper</groupId>
       <artifactId>zookeeper</artifactId>
       <exclusions>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 158445f..2f482c2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -88,6 +88,7 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.htrace.Trace;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -694,6 +695,9 @@ public class Client {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Connecting to "+server);
         }
+        if (Trace.isTracing()) {
+          Trace.addTimelineAnnotation("IPC client connecting to " + server);
+        }
         short numRetries = 0;
         Random rand = null;
         while (true) {
@@ -758,6 +762,10 @@ public class Client {
           // update last activity time
           touch();
 
+          if (Trace.isTracing()) {
+            Trace.addTimelineAnnotation("IPC client connected to " + server);
+          }
+
           // start the receiver thread after the socket connection has been set
           // up
           start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 64615d2..0ccdb71 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -48,6 +48,9 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.Time;
+import org.htrace.Sampler;
+import org.htrace.Trace;
+import org.htrace.TraceScope;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
@@ -191,6 +194,16 @@ public class ProtobufRpcEngine implements RpcEngine {
             + method.getName() + "]");
       }
 
+      TraceScope traceScope = null;
+      // if Tracing is on then start a new span for this rpc.
+      // guard it in the if statement to make sure there isn't
+      // any extra string manipulation.
+      if (Trace.isTracing()) {
+        traceScope = Trace.startSpan(
+            method.getDeclaringClass().getCanonicalName() +
+            "." + method.getName());
+      }
+
       RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
       
       if (LOG.isTraceEnabled()) {
@@ -212,8 +225,13 @@ public class ProtobufRpcEngine implements RpcEngine {
               remoteId + ": " + method.getName() +
                 " {" + e + "}");
         }
-
+        if (Trace.isTracing()) {
+          traceScope.getSpan().addTimelineAnnotation(
+              "Call got exception: " + e.getMessage());
+        }
         throw new ServiceException(e);
+      } finally {
+        if (traceScope != null) traceScope.close();
       }
 
       if (LOG.isDebugEnabled()) {
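
Both RPC engines in this commit wrap the client call in the same
guard/start/close shape: start a span only when tracing is already active, so
untraced calls pay no string-building cost, and close the scope in finally so
an exception cannot leak it. Condensed below using only the org.htrace calls
that appear in this diff; the wrapper and span label are illustrative:

    import java.util.concurrent.Callable;
    import org.htrace.Trace;
    import org.htrace.TraceScope;

    public class TracedInvocation {
      static <T> T invokeTraced(String spanName, Callable<T> rpc)
          throws Exception {
        TraceScope traceScope = null;
        if (Trace.isTracing()) {
          traceScope = Trace.startSpan(spanName);  // e.g. "Protocol.method"
        }
        try {
          return rpc.call();
        } finally {
          if (traceScope != null) {
            traceScope.close();  // ends the span whether the call threw or not
          }
        }
      }
    }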

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 24dd0c2..021e035 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.ipc.ProtobufRpcEngine.RpcResponseMessageWrapper;
@@ -115,6 +116,10 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
+import org.htrace.Span;
+import org.htrace.Trace;
+import org.htrace.TraceInfo;
+import org.htrace.TraceScope;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.ByteString;
@@ -506,6 +511,7 @@ public abstract class Server {
     private ByteBuffer rpcResponse;       // the response for this call
     private final RPC.RpcKind rpcKind;
     private final byte[] clientId;
+    private final Span traceSpan; // the tracing span on the server side
 
     public Call(int id, int retryCount, Writable param, 
         Connection connection) {
@@ -515,6 +521,11 @@ public abstract class Server {
 
     public Call(int id, int retryCount, Writable param, Connection connection,
         RPC.RpcKind kind, byte[] clientId) {
+      this(id, retryCount, param, connection, kind, clientId, null);
+    }
+
+    public Call(int id, int retryCount, Writable param, Connection connection,
+        RPC.RpcKind kind, byte[] clientId, Span span) {
       this.callId = id;
       this.retryCount = retryCount;
       this.rpcRequest = param;
@@ -523,6 +534,7 @@ public abstract class Server {
       this.rpcResponse = null;
       this.rpcKind = kind;
       this.clientId = clientId;
+      this.traceSpan = span;
     }
     
     @Override
@@ -1921,9 +1933,18 @@ public abstract class Server {
             RpcErrorCodeProto.FATAL_DESERIALIZING_REQUEST, err);
       }
         
+      Span traceSpan = null;
+      if (header.hasTraceInfo()) {
+        // If the incoming RPC included tracing info, always continue the trace
+        TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
+                                             header.getTraceInfo().getParentId());
+        traceSpan = Trace.startSpan(rpcRequest.toString(), parentSpan).detach();
+      }
+
       Call call = new Call(header.getCallId(), header.getRetryCount(),
-          rpcRequest, this, ProtoUtil.convert(header.getRpcKind()), header
-              .getClientId().toByteArray());
+          rpcRequest, this, ProtoUtil.convert(header.getRpcKind()),
+          header.getClientId().toByteArray(), traceSpan);
+
       callQueue.put(call);              // queue the call; maybe blocked here
       incRpcCount();  // Increment the rpc count
     }
@@ -2067,6 +2088,7 @@ public abstract class Server {
       ByteArrayOutputStream buf = 
         new ByteArrayOutputStream(INITIAL_RESP_BUF_SIZE);
       while (running) {
+        TraceScope traceScope = null;
         try {
           final Call call = callQueue.take(); // pop the queue; maybe blocked here
           if (LOG.isDebugEnabled()) {
@@ -2083,6 +2105,10 @@ public abstract class Server {
           Writable value = null;
 
           CurCall.set(call);
+          if (call.traceSpan != null) {
+            traceScope = Trace.continueSpan(call.traceSpan);
+          }
+
           try {
             // Make the call as the user via Subject.doAs, thus associating
             // the call with the Subject
@@ -2156,9 +2182,22 @@ public abstract class Server {
         } catch (InterruptedException e) {
           if (running) {                          // unexpected -- log it
             LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
+            if (Trace.isTracing()) {
+              traceScope.getSpan().addTimelineAnnotation("unexpectedly interrupted: " +
+                  StringUtils.stringifyException(e));
+            }
           }
         } catch (Exception e) {
           LOG.info(Thread.currentThread().getName() + " caught an exception", e);
+          if (Trace.isTracing()) {
+            traceScope.getSpan().addTimelineAnnotation("Exception: " +
+                StringUtils.stringifyException(e));
+          }
+        } finally {
+          if (traceScope != null) {
+            traceScope.close();
+          }
+          IOUtils.cleanup(LOG, traceScope);
         }
       }
       LOG.debug(Thread.currentThread().getName() + ": exiting");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
index 04ab4dc..4b2dfe0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.*;
+import org.htrace.Trace;
+import org.htrace.TraceScope;
 
 /** An RpcEngine implementation for Writable data. */
 @InterfaceStability.Evolving
@@ -227,9 +229,19 @@ public class WritableRpcEngine implements RpcEngine {
       if (LOG.isDebugEnabled()) {
         startTime = Time.now();
       }
-
-      ObjectWritable value = (ObjectWritable)
-        client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId);
+      TraceScope traceScope = null;
+      if (Trace.isTracing()) {
+        traceScope = Trace.startSpan(
+            method.getDeclaringClass().getCanonicalName() +
+            "." + method.getName());
+      }
+      ObjectWritable value;
+      try {
+        value = (ObjectWritable)
+          client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId);
+      } finally {
+        if (traceScope != null) traceScope.close();
+      }
       if (LOG.isDebugEnabled()) {
         long callTime = Time.now() - startTime;
         LOG.debug("Call: " + method.getName() + " " + callTime);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java
new file mode 100644
index 0000000..b8c7b31
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tracing/SpanReceiverHost.java
@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tracing;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashSet;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.ShutdownHookManager;
+import org.htrace.HTraceConfiguration;
+import org.htrace.SpanReceiver;
+import org.htrace.Trace;
+
+/**
+ * This class provides functions for reading the names of SpanReceivers from
+ * the Hadoop configuration, adding those SpanReceivers to the Tracer,
+ * and closing those SpanReceivers when appropriate.
+ * This class does nothing if no SpanReceiver is configured.
+ */
+@InterfaceAudience.Private
+public class SpanReceiverHost {
+  public static final String SPAN_RECEIVERS_CONF_KEY = "hadoop.trace.spanreceiver.classes";
+  private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class);
+  private Collection<SpanReceiver> receivers = new HashSet<SpanReceiver>();
+  private boolean closed = false;
+
+  private static enum SingletonHolder {
+    INSTANCE;
+    Object lock = new Object();
+    SpanReceiverHost host = null;
+  }
+
+  public static SpanReceiverHost getInstance(Configuration conf) {
+    if (SingletonHolder.INSTANCE.host != null) {
+      return SingletonHolder.INSTANCE.host;
+    }
+    synchronized (SingletonHolder.INSTANCE.lock) {
+      if (SingletonHolder.INSTANCE.host != null) {
+        return SingletonHolder.INSTANCE.host;
+      }
+      SpanReceiverHost host = new SpanReceiverHost();
+      host.loadSpanReceivers(conf);
+      SingletonHolder.INSTANCE.host = host;
+      ShutdownHookManager.get().addShutdownHook(new Runnable() {
+          public void run() {
+            SingletonHolder.INSTANCE.host.closeReceivers();
+          }
+        }, 0);
+      return SingletonHolder.INSTANCE.host;
+    }
+  }
+
+  /**
+   * Reads the names of classes specified in the
+   * "hadoop.trace.spanreceiver.classes" property and instantiates and registers
+   * them with the Tracer as SpanReceivers.
+   *
+   * The nullary constructor is called during construction, but if the classes
+   * specified implement the Configurable interface, setConfiguration() will be
+   * called on them. This allows SpanReceivers to use values from the Hadoop
+   * configuration.
+   */
+  public void loadSpanReceivers(Configuration conf) {
+    Class<?> implClass = null;
+    String[] receiverNames = conf.getTrimmedStrings(SPAN_RECEIVERS_CONF_KEY);
+    if (receiverNames == null || receiverNames.length == 0) {
+      return;
+    }
+    for (String className : receiverNames) {
+      className = className.trim();
+      try {
+        implClass = Class.forName(className);
+        receivers.add(loadInstance(implClass, conf));
+        LOG.info("SpanReceiver " + className + " was loaded successfully.");
+      } catch (ClassNotFoundException e) {
+        LOG.warn("Class " + className + " cannot be found.", e);
+      } catch (IOException e) {
+        LOG.warn("Load SpanReceiver " + className + " failed.", e);
+      }
+    }
+    for (SpanReceiver rcvr : receivers) {
+      Trace.addReceiver(rcvr);
+    }
+  }
+
+  private SpanReceiver loadInstance(Class<?> implClass, Configuration conf)
+      throws IOException {
+    SpanReceiver impl;
+    try {
+      Object o = ReflectionUtils.newInstance(implClass, conf);
+      impl = (SpanReceiver)o;
+      impl.configure(wrapHadoopConf(conf));
+    } catch (SecurityException e) {
+      throw new IOException(e);
+    } catch (IllegalArgumentException e) {
+      throw new IOException(e);
+    } catch (RuntimeException e) {
+      throw new IOException(e);
+    }
+
+    return impl;
+  }
+
+  private static HTraceConfiguration wrapHadoopConf(final Configuration conf) {
+    return new HTraceConfiguration() {
+      public static final String HTRACE_CONF_PREFIX = "hadoop.";
+
+      @Override
+      public String get(String key) {
+        return conf.get(HTRACE_CONF_PREFIX + key);
+      }
+
+      @Override
+      public String get(String key, String defaultValue) {
+        return conf.get(HTRACE_CONF_PREFIX + key, defaultValue);
+      }
+    };
+  }
+
+  /**
+   * Calls close() on all SpanReceivers created by this SpanReceiverHost.
+   */
+  public synchronized void closeReceivers() {
+    if (closed) return;
+    closed = true;
+    for (SpanReceiver rcvr : receivers) {
+      try {
+        rcvr.close();
+      } catch (IOException e) {
+        LOG.warn("Unable to close SpanReceiver correctly: " + e.getMessage(), e);
+      }
+    }
+  }
+}
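
A daemon wires this in with a single getInstance() call. The sketch below
assumes the LocalFileSpanReceiver shipped with htrace-core and its path key;
both are illustrative here, since only the "hadoop.trace.spanreceiver.classes"
key is defined by the class above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tracing.SpanReceiverHost;

    public class TracingBootstrap {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set(SpanReceiverHost.SPAN_RECEIVERS_CONF_KEY,
            "org.htrace.impl.LocalFileSpanReceiver");
        // The HTraceConfiguration wrapper above prefixes lookups with
        // "hadoop.", so receiver-specific settings live under that prefix.
        conf.set("hadoop.local-file-span-receiver.path", "/tmp/spans.json");
        // Loads, configures, and registers every listed receiver, and hooks
        // closeReceivers() into JVM shutdown.
        SpanReceiverHost.getInstance(conf);
      }
    }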

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
index 79f8692..36b5ff1 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
@@ -27,6 +27,8 @@ import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.UserInformation
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.*;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.htrace.Span;
+import org.htrace.Trace;
 
 import com.google.protobuf.ByteString;
 
@@ -165,6 +167,15 @@ public abstract class ProtoUtil {
     RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder();
     result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId)
         .setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));
+
+    // Add tracing info if we are currently tracing.
+    if (Trace.isTracing()) {
+      Span s = Trace.currentSpan();
+      result.setTraceInfo(RPCTraceInfoProto.newBuilder()
+          .setParentId(s.getSpanId())
+          .setTraceId(s.getTraceId()).build());
+    }
+
     return result.build();
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto b/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
index e8c4ada..c879150 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/RpcHeader.proto
@@ -53,6 +53,18 @@ enum RpcKindProto {
 
 
    
+/**
+ * Used to pass through the information necessary to continue
+ * a trace after an RPC is made. All we need is the traceid
+ * (so we know the overarching trace this message is a part of), and
+ * the id of the current span when this message was sent, so we know
+ * what span caused the new span we will create when this message is received.
+ */
+message RPCTraceInfoProto {
+  optional int64 traceId = 1;
+  optional int64 parentId = 2;
+}
+
 message RpcRequestHeaderProto { // the header for the RpcRequest
   enum OperationProto {
     RPC_FINAL_PACKET        = 0; // The final RPC Packet
@@ -67,6 +79,7 @@ message RpcRequestHeaderProto { // the header for the RpcRequest
   // clientId + callId uniquely identifies a request
   // retry count, 1 means this is the first retry
   optional sint32 retryCount = 5 [default = -1];
+  optional RPCTraceInfoProto traceInfo = 6; // tracing info
 }
 
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-common-project/hadoop-common/src/site/apt/Tracing.apt.vm
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/site/apt/Tracing.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/Tracing.apt.vm
new file mode 100644
index 0000000..f777dd2
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/site/apt/Tracing.apt.vm
@@ -0,0 +1,169 @@
+~~ Licensed under the Apache License, Version 2.0 (the "License");
+~~ you may not use this file except in compliance with the License.
+~~ You may obtain a copy of the License at
+~~
+~~   http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License. See accompanying LICENSE file.
+
+  ---
+  Hadoop Distributed File System-${project.version} - Enabling Dapper-like Tracing
+  ---
+  ---
+  ${maven.build.timestamp}
+
+Enabling Dapper-like Tracing in HDFS
+
+%{toc|section=1|fromDepth=0}
+
+* {Dapper-like Tracing in HDFS}
+
+** HTrace
+
+  {{{https://issues.apache.org/jira/browse/HDFS-5274}HDFS-5274}}
+  added support for tracing requests through HDFS,
+  using the open source tracing library, {{{https://github.com/cloudera/htrace}HTrace}}.
+  Setting up tracing is quite simple; however, it requires some minor changes to your client code.
+
+** SpanReceivers
+
+  The tracing system works by collecting information in structs called 'Spans'.
+  It is up to you to choose how you want to receive this information
+  by implementing the SpanReceiver interface, whose central method is:
+
++----
+public void receiveSpan(Span span);
++----
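+
+  For example, a minimal receiver could simply print each span's
+  description to standard output (an illustrative sketch, not a class
+  shipped with HTrace):
+
++----
+public class StdoutSpanReceiver implements SpanReceiver {
+  public void configure(HTraceConfiguration conf) {
+  }
+
+  public void receiveSpan(Span span) {
+    System.out.println(span.getDescription());
+  }
+
+  public void close() {
+  }
+}
++----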
+
+  Configure the SpanReceivers you'd like to use
+  by putting a comma-separated list of the fully-qualified class names of
+  classes implementing SpanReceiver
+  in the <<<hdfs-site.xml>>> property <<<hadoop.trace.spanreceiver.classes>>>.
+
++----
+  <property>
+    <name>hadoop.trace.spanreceiver.classes</name>
+    <value>org.htrace.impl.LocalFileSpanReceiver</value>
+  </property>
+  <property>
+    <name>hadoop.local-file-span-receiver.path</name>
+    <value>/var/log/hadoop/htrace.out</value>
+  </property>
++----
+
+** Setting up ZipkinSpanReceiver
+
+  Instead of implementing SpanReceiver yourself,
+  you can use <<<ZipkinSpanReceiver>>>, which uses
+  {{{https://github.com/twitter/zipkin}Zipkin}}
+  for collecting and displaying tracing data.
+
+  In order to use <<<ZipkinSpanReceiver>>>,
+  you need to download and setup {{{https://github.com/twitter/zipkin}Zipkin}} first.
+
+  You also need to add the <<<htrace-zipkin>>> jar to the classpath of Hadoop on each node.
+  Here is an example setup procedure:
+
++----
+  $ git clone https://github.com/cloudera/htrace
+  $ cd htrace/htrace-zipkin
+  $ mvn compile assembly:single
+  $ cp target/htrace-zipkin-*-jar-with-dependencies.jar $HADOOP_HOME/share/hadoop/hdfs/lib/
++----
+
+  A sample configuration for <<<ZipkinSpanReceiver>>> is shown below.
+  By adding these properties to the <<<hdfs-site.xml>>> of the NameNode and DataNodes,
+  <<<ZipkinSpanReceiver>>> is initialized on startup.
+  You also need this configuration on the client node in addition to the servers.
+
++----
+  <property>
+    <name>hadoop.trace.spanreceiver.classes</name>
+    <value>org.htrace.impl.ZipkinSpanReceiver</value>
+  </property>
+  <property>
+    <name>hadoop.zipkin.collector-hostname</name>
+    <value>192.168.1.2</value>
+  </property>
+  <property>
+    <name>hadoop.zipkin.collector-port</name>
+    <value>9410</value>
+  </property>
++----
+
+** Turning on tracing by HTrace API
+
+  In order to turn on Dapper-like tracing,
+  you will need to wrap the traced logic in a <<tracing span>> as shown below.
+  While tracing spans are running,
+  the tracing information is propagated to servers along with RPC requests.
+
+  In addition, you need to initialize <<<SpanReceiver>>> once per process.
+
++----
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.tracing.SpanReceiverHost;
+import org.htrace.Sampler;
+import org.htrace.Trace;
+import org.htrace.TraceScope;
+
+...
+
+    SpanReceiverHost.getInstance(new HdfsConfiguration());
+
+...
+
+    TraceScope ts = Trace.startSpan("Gets", Sampler.ALWAYS);
+    try {
+      ... // traced logic
+    } finally {
+      if (ts != null) ts.close();
+    }
++----
+
+** Sample code for tracing
+
+  The <<<TracingFsShell.java>>> shown below is a wrapper around FsShell
+  which starts a tracing span before invoking an HDFS shell command.
+
++----
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.tracing.SpanReceiverHost;
+import org.apache.hadoop.util.ToolRunner;
+import org.htrace.Sampler;
+import org.htrace.Trace;
+import org.htrace.TraceScope;
+
+public class TracingFsShell {
+  public static void main(String argv[]) throws Exception {
+    Configuration conf = new Configuration();
+    FsShell shell = new FsShell();
+    conf.setQuietMode(false);
+    shell.setConf(conf);
+    int res = 0;
+    SpanReceiverHost.getInstance(new HdfsConfiguration());
+    TraceScope ts = null;
+    try {
+      ts = Trace.startSpan("FsShell", Sampler.ALWAYS);
+      res = ToolRunner.run(shell, argv);
+    } finally {
+      shell.close();
+      if (ts != null) ts.close();
+    }
+    System.exit(res);
+  }
+}
++----
+
+  You can compile and execute this code as shown below.
+
++----
+$ javac -cp `hadoop classpath` TracingFsShell.java
+$ HADOOP_CLASSPATH=. hdfs TracingFsShell -put sample.txt /tmp/
++----

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 9b026f2..81eae0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -181,6 +181,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>xercesImpl</artifactId>
       <scope>compile</scope>
     </dependency>
+    <dependency>
+      <groupId>org.htrace</groupId>
+      <artifactId>htrace-core</artifactId>
+    </dependency>
   </dependencies>
 
   <build>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index df50eab..1ec91d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -180,6 +180,7 @@ import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.tracing.SpanReceiverHost;
 import org.mortbay.util.ajax.JSON;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -326,6 +327,8 @@ public class DataNode extends Configured
   private boolean isPermissionEnabled;
   private String dnUserName = null;
 
+  private SpanReceiverHost spanReceiverHost;
+
   /**
    * Create the DataNode given a configuration, an array of dataDirs,
    * and a namenode proxy
@@ -823,6 +826,7 @@ public class DataNode extends Configured
     this.dataDirs = dataDirs;
     this.conf = conf;
     this.dnConf = new DNConf(conf);
+    this.spanReceiverHost = SpanReceiverHost.getInstance(conf);
 
     if (dnConf.maxLockedMemory > 0) {
       if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
@@ -1510,6 +1514,9 @@ public class DataNode extends Configured
       MBeans.unregister(dataNodeInfoBeanName);
       dataNodeInfoBeanName = null;
     }
+    if (this.spanReceiverHost != null) {
+      this.spanReceiverHost.closeReceivers();
+    }
     if (shortCircuitRegistry != null) shortCircuitRegistry.shutdown();
     LOG.info("Shutdown complete.");
     synchronized(this) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 4072b17..bcb5a86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.tracing.SpanReceiverHost;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.GenericOptionsParser;
 import org.apache.hadoop.util.JvmPauseMonitor;
@@ -278,6 +279,7 @@ public class NameNode implements NameNodeStatusMXBean {
 
   private JvmPauseMonitor pauseMonitor;
   private ObjectName nameNodeStatusBeanName;
+  private SpanReceiverHost spanReceiverHost;
   /**
    * The namenode address that clients will use to access this namenode
    * or the name service. For HA configurations using logical URI, it
@@ -586,6 +588,9 @@ public class NameNode implements NameNodeStatusMXBean {
     if (NamenodeRole.NAMENODE == role) {
       startHttpServer(conf);
     }
+
+    this.spanReceiverHost = SpanReceiverHost.getInstance(conf);
+
     loadNamesystem(conf);
 
     rpcServer = createRpcServer(conf);
@@ -822,6 +827,9 @@ public class NameNode implements NameNodeStatusMXBean {
         MBeans.unregister(nameNodeStatusBeanName);
         nameNodeStatusBeanName = null;
       }
+      if (this.spanReceiverHost != null) {
+        this.spanReceiverHost.closeReceivers();
+      }
     }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
new file mode 100644
index 0000000..bb923a2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -0,0 +1,280 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tracing;
+
+import org.apache.commons.lang.RandomStringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.htrace.HTraceConfiguration;
+import org.htrace.Sampler;
+import org.htrace.Span;
+import org.htrace.SpanReceiver;
+import org.htrace.Trace;
+import org.htrace.TraceScope;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+public class TestTracing {
+
+  private static Configuration conf;
+  private static MiniDFSCluster cluster;
+  private static DistributedFileSystem dfs;
+
+  @Test
+  public void testSpanReceiverHost() throws Exception {
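+    // Verify that SpanReceiverHost loads the receiver class named in
+    // the configuration without throwing.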
+    Configuration conf = new Configuration();
+    conf.set(SpanReceiverHost.SPAN_RECEIVERS_CONF_KEY,
+        SetSpanReceiver.class.getName());
+    SpanReceiverHost spanReceiverHost = SpanReceiverHost.getInstance(conf);
+  }
+
+  @Test
+  public void testWriteTraceHooks() throws Exception {
+    long startTime = System.currentTimeMillis();
+    TraceScope ts = Trace.startSpan("testWriteTraceHooks", Sampler.ALWAYS);
+    Path file = new Path("traceWriteTest.dat");
+    FSDataOutputStream stream = dfs.create(file);
+
+    for (int i = 0; i < 10; i++) {
+      byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
+      stream.write(data);
+    }
+    stream.hflush();
+    stream.close();
+    long endTime = System.currentTimeMillis();
+    ts.close();
+
+    String[] expectedSpanNames = {
+      "testWriteTraceHooks",
+      "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.create",
+      "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.fsync",
+      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.complete"
+    };
+    assertSpanNamesFound(expectedSpanNames);
+
+    // The trace should last about the same amount of time as the test
+    Map<String, List<Span>> map = SetSpanReceiver.SetHolder.getMap();
+    Span s = map.get("testWriteTraceHooks").get(0);
+    Assert.assertNotNull(s);
+    long spanStart = s.getStartTimeMillis();
+    long spanEnd = s.getStopTimeMillis();
+    Assert.assertTrue(spanStart - startTime < 100);
+    Assert.assertTrue(spanEnd - endTime < 100);
+
+    // There should only be one trace id as it should all be homed in the
+    // top trace.
+    for (Span span : SetSpanReceiver.SetHolder.spans) {
+      Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
+    }
+  }
+
+  @Test
+  public void testWriteWithoutTraceHooks() throws Exception {
+    Path file = new Path("withoutTraceWriteTest.dat");
+    FSDataOutputStream stream = dfs.create(file);
+    for (int i = 0; i < 10; i++) {
+      byte[] data = RandomStringUtils.randomAlphabetic(102400).getBytes();
+      stream.write(data);
+    }
+    stream.hflush();
+    stream.close();
+    Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
+  }
+
+  @Test
+  public void testReadTraceHooks() throws Exception {
+    String fileName = "traceReadTest.dat";
+    Path filePath = new Path(fileName);
+
+    // Create the file.
+    FSDataOutputStream ostream = dfs.create(filePath);
+    for (int i = 0; i < 50; i++) {
+      byte[] data = RandomStringUtils.randomAlphabetic(10240).getBytes();
+      ostream.write(data);
+    }
+    ostream.close();
+
+    long startTime = System.currentTimeMillis();
+    TraceScope ts = Trace.startSpan("testReadTraceHooks", Sampler.ALWAYS);
+    FSDataInputStream istream = dfs.open(filePath, 10240);
+    ByteBuffer buf = ByteBuffer.allocate(10240);
+
+    int count = 0;
+    try {
+      while (istream.read(buf) > 0) {
+        count += 1;
+        buf.clear();
+        istream.seek(istream.getPos() + 5);
+      }
+    } catch (IOException ioe) {
+      // Ignore this; it's probably a seek past EOF.
+    } finally {
+      istream.close();
+    }
+    ts.getSpan().addTimelineAnnotation("count: " + count);
+    long endTime = System.currentTimeMillis();
+    ts.close();
+
+    String[] expectedSpanNames = {
+      "testReadTraceHooks",
+      "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
+      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.getBlockLocations"
+    };
+    assertSpanNamesFound(expectedSpanNames);
+
+    // The trace should last about the same amount of time as the test
+    Map<String, List<Span>> map = SetSpanReceiver.SetHolder.getMap();
+    Span s = map.get("testReadTraceHooks").get(0);
+    Assert.assertNotNull(s);
+
+    long spanStart = s.getStartTimeMillis();
+    long spanEnd = s.getStopTimeMillis();
+    Assert.assertTrue(spanStart - startTime < 100);
+    Assert.assertTrue(spanEnd - endTime < 100);
+
+    // There should only be one trace id as it should all be homed in the
+    // top trace.
+    for (Span span : SetSpanReceiver.SetHolder.spans) {
+      Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
+    }
+  }
+
+  @Test
+  public void testReadWithoutTraceHooks() throws Exception {
+    String fileName = "withoutTraceReadTest.dat";
+    Path filePath = new Path(fileName);
+
+    // Create the file.
+    FSDataOutputStream ostream = dfs.create(filePath);
+    for (int i = 0; i < 50; i++) {
+      byte[] data = RandomStringUtils.randomAlphabetic(10240).getBytes();
+      ostream.write(data);
+    }
+    ostream.close();
+
+    FSDataInputStream istream = dfs.open(filePath, 10240);
+    ByteBuffer buf = ByteBuffer.allocate(10240);
+
+    int count = 0;
+    try {
+      while (istream.read(buf) > 0) {
+        count += 1;
+        buf.clear();
+        istream.seek(istream.getPos() + 5);
+      }
+    } catch (IOException ioe) {
+      // Ignore this; it's probably a seek past EOF.
+    } finally {
+      istream.close();
+    }
+    Assert.assertTrue(SetSpanReceiver.SetHolder.size() == 0);
+  }
+
+  @Before
+  public void cleanSet() {
+    SetSpanReceiver.SetHolder.spans.clear();
+  }
+
+  @BeforeClass
+  public static void setupCluster() throws IOException {
+    conf = new Configuration();
+    conf.setLong("dfs.blocksize", 100 * 1024);
+    conf.set(SpanReceiverHost.SPAN_RECEIVERS_CONF_KEY,
+        SetSpanReceiver.class.getName());
+
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3)
+        .build();
+
+    dfs = cluster.getFileSystem();
+  }
+
+  @AfterClass
+  public static void shutDown() throws IOException {
+    cluster.shutdown();
+  }
+
+  private void assertSpanNamesFound(String[] expectedSpanNames) {
+    Map<String, List<Span>> map = SetSpanReceiver.SetHolder.getMap();
+    for (String spanName : expectedSpanNames) {
+      Assert.assertTrue("Should find a span with name " + spanName, map.get(spanName) != null);
+    }
+  }
+
+  /**
+   * Span receiver that puts all spans into a single set.
+   * This is useful for testing.
+   * <p/>
+   * We're not using HTrace's POJOReceiver here because it doesn't
+   * push all the spans to a static place, which would make testing
+   * SpanReceiverHost harder.
+   */
+  public static class SetSpanReceiver implements SpanReceiver {
+
+    public void configure(HTraceConfiguration conf) {
+    }
+
+    public void receiveSpan(Span span) {
+      SetHolder.spans.add(span);
+    }
+
+    public void close() {
+    }
+
+    public static class SetHolder {
+      public static Set<Span> spans = new HashSet<Span>();
+
+      public static int size() {
+        return spans.size();
+      }
+
+      public static Map<String, List<Span>> getMap() {
+        Map<String, List<Span>> map = new HashMap<String, List<Span>>();
+
+        for (Span s : spans) {
+          List<Span> l = map.get(s.getDescription());
+          if (l == null) {
+            l = new LinkedList<Span>();
+            map.put(s.getDescription(), l);
+          }
+          l.add(s);
+        }
+        return map;
+      }
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-project/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index be5b3d5..beaeec6 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -678,6 +678,11 @@
         <version>0.1.42</version>
       </dependency>
       <dependency>
+        <groupId>org.htrace</groupId>
+        <artifactId>htrace-core</artifactId>
+        <version>3.0.4</version>
+      </dependency>
+      <dependency>
         <groupId>org.jdom</groupId>
         <artifactId>jdom</artifactId>
         <version>1.1</version>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6962510f/hadoop-project/src/site/site.xml
----------------------------------------------------------------------
diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml
index 56288ee..a42aff0 100644
--- a/hadoop-project/src/site/site.xml
+++ b/hadoop-project/src/site/site.xml
@@ -65,6 +65,7 @@
       <item name="Service Level Authorization" href="hadoop-project-dist/hadoop-common/ServiceLevelAuth.html"/>
       <item name="HTTP Authentication" href="hadoop-project-dist/hadoop-common/HttpAuthentication.html"/>
       <item name="Hadoop KMS" href="hadoop-kms/index.html"/>
+      <item name="Tracing" href="hadoop-project-dist/hadoop-common/Tracing.html"/>
     </menu>
     
     <menu name="HDFS" inherit="top">


[03/22] git commit: YARN-1326. RM should log using RMStore at startup time. (Tsuyoshi Ozawa via kasha)

Posted by ar...@apache.org.
YARN-1326. RM should log using RMStore at startup time. (Tsuyoshi Ozawa via kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d16bfd1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d16bfd1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d16bfd1d

Branch: refs/heads/HDFS-6581
Commit: d16bfd1d0f7cd958e7041be40763cc9983a7b80a
Parents: 2d9b77d
Author: Karthik Kambatla <ka...@apache.org>
Authored: Wed Aug 27 01:43:58 2014 -0700
Committer: Karthik Kambatla <ka...@apache.org>
Committed: Wed Aug 27 01:43:58 2014 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                        |  3 +++
 .../resourcemanager/recovery/RMStateStoreFactory.java  | 13 ++++++++-----
 .../yarn/server/resourcemanager/webapp/AboutBlock.java |  1 +
 .../server/resourcemanager/webapp/dao/ClusterInfo.java |  8 ++++++++
 .../resourcemanager/webapp/TestRMWebServices.java      |  2 +-
 5 files changed, 21 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d16bfd1d/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 916816e..eefa547 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -154,6 +154,9 @@ Release 2.6.0 - UNRELEASED
     YARN-2389. Added functionality for schedulers to kill all applications in a
     queue. (Subramaniam Venkatraman Krishnan via jianhe)
 
+    YARN-1326. RM should log using RMStore at startup time. 
+    (Tsuyoshi Ozawa via kasha)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d16bfd1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreFactory.java
index f9e2869..c09ddb8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreFactory.java
@@ -17,17 +17,20 @@
 */
 package org.apache.hadoop.yarn.server.resourcemanager.recovery;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
 public class RMStateStoreFactory {
+  private static final Log LOG = LogFactory.getLog(RMStateStoreFactory.class);
   
   public static RMStateStore getStore(Configuration conf) {
-    RMStateStore store = ReflectionUtils.newInstance(
-        conf.getClass(YarnConfiguration.RM_STORE, 
-            MemoryRMStateStore.class, RMStateStore.class), 
-            conf);
-    return store;
+    Class<? extends RMStateStore> storeClass =
+        conf.getClass(YarnConfiguration.RM_STORE,
+            MemoryRMStateStore.class, RMStateStore.class);
+    LOG.info("Using RMStateStore implementation - " + storeClass);
+    return ReflectionUtils.newInstance(storeClass, conf);
   }
 }
\ No newline at end of file
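
For reference, the store implementation that the new log line reports is
selected with the yarn.resourcemanager.store.class property; an illustrative
yarn-site.xml entry (assuming the ZooKeeper-backed store) looks like:

  <property>
    <name>yarn.resourcemanager.store.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
  </property>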

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d16bfd1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
index 91b5cc1..ea5c48a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/AboutBlock.java
@@ -44,6 +44,7 @@ public class AboutBlock extends HtmlBlock {
       _("Cluster ID:", cinfo.getClusterId()).
       _("ResourceManager state:", cinfo.getState()).
       _("ResourceManager HA state:", cinfo.getHAState()).
+      _("ResourceManager RMStateStore:", cinfo.getRMStateStore()).
       _("ResourceManager started on:", Times.format(cinfo.getStartedOn())).
       _("ResourceManager version:", cinfo.getRMBuildVersion() +
           " on " + cinfo.getRMVersionBuiltOn()).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d16bfd1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java
index c96d73e..b529f21 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterInfo.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.ha.HAServiceProtocol;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.util.VersionInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
 import org.apache.hadoop.yarn.util.YarnVersionInfo;
 
 @XmlRootElement
@@ -35,6 +36,7 @@ public class ClusterInfo {
   protected long startedOn;
   protected STATE state;
   protected HAServiceProtocol.HAServiceState haState;
+  protected String rmStateStoreName;
   protected String resourceManagerVersion;
   protected String resourceManagerBuildVersion;
   protected String resourceManagerVersionBuiltOn;
@@ -51,6 +53,8 @@ public class ClusterInfo {
     this.id = ts;
     this.state = rm.getServiceState();
     this.haState = rm.getRMContext().getHAServiceState();
+    this.rmStateStoreName = rm.getRMContext().getStateStore().getClass()
+        .getName();
     this.startedOn = ts;
     this.resourceManagerVersion = YarnVersionInfo.getVersion();
     this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion();
@@ -68,6 +72,10 @@ public class ClusterInfo {
     return this.haState.toString();
   }
 
+  public String getRMStateStore() {
+    return this.rmStateStoreName;
+  }
+
   public String getRMVersion() {
     return this.resourceManagerVersion;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d16bfd1d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 561b147..ff0f6f6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -284,7 +284,7 @@ public class TestRMWebServices extends JerseyTest {
       Exception {
     assertEquals("incorrect number of elements", 1, json.length());
     JSONObject info = json.getJSONObject("clusterInfo");
-    assertEquals("incorrect number of elements", 10, info.length());
+    assertEquals("incorrect number of elements", 11, info.length());
     verifyClusterGeneric(info.getLong("id"), info.getLong("startedOn"),
         info.getString("state"), info.getString("haState"),
         info.getString("hadoopVersionBuiltOn"),


[14/22] git commit: HDFS-6773. MiniDFSCluster should skip edit log fsync by default. Contributed by Stephen Chu.

Posted by ar...@apache.org.
HDFS-6773. MiniDFSCluster should skip edit log fsync by default.  Contributed by Stephen Chu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d805cc27
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d805cc27
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d805cc27

Branch: refs/heads/HDFS-6581
Commit: d805cc27a98abbdf14a20ef3127a2c7cb212c765
Parents: fdd3bc5
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Wed Aug 27 13:33:02 2014 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Wed Aug 27 13:33:02 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  | 22 +++++++++++++++++---
 .../server/datanode/TestFsDatasetCache.java     |  1 -
 .../server/namenode/TestCacheDirectives.java    |  1 -
 4 files changed, 22 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d805cc27/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 99d5c01..7783243 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -518,6 +518,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
 
+    HDFS-6773. MiniDFSCluster should skip edit log fsync by default (Stephen
+    Chu via Colin Patrick McCabe)
+
   BUG FIXES
 
     HDFS-6823. dfs.web.authentication.kerberos.principal shows up in logs for 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d805cc27/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 98ca316..0e49cfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -93,6 +93,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -172,6 +173,7 @@ public class MiniDFSCluster {
     private boolean checkDataNodeAddrConfig = false;
     private boolean checkDataNodeHostConfig = false;
     private Configuration[] dnConfOverlays;
+    private boolean skipFsyncForTesting = true;
     
     public Builder(Configuration conf) {
       this.conf = conf;
@@ -405,6 +407,15 @@ public class MiniDFSCluster {
       this.dnConfOverlays = dnConfOverlays;
       return this;
     }
+
+    /**
+     * Default: true
+     * When true, we skip fsync() calls for speed improvements.
+     */
+    public Builder skipFsyncForTesting(boolean val) {
+      this.skipFsyncForTesting = val;
+      return this;
+    }
     
     /**
      * Construct the actual MiniDFSCluster
@@ -472,7 +483,8 @@ public class MiniDFSCluster {
                        builder.checkExitOnShutdown,
                        builder.checkDataNodeAddrConfig,
                        builder.checkDataNodeHostConfig,
-                       builder.dnConfOverlays);
+                       builder.dnConfOverlays,
+                       builder.skipFsyncForTesting);
   }
   
   public class DataNodeProperties {
@@ -727,7 +739,8 @@ public class MiniDFSCluster {
                        manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
                        operation, null, racks, hosts,
                        null, simulatedCapacities, null, true, false,
-                       MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false, false, null);
+                       MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0),
+                       true, false, false, null, true);
   }
 
   private void initMiniDFSCluster(
@@ -742,7 +755,8 @@ public class MiniDFSCluster {
       MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
       boolean checkDataNodeAddrConfig,
       boolean checkDataNodeHostConfig,
-      Configuration[] dnConfOverlays)
+      Configuration[] dnConfOverlays,
+      boolean skipFsyncForTesting)
   throws IOException {
     boolean success = false;
     try {
@@ -782,6 +796,8 @@ public class MiniDFSCluster {
             + "Standby node since no IPC ports have been specified.");
         conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
       }
+
+      EditLogFileOutputStream.setShouldSkipFsyncForTesting(skipFsyncForTesting);
     
       federation = nnTopology.isFederated();
       try {
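
An illustrative use of the new builder knob, for a test that really does
need durable edit logs (names as in the patch above):

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .skipFsyncForTesting(false)  // re-enable fsync() for this test
      .build();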

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d805cc27/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
index 5ac13ee..d6e70d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
@@ -108,7 +108,6 @@ public class TestFsDatasetCache {
   private static CacheManipulator prevCacheManipulator;
 
   static {
-    EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
     LogManager.getLogger(FsDatasetCache.class).setLevel(Level.DEBUG);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d805cc27/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index d54b90e..9307692 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -110,7 +110,6 @@ public class TestCacheDirectives {
 
   static {
     NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
-    EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
   }
 
   private static final long BLOCK_SIZE = 4096;


[11/22] git commit: HDFS-6892. Add XDR packaging method for each NFS request. Contributed by Brandon Li

Posted by ar...@apache.org.
HDFS-6892. Add XDR packaging method for each NFS request. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cd9182d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cd9182d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cd9182d8

Branch: refs/heads/HDFS-6581
Commit: cd9182d8b5f60428f6c91b0eb0b2e61d52a07020
Parents: 6b441d2
Author: brandonli <Br...@Brandons-MacBook-Pro-2.local>
Authored: Wed Aug 27 11:06:01 2014 -0700
Committer: brandonli <Br...@Brandons-MacBook-Pro-2.local>
Committed: Wed Aug 27 11:06:01 2014 -0700

----------------------------------------------------------------------
 .../hadoop/nfs/nfs3/request/ACCESS3Request.java |  15 +-
 .../hadoop/nfs/nfs3/request/COMMIT3Request.java |  23 ++-
 .../hadoop/nfs/nfs3/request/CREATE3Request.java |  18 +-
 .../hadoop/nfs/nfs3/request/FSINFO3Request.java |  15 +-
 .../hadoop/nfs/nfs3/request/FSSTAT3Request.java |  15 +-
 .../nfs/nfs3/request/GETATTR3Request.java       |  15 +-
 .../hadoop/nfs/nfs3/request/LOOKUP3Request.java |   9 +-
 .../hadoop/nfs/nfs3/request/MKDIR3Request.java  |  26 ++-
 .../hadoop/nfs/nfs3/request/NFS3Request.java    |  45 +++++
 .../nfs/nfs3/request/PATHCONF3Request.java      |  15 +-
 .../hadoop/nfs/nfs3/request/READ3Request.java   |   9 +-
 .../nfs/nfs3/request/READDIR3Request.java       |  30 ++-
 .../nfs/nfs3/request/READDIRPLUS3Request.java   |  33 +++-
 .../nfs/nfs3/request/READLINK3Request.java      |  15 +-
 .../hadoop/nfs/nfs3/request/REMOVE3Request.java |  20 +-
 .../hadoop/nfs/nfs3/request/RENAME3Request.java |  37 ++--
 .../hadoop/nfs/nfs3/request/RMDIR3Request.java  |  20 +-
 .../nfs/nfs3/request/RequestWithHandle.java     |  16 +-
 .../nfs/nfs3/request/SETATTR3Request.java       |  29 ++-
 .../nfs/nfs3/request/SYMLINK3Request.java       |  30 ++-
 .../hadoop/nfs/nfs3/request/SetAttr3.java       |   9 +
 .../hadoop/nfs/nfs3/request/WRITE3Request.java  |  13 +-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java    |  38 ++--
 .../hdfs/nfs/nfs3/TestRpcProgramNfs3.java       | 187 ++++++++++---------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 25 files changed, 480 insertions(+), 204 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/ACCESS3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/ACCESS3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/ACCESS3Request.java
index 2470108..ea1ba86 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/ACCESS3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/ACCESS3Request.java
@@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
  * ACCESS3 Request
  */
 public class ACCESS3Request extends RequestWithHandle {
-  public ACCESS3Request(XDR xdr) throws IOException {
-    super(xdr);
+  public static ACCESS3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    return new ACCESS3Request(handle);
+  }
+
+  public ACCESS3Request(FileHandle handle) {
+    super(handle);
+  }
+  
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);    
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/COMMIT3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/COMMIT3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/COMMIT3Request.java
index 810c41b..ba84d42 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/COMMIT3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/COMMIT3Request.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
@@ -28,10 +29,17 @@ public class COMMIT3Request extends RequestWithHandle {
   private final long offset;
   private final int count;
 
-  public COMMIT3Request(XDR xdr) throws IOException {
-    super(xdr);
-    offset = xdr.readHyper();
-    count = xdr.readInt();
+  public static COMMIT3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    long offset = xdr.readHyper();
+    int count = xdr.readInt();
+    return new COMMIT3Request(handle, offset, count);
+  }
+  
+  public COMMIT3Request(FileHandle handle, long offset, int count) {
+    super(handle);
+    this.offset = offset;
+    this.count = count;
   }
 
   public long getOffset() {
@@ -41,4 +49,11 @@ public class COMMIT3Request extends RequestWithHandle {
   public int getCount() {
     return this.count;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr); 
+    xdr.writeLongAsHyper(offset);
+    xdr.writeInt(count);
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/CREATE3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/CREATE3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/CREATE3Request.java
index b444c99..473d527 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/CREATE3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/CREATE3Request.java
@@ -29,8 +29,8 @@ import org.apache.hadoop.oncrpc.XDR;
 public class CREATE3Request extends RequestWithHandle {
   private final String name;
   private final int mode;
-  private SetAttr3 objAttr = null;
-  private long verf;
+  private final SetAttr3 objAttr;
+  private long verf = 0;
 
   public CREATE3Request(FileHandle handle, String name, int mode,
       SetAttr3 objAttr, long verf) {
@@ -41,12 +41,12 @@ public class CREATE3Request extends RequestWithHandle {
     this.verf = verf;
   }
   
-  public CREATE3Request(XDR xdr) throws IOException {
-    super(xdr);
-    name = xdr.readString();
-    mode = xdr.readInt();
-
-    objAttr = new SetAttr3();
+  public static CREATE3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    String name = xdr.readString();
+    int mode = xdr.readInt();
+    SetAttr3 objAttr = new SetAttr3();
+    long verf = 0;
     if ((mode == Nfs3Constant.CREATE_UNCHECKED)
         || (mode == Nfs3Constant.CREATE_GUARDED)) {
       objAttr.deserialize(xdr);
@@ -55,6 +55,7 @@ public class CREATE3Request extends RequestWithHandle {
     } else {
       throw new IOException("Wrong create mode:" + mode);
     }
+    return new CREATE3Request(handle, name, mode, objAttr, verf);
   }
 
   public String getName() {
@@ -81,4 +82,5 @@ public class CREATE3Request extends RequestWithHandle {
     xdr.writeInt(mode);
     objAttr.serialize(xdr);
   }
+
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSINFO3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSINFO3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSINFO3Request.java
index 26b65be..92c8ed8 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSINFO3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSINFO3Request.java
@@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
  * FSINFO3 Request
  */
 public class FSINFO3Request extends RequestWithHandle {
-  public FSINFO3Request(XDR xdr) throws IOException {
-    super(xdr);
+  public static FSINFO3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    return new FSINFO3Request(handle);
+  }
+
+  public FSINFO3Request(FileHandle handle) {
+    super(handle);
+  }
+  
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);    
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSSTAT3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSSTAT3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSSTAT3Request.java
index 90bec15..c6c620d 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSSTAT3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/FSSTAT3Request.java
@@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
  * FSSTAT3 Request
  */
 public class FSSTAT3Request extends RequestWithHandle {
-  public FSSTAT3Request(XDR xdr) throws IOException {
-    super(xdr);
+  public static FSSTAT3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    return new FSSTAT3Request(handle);
+  }
+
+  public FSSTAT3Request(FileHandle handle) {
+    super(handle);
+  }
+  
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);    
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/GETATTR3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/GETATTR3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/GETATTR3Request.java
index e1d69d1..b06b4b1 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/GETATTR3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/GETATTR3Request.java
@@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
  * GETATTR3 Request
  */
 public class GETATTR3Request extends RequestWithHandle {
-  public GETATTR3Request(XDR xdr) throws IOException {
-    super(xdr);
+  public static GETATTR3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    return new GETATTR3Request(handle);
+  }
+
+  public GETATTR3Request(FileHandle handle) {
+    super(handle);
+  }
+  
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);    
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java
index e461ec3..4661821 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/LOOKUP3Request.java
@@ -35,9 +35,10 @@ public class LOOKUP3Request extends RequestWithHandle {
     this.name = name;
   }
   
-  public LOOKUP3Request(XDR xdr) throws IOException {
-    super(xdr);
-    name = xdr.readString();
+  public static LOOKUP3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    String name = xdr.readString();
+    return new LOOKUP3Request(handle, name);
   }
 
   public String getName() {
@@ -51,7 +52,7 @@ public class LOOKUP3Request extends RequestWithHandle {
   @Override
   @VisibleForTesting
   public void serialize(XDR xdr) {
-    super.serialize(xdr);
+    handle.serialize(xdr);
     xdr.writeInt(name.getBytes().length);
     xdr.writeFixedOpaque(name.getBytes());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKDIR3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKDIR3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKDIR3Request.java
index 170de8c..b3ef828 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKDIR3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/MKDIR3Request.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
@@ -28,13 +29,20 @@ public class MKDIR3Request extends RequestWithHandle {
   private final String name;
   private final SetAttr3 objAttr;
 
-  public MKDIR3Request(XDR xdr) throws IOException {
-    super(xdr);
-    name = xdr.readString();
-    objAttr = new SetAttr3();
+  public static MKDIR3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    String name = xdr.readString();
+    SetAttr3 objAttr = new SetAttr3();
     objAttr.deserialize(xdr);
+    return new MKDIR3Request(handle, name, objAttr);
   }
-  
+
+  public MKDIR3Request(FileHandle handle, String name, SetAttr3 objAttr) {
+    super(handle);
+    this.name = name;
+    this.objAttr = objAttr;
+  }
+
   public String getName() {
     return name;
   }
@@ -42,4 +50,12 @@ public class MKDIR3Request extends RequestWithHandle {
   public SetAttr3 getObjAttr() {
     return objAttr;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeInt(name.getBytes().length);
+    xdr.writeFixedOpaque(name.getBytes());
+    objAttr.serialize(xdr);
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/NFS3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/NFS3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/NFS3Request.java
new file mode 100644
index 0000000..cffa215
--- /dev/null
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/NFS3Request.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.nfs.nfs3.request;
+
+import java.io.IOException;
+
+import org.apache.hadoop.nfs.nfs3.FileHandle;
+import org.apache.hadoop.oncrpc.XDR;
+
+/**
+ * Base class for NFSv3 requests, defining the XDR serialization contract.
+ */
+public abstract class NFS3Request {
+  
+  /**
+   * Deserialize a handle from an XDR object
+   */
+  static FileHandle readHandle(XDR xdr) throws IOException {
+    FileHandle handle = new FileHandle();
+    if (!handle.deserialize(xdr)) {
+      throw new IOException("can't deserialize file handle");
+    }
+    return handle;
+  }
+  
+  /**
+   * Subclasses implement this; the handle is usually serialized first.
+   */
+  public abstract void serialize(XDR xdr);
+}
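
The new NFS3Request base class fixes the shape every request type in this
patch follows: a static deserialize(XDR) factory that parses the wire
format, a constructor taking the parsed fields, and a serialize(XDR) method
that writes them back. A minimal sketch of the pattern, using a hypothetical
FOO3Request (not a class in this patch):

    // Hypothetical request type showing the deserialize/serialize pairing
    // introduced by NFS3Request and RequestWithHandle.
    public class FOO3Request extends RequestWithHandle {
      public static FOO3Request deserialize(XDR xdr) throws IOException {
        FileHandle handle = readHandle(xdr); // shared helper in NFS3Request
        return new FOO3Request(handle);
      }

      public FOO3Request(FileHandle handle) {
        super(handle);
      }

      @Override
      public void serialize(XDR xdr) {
        handle.serialize(xdr); // the handle is written first
      }
    }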

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/PATHCONF3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/PATHCONF3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/PATHCONF3Request.java
index d514264..bff8038 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/PATHCONF3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/PATHCONF3Request.java
@@ -19,13 +19,24 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
  * PATHCONF3 Request
  */
 public class PATHCONF3Request extends RequestWithHandle {
-  public PATHCONF3Request(XDR xdr) throws IOException {
-    super(xdr);
+  public static PATHCONF3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    return new PATHCONF3Request(handle);
+  }
+  
+  public PATHCONF3Request(FileHandle handle) {
+    super(handle);
+  }
+  
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java
index 6d95f5e..5898ec5 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READ3Request.java
@@ -31,10 +31,11 @@ public class READ3Request extends RequestWithHandle {
   private final long offset;
   private final int count;
 
-  public READ3Request(XDR xdr) throws IOException {
-    super(xdr);
-    offset = xdr.readHyper();
-    count = xdr.readInt();
+  public static READ3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    long offset = xdr.readHyper();
+    int count = xdr.readInt();
+    return new READ3Request(handle, offset, count);
   }
 
   @VisibleForTesting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIR3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIR3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIR3Request.java
index c9835b9..79245c1 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIR3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIR3Request.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
@@ -29,13 +30,22 @@ public class READDIR3Request extends RequestWithHandle {
   private final long cookieVerf;
   private final int count;
 
-  public READDIR3Request(XDR xdr) throws IOException {
-    super(xdr);
-    cookie = xdr.readHyper();
-    cookieVerf = xdr.readHyper();
-    count = xdr.readInt();
+  public static READDIR3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    long cookie = xdr.readHyper();
+    long cookieVerf = xdr.readHyper();
+    int count = xdr.readInt();
+    return new READDIR3Request(handle, cookie, cookieVerf, count);
   }
-
+  
+  public READDIR3Request(FileHandle handle, long cookie, long cookieVerf,
+      int count) {
+    super(handle);
+    this.cookie = cookie;
+    this.cookieVerf = cookieVerf;
+    this.count = count;
+  }
+  
   public long getCookie() {
     return this.cookie;
   }
@@ -47,4 +57,12 @@ public class READDIR3Request extends RequestWithHandle {
   public long getCount() {
     return this.count;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeLongAsHyper(cookie);
+    xdr.writeLongAsHyper(cookieVerf);
+    xdr.writeInt(count);
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIRPLUS3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIRPLUS3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIRPLUS3Request.java
index 2994fe9..c1e4365 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIRPLUS3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READDIRPLUS3Request.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
@@ -30,14 +31,25 @@ public class READDIRPLUS3Request extends RequestWithHandle {
   private final int dirCount;
   private final int maxCount;
 
-  public READDIRPLUS3Request(XDR xdr) throws IOException {
-    super(xdr);
-    cookie = xdr.readHyper();
-    cookieVerf = xdr.readHyper();
-    dirCount = xdr.readInt();
-    maxCount = xdr.readInt();
+  public static READDIRPLUS3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    long cookie = xdr.readHyper();
+    long cookieVerf = xdr.readHyper();
+    int dirCount = xdr.readInt();
+    int maxCount = xdr.readInt();
+    return new READDIRPLUS3Request(handle, cookie, cookieVerf, dirCount,
+        maxCount);
   }
 
+  public READDIRPLUS3Request(FileHandle handle, long cookie, long cookieVerf,
+      int dirCount, int maxCount) {
+    super(handle);
+    this.cookie = cookie;
+    this.cookieVerf = cookieVerf;
+    this.dirCount = dirCount;
+    this.maxCount = maxCount;
+  }
+  
   public long getCookie() {
     return this.cookie;
   }
@@ -53,4 +65,13 @@ public class READDIRPLUS3Request extends RequestWithHandle {
   public int getMaxCount() {
     return maxCount;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeLongAsHyper(cookie);
+    xdr.writeLongAsHyper(cookieVerf);
+    xdr.writeInt(dirCount);
+    xdr.writeInt(maxCount);
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READLINK3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READLINK3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READLINK3Request.java
index 3b0e8a4..15fe8f0 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READLINK3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/READLINK3Request.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
@@ -26,7 +27,17 @@ import org.apache.hadoop.oncrpc.XDR;
  */
 public class READLINK3Request extends RequestWithHandle {
 
-  public READLINK3Request(XDR xdr) throws IOException {
-    super(xdr);
+  public static READLINK3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    return new READLINK3Request(handle);
+  }
+  
+  public READLINK3Request(FileHandle handle) {
+    super(handle);
+  }
+  
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/REMOVE3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/REMOVE3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/REMOVE3Request.java
index 901d803..ffd47b0 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/REMOVE3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/REMOVE3Request.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
@@ -27,12 +28,25 @@ import org.apache.hadoop.oncrpc.XDR;
 public class REMOVE3Request extends RequestWithHandle {
   private final String name;
 
-  public REMOVE3Request(XDR xdr) throws IOException {
-    super(xdr);
-    name = xdr.readString();
+  public static REMOVE3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    String name = xdr.readString();
+    return new REMOVE3Request(handle, name);
   }
 
+  public REMOVE3Request(FileHandle handle, String name) {
+    super(handle);
+    this.name = name;
+  }
+  
   public String getName() {
     return this.name;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeInt(name.getBytes().length);
+    xdr.writeFixedOpaque(name.getBytes());
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RENAME3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RENAME3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RENAME3Request.java
index 6fdccff..5144e8a 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RENAME3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RENAME3Request.java
@@ -25,23 +25,26 @@ import org.apache.hadoop.oncrpc.XDR;
 /**
  * RENAME3 Request
  */
-public class RENAME3Request {
+public class RENAME3Request extends NFS3Request {
   private final FileHandle fromDirHandle;
   private final String fromName;
   private final FileHandle toDirHandle;
   private final String toName;
   
-  public RENAME3Request(XDR xdr) throws IOException {
-    fromDirHandle = new FileHandle();
-    if (!fromDirHandle.deserialize(xdr)) {
-      throw new IOException("can't deserialize file handle");
-    }
-    fromName = xdr.readString();
-    toDirHandle = new FileHandle();
-    if (!toDirHandle.deserialize(xdr)) {
-      throw new IOException("can't deserialize file handle");
-    }
-    toName = xdr.readString();
+  public static RENAME3Request deserialize(XDR xdr) throws IOException {
+    FileHandle fromDirHandle = readHandle(xdr);
+    String fromName = xdr.readString();
+    FileHandle toDirHandle = readHandle(xdr);
+    String toName = xdr.readString();
+    return new RENAME3Request(fromDirHandle, fromName, toDirHandle, toName);
+  }
+  
+  public RENAME3Request(FileHandle fromDirHandle, String fromName,
+      FileHandle toDirHandle, String toName) {
+    this.fromDirHandle = fromDirHandle;
+    this.fromName = fromName;
+    this.toDirHandle = toDirHandle;
+    this.toName = toName;
   }
   
   public FileHandle getFromDirHandle() {
@@ -59,4 +62,14 @@ public class RENAME3Request {
   public String getToName() {
     return toName;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    fromDirHandle.serialize(xdr);
+    xdr.writeInt(fromName.getBytes().length);
+    xdr.writeFixedOpaque(fromName.getBytes());
+    toDirHandle.serialize(xdr);
+    xdr.writeInt(toName.getBytes().length);
+    xdr.writeFixedOpaque(toName.getBytes());
+  }
 }
\ No newline at end of file
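
Because each request now owns both directions of its wire format,
serialization can be checked with a simple round trip. A sketch, assuming
fromDirHandle and toDirHandle are existing FileHandle instances:

    // Serialize into an XDR buffer, then parse it back; asReadOnlyWrap()
    // is used the same way the tests below use it.
    RENAME3Request req =
        new RENAME3Request(fromDirHandle, "a", toDirHandle, "b");
    XDR xdr = new XDR();
    req.serialize(xdr);
    RENAME3Request parsed = RENAME3Request.deserialize(xdr.asReadOnlyWrap());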

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RMDIR3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RMDIR3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RMDIR3Request.java
index 8fd5b70..e9977fa 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RMDIR3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RMDIR3Request.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
@@ -27,12 +28,25 @@ import org.apache.hadoop.oncrpc.XDR;
 public class RMDIR3Request extends RequestWithHandle {
   private final String name;
 
-  public RMDIR3Request(XDR xdr) throws IOException {
-    super(xdr);
-    name = xdr.readString();
+  public static RMDIR3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    String name = xdr.readString();
+    return new RMDIR3Request(handle, name);
   }
 
+  public RMDIR3Request(FileHandle handle, String name) {
+    super(handle);
+    this.name = name;
+  }
+  
   public String getName() {
     return this.name;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeInt(name.getBytes().length);
+    xdr.writeFixedOpaque(name.getBytes());
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RequestWithHandle.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RequestWithHandle.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RequestWithHandle.java
index a3b19a1..9f9539c 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RequestWithHandle.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/RequestWithHandle.java
@@ -17,33 +17,19 @@
  */
 package org.apache.hadoop.nfs.nfs3.request;
 
-import java.io.IOException;
-
 import org.apache.hadoop.nfs.nfs3.FileHandle;
-import org.apache.hadoop.oncrpc.XDR;
 
 /**
  * An NFS request that uses {@link FileHandle} to identify a file.
  */
-public class RequestWithHandle {
+public abstract class RequestWithHandle extends NFS3Request {
   protected final FileHandle handle;
   
   RequestWithHandle(FileHandle handle) {
     this.handle = handle;
   }
-  
-  RequestWithHandle(XDR xdr) throws IOException {
-    handle = new FileHandle();
-    if (!handle.deserialize(xdr)) {
-      throw new IOException("can't deserialize file handle");
-    }
-  }
 
   public FileHandle getHandle() {
     return this.handle;
   }
-  
-  public void serialize(XDR xdr) {
-    handle.serialize(xdr);
-  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SETATTR3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SETATTR3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SETATTR3Request.java
index 05e8c03..c5f668c 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SETATTR3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SETATTR3Request.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.nfs.nfs3.request;
 import java.io.IOException;
 
 import org.apache.hadoop.nfs.NfsTime;
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
@@ -38,16 +39,26 @@ public class SETATTR3Request extends RequestWithHandle {
   private final boolean check;
   private final NfsTime ctime;
   
-  public SETATTR3Request(XDR xdr) throws IOException {
-    super(xdr);
-    attr = new SetAttr3();
+  public static SETATTR3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    SetAttr3 attr = new SetAttr3();
     attr.deserialize(xdr);
-    check = xdr.readBoolean();
+    boolean check = xdr.readBoolean();
+    NfsTime ctime;
     if (check) {
       ctime = NfsTime.deserialize(xdr);
     } else {
       ctime = null;
     }
+    return new SETATTR3Request(handle, attr, check, ctime);
+  }
+  
+  public SETATTR3Request(FileHandle handle, SetAttr3 attr, boolean check,
+      NfsTime ctime) {
+    super(handle);
+    this.attr = attr;
+    this.check = check;
+    this.ctime = ctime;
   }
   
   public SetAttr3 getAttr() {
@@ -61,4 +72,14 @@ public class SETATTR3Request extends RequestWithHandle {
   public NfsTime getCtime() {
     return ctime;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    attr.serialize(xdr);
+    xdr.writeBoolean(check);
+    if (check) {
+      ctime.serialize(xdr);
+    }
+  }
 }
\ No newline at end of file
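
SETATTR3 is the one request here with a conditionally present field: the
guard is written as a boolean, and the ctime value follows only when check
is true. A sketch of how the two sides stay in agreement, assuming handle
is an existing FileHandle:

    // With check == false, serialize() writes the handle, the attributes,
    // and the boolean, but no NfsTime; deserialize() leaves ctime null.
    SETATTR3Request req =
        new SETATTR3Request(handle, new SetAttr3(), false, null);
    XDR xdr = new XDR();
    req.serialize(xdr);
    SETATTR3Request parsed =
        SETATTR3Request.deserialize(xdr.asReadOnlyWrap());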

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java
index 6e74d1a..2880794 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.nfs.nfs3.request;
 
 import java.io.IOException;
 
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.oncrpc.XDR;
 
 /**
@@ -29,14 +30,23 @@ public class SYMLINK3Request extends RequestWithHandle {
   private final SetAttr3 symAttr;
   private final String symData;  // It contains the target
   
-  public SYMLINK3Request(XDR xdr) throws IOException {
-    super(xdr);
-    name = xdr.readString();
-    symAttr = new SetAttr3();
+  public static SYMLINK3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    String name = xdr.readString();
+    SetAttr3 symAttr = new SetAttr3();
     symAttr.deserialize(xdr);
-    symData = xdr.readString();
+    String symData = xdr.readString();
+    return new SYMLINK3Request(handle, name, symAttr, symData);
   }
 
+  public SYMLINK3Request(FileHandle handle, String name, SetAttr3 symAttr,
+      String symData) {
+    super(handle);
+    this.name = name;
+    this.symAttr = symAttr;
+    this.symData = symData;
+  }
+  
   public String getName() {
     return name;
   }
@@ -48,4 +58,14 @@ public class SYMLINK3Request extends RequestWithHandle {
   public String getSymData() {
     return symData;
   }
+
+  @Override
+  public void serialize(XDR xdr) {
+    handle.serialize(xdr);
+    xdr.writeInt(name.getBytes().length);
+    xdr.writeFixedOpaque(name.getBytes());
+    symAttr.serialize(xdr);
+    xdr.writeInt(symData.getBytes().length);
+    xdr.writeFixedOpaque(symData.getBytes());
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java
index 373425f..e8e637c 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SetAttr3.java
@@ -52,6 +52,17 @@ public class SetAttr3 {
     size = 0;
     updateFields = EnumSet.noneOf(SetAttrField.class);
   }
+
+  public SetAttr3(int mode, int uid, int gid, long size, NfsTime atime,
+      NfsTime mtime, EnumSet<SetAttrField> updateFields) {
+    this.mode = mode;
+    this.uid = uid;
+    this.gid = gid;
+    this.size = size;
+    this.atime = atime;
+    this.mtime = mtime;
+    this.updateFields = updateFields;
+  }
 
   public int getMode() {
     return mode;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java
index 8a1ff8a..d85dcbb 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/WRITE3Request.java
@@ -33,12 +33,13 @@ public class WRITE3Request extends RequestWithHandle {
   private final WriteStableHow stableHow;
   private final ByteBuffer data;
 
-  public WRITE3Request(XDR xdr) throws IOException {
-    super(xdr);
-    offset = xdr.readHyper();
-    count = xdr.readInt();
-    stableHow = WriteStableHow.fromValue(xdr.readInt());
-    data = ByteBuffer.wrap(xdr.readFixedOpaque(xdr.readInt()));
+  public static WRITE3Request deserialize(XDR xdr) throws IOException {
+    FileHandle handle = readHandle(xdr);
+    long offset = xdr.readHyper();
+    int count = xdr.readInt();
+    WriteStableHow stableHow = WriteStableHow.fromValue(xdr.readInt());
+    ByteBuffer data = ByteBuffer.wrap(xdr.readFixedOpaque(xdr.readInt()));
+    return new WRITE3Request(handle, offset, count, stableHow, data);
   }
 
   public WRITE3Request(FileHandle handle, final long offset, final int count,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
index 0c7aebe..33dc3a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
@@ -268,7 +268,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     GETATTR3Request request = null;
     try {
-      request = new GETATTR3Request(xdr);
+      request = GETATTR3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid GETATTR request");
       response.setStatus(Nfs3Status.NFS3ERR_INVAL);
@@ -360,7 +360,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     SETATTR3Request request = null;
     try {
-      request = new SETATTR3Request(xdr);
+      request = SETATTR3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid SETATTR request");
       response.setStatus(Nfs3Status.NFS3ERR_INVAL);
@@ -445,7 +445,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     LOOKUP3Request request = null;
     try {
-      request = new LOOKUP3Request(xdr);
+      request = LOOKUP3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid LOOKUP request");
       return new LOOKUP3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -513,7 +513,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     ACCESS3Request request = null;
     try {
-      request = new ACCESS3Request(xdr);
+      request = ACCESS3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid ACCESS request");
       return new ACCESS3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -581,7 +581,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     READLINK3Request request = null;
 
     try {
-      request = new READLINK3Request(xdr);
+      request = READLINK3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid READLINK request");
       return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -655,7 +655,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     READ3Request request = null;
 
     try {
-      request = new READ3Request(xdr);
+      request = READ3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid READ request");
       return new READ3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -788,7 +788,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     WRITE3Request request = null;
 
     try {
-      request = new WRITE3Request(xdr);
+      request = WRITE3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid WRITE request");
       return new WRITE3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -870,7 +870,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     CREATE3Request request = null;
 
     try {
-      request = new CREATE3Request(xdr);
+      request = CREATE3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid CREATE request");
       return new CREATE3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1003,7 +1003,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     MKDIR3Request request = null;
 
     try {
-      request = new MKDIR3Request(xdr);
+      request = MKDIR3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid MKDIR request");
       return new MKDIR3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1099,7 +1099,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     REMOVE3Request request = null;
     try {
-      request = new REMOVE3Request(xdr);
+      request = REMOVE3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid REMOVE request");
       return new REMOVE3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1179,7 +1179,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     RMDIR3Request request = null;
     try {
-      request = new RMDIR3Request(xdr);
+      request = RMDIR3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid RMDIR request");
       return new RMDIR3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1264,7 +1264,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     RENAME3Request request = null;
     try {
-      request = new RENAME3Request(xdr);
+      request = RENAME3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid RENAME request");
       return new RENAME3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1360,7 +1360,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     SYMLINK3Request request = null;
     try {
-      request = new SYMLINK3Request(xdr);
+      request = SYMLINK3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid SYMLINK request");
       response.setStatus(Nfs3Status.NFS3ERR_INVAL);
@@ -1453,7 +1453,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     READDIR3Request request = null;
     try {
-      request = new READDIR3Request(xdr);
+      request = READDIR3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid READDIR request");
       return new READDIR3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1611,7 +1611,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     READDIRPLUS3Request request = null;
     try {
-      request = new READDIRPLUS3Request(xdr);
+      request = READDIRPLUS3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid READDIRPLUS request");
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1788,7 +1788,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     FSSTAT3Request request = null;
     try {
-      request = new FSSTAT3Request(xdr);
+      request = FSSTAT3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid FSSTAT request");
       return new FSSTAT3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1862,7 +1862,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     FSINFO3Request request = null;
     try {
-      request = new FSINFO3Request(xdr);
+      request = FSINFO3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid FSINFO request");
       return new FSINFO3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1926,7 +1926,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     PATHCONF3Request request = null;
     try {
-      request = new PATHCONF3Request(xdr);
+      request = PATHCONF3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid PATHCONF request");
       return new PATHCONF3Response(Nfs3Status.NFS3ERR_INVAL);
@@ -1977,7 +1977,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
 
     COMMIT3Request request = null;
     try {
-      request = new COMMIT3Request(xdr);
+      request = COMMIT3Request.deserialize(xdr);
     } catch (IOException e) {
       LOG.error("Invalid COMMIT request");
       response.setStatus(Nfs3Status.NFS3ERR_INVAL);
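
On the server side the change is mechanical: every handler swaps the
XDR-reading constructor for the static factory while keeping the existing
error mapping. The common shape, sketched with a placeholder FOO3 operation
(not a real NFS procedure):

    FOO3Request request = null;
    try {
      request = FOO3Request.deserialize(xdr);
    } catch (IOException e) {
      // A malformed request still maps to NFS3ERR_INVAL, as before.
      LOG.error("Invalid FOO request");
      return new FOO3Response(Nfs3Status.NFS3ERR_INVAL);
    }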

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
index 3fc0d99..05b976d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
@@ -17,59 +17,78 @@
  */
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
-import org.jboss.netty.channel.Channel;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.mockito.Mockito;
+import java.util.EnumSet;
 
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
 import org.apache.hadoop.nfs.nfs3.Nfs3Status;
+import org.apache.hadoop.nfs.nfs3.request.ACCESS3Request;
+import org.apache.hadoop.nfs.nfs3.request.COMMIT3Request;
+import org.apache.hadoop.nfs.nfs3.request.CREATE3Request;
+import org.apache.hadoop.nfs.nfs3.request.FSINFO3Request;
+import org.apache.hadoop.nfs.nfs3.request.FSSTAT3Request;
+import org.apache.hadoop.nfs.nfs3.request.GETATTR3Request;
 import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request;
+import org.apache.hadoop.nfs.nfs3.request.MKDIR3Request;
+import org.apache.hadoop.nfs.nfs3.request.PATHCONF3Request;
 import org.apache.hadoop.nfs.nfs3.request.READ3Request;
+import org.apache.hadoop.nfs.nfs3.request.READDIR3Request;
+import org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request;
+import org.apache.hadoop.nfs.nfs3.request.READLINK3Request;
+import org.apache.hadoop.nfs.nfs3.request.REMOVE3Request;
+import org.apache.hadoop.nfs.nfs3.request.RENAME3Request;
+import org.apache.hadoop.nfs.nfs3.request.RMDIR3Request;
+import org.apache.hadoop.nfs.nfs3.request.SETATTR3Request;
+import org.apache.hadoop.nfs.nfs3.request.SYMLINK3Request;
+import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
+import org.apache.hadoop.nfs.nfs3.request.SetAttr3.SetAttrField;
 import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
 import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response;
 import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;
 import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
-import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response;
 import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response;
+import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response;
 import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response;
 import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response;
+import org.apache.hadoop.nfs.nfs3.response.MKDIR3Response;
 import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response;
 import org.apache.hadoop.nfs.nfs3.response.READ3Response;
-import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response;
-import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response;
-import org.apache.hadoop.nfs.nfs3.response.RENAME3Response;
 import org.apache.hadoop.nfs.nfs3.response.READDIR3Response;
 import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
 import org.apache.hadoop.nfs.nfs3.response.READLINK3Response;
+import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response;
+import org.apache.hadoop.nfs.nfs3.response.RENAME3Response;
+import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response;
 import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response;
 import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response;
 import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
-import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
 import org.apache.hadoop.oncrpc.XDR;
 import org.apache.hadoop.oncrpc.security.SecurityHandler;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
+import org.jboss.netty.channel.Channel;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
 
 
 /**
@@ -143,8 +162,9 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
-
+    GETATTR3Request req = new GETATTR3Request(handle);
+    req.serialize(xdr_req);
+    
     // Attempt by an unpriviledged user should fail.
     GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
@@ -165,13 +185,12 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("bar");
-    SetAttr3 symAttr = new SetAttr3();
-    symAttr.serialize(xdr_req);
-    xdr_req.writeBoolean(false);
+    SetAttr3 symAttr = new SetAttr3(0, 1, 0, 0, null, null,
+        EnumSet.of(SetAttrField.UID));
+    SETATTR3Request req = new SETATTR3Request(handle, symAttr, false, null);
+    req.serialize(xdr_req);
 
-    // Attempt by an unpriviledged user should fail.
+    // Attempt by an unprivileged user should fail.
     SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
@@ -214,7 +233,8 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
+    ACCESS3Request req = new ACCESS3Request(handle);
+    req.serialize(xdr_req);
 
     // Attempt by an unpriviledged user should fail.
     ACCESS3Response response1 = nfsd.access(xdr_req.asReadOnlyWrap(),
@@ -237,12 +257,10 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("fubar");
-    SetAttr3 symAttr = new SetAttr3();
-    symAttr.serialize(xdr_req);
-    xdr_req.writeString("bar");
-
+    SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
+        "bar");
+    req.serialize(xdr_req);
+    
     SYMLINK3Response response = nfsd.symlink(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@@ -251,7 +269,8 @@ public class TestRpcProgramNfs3 {
     // Now perform readlink operations.
     FileHandle handle2 = response.getObjFileHandle();
     XDR xdr_req2 = new XDR();
-    handle2.serialize(xdr_req2);
+    READLINK3Request req2 = new READLINK3Request(handle2);
+    req2.serialize(xdr_req2);
 
     // Attempt by an unpriviledged user should fail.
     READLINK3Response response1 = nfsd.readlink(xdr_req2.asReadOnlyWrap(),
@@ -327,12 +346,10 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("fubar");
-    xdr_req.writeInt(Nfs3Constant.CREATE_UNCHECKED);
-    SetAttr3 symAttr = new SetAttr3();
-    symAttr.serialize(xdr_req);
-
+    CREATE3Request req = new CREATE3Request(handle, "fubar",
+        Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
+    req.serialize(xdr_req);
+    
     // Attempt by an unpriviledged user should fail.
     CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
@@ -348,26 +365,27 @@ public class TestRpcProgramNfs3 {
   }
 
   @Test(timeout = 60000)
   public void testMkdir() throws Exception {
     HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("fubar");
-    SetAttr3 symAttr = new SetAttr3();
-    symAttr.serialize(xdr_req);
-    xdr_req.writeString("bar");
-
-    // Attempt to remove by an unpriviledged user should fail.
-    SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+    MKDIR3Request req = new MKDIR3Request(handle, "fubar1", new SetAttr3());
+    req.serialize(xdr_req);
+    
+    // Attempt to mkdir by an unprivileged user should fail.
+    MKDIR3Response response1 = nfsd.mkdir(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
         response1.getStatus());
 
-    // Attempt to remove by a priviledged user should pass.
-    SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+    XDR xdr_req2 = new XDR();
+    MKDIR3Request req2 = new MKDIR3Request(handle, "fubar2", new SetAttr3());
+    req2.serialize(xdr_req2);
+    
+    // Attempt to mkdir by a privileged user should pass.
+    MKDIR3Response response2 = nfsd.mkdir(xdr_req2.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
         response2.getStatus());
@@ -379,20 +397,18 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("fubar");
-    SetAttr3 symAttr = new SetAttr3();
-    symAttr.serialize(xdr_req);
-    xdr_req.writeString("bar");
+    SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(),
+        "bar");
+    req.serialize(xdr_req);
 
-    // Attempt by an unpriviledged user should fail.
+    // Attempt by an unprivileged user should fail.
     SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
         response1.getStatus());
 
-    // Attempt by a priviledged user should pass.
+    // Attempt by a privileged user should pass.
     SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@@ -405,8 +421,8 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("bar");
+    REMOVE3Request req = new REMOVE3Request(handle, "bar");
+    req.serialize(xdr_req);
 
     // Attempt by an unpriviledged user should fail.
     REMOVE3Response response1 = nfsd.remove(xdr_req.asReadOnlyWrap(),
@@ -428,17 +444,17 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("foo");
+    RMDIR3Request req = new RMDIR3Request(handle, "foo");
+    req.serialize(xdr_req);
 
-    // Attempt by an unpriviledged user should fail.
+    // Attempt by an unprivileged user should fail.
     RMDIR3Response response1 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
         response1.getStatus());
 
-    // Attempt by a priviledged user should pass.
+    // Attempt by a privileged user should pass.
     RMDIR3Response response2 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@@ -451,19 +467,17 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     XDR xdr_req = new XDR();
     FileHandle handle = new FileHandle(dirId);
-    handle.serialize(xdr_req);
-    xdr_req.writeString("bar");
-    handle.serialize(xdr_req);
-    xdr_req.writeString("fubar");
-
-    // Attempt by an unpriviledged user should fail.
+    RENAME3Request req = new RENAME3Request(handle, "bar", handle, "fubar");
+    req.serialize(xdr_req);
+    
+    // Attempt by an unprivileged user should fail.
     RENAME3Response response1 = nfsd.rename(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
         response1.getStatus());
 
-    // Attempt by a priviledged user should pass.
+    // Attempt by a privileged user should pass.
     RENAME3Response response2 = nfsd.rename(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@@ -476,10 +490,8 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
-    xdr_req.writeLongAsHyper(0);
-    xdr_req.writeLongAsHyper(0);
-    xdr_req.writeInt(100);
+    READDIR3Request req = new READDIR3Request(handle, 0, 0, 100);
+    req.serialize(xdr_req);
 
     // Attempt by an unpriviledged user should fail.
     READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
@@ -501,20 +513,17 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
-    xdr_req.writeLongAsHyper(0);
-    xdr_req.writeLongAsHyper(0);
-    xdr_req.writeInt(3);
-    xdr_req.writeInt(2);
-
-    // Attempt by an unpriviledged user should fail.
+    READDIRPLUS3Request req = new READDIRPLUS3Request(handle, 0, 0, 3, 2);
+    req.serialize(xdr_req);
+    
+    // Attempt by an unprivileged user should fail.
     READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
         new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
         response1.getStatus());
 
-    // Attempt by a priviledged user should pass.
+    // Attempt by a privileged user should pass.
     READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
         securityHandler, new InetSocketAddress("localhost", 1234));
     assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
@@ -527,8 +536,9 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
-
+    FSSTAT3Request req = new FSSTAT3Request(handle);
+    req.serialize(xdr_req);
+    
     // Attempt by an unpriviledged user should fail.
     FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
@@ -549,8 +559,9 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
-
+    FSINFO3Request req = new FSINFO3Request(handle);
+    req.serialize(xdr_req);
+    
     // Attempt by an unpriviledged user should fail.
     FSINFO3Response response1 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
@@ -571,8 +582,9 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
-
+    PATHCONF3Request req = new PATHCONF3Request(handle);
+    req.serialize(xdr_req);
+    
     // Attempt by an unpriviledged user should fail.
     PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
         securityHandlerUnpriviledged,
@@ -593,9 +605,8 @@ public class TestRpcProgramNfs3 {
     long dirId = status.getFileId();
     FileHandle handle = new FileHandle(dirId);
     XDR xdr_req = new XDR();
-    handle.serialize(xdr_req);
-    xdr_req.writeLongAsHyper(0);
-    xdr_req.writeInt(5);
+    COMMIT3Request req = new COMMIT3Request(handle, 0, 5);
+    req.serialize(xdr_req);
 
     Channel ch = Mockito.mock(Channel.class);
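
The test updates all follow the same idiom: rather than hand-packing
fields into the XDR buffer in exactly the right order, each test builds the
request object and lets it serialize itself. For example, mirroring the
readdir test above:

    XDR xdr_req = new XDR();
    READDIR3Request req = new READDIR3Request(handle, 0, 0, 100);
    req.serialize(xdr_req);   // replaces four manual write calls
    READDIR3Response response = nfsd.readdir(xdr_req.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));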
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cd9182d8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 63c434d..8dd3ebe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -645,6 +645,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion.
     (Juan Yu and jing9 via jing9)
 
+    HDFS-6892. Add XDR packaging method for each NFS request (brandonli)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[08/22] git commit: HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail intermittently with various symptoms - debugging patch (Contributed by Yongjun Zhang)

Posted by ar...@apache.org.
HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail intermittently with various symptoms - debugging patch (Contributed by Yongjun Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5d9a4a9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5d9a4a9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5d9a4a9

Branch: refs/heads/HDFS-6581
Commit: c5d9a4a91e4e0faae3a8530408da35b591396060
Parents: 812bd0c
Author: arp <ar...@apache.org>
Authored: Wed Aug 27 09:52:33 2014 -0700
Committer: arp <ar...@apache.org>
Committed: Wed Aug 27 09:52:33 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  4 +++
 .../namenode/ha/TestPipelinesFailover.java      | 28 ++++++++++++++++++++
 2 files changed, 32 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5d9a4a9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4e60c46..fb3906a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -348,6 +348,10 @@ Trunk (Unreleased)
 
     HDFS-6905. fs-encryption merge triggered release audit failures. (clamb via tucu)
 
+    HDFS-6694. TestPipelinesFailover.testPipelineRecoveryStress tests fail
+    intermittently with various symptoms - debugging patch. (Yongjun Zhang via
+    Arpit Agarwal)
+
 Release 2.6.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5d9a4a9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
index bba3dbb..08c6525 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestPipelinesFailover.java
@@ -58,6 +58,7 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.MultithreadedTestUtil.RepeatingTestThread;
 import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.log4j.Level;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -420,6 +421,33 @@ public class TestPipelinesFailover {
    */
   @Test(timeout=STRESS_RUNTIME*3)
   public void testPipelineRecoveryStress() throws Exception {
+
+    // The following section helps debug HDFS-6694, where this test fails
+    // intermittently with "too many open files".
+    //
+    String[] scmd = new String[] {"/bin/sh", "-c", "ulimit -a"};
+    ShellCommandExecutor sce = new ShellCommandExecutor(scmd);
+    sce.execute();
+
+    System.out.println("HDFS-6694 Debug Data BEGIN===");
+    System.out.println("'ulimit -a' output:\n" + sce.getOutput());
+
+    scmd = new String[] {"hostname"};
+    sce = new ShellCommandExecutor(scmd);
+    sce.execute();
+    System.out.println("'hostname' output:\n" + sce.getOutput());
+
+    scmd = new String[] {"ifconfig"};
+    sce = new ShellCommandExecutor(scmd);
+    sce.execute();
+    System.out.println("'ifconfig' output:\n" + sce.getOutput());
+
+    scmd = new String[] {"whoami"};
+    sce = new ShellCommandExecutor(scmd);
+    sce.execute();
+    System.out.println("'whoami' output:\n" + sce.getOutput());
+    System.out.println("===HDFS-6694 Debug Data END");
+
     HAStressTestHarness harness = new HAStressTestHarness();
     // Disable permissions so that another user can recover the lease.
     harness.conf.setBoolean(
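
The debug block repeats the same three lines per command; a small helper,
not part of the patch and shown only to make the pattern explicit, could
condense it:

    // Hypothetical helper: run a command via Shell.ShellCommandExecutor
    // and return its captured output.
    private static String runAndCapture(String... cmd) throws IOException {
      ShellCommandExecutor sce = new ShellCommandExecutor(cmd);
      sce.execute();
      return sce.getOutput();
    }

    // e.g. System.out.println("'ulimit -a' output:\n"
    //     + runAndCapture("/bin/sh", "-c", "ulimit -a"));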


[16/22] git commit: HDFS-6902. FileWriter should be closed in finally block in BlockReceiver#receiveBlock() (Tsuyoshi OZAWA via Colin Patrick McCabe)

Posted by ar...@apache.org.
HDFS-6902. FileWriter should be closed in finally block in BlockReceiver#receiveBlock() (Tsuyoshi OZAWA via Colin Patrick McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b6b95ff6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b6b95ff6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b6b95ff6

Branch: refs/heads/HDFS-6581
Commit: b6b95ff66700e4db1d8d59a31c3048cb10504262
Parents: 225569e
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Wed Aug 27 13:49:31 2014 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Wed Aug 27 13:49:31 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt                    | 3 +++
 .../org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java  | 6 ++++--
 2 files changed, 7 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6b95ff6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f3ecf07..d5797e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -655,6 +655,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-6938. Cleanup javac warnings in FSNamesystem (Charles Lamb via wheat9)
 
+    HDFS-6902. FileWriter should be closed in finally block in
+    BlockReceiver#receiveBlock() (Tsuyoshi OZAWA via Colin Patrick McCabe)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b6b95ff6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index afa8bbb..bfb2233 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -825,15 +825,17 @@ class BlockReceiver implements Closeable {
               LOG.warn("Failed to delete restart meta file: " +
                   restartMeta.getPath());
             }
+            FileWriter out = null;
             try {
-              FileWriter out = new FileWriter(restartMeta);
+              out = new FileWriter(restartMeta);
               // write out the current time.
               out.write(Long.toString(Time.now() + restartBudget));
               out.flush();
-              out.close();
             } catch (IOException ioe) {
               // The worst case is not recovering this RBW replica. 
               // Client will fall back to regular pipeline recovery.
+            } finally {
+              IOUtils.cleanup(LOG, out);
             }
             try {              
               // Even if the connection is closed after the ack packet is

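The finally-block rewrite above predates broad use of try-with-resources in this codebase; on Java 7+ the same close-on-every-path guarantee can be written more compactly, since FileWriter implements Closeable. A sketch of the equivalent, with restartMeta, restartBudget and Time.now() as in the hunk:

    // Equivalent cleanup via try-with-resources: out.close() runs on every
    // exit path, and the catch still swallows failures so the client can
    // fall back to regular pipeline recovery.
    try (FileWriter out = new FileWriter(restartMeta)) {
      out.write(Long.toString(Time.now() + restartBudget));
      out.flush();
    } catch (IOException ioe) {
      // Worst case: this RBW replica is not recovered.
    }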

[19/22] git commit: HDFS-6921. Add LazyPersist flag to FileStatus. (Arpit Agarwal)

Posted by ar...@apache.org.
HDFS-6921. Add LazyPersist flag to FileStatus. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d125367
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d125367
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d125367

Branch: refs/heads/HDFS-6581
Commit: 6d125367ce86a996ca9d6935d2d0f322f8be5767
Parents: 3754957
Author: arp <ar...@apache.org>
Authored: Wed Aug 27 08:52:55 2014 -0700
Committer: arp <ar...@apache.org>
Committed: Wed Aug 27 15:23:01 2014 -0700

----------------------------------------------------------------------
 .../java/org/apache/hadoop/fs/CreateFlag.java   | 16 ++++++++++++++-
 .../java/org/apache/hadoop/fs/FileStatus.java   | 21 ++++++++++++++++++++
 .../apache/hadoop/fs/RawLocalFileSystem.java    |  3 ++-
 .../hadoop/fs/http/client/HttpFSFileSystem.java |  8 +++++---
 .../hadoop/fs/http/server/FSOperations.java     |  1 +
 .../hadoop-hdfs/CHANGES-HDFS-6581.txt           |  4 ++++
 .../hadoop/hdfs/protocol/HdfsFileStatus.java    | 18 +++++++++++++----
 .../hdfs/protocol/HdfsLocatedFileStatus.java    | 14 ++++++-------
 .../protocol/SnapshottableDirectoryStatus.java  |  2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  9 +++++++++
 .../hdfs/server/namenode/FSDirectory.java       |  5 +++--
 .../hdfs/server/namenode/FSNamesystem.java      | 14 +++++++------
 .../org/apache/hadoop/hdfs/web/JsonUtil.java    |  7 ++++---
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto       |  1 +
 .../hadoop/hdfs/TestDFSClientRetries.java       |  4 ++--
 .../java/org/apache/hadoop/hdfs/TestLease.java  |  4 ++--
 .../hadoop/hdfs/server/namenode/TestFsck.java   |  4 ++--
 .../apache/hadoop/hdfs/web/TestJsonUtil.java    |  2 +-
 19 files changed, 103 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
index 252f37b..c5d23b4 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
@@ -79,7 +79,21 @@ public enum CreateFlag {
   /**
    * Force closed blocks to disk. Similar to POSIX O_SYNC. See javadoc for description.
    */
-  SYNC_BLOCK((short) 0x08);
+  SYNC_BLOCK((short) 0x08),
+
+  /**
+   * Create the block on transient storage (RAM) if available. If
+   * transient storage is unavailable then the block will be created
+   * on disk.
+   *
+   * HDFS will make a best effort to lazily write these files to persistent
+   * storage, however file contents may be lost at any time due to process/
+   * node restarts, hence there is no guarantee of data durability.
+   *
+   * This flag must only be used for intermediate data whose loss can be
+   * tolerated by the application.
+   */
+  LAZY_PERSIST((short) 0x10);
 
   private final short mode;
 

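For context, a client requests the new behaviour by adding the flag to the EnumSet passed at create time. A hedged usage sketch against the public FileSystem API (the path and sizing values are placeholders; at this commit the flag is defined and serialized, but the FSDirectory hunks below still report false back in file status):

    import java.util.EnumSet;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    // Ask for a lazily-persisted file; its loss must be tolerable (see javadoc).
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataOutputStream out = fs.create(
        new Path("/tmp/intermediate.dat"),
        FsPermission.getFileDefault(),
        EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST),
        4096,          // buffer size
        (short) 1,     // replication
        128L << 20,    // block size
        null)) {       // no progress callback
      out.write(new byte[] {1, 2, 3});
    }
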
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index b261f7f..92b4e38 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -38,6 +38,7 @@ public class FileStatus implements Writable, Comparable {
   private boolean isdir;
   private short block_replication;
   private long blocksize;
+  private boolean isLazyPersist;
   private long modification_time;
   private long access_time;
   private FsPermission permission;
@@ -73,6 +74,18 @@ public class FileStatus implements Writable, Comparable {
                     FsPermission permission, String owner, String group, 
                     Path symlink,
                     Path path) {
+    this(length, isdir, block_replication, blocksize, false,
+        modification_time, access_time, permission, owner, group,
+        symlink, path);
+  }
+
+  public FileStatus(long length, boolean isdir,
+                    int block_replication,
+                    long blocksize, boolean isLazyPersist,
+                    long modification_time, long access_time,
+                    FsPermission permission, String owner, String group,
+                    Path symlink,
+                    Path path) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
@@ -92,6 +105,7 @@ public class FileStatus implements Writable, Comparable {
     this.group = (group == null) ? "" : group;
     this.symlink = symlink;
     this.path = path;
+    this.isLazyPersist = isLazyPersist;
     // The variables isdir and symlink indicate the type:
     // 1. isdir implies directory, in which case symlink must be null.
     // 2. !isdir implies a file or symlink, symlink != null implies a
@@ -168,6 +182,13 @@ public class FileStatus implements Writable, Comparable {
   }
 
   /**
+   * Get whether the file is lazyPersist.
+   */
+  public boolean isLazyPersist() {
+    return isLazyPersist;
+  }
+
+  /**
    * Get the replication factor of a file.
    * @return the replication factor of a file.
    */

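Reading the bit back is symmetric; continuing the sketch above, any FileSystem client can test it without casting to HDFS-specific types:

    // List a directory and report which entries were created lazy-persist.
    for (FileStatus st : fs.listStatus(new Path("/tmp"))) {
      System.out.println(st.getPath() + " lazyPersist=" + st.isLazyPersist());
    }
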
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index a06e3a6..c3852df 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -762,6 +762,7 @@ public class RawLocalFileSystem extends FileSystem {
           false,
           fs.getReplication(),
           fs.getBlockSize(),
+          fs.isLazyPersist(),
           fs.getModificationTime(),
           fs.getAccessTime(),
           fs.getPermission(),
@@ -777,7 +778,7 @@ public class RawLocalFileSystem extends FileSystem {
        * when available.
        */
       if (!target.isEmpty()) {
-        return new FileStatus(0, false, 0, 0, 0, 0, FsPermission.getDefault(),
+        return new FileStatus(0, false, 0, 0, false, 0, 0, FsPermission.getDefault(),
             "", "", new Path(target), f);
       }
       // f refers to a file or directory that does not exist

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 3749bc3..f7d2e90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -160,6 +160,7 @@ public class HttpFSFileSystem extends FileSystem
   public static final String XATTR_NAME_JSON = "name";
   public static final String XATTR_VALUE_JSON = "value";
   public static final String XATTRNAMES_JSON = "XAttrNames";
+  public static final String LAZY_PERSIST_JSON = "LazyPersist";
 
   public static final String FILE_CHECKSUM_JSON = "FileChecksum";
   public static final String CHECKSUM_ALGORITHM_JSON = "algorithm";
@@ -954,19 +955,20 @@ public class HttpFSFileSystem extends FileSystem
     long mTime = (Long) json.get(MODIFICATION_TIME_JSON);
     long blockSize = (Long) json.get(BLOCK_SIZE_JSON);
     short replication = ((Long) json.get(REPLICATION_JSON)).shortValue();
+    boolean isLazyPersist = ((Boolean) json.get(LAZY_PERSIST_JSON)).booleanValue();
     FileStatus fileStatus = null;
 
     switch (type) {
       case FILE:
       case DIRECTORY:
         fileStatus = new FileStatus(len, (type == FILE_TYPE.DIRECTORY),
-                                    replication, blockSize, mTime, aTime,
-                                    permission, owner, group, path);
+                                    replication, blockSize, false, mTime, aTime,
+                                    permission, owner, group, null, path);
         break;
       case SYMLINK:
         Path symLink = null;
         fileStatus = new FileStatus(len, false,
-                                    replication, blockSize, mTime, aTime,
+                                    replication, blockSize, isLazyPersist, mTime, aTime,
                                     permission, owner, group, symLink,
                                     path);
     }

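One wrinkle in the hunk above: json.get(LAZY_PERSIST_JSON) is cast to Boolean unconditionally, so a reply from an older server that omits the key would throw a NullPointerException (note also that the parsed value is only applied in the SYMLINK branch, while FILE and DIRECTORY pass a hard-coded false). JsonUtil later in this commit guards with containsKey; a defensive variant of the same lookup, sketched over the parsed map:

    // Tolerate servers that predate the LazyPersist field: treat absence as false.
    Object v = json.get(LAZY_PERSIST_JSON);
    boolean isLazyPersist = (v instanceof Boolean) && ((Boolean) v).booleanValue();
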
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index e7d92f5..bcc0476 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -125,6 +125,7 @@ public class FSOperations {
               fileStatus.getModificationTime());
       json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
       json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
+      json.put(HttpFSFileSystem.LAZY_PERSIST_JSON, fileStatus.isLazyPersist());
       if ( (aclStatus != null) && !(aclStatus.getEntries().isEmpty()) ) {
         json.put(HttpFSFileSystem.ACL_BIT_JSON,true);
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
new file mode 100644
index 0000000..706c03a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
@@ -0,0 +1,4 @@
+  BREAKDOWN OF HDFS-6581 SUBTASKS AND RELATED JIRAS
+
+    HDFS-6921. Add LazyPersist flag to FileStatus. (Arpit Agarwal)
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 3d05639..49c5cde 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -39,6 +39,7 @@ public class HdfsFileStatus {
   private final boolean isdir;
   private final short block_replication;
   private final long blocksize;
+  private final boolean isLazyPersist;
   private final long modification_time;
   private final long access_time;
   private final FsPermission permission;
@@ -69,13 +70,15 @@ public class HdfsFileStatus {
    * @param feInfo the file's encryption info
    */
   public HdfsFileStatus(long length, boolean isdir, int block_replication,
-      long blocksize, long modification_time, long access_time,
-      FsPermission permission, String owner, String group, byte[] symlink,
-    byte[] path, long fileId, int childrenNum, FileEncryptionInfo feInfo) {
+      long blocksize, boolean isLazyPersist, long modification_time,
+      long access_time, FsPermission permission, String owner,
+      String group, byte[] symlink, byte[] path, long fileId,
+      int childrenNum, FileEncryptionInfo feInfo) {
     this.length = length;
     this.isdir = isdir;
     this.block_replication = (short)block_replication;
     this.blocksize = blocksize;
+    this.isLazyPersist = isLazyPersist;
     this.modification_time = modification_time;
     this.access_time = access_time;
     this.permission = (permission == null) ? 
@@ -125,6 +128,13 @@ public class HdfsFileStatus {
   }
 
   /**
+   * @return true if the file is lazyPersist.
+   */
+  final public boolean isLazyPersist() {
+    return isLazyPersist;
+  }
+
+  /**
    * Get the replication factor of a file.
    * @return the replication factor of a file.
    */
@@ -253,7 +263,7 @@ public class HdfsFileStatus {
 
   public final FileStatus makeQualified(URI defaultUri, Path path) {
     return new FileStatus(getLen(), isDir(), getReplication(),
-        getBlockSize(), getModificationTime(),
+        getBlockSize(), isLazyPersist(), getModificationTime(),
         getAccessTime(),
         getPermission(), getOwner(), getGroup(),
         isSymlink() ? new Path(getSymlink()) : null,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
index a78b8bc..f70df34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
@@ -55,13 +55,13 @@ public class HdfsLocatedFileStatus extends HdfsFileStatus {
    * @param feInfo file encryption info
    */
   public HdfsLocatedFileStatus(long length, boolean isdir,
-      int block_replication, long blocksize, long modification_time,
-      long access_time, FsPermission permission, String owner, String group,
-      byte[] symlink, byte[] path, long fileId, LocatedBlocks locations,
-    int childrenNum, FileEncryptionInfo feInfo) {
-    super(length, isdir, block_replication, blocksize, modification_time,
-      access_time, permission, owner, group, symlink, path, fileId,
-      childrenNum, feInfo);
+      int block_replication, long blocksize, boolean isLazyPersist,
+      long modification_time, long access_time, FsPermission permission,
+      String owner, String group, byte[] symlink, byte[] path, long fileId,
+      LocatedBlocks locations, int childrenNum, FileEncryptionInfo feInfo) {
+    super(length, isdir, block_replication, blocksize, isLazyPersist,
+        modification_time, access_time, permission, owner, group, symlink,
+        path, fileId, childrenNum, feInfo);
     this.locations = locations;
   }
 	

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
index d395283..d0cebfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshottableDirectoryStatus.java
@@ -59,7 +59,7 @@ public class SnapshottableDirectoryStatus {
       FsPermission permission, String owner, String group, byte[] localName,
       long inodeId, int childrenNum,
       int snapshotNumber, int snapshotQuota, byte[] parentFullPath) {
-    this.dirStatus = new HdfsFileStatus(0, true, 0, 0, modification_time,
+    this.dirStatus = new HdfsFileStatus(0, true, 0, 0, false, modification_time,
         access_time, permission, owner, group, null, localName, inodeId,
         childrenNum, null);
     this.snapshotNumber = snapshotNumber;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 4dcac39..5efede7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -1277,6 +1277,9 @@ public class PBHelper {
     if (flag.contains(CreateFlag.OVERWRITE)) {
       value |= CreateFlagProto.OVERWRITE.getNumber();
     }
+    if (flag.contains(CreateFlag.LAZY_PERSIST)) {
+      value |= CreateFlagProto.LAZY_PERSIST.getNumber();
+    }
     return value;
   }
   
@@ -1293,6 +1296,10 @@ public class PBHelper {
         == CreateFlagProto.OVERWRITE_VALUE) {
       result.add(CreateFlag.OVERWRITE);
     }
+    if ((flag & CreateFlagProto.LAZY_PERSIST_VALUE)
+        == CreateFlagProto.LAZY_PERSIST_VALUE) {
+      result.add(CreateFlag.LAZY_PERSIST);
+    }
     return new EnumSetWritable<CreateFlag>(result);
   }
 
@@ -1318,6 +1325,7 @@ public class PBHelper {
     return new HdfsLocatedFileStatus(
         fs.getLength(), fs.getFileType().equals(FileType.IS_DIR), 
         fs.getBlockReplication(), fs.getBlocksize(),
+        fs.hasIsLazyPersist() ? fs.getIsLazyPersist() : false,
         fs.getModificationTime(), fs.getAccessTime(),
         PBHelper.convert(fs.getPermission()), fs.getOwner(), fs.getGroup(), 
         fs.getFileType().equals(FileType.IS_SYMLINK) ? 
@@ -1366,6 +1374,7 @@ public class PBHelper {
       setFileType(fType).
       setBlockReplication(fs.getReplication()).
       setBlocksize(fs.getBlockSize()).
+      setIsLazyPersist(fs.isLazyPersist()).
       setModificationTime(fs.getModificationTime()).
       setAccessTime(fs.getAccessTime()).
       setPermission(PBHelper.convert(fs.getPermission())).

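The PBHelper changes follow the two usual wire-compatibility idioms: create flags travel as a power-of-two bitmask, and the new status field is optional so that messages from older peers decode as false. Condensed, with names as generated from the .proto definitions in this commit:

    // Encode: set the bit only when the flag is present in the EnumSet.
    int value = 0;
    if (flag.contains(CreateFlag.LAZY_PERSIST)) {
      value |= CreateFlagProto.LAZY_PERSIST.getNumber();       // 0x10
    }

    // Decode: mask-and-compare recovers the flag from the int.
    if ((value & CreateFlagProto.LAZY_PERSIST_VALUE)
        == CreateFlagProto.LAZY_PERSIST_VALUE) {
      result.add(CreateFlag.LAZY_PERSIST);
    }

    // Optional proto field: absent on the wire means "not lazy-persist".
    boolean lazy = fs.hasIsLazyPersist() ? fs.getIsLazyPersist() : false;
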
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 54e3181..d03a4e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1438,7 +1438,7 @@ public class FSDirectory implements Closeable {
   private HdfsFileStatus getFileInfo4DotSnapshot(String src)
       throws UnresolvedLinkException {
     if (getINode4DotSnapshot(src) != null) {
-      return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
+      return new HdfsFileStatus(0, true, 0, 0, false, 0, 0, null, null, null, null,
           HdfsFileStatus.EMPTY_NAME, -1L, 0, null);
     }
     return null;
@@ -2300,6 +2300,7 @@ public class FSDirectory implements Closeable {
         node.isDirectory(), 
         replication, 
         blocksize,
+        false,
         node.getModificationTime(snapshot),
         node.getAccessTime(snapshot),
         getPermissionForFileStatus(node, snapshot),
@@ -2347,7 +2348,7 @@ public class FSDirectory implements Closeable {
 
     HdfsLocatedFileStatus status =
         new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
-          blocksize, node.getModificationTime(snapshot),
+          blocksize, false, node.getModificationTime(snapshot),
           node.getAccessTime(snapshot),
           getPermissionForFileStatus(node, snapshot),
           node.getUserName(snapshot), node.getGroupName(snapshot),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6d750bc..6f1f969 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -358,7 +358,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       Path symlink = stat.isSymlink() ? new Path(stat.getSymlink()) : null;
       Path path = dst != null ? new Path(dst) : new Path(src);
       status = new FileStatus(stat.getLen(), stat.isDir(),
-          stat.getReplication(), stat.getBlockSize(), stat.getModificationTime(),
+          stat.getReplication(), stat.getBlockSize(), stat.isLazyPersist(),
+          stat.getModificationTime(),
           stat.getAccessTime(), stat.getPermission(), stat.getOwner(),
           stat.getGroup(), symlink, path);
     }
@@ -2435,6 +2436,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     boolean create = flag.contains(CreateFlag.CREATE);
     boolean overwrite = flag.contains(CreateFlag.OVERWRITE);
+    boolean isLazyPersist = flag.contains(CreateFlag.LAZY_PERSIST);
 
     waitForLoadingFSImage();
 
@@ -2497,8 +2499,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           checkNameNodeSafeMode("Cannot create file" + src);
           src = resolvePath(src, pathComponents);
           startFileInternal(pc, src, permissions, holder, clientMachine, create,
-              overwrite, createParent, replication, blockSize, suite, edek,
-              logRetryCache);
+              overwrite, createParent, replication, blockSize, isLazyPersist,
+              suite, edek, logRetryCache);
           stat = dir.getFileInfo(src, false,
               FSDirectory.isReservedRawName(srcArg));
         } catch (StandbyException se) {
@@ -2538,8 +2540,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   private void startFileInternal(FSPermissionChecker pc, String src,
       PermissionStatus permissions, String holder, String clientMachine,
       boolean create, boolean overwrite, boolean createParent,
-      short replication, long blockSize, CipherSuite suite,
-      EncryptedKeyVersion edek, boolean logRetryEntry)
+      short replication, long blockSize, boolean isLazyPersist,
+      CipherSuite suite, EncryptedKeyVersion edek, boolean logRetryEntry)
       throws FileAlreadyExistsException, AccessControlException,
       UnresolvedLinkException, FileNotFoundException,
       ParentNotDirectoryException, RetryStartFileException, IOException {
@@ -2614,7 +2616,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       if (parent != null && mkdirsRecursively(parent.toString(),
               permissions, true, now())) {
         newNode = dir.addFile(src, permissions, replication, blockSize,
-                holder, clientMachine);
+                              holder, clientMachine);
       }
 
       if (newNode == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 321630c..98e49e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -244,6 +244,8 @@ public class JsonUtil {
     final long aTime = (Long) m.get("accessTime");
     final long mTime = (Long) m.get("modificationTime");
     final long blockSize = (Long) m.get("blockSize");
+    final boolean isLazyPersist = m.containsKey("lazyPersist")
+        ? (Boolean) m.get("lazyPersist") : false;
     final short replication = (short) (long) (Long) m.get("replication");
     final long fileId = m.containsKey("fileId") ? (Long) m.get("fileId")
         : INodeId.GRANDFATHER_INODE_ID;
@@ -251,9 +253,8 @@ public class JsonUtil {
     final int childrenNum = (childrenNumLong == null) ? -1
             : childrenNumLong.intValue();
     return new HdfsFileStatus(len, type == PathType.DIRECTORY, replication,
-        blockSize, mTime, aTime, permission, owner, group,
-        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum,
-        null);
+        blockSize, isLazyPersist, mTime, aTime, permission, owner, group,
+        symlink, DFSUtil.string2Bytes(localName), fileId, childrenNum, null);
   }
 
   /** Convert an ExtendedBlock to a Json map. */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
index edffc9a..1f30e71 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
@@ -64,6 +64,7 @@ enum CreateFlagProto {
   CREATE = 0x01;    // Create a file
   OVERWRITE = 0x02; // Truncate/overwrite a file. Same as POSIX O_TRUNC
   APPEND = 0x04;    // Append to a file
+  LAZY_PERSIST = 0x10; // File with reduced durability guarantees.
 }
 
 message CreateRequestProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
index a410224..cbb51f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
@@ -263,6 +263,7 @@ message HdfsFileStatusProto {
 
   // Optional field for file encryption
   optional FileEncryptionInfoProto fileEncryptionInfo = 15;
+  optional bool isLazyPersist = 16 [default = false];
 } 
 
 /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 74daccc..f0ffb2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -253,12 +253,12 @@ public class TestDFSClientRetries {
                          anyLong(), any(String[].class))).thenAnswer(answer);
     
     Mockito.doReturn(
-            new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
+            new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
                 1010, 0, null)).when(mockNN).getFileInfo(anyString());
     
     Mockito.doReturn(
-            new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
+            new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
                 1010, 0, null))
         .when(mockNN)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
index 28c253f..f3c0911 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
@@ -340,12 +340,12 @@ public class TestLease {
     }
 
     Mockito.doReturn(
-        new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
+        new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission(
             (short) 777), "owner", "group", new byte[0], new byte[0],
             1010, 0, null)).when(mcp).getFileInfo(anyString());
     Mockito
         .doReturn(
-            new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
+            new HdfsFileStatus(0, false, 1, 1024, false, 0, 0, new FsPermission(
                 (short) 777), "owner", "group", new byte[0], new byte[0],
                 1010, 0, null))
         .when(mcp)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 4cddd60..380f6ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -1017,8 +1017,8 @@ public class TestFsck {
     int numChildren = 1;
 
     HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
-        blockSize, modTime, accessTime, perms, owner, group, symlink, path,
-        fileId, numChildren, null);
+        blockSize, false, modTime, accessTime, perms, owner, group, symlink,
+        path, fileId, numChildren, null);
     Result res = new Result(conf);
 
     try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d125367/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index b8150f7..ec82c48 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -62,7 +62,7 @@ public class TestJsonUtil {
     final long now = Time.now();
     final String parent = "/dir";
     final HdfsFileStatus status = new HdfsFileStatus(1001L, false, 3, 1L << 26,
-        now, now + 10, new FsPermission((short) 0644), "user", "group",
+        false, now, now + 10, new FsPermission((short) 0644), "user", "group",
         DFSUtil.string2Bytes("bar"), DFSUtil.string2Bytes("foo"),
         INodeId.GRANDFATHER_INODE_ID, 0, null);
     final FileStatus fstatus = toFileStatus(status, parent);


[06/22] git commit: HADOOP-10996. Stop violence in the *_HOME (aw)

Posted by ar...@apache.org.
HADOOP-10996. Stop violence in the *_HOME (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ec4a930
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ec4a930
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ec4a930

Branch: refs/heads/HDFS-6581
Commit: 9ec4a930f57ab17b969ab656a7d5b0c7364b1354
Parents: a1618a2
Author: Allen Wittenauer <aw...@apache.org>
Authored: Wed Aug 27 07:00:31 2014 -0700
Committer: Allen Wittenauer <aw...@apache.org>
Committed: Wed Aug 27 07:00:31 2014 -0700

----------------------------------------------------------------------
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 .../hadoop-common/src/main/bin/hadoop-config.sh | 10 ++++++++--
 .../src/main/bin/hadoop-functions.sh            | 12 +++++-------
 .../hadoop-hdfs/src/main/bin/hdfs-config.sh     | 15 ++++++++-------
 hadoop-mapreduce-project/bin/mapred-config.sh   | 20 +++++++++++---------
 .../hadoop-yarn/bin/yarn-config.sh              | 16 ++++++++--------
 6 files changed, 42 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ec4a930/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2270df3..45e38d3 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -321,6 +321,8 @@ Trunk (Unreleased)
 
     HADOOP-11002. shell escapes are incompatible with previous releases (aw)
 
+    HADOOP-10996. Stop violence in the *_HOME (aw)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ec4a930/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
index b2fc4d3..0cf8bcf 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
@@ -53,7 +53,10 @@ if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
 fi
 
 # get our functions defined for usage later
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then
+if [[ -n "${HADOOP_COMMON_HOME}" ]] && 
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh" ]]; then
+  . "${HADOOP_COMMON_HOME}/libexec/hadoop-functions.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh" ]]; then
   . "${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh"
 else
   echo "ERROR: Unable to exec ${HADOOP_LIBEXEC_DIR}/hadoop-functions.sh." 1>&2
@@ -61,7 +64,10 @@ else
 fi
 
 # allow overrides of the above and pre-defines of the below
-if [[ -f "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" ]]; then
+if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh" ]]; then
+  . "${HADOOP_COMMON_HOME}/libexec/hadoop-layout.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh" ]]; then
   . "${HADOOP_LIBEXEC_DIR}/hadoop-layout.sh"
 fi
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ec4a930/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
index ab61b84..800e024 100644
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-functions.sh
@@ -68,17 +68,18 @@ function hadoop_find_confdir
 {
   # NOTE: This function is not user replaceable.
 
+  local conf_dir
   # Look for the basic hadoop configuration area.
   #
   #
   # An attempt at compatibility with some Hadoop 1.x
   # installs.
   if [[ -e "${HADOOP_PREFIX}/conf/hadoop-env.sh" ]]; then
-    DEFAULT_CONF_DIR="conf"
+    conf_dir="conf"
   else
-    DEFAULT_CONF_DIR="etc/hadoop"
+    conf_dir="etc/hadoop"
   fi
-  export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${DEFAULT_CONF_DIR}}"
+  export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-${HADOOP_PREFIX}/${conf_dir}}"
 }
 
 function hadoop_exec_hadoopenv
@@ -573,10 +574,7 @@ function hadoop_finalize_hadoop_opts
 
 function hadoop_finalize_classpath
 {
-  
-  # we want the HADOOP_CONF_DIR at the end
-  # according to oom, it gives a 2% perf boost
-  hadoop_add_classpath "${HADOOP_CONF_DIR}" after
+  hadoop_add_classpath "${HADOOP_CONF_DIR}" before
   
   # user classpath gets added at the last minute. this allows
   # override of CONF dirs and more

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ec4a930/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
index fb460d9..6824028 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs-config.sh
@@ -20,7 +20,7 @@
 
 function hadoop_subproject_init
 {
-  if [ -e "${HADOOP_CONF_DIR}/hdfs-env.sh" ]; then
+  if [[ -e "${HADOOP_CONF_DIR}/hdfs-env.sh" ]]; then
     . "${HADOOP_CONF_DIR}/hdfs-env.sh"
   fi
   
@@ -49,7 +49,7 @@ function hadoop_subproject_init
   HADOOP_ROOT_LOGGER=${HADOOP_HDFS_ROOT_LOGGER:-$HADOOP_ROOT_LOGGER}
   HADOOP_HDFS_ROOT_LOGGER="${HADOOP_ROOT_LOGGER}"
   
-  HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_HOME_DIR}"
+  HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$HADOOP_PREFIX}"
   
   HADOOP_IDENT_STRING="${HADOOP_HDFS_IDENT_STRING:-$HADOOP_IDENT_STRING}"
   HADOOP_HDFS_IDENT_STRING="${HADOOP_IDENT_STRING}"
@@ -71,12 +71,13 @@ if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
   HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_hd_this}")" >/dev/null && pwd -P)
 fi
 
-if [ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]; then
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
-elif [ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]; then
+if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
   . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
-elif [ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]; then
-  . "${HADOOP_HOME}/libexec/hadoop-config.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+elif [ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]; then
+  . "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
 else
   echo "ERROR: Hadoop common not found." 2>&1
   exit 1

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ec4a930/hadoop-mapreduce-project/bin/mapred-config.sh
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/bin/mapred-config.sh b/hadoop-mapreduce-project/bin/mapred-config.sh
index c24d350..c2681ac 100644
--- a/hadoop-mapreduce-project/bin/mapred-config.sh
+++ b/hadoop-mapreduce-project/bin/mapred-config.sh
@@ -20,7 +20,7 @@
 
 function hadoop_subproject_init
 {
-  if [ -e "${HADOOP_CONF_DIR}/mapred-env.sh" ]; then
+  if [[ -e "${HADOOP_CONF_DIR}/mapred-env.sh" ]]; then
     . "${HADOOP_CONF_DIR}/mapred-env.sh"
   fi
   
@@ -49,7 +49,7 @@ function hadoop_subproject_init
   HADOOP_ROOT_LOGGER="${HADOOP_MAPRED_ROOT_LOGGER:-INFO,console}"
   HADOOP_MAPRED_ROOT_LOGGER="${HADOOP_ROOT_LOGGER}"
   
-  HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_HOME_DIR}"
+  HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_PREFIX}"
   
   HADOOP_IDENT_STRING="${HADOOP_MAPRED_IDENT_STRING:-$HADOOP_IDENT_STRING}"
   HADOOP_MAPRED_IDENT_STRING="${HADOOP_IDENT_STRING}"
@@ -60,13 +60,15 @@ if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
   HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_mc_this}")" >/dev/null && pwd -P)
 fi
 
-if [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
-elif [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
+if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
   . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
-elif [[ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]]; then
-  . "${HADOOP_HOME}/libexec/hadoop-config.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+elif [ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]; then
+  . "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
 else
-  echo "Hadoop common not found."
-  exit
+  echo "ERROR: Hadoop common not found." 2>&1
+  exit 1
 fi
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ec4a930/hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh b/hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh
index 34d2d2d..d83e998 100644
--- a/hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh
+++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh
@@ -80,14 +80,14 @@ if [[ -z "${HADOOP_LIBEXEC_DIR}" ]]; then
   HADOOP_LIBEXEC_DIR=$(cd -P -- "$(dirname -- "${_yc_this}")" >/dev/null && pwd -P)
 fi
 
-if [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
-  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
-elif [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
+if [[ -n "${HADOOP_COMMON_HOME}" ]] &&
+   [[ -e "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh" ]]; then
   . "${HADOOP_COMMON_HOME}/libexec/hadoop-config.sh"
-elif [[ -e "${HADOOP_HOME}/libexec/hadoop-config.sh" ]]; then
-  . "${HADOOP_HOME}/libexec/hadoop-config.sh"
+elif [[ -e "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh" ]]; then
+  . "${HADOOP_LIBEXEC_DIR}/hadoop-config.sh"
+elif [ -e "${HADOOP_PREFIX}/libexec/hadoop-config.sh" ]; then
+  . "${HADOOP_PREFIX}/libexec/hadoop-config.sh"
 else
-  echo "Hadoop common not found."
-  exit
+  echo "ERROR: Hadoop common not found." 2>&1
+  exit 1
 fi
-


[07/22] git commit: MAPREDUCE-5885. build/test/test.mapred.spill causes release audit warnings. Contributed by Chen He

Posted by ar...@apache.org.
MAPREDUCE-5885. build/test/test.mapred.spill causes release audit warnings. Contributed by Chen He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/812bd0c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/812bd0c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/812bd0c0

Branch: refs/heads/HDFS-6581
Commit: 812bd0c0e583fce925e3151510860ca9781b3e40
Parents: 9ec4a93
Author: Jason Lowe <jl...@apache.org>
Authored: Wed Aug 27 15:14:54 2014 +0000
Committer: Jason Lowe <jl...@apache.org>
Committed: Wed Aug 27 15:14:54 2014 +0000

----------------------------------------------------------------------
 hadoop-mapreduce-project/CHANGES.txt            |  3 +
 .../apache/hadoop/mapred/TestComparators.java   | 61 +++++++++++++------
 .../apache/hadoop/mapred/TestMapOutputType.java | 62 ++++++++++++--------
 .../org/apache/hadoop/mapred/TestMapRed.java    | 28 ++++++---
 .../mapred/lib/TestKeyFieldBasedComparator.java | 25 +++++++-
 .../apache/hadoop/mapreduce/TestMapReduce.java  | 30 +++++++---
 6 files changed, 150 insertions(+), 59 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/812bd0c0/hadoop-mapreduce-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index a6d2981..de0767d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -258,6 +258,9 @@ Release 2.6.0 - UNRELEASED
     MAPREDUCE-6044. Fully qualified intermediate done dir path breaks per-user dir
     creation on Windows. (zjshen)
 
+    MAPREDUCE-5885. build/test/test.mapred.spill causes release audit warnings
+    (Chen He via jlowe)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/812bd0c0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestComparators.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestComparators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestComparators.java
index 1cef5cb..f83dbe2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestComparators.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestComparators.java
@@ -17,13 +17,30 @@
  */
 package org.apache.hadoop.mapred;
 
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.io.*;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.Random;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
+import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.mapreduce.MRConfig;
 
-import junit.framework.TestCase;
-import java.io.*;
-import java.util.*;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 
 /**
  * Two different types of comparators can be used in MapReduce. One is used
@@ -37,8 +54,11 @@ import java.util.*;
  * 2. Test the common use case where values are grouped by keys but values 
  * within each key are grouped by a secondary key (a timestamp, for example). 
  */
-public class TestComparators extends TestCase 
-{
+public class TestComparators {
+  private static final File TEST_DIR = new File(
+      System.getProperty("test.build.data",
+          System.getProperty("java.io.tmpdir")), "TestComparators-mapred");
+
   JobConf conf = new JobConf(TestMapOutputType.class);
   JobClient jc;
   static Random rng = new Random();
@@ -292,9 +312,9 @@ public class TestComparators extends TestCase
     }
   }
 
-
+  @Before
   public void configure() throws Exception {
-    Path testdir = new Path("build/test/test.mapred.spill");
+    Path testdir = new Path(TEST_DIR.getAbsolutePath());
     Path inDir = new Path(testdir, "in");
     Path outDir = new Path(testdir, "out");
     FileSystem fs = FileSystem.get(conf);
@@ -334,14 +354,18 @@ public class TestComparators extends TestCase
     
     jc = new JobClient(conf);
   }
-  
+
+  @After
+  public void cleanup() {
+    FileUtil.fullyDelete(TEST_DIR);
+  }
   /**
    * Test the default comparator for Map/Reduce. 
    * Use the identity mapper and see if the keys are sorted at the end
    * @throws Exception
    */
-  public void testDefaultMRComparator() throws Exception { 
-    configure();
+  @Test
+  public void testDefaultMRComparator() throws Exception {
     conf.setMapperClass(IdentityMapper.class);
     conf.setReducerClass(AscendingKeysReducer.class);
     
@@ -361,8 +385,8 @@ public class TestComparators extends TestCase
    * comparator. Keys should be sorted in reverse order in the reducer. 
    * @throws Exception
    */
-  public void testUserMRComparator() throws Exception { 
-    configure();
+  @Test
+  public void testUserMRComparator() throws Exception {
     conf.setMapperClass(IdentityMapper.class);
     conf.setReducerClass(DescendingKeysReducer.class);
     conf.setOutputKeyComparatorClass(DecreasingIntComparator.class);
@@ -384,8 +408,8 @@ public class TestComparators extends TestCase
    * values for a key should be sorted by the 'timestamp'. 
    * @throws Exception
    */
-  public void testUserValueGroupingComparator() throws Exception { 
-    configure();
+  @Test
+  public void testUserValueGroupingComparator() throws Exception {
     conf.setMapperClass(RandomGenMapper.class);
     conf.setReducerClass(AscendingGroupReducer.class);
     conf.setOutputValueGroupingComparator(CompositeIntGroupFn.class);
@@ -409,8 +433,8 @@ public class TestComparators extends TestCase
    * order. This lets us make sure that the right comparators are used. 
    * @throws Exception
    */
-  public void testAllUserComparators() throws Exception { 
-    configure();
+  @Test
+  public void testAllUserComparators() throws Exception {
     conf.setMapperClass(RandomGenMapper.class);
     // use a decreasing comparator so keys are sorted in reverse order
     conf.setOutputKeyComparatorClass(DecreasingIntComparator.class);
@@ -430,6 +454,7 @@ public class TestComparators extends TestCase
    * Test a user comparator that relies on deserializing both arguments
    * for each compare.
    */
+  @Test
   public void testBakedUserComparator() throws Exception {
     MyWritable a = new MyWritable(8, 8);
     MyWritable b = new MyWritable(7, 9);

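The fix has the same shape in every test file this commit touches: stop spilling under build/test (which the release audit scans), write under test.build.data instead, remove it in an @After method, and move from JUnit 3's TestCase to JUnit 4 annotations. The extracted skeleton, as a sketch with hypothetical class and method names:

    import java.io.File;
    import org.apache.hadoop.fs.FileUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class TestSomething {
      private static final File TEST_DIR = new File(
          System.getProperty("test.build.data",
              System.getProperty("java.io.tmpdir")), "TestSomething-mapred");

      @Before
      public void configure() throws Exception {
        // build job inputs under TEST_DIR.getAbsolutePath()
      }

      @After
      public void cleanup() {
        FileUtil.fullyDelete(TEST_DIR);  // leave nothing for the audit to flag
      }

      @Test
      public void testCase() throws Exception {
        // tests no longer call configure() by hand; JUnit 4 runs @Before/@After
      }
    }
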
http://git-wip-us.apache.org/repos/asf/hadoop/blob/812bd0c0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapOutputType.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapOutputType.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapOutputType.java
index d11d7bc..e3860fd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapOutputType.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapOutputType.java
@@ -17,21 +17,36 @@
  */
 package org.apache.hadoop.mapred;
 
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.io.*;
-import org.apache.hadoop.mapred.lib.*;
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.SequenceFile;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.mapreduce.MRConfig;
-import junit.framework.TestCase;
-import java.io.*;
-import java.util.*;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.fail;
+
 
 /** 
  * TestMapOutputType checks whether the Map task handles type mismatch
  * between mapper output and the type specified in
  * JobConf.MapOutputKeyType and JobConf.MapOutputValueType.
  */
-public class TestMapOutputType extends TestCase 
-{
+public class TestMapOutputType {
+  private static final File TEST_DIR = new File(
+      System.getProperty("test.build.data",
+          System.getProperty("java.io.tmpdir")), "TestMapOutputType-mapred");
   JobConf conf = new JobConf(TestMapOutputType.class);
   JobClient jc;
   /** 
@@ -75,9 +90,9 @@ public class TestMapOutputType extends TestCase
     }
   }
 
-
+  @Before
   public void configure() throws Exception {
-    Path testdir = new Path("build/test/test.mapred.spill");
+    Path testdir = new Path(TEST_DIR.getAbsolutePath());
     Path inDir = new Path(testdir, "in");
     Path outDir = new Path(testdir, "out");
     FileSystem fs = FileSystem.get(conf);
@@ -101,17 +116,21 @@ public class TestMapOutputType extends TestCase
       throw new IOException("Mkdirs failed to create " + inDir.toString());
     }
     Path inFile = new Path(inDir, "part0");
-    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, inFile, 
+    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, inFile,
                                                            Text.class, Text.class);
     writer.append(new Text("rec: 1"), new Text("Hello"));
     writer.close();
     
     jc = new JobClient(conf);
   }
-  
+
+  @After
+  public void cleanup() {
+    FileUtil.fullyDelete(TEST_DIR);
+  }
+
+  @Test
   public void testKeyMismatch() throws Exception {
-    configure();
-    
     //  Set bad MapOutputKeyClass and MapOutputValueClass
     conf.setMapOutputKeyClass(IntWritable.class);
     conf.setMapOutputValueClass(IntWritable.class);
@@ -125,11 +144,9 @@ public class TestMapOutputType extends TestCase
       fail("Oops! The job was supposed to break due to an exception");
     }
   }
-  
+
+  @Test
   public void testValueMismatch() throws Exception {
-    configure();
-  
-    // Set good MapOutputKeyClass, bad MapOutputValueClass    
     conf.setMapOutputKeyClass(Text.class);
     conf.setMapOutputValueClass(IntWritable.class);
     
@@ -142,11 +159,10 @@ public class TestMapOutputType extends TestCase
       fail("Oops! The job was supposed to break due to an exception");
     }
   }
-  
-  public void testNoMismatch() throws Exception{ 
-    configure();
-    
-    //  Set good MapOutputKeyClass and MapOutputValueClass    
+
+  @Test
+  public void testNoMismatch() throws Exception{
+    //  Set good MapOutputKeyClass and MapOutputValueClass
     conf.setMapOutputKeyClass(Text.class);
     conf.setMapOutputValueClass(Text.class);
      

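Note: the TestMapOutputType changes above follow the same JUnit 3 to JUnit 4 migration pattern applied throughout this patch series: drop "extends TestCase", mark setup/teardown with @Before/@After, annotate each test with @Test, and move scratch data from the hard-coded build/test path to a per-class directory under test.build.data (falling back to java.io.tmpdir) that is deleted after every test. A minimal sketch of the pattern; the ExampleTest class name is hypothetical, everything else mirrors the diffs:

    import java.io.File;

    import org.apache.hadoop.fs.FileUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    import static org.junit.Assert.assertTrue;

    public class ExampleTest {            // no "extends TestCase"
      private static final File TEST_DIR = new File(
          System.getProperty("test.build.data",
              System.getProperty("java.io.tmpdir")), "ExampleTest");

      @Before                             // replaces JUnit 3's implicit setUp()
      public void configure() throws Exception {
        assertTrue(TEST_DIR.mkdirs() || TEST_DIR.isDirectory());
      }

      @After                              // cleanup runs even when a test fails
      public void cleanup() {
        FileUtil.fullyDelete(TEST_DIR);
      }

      @Test                               // JUnit 4 finds tests by annotation,
      public void testSomething() {       // not by the "test" name prefix
      }
    }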
http://git-wip-us.apache.org/repos/asf/hadoop/blob/812bd0c0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapRed.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapRed.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapRed.java
index 3f7a6f7..02a083b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapRed.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestMapRed.java
@@ -24,7 +24,7 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
-import java.util.Collections;
+import java.io.File;
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -34,6 +34,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.NullWritable;
@@ -46,11 +47,11 @@ import org.apache.hadoop.mapred.lib.IdentityReducer;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.junit.After;
 import org.junit.Test;
 
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 
 /**********************************************************
  * MapredLoadTest generates a bunch of work that exercises
@@ -110,6 +111,10 @@ public class TestMapRed extends Configured implements Tool {
    * of numbers in random order, but where each number appears
    * as many times as we were instructed.
    */
+  private static final File TEST_DIR = new File(
+      System.getProperty("test.build.data",
+          System.getProperty("java.io.tmpdir")), "TestMapRed-mapred");
+
   static class RandomGenMapper
     implements Mapper<IntWritable, IntWritable, IntWritable, IntWritable> {
     
@@ -248,6 +253,11 @@ public class TestMapRed extends Configured implements Tool {
   private static int counts = 100;
   private static Random r = new Random();
 
+  @After
+  public void cleanup() {
+    FileUtil.fullyDelete(TEST_DIR);
+  }
+
   /**
      public TestMapRed(int range, int counts, Configuration conf) throws IOException {
      this.range = range;
@@ -372,7 +382,7 @@ public class TestMapRed extends Configured implements Tool {
                                 boolean includeCombine
                                 ) throws Exception {
     JobConf conf = new JobConf(TestMapRed.class);
-    Path testdir = new Path("build/test/test.mapred.compress");
+    Path testdir = new Path(TEST_DIR.getAbsolutePath());
     Path inDir = new Path(testdir, "in");
     Path outDir = new Path(testdir, "out");
     FileSystem fs = FileSystem.get(conf);
@@ -440,7 +450,7 @@ public class TestMapRed extends Configured implements Tool {
     //
     // Generate distribution of ints.  This is the answer key.
     //
-    JobConf conf = null;
+    JobConf conf;
     //Check to get configuration and check if it is configured thro' Configured
     //interface. This would happen when running testcase thro' command line.
     if(getConf() == null) {
@@ -465,7 +475,7 @@ public class TestMapRed extends Configured implements Tool {
     // Write the answer key to a file.  
     //
     FileSystem fs = FileSystem.get(conf);
-    Path testdir = new Path("mapred.loadtest");
+    Path testdir = new Path(TEST_DIR.getAbsolutePath(), "mapred.loadtest");
     if (!fs.mkdirs(testdir)) {
       throw new IOException("Mkdirs failed to create " + testdir.toString());
     }
@@ -635,8 +645,8 @@ public class TestMapRed extends Configured implements Tool {
       in.close();
     }
     int originalTotal = 0;
-    for (int i = 0; i < dist.length; i++) {
-      originalTotal += dist[i];
+    for (int aDist : dist) {
+      originalTotal += aDist;
     }
     System.out.println("Original sum: " + originalTotal);
     System.out.println("Recomputed sum: " + totalseen);
@@ -727,7 +737,7 @@ public class TestMapRed extends Configured implements Tool {
   public void runJob(int items) {
     try {
       JobConf conf = new JobConf(TestMapRed.class);
-      Path testdir = new Path("build/test/test.mapred.spill");
+      Path testdir = new Path(TEST_DIR.getAbsolutePath());
       Path inDir = new Path(testdir, "in");
       Path outDir = new Path(testdir, "out");
       FileSystem fs = FileSystem.get(conf);
@@ -777,7 +787,7 @@ public class TestMapRed extends Configured implements Tool {
       System.err.println("Usage: TestMapRed <range> <counts>");
       System.err.println();
       System.err.println("Note: a good test will have a " +
-      		"<counts> value that is substantially larger than the <range>");
+          "<counts> value that is substantially larger than the <range>");
       return -1;
     }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/812bd0c0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java
index 0bee2b5..34a4d2c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestKeyFieldBasedComparator.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.mapred.lib;
 
-import java.io.*;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -35,9 +34,23 @@ import org.apache.hadoop.mapred.RunningJob;
 import org.apache.hadoop.mapred.TextInputFormat;
 import org.apache.hadoop.mapred.TextOutputFormat;
 import org.apache.hadoop.mapred.Utils;
+import org.junit.After;
+import org.junit.Test;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.InputStreamReader;
 
 
 public class TestKeyFieldBasedComparator extends HadoopTestCase {
+
+  private static final File TEST_DIR = new File(
+      System.getProperty("test.build.data",
+          System.getProperty("java.io.tmpdir")),
+          "TestKeyFieldBasedComparator-lib");
   JobConf conf;
   JobConf localConf;
   
@@ -50,8 +63,9 @@ public class TestKeyFieldBasedComparator extends HadoopTestCase {
     localConf = createJobConf();
     localConf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
   }
+
   public void configure(String keySpec, int expect) throws Exception {
-    Path testdir = new Path("build/test/test.mapred.spill");
+    Path testdir = new Path(TEST_DIR.getAbsolutePath());
     Path inDir = new Path(testdir, "in");
     Path outDir = new Path(testdir, "out");
     FileSystem fs = getFileSystem();
@@ -116,6 +130,13 @@ public class TestKeyFieldBasedComparator extends HadoopTestCase {
       reader.close();
     }
   }
+
+  @After
+  public void cleanup() {
+    FileUtil.fullyDelete(TEST_DIR);
+  }
+
+  @Test
   public void testBasicUnixComparator() throws Exception {
     configure("-k1,1n", 1);
     configure("-k2,2n", 1);

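Note: the key specs passed to configure() above use the Unix sort(1) syntax that KeyFieldBasedComparator accepts: -k1,1n sorts numerically on field one, -k2,2n on field two, and so on. A hedged sketch of wiring this into a job; JobConf#setKeyFieldComparatorOptions is the standard mapred hook and the separator key is the one used in the diff, while the ComparatorSetup class is illustrative only:

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapreduce.JobContext;

    class ComparatorSetup {                       // illustrative only
      static JobConf newJob() {
        JobConf job = new JobConf(ComparatorSetup.class);
        job.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
        // Sort map output keys numerically and in reverse on the second
        // space-separated field, like sort -k2,2nr.
        job.setKeyFieldComparatorOptions("-k2,2nr");
        return job;
      }
    }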
http://git-wip-us.apache.org/repos/asf/hadoop/blob/812bd0c0/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapReduce.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapReduce.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapReduce.java
index 01e1283..48ad47a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapReduce.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapReduce.java
@@ -23,14 +23,14 @@ import java.io.DataInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.OutputStreamWriter;
+import java.io.File;
 import java.util.Iterator;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.SequenceFile;
@@ -41,6 +41,10 @@ import org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat;
 import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
+import org.junit.After;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
 
 /**********************************************************
  * MapredLoadTest generates a bunch of work that exercises
@@ -75,8 +79,10 @@ import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
  * 7) A mapred job integrates all the count files into a single one.
  *
  **********************************************************/
-public class TestMapReduce extends TestCase {
-  
+public class TestMapReduce {
+  private static final File TEST_DIR = new File(
+      System.getProperty("test.build.data",
+          System.getProperty("java.io.tmpdir")), "TestMapReduce-mapreduce");
   private static FileSystem fs;
   
   static {
@@ -215,6 +221,12 @@ public class TestMapReduce extends TestCase {
   private static int counts = 100;
   private static Random r = new Random();
 
+  @After
+  public void cleanup() {
+    FileUtil.fullyDelete(TEST_DIR);
+  }
+
+  @Test
   public void testMapred() throws Exception {
     launch();
   }
@@ -239,7 +251,7 @@ public class TestMapReduce extends TestCase {
     //
     // Write the answer key to a file.  
     //
-    Path testdir = new Path("mapred.loadtest");
+    Path testdir = new Path(TEST_DIR.getAbsolutePath());
     if (!fs.mkdirs(testdir)) {
       throw new IOException("Mkdirs failed to create " + testdir.toString());
     }
@@ -488,13 +500,17 @@ public class TestMapReduce extends TestCase {
       System.err.println("Usage: TestMapReduce <range> <counts>");
       System.err.println();
       System.err.println("Note: a good test will have a <counts> value" +
-        " that is substantially larger than the <range>");
+          " that is substantially larger than the <range>");
       return;
     }
 
     int i = 0;
     range = Integer.parseInt(argv[i++]);
     counts = Integer.parseInt(argv[i++]);
-    launch();
+    try {
+      launch();
+    } finally {
+      FileUtil.fullyDelete(TEST_DIR);
+    }
   }
 }


[09/22] git commit: YARN-2182. Updated ContainerId#toString() to append RM Epoch number. Contributed by Tsuyoshi OZAWA

Posted by ar...@apache.org.
YARN-2182. Updated ContainerId#toString() to append RM Epoch number. Contributed by Tsuyoshi OZAWA


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e2d0ff36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e2d0ff36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e2d0ff36

Branch: refs/heads/HDFS-6581
Commit: e2d0ff364a84a4de10e7b11fe83cd3dab155a571
Parents: c5d9a4a
Author: Jian He <jh...@hortonworks.com>
Authored: Wed Aug 27 10:02:45 2014 -0700
Committer: Jian He <jh...@hortonworks.com>
Committed: Wed Aug 27 10:02:45 2014 -0700

----------------------------------------------------------------------
 hadoop-yarn-project/CHANGES.txt                           |  3 +++
 .../org/apache/hadoop/yarn/api/records/ContainerId.java   | 10 +++++++---
 .../java/org/apache/hadoop/yarn/api/TestContainerId.java  |  4 +++-
 3 files changed, 13 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2d0ff36/hadoop-yarn-project/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 36d304c..871829a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -157,6 +157,9 @@ Release 2.6.0 - UNRELEASED
     YARN-1326. RM should log using RMStore at startup time. 
     (Tsuyoshi Ozawa via kasha)
 
+    YARN-2182. Updated ContainerId#toString() to append RM Epoch number.
+    (Tsuyoshi OZAWA via jianhe)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2d0ff36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
index 73e8085..fc7f404 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java
@@ -83,7 +83,7 @@ public abstract class ContainerId implements Comparable<ContainerId>{
  
   
   // TODO: fail the app submission if attempts are more than 10 or something
-  private static final ThreadLocal<NumberFormat> appAttemptIdFormat =
+  private static final ThreadLocal<NumberFormat> appAttemptIdAndEpochFormat =
       new ThreadLocal<NumberFormat>() {
         @Override
         public NumberFormat initialValue() {
@@ -153,9 +153,13 @@ public abstract class ContainerId implements Comparable<ContainerId>{
     sb.append(ApplicationId.appIdFormat.get().format(appId.getId()))
         .append("_");
     sb.append(
-        appAttemptIdFormat.get().format(
+        appAttemptIdAndEpochFormat.get().format(
             getApplicationAttemptId().getAttemptId())).append("_");
-    sb.append(containerIdFormat.get().format(getId()));
+    sb.append(containerIdFormat.get().format(0x3fffff & getId()));
+    int epoch = getId() >> 22;
+    if (epoch > 0) {
+      sb.append("_").append(appAttemptIdAndEpochFormat.get().format(epoch));
+    }
     return sb.toString();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e2d0ff36/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
index f92df8a..b23d0ed 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java
@@ -54,7 +54,9 @@ public class TestContainerId {
     long ts = System.currentTimeMillis();
     ContainerId c6 = newContainerId(36473, 4365472, ts, 25645811);
     Assert.assertEquals("container_10_0001_01_000001", c1.toString());
-    Assert.assertEquals("container_" + ts + "_36473_4365472_25645811",
+    Assert.assertEquals(479987, 0x003fffff & c6.getId());
+    Assert.assertEquals(6, c6.getId() >> 22);
+    Assert.assertEquals("container_" + ts + "_36473_4365472_479987_06",
         c6.toString());
   }
 
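Note: to read the new assertions above: after YARN-2182 the low 22 bits of ContainerId#getId() hold the sequential container number and the high bits hold the RM epoch, which toString() appends only when non-zero. For the test's id 25645811: 6 << 22 = 25165824, and 25645811 - 25165824 = 479987, hence the "_479987_06" suffix (the epoch is zero-padded by the same NumberFormat as the attempt id). A small decomposition sketch, not part of the patch:

    public class EpochDecode {                    // illustrative only
      public static void main(String[] args) {
        int id = 25645811;
        int containerId = 0x3fffff & id;          // low 22 bits -> 479987
        int epoch = id >> 22;                     // high bits   -> 6
        // Pre-epoch ids have epoch == 0, so their string form is unchanged.
        System.out.println(containerId + " " + epoch);
      }
    }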


[15/22] git commit: HDFS-4486. Add log category for long-running DFSClient notices. Contributed by Zhe Zhang.

Posted by ar...@apache.org.
HDFS-4486. Add log category for long-running DFSClient notices. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/225569ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/225569ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/225569ec

Branch: refs/heads/HDFS-6581
Commit: 225569ece229cec32f852f831fd337a139c44b1e
Parents: d805cc2
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Wed Aug 27 13:39:40 2014 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Wed Aug 27 13:39:40 2014 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/crypto/OpensslCipher.java |  2 ++
 .../crypto/random/OpensslSecureRandom.java      |  3 +++
 .../org/apache/hadoop/io/nativeio/NativeIO.java |  7 ++---
 .../JniBasedUnixGroupsMappingWithFallback.java  |  3 ++-
 .../apache/hadoop/util/PerformanceAdvisory.java | 24 +++++++++++++++++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../apache/hadoop/hdfs/BlockReaderFactory.java  | 27 +++++++++-----------
 .../hdfs/shortcircuit/DomainSocketFactory.java  |  4 ++-
 8 files changed, 53 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/225569ec/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
index 264652b..2eb16ee 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 /**
  * OpenSSL cipher using JNI.
@@ -82,6 +83,7 @@ public final class OpensslCipher {
     String loadingFailure = null;
     try {
       if (!NativeCodeLoader.buildSupportsOpenssl()) {
+        PerformanceAdvisory.LOG.debug("Build does not support openssl");
         loadingFailure = "build does not support openssl.";
       } else {
         initIDs();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/225569ec/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
index b1fa988..6c53a0a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.NativeCodeLoader;
 
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 /**
  * OpenSSL secure random using JNI.
@@ -67,6 +68,8 @@ public class OpensslSecureRandom extends Random {
   
   public OpensslSecureRandom() {
     if (!nativeEnabled) {
+      PerformanceAdvisory.LOG.debug("Build does not support openssl, " +
+          "falling back to Java SecureRandom.");
       fallback = new java.security.SecureRandom();
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/225569ec/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
index fafa295..53d31d6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.PerformanceAdvisory;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
@@ -196,7 +197,7 @@ public class NativeIO {
           // This can happen if the user has an older version of libhadoop.so
           // installed - in this case we can continue without native IO
           // after warning
-          LOG.error("Unable to initialize NativeIO libraries", t);
+          PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
         }
       }
     }
@@ -574,7 +575,7 @@ public class NativeIO {
           // This can happen if the user has an older version of libhadoop.so
           // installed - in this case we can continue without native IO
           // after warning
-          LOG.error("Unable to initialize NativeIO libraries", t);
+          PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
         }
       }
     }
@@ -593,7 +594,7 @@ public class NativeIO {
         // This can happen if the user has an older version of libhadoop.so
         // installed - in this case we can continue without native IO
         // after warning
-        LOG.error("Unable to initialize NativeIO libraries", t);
+        PerformanceAdvisory.LOG.debug("Unable to initialize NativeIO libraries", t);
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/225569ec/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
index 908ca14..40333fc 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/JniBasedUnixGroupsMappingWithFallback.java
@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.util.NativeCodeLoader;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 public class JniBasedUnixGroupsMappingWithFallback implements
     GroupMappingServiceProvider {
@@ -37,7 +38,7 @@ public class JniBasedUnixGroupsMappingWithFallback implements
     if (NativeCodeLoader.isNativeCodeLoaded()) {
       this.impl = new JniBasedUnixGroupsMapping();
     } else {
-      LOG.debug("Falling back to shell based");
+      PerformanceAdvisory.LOG.debug("Falling back to shell based");
       this.impl = new ShellBasedUnixGroupsMapping();
     }
     if (LOG.isDebugEnabled()){

http://git-wip-us.apache.org/repos/asf/hadoop/blob/225569ec/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PerformanceAdvisory.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PerformanceAdvisory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PerformanceAdvisory.java
new file mode 100644
index 0000000..306d47c
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PerformanceAdvisory.java
@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+public class PerformanceAdvisory {
+  public static final Log LOG = LogFactory.getLog(PerformanceAdvisory.class);
+}

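Note: the point of the new category is that all of these "native code unavailable / short-circuit disabled" style notices now share one logger name, org.apache.hadoop.util.PerformanceAdvisory, so an operator chasing performance can enable them in one place instead of raising half a dozen client classes to DEBUG. A usage sketch; the log4j.properties line is an assumption about a conventional log4j 1.x setup, not part of the patch:

    import org.apache.hadoop.util.PerformanceAdvisory;

    public class AdvisoryExample {                // illustrative only
      public static void main(String[] args) {
        // To surface every such notice, raise just this one category, e.g.
        // in log4j.properties (assumed, conventional setup):
        //   log4j.logger.org.apache.hadoop.util.PerformanceAdvisory=DEBUG
        PerformanceAdvisory.LOG.debug("falling back to a slower code path");
      }
    }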
http://git-wip-us.apache.org/repos/asf/hadoop/blob/225569ec/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7783243..f3ecf07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -514,6 +514,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6899. Allow changing MiniDFSCluster volumes per DN and capacity
     per volume. (Arpit Agarwal)
 
+    HDFS-4486. Add log category for long-running DFSClient notices (Zhe Zhang
+    via Colin Patrick McCabe)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/225569ec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index d27bd6e..3fb442b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -54,6 +54,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.PerformanceAdvisory;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -343,10 +344,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       return null;
     }
     if (clientContext.getDisableLegacyBlockReaderLocal()) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(this + ": can't construct BlockReaderLocalLegacy because " +
-            "disableLegacyBlockReaderLocal is set.");
-      }
+      PerformanceAdvisory.LOG.debug(this + ": can't construct " +
+          "BlockReaderLocalLegacy because " +
+          "disableLegacyBlockReaderLocal is set.");
       return null;
     }
     IOException ioe = null;
@@ -385,10 +385,8 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
                       getPathInfo(inetSocketAddress, conf);
     }
     if (!pathInfo.getPathState().getUsableForShortCircuit()) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(this + ": " + pathInfo + " is not " +
-            "usable for short circuit; giving up on BlockReaderLocal.");
-      }
+      PerformanceAdvisory.LOG.debug(this + ": " + pathInfo + " is not " +
+          "usable for short circuit; giving up on BlockReaderLocal.");
       return null;
     }
     ShortCircuitCache cache = clientContext.getShortCircuitCache();
@@ -404,8 +402,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
     }
     if (info.getReplica() == null) {
       if (LOG.isTraceEnabled()) {
-        LOG.trace(this + ": failed to get ShortCircuitReplica.  " +
-            "Cannot construct BlockReaderLocal via " + pathInfo.getPath());
+        PerformanceAdvisory.LOG.debug(this + ": failed to get " +
+            "ShortCircuitReplica. Cannot construct " +
+            "BlockReaderLocal via " + pathInfo.getPath());
       }
       return null;
     }
@@ -580,11 +579,9 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
                       getPathInfo(inetSocketAddress, conf);
     }
     if (!pathInfo.getPathState().getUsableForDataTransfer()) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace(this + ": not trying to create a remote block reader " +
-            "because the UNIX domain socket at " + pathInfo +
-            " is not usable.");
-      }
+      PerformanceAdvisory.LOG.debug(this + ": not trying to create a " +
+          "remote block reader because the UNIX domain socket at " +
+          pathInfo + " is not usable.");
       return null;
     }
     if (LOG.isTraceEnabled()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/225569ec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
index e067de7..5fd31a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.net.unix.DomainSocket;
 import com.google.common.base.Preconditions;
 import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
+import org.apache.hadoop.util.PerformanceAdvisory;
 
 public class DomainSocketFactory {
   private static final Log LOG = LogFactory.getLog(DomainSocketFactory.class);
@@ -105,7 +106,8 @@ public class DomainSocketFactory {
     }
 
     if (feature == null) {
-      LOG.debug("Both short-circuit local reads and UNIX domain socket are disabled.");
+      PerformanceAdvisory.LOG.debug(
+          "Both short-circuit local reads and UNIX domain socket are disabled.");
     } else {
       if (conf.getDomainSocketPath().isEmpty()) {
         throw new HadoopIllegalArgumentException(feature + " is enabled but "


[22/22] git commit: HDFS-6923. Propagate LazyPersist flag to DNs via DataTransferProtocol. (Arpit Agarwal)

Posted by ar...@apache.org.
HDFS-6923. Propagate LazyPersist flag to DNs via DataTransferProtocol. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8612590d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8612590d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8612590d

Branch: refs/heads/HDFS-6581
Commit: 8612590d896d4debd9f0cb83a6d99832512fbe00
Parents: 43ccda5
Author: arp <ar...@apache.org>
Authored: Wed Aug 27 15:13:20 2014 -0700
Committer: arp <ar...@apache.org>
Committed: Wed Aug 27 15:23:02 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt    |  3 +++
 .../java/org/apache/hadoop/hdfs/DFSOutputStream.java     | 11 +++++++----
 .../hdfs/protocol/datatransfer/DataTransferProtocol.java |  4 ++--
 .../hadoop/hdfs/protocol/datatransfer/Receiver.java      |  3 ++-
 .../apache/hadoop/hdfs/protocol/datatransfer/Sender.java |  6 ++++--
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java |  3 ++-
 .../apache/hadoop/hdfs/server/datanode/DataXceiver.java  |  6 ++++--
 .../hadoop-hdfs/src/main/proto/datatransfer.proto        |  7 +++++++
 .../org/apache/hadoop/hdfs/TestDataTransferProtocol.java |  2 +-
 .../hadoop/hdfs/server/datanode/TestDiskError.java       |  2 +-
 10 files changed, 33 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8612590d/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
index 1f2bf64..8854e07 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
@@ -6,4 +6,7 @@
 
     HDFS-6922. Add LazyPersist flag to INodeFile, save it in FsImage and
     edit logs. (Arpit Agarwal)
+  
+    HDFS-6923. Propagate LazyPersist flag to DNs via DataTransferProtocol.
+    (Arpit Agarwal)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8612590d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 14977a2..c255bf6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -344,6 +344,7 @@ public class DFSOutputStream extends FSOutputSummer
     private long restartDeadline = 0; // Deadline of DN restart
     private BlockConstructionStage stage;  // block construction stage
     private long bytesSent = 0; // number of bytes that've been sent
+    private final boolean isLazyPersistFile;
 
     /** Nodes have been used in the pipeline before and have failed. */
     private final List<DatanodeInfo> failed = new ArrayList<DatanodeInfo>();
@@ -358,8 +359,9 @@ public class DFSOutputStream extends FSOutputSummer
     /**
      * Default construction for file create
      */
-    private DataStreamer() {
+    private DataStreamer(HdfsFileStatus stat) {
       isAppend = false;
+      isLazyPersistFile = stat.isLazyPersist();
       stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
     }
     
@@ -377,6 +379,7 @@ public class DFSOutputStream extends FSOutputSummer
       block = lastBlock.getBlock();
       bytesSent = block.getNumBytes();
       accessToken = lastBlock.getBlockToken();
+      isLazyPersistFile = stat.isLazyPersist();
       long usedInLastBlock = stat.getLen() % blockSize;
       int freeInLastBlock = (int)(blockSize - usedInLastBlock);
 
@@ -1352,7 +1355,7 @@ public class DFSOutputStream extends FSOutputSummer
           new Sender(out).writeBlock(blockCopy, nodeStorageTypes[0], accessToken,
               dfsClient.clientName, nodes, nodeStorageTypes, null, bcs, 
               nodes.length, block.getNumBytes(), bytesSent, newGS, checksum,
-              cachingStrategy.get());
+              cachingStrategy.get(), isLazyPersistFile);
   
           // receive ack for connect
           BlockOpResponseProto resp = BlockOpResponseProto.parseFrom(
@@ -1601,7 +1604,7 @@ public class DFSOutputStream extends FSOutputSummer
     computePacketChunkSize(dfsClient.getConf().writePacketSize,
         checksum.getBytesPerChecksum());
 
-    streamer = new DataStreamer();
+    streamer = new DataStreamer(stat);
     if (favoredNodes != null && favoredNodes.length != 0) {
       streamer.setFavoredNodes(favoredNodes);
     }
@@ -1650,7 +1653,7 @@ public class DFSOutputStream extends FSOutputSummer
     } else {
       computePacketChunkSize(dfsClient.getConf().writePacketSize,
           checksum.getBytesPerChecksum());
-      streamer = new DataStreamer();
+      streamer = new DataStreamer(stat);
     }
     this.fileEncryptionInfo = stat.getFileEncryptionInfo();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8612590d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
index d54d5be..f6b99e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
@@ -106,8 +106,8 @@ public interface DataTransferProtocol {
       final long maxBytesRcvd,
       final long latestGenerationStamp,
       final DataChecksum requestedChecksum,
-      final CachingStrategy cachingStrategy) throws IOException;
-
+      final CachingStrategy cachingStrategy,
+      final boolean allowLazyPersist) throws IOException;
   /**
    * Transfer a block to another datanode.
    * The block stage must be

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8612590d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
index a09437c..78693bb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
@@ -137,7 +137,8 @@ public abstract class Receiver implements DataTransferProtocol {
         fromProto(proto.getRequestedChecksum()),
         (proto.hasCachingStrategy() ?
             getCachingStrategy(proto.getCachingStrategy()) :
-          CachingStrategy.newDefaultStrategy()));
+          CachingStrategy.newDefaultStrategy()),
+        (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false));
   }
 
   /** Receive {@link Op#TRANSFER_BLOCK} */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8612590d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
index 68da523..4298bb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java
@@ -124,7 +124,8 @@ public class Sender implements DataTransferProtocol {
       final long maxBytesRcvd,
       final long latestGenerationStamp,
       DataChecksum requestedChecksum,
-      final CachingStrategy cachingStrategy) throws IOException {
+      final CachingStrategy cachingStrategy,
+      final boolean allowLazyPersist) throws IOException {
     ClientOperationHeaderProto header = DataTransferProtoUtil.buildClientHeader(
         blk, clientName, blockToken);
     
@@ -142,7 +143,8 @@ public class Sender implements DataTransferProtocol {
       .setMaxBytesRcvd(maxBytesRcvd)
       .setLatestGenerationStamp(latestGenerationStamp)
       .setRequestedChecksum(checksumProto)
-      .setCachingStrategy(getCachingStrategy(cachingStrategy));
+      .setCachingStrategy(getCachingStrategy(cachingStrategy))
+      .setAllowLazyPersist(allowLazyPersist);
     
     if (source != null) {
       proto.setSource(PBHelper.convertDatanodeInfo(source));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8612590d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 1ec91d0..e86ea0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1809,7 +1809,8 @@ public class DataNode extends Configured
 
         new Sender(out).writeBlock(b, targetStorageTypes[0], accessToken,
             clientname, targets, targetStorageTypes, srcNode,
-            stage, 0, 0, 0, 0, blockSender.getChecksum(), cachingStrategy);
+            stage, 0, 0, 0, 0, blockSender.getChecksum(), cachingStrategy,
+            false);
 
         // send data & checksum
         blockSender.sendBlock(out, unbufOut, null);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8612590d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 4575c93..3b8304e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -544,7 +544,8 @@ class DataXceiver extends Receiver implements Runnable {
       final long maxBytesRcvd,
       final long latestGenerationStamp,
       DataChecksum requestedChecksum,
-      CachingStrategy cachingStrategy) throws IOException {
+      CachingStrategy cachingStrategy,
+      final boolean allowLazyPersist) throws IOException {
     previousOpClientName = clientname;
     updateCurrentThreadName("Receiving block " + block);
     final boolean isDatanode = clientname.length() == 0;
@@ -648,10 +649,11 @@ class DataXceiver extends Receiver implements Runnable {
               HdfsConstants.SMALL_BUFFER_SIZE));
           mirrorIn = new DataInputStream(unbufMirrorIn);
 
+          // Do not propagate allowLazyPersist to downstream DataNodes.
           new Sender(mirrorOut).writeBlock(originalBlock, targetStorageTypes[0],
               blockToken, clientname, targets, targetStorageTypes, srcDataNode,
               stage, pipelineSize, minBytesRcvd, maxBytesRcvd,
-              latestGenerationStamp, requestedChecksum, cachingStrategy);
+              latestGenerationStamp, requestedChecksum, cachingStrategy, false);
 
           mirrorOut.flush();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8612590d/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
index 6283b56..13747ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto
@@ -109,6 +109,13 @@ message OpWriteBlockProto {
   optional CachingStrategyProto cachingStrategy = 10;
   optional StorageTypeProto storageType = 11 [default = DISK];
   repeated StorageTypeProto targetStorageTypes = 12;
+
+  /**
+   * Hint to the DataNode that the block can be allocated on transient
+   * storage i.e. memory and written to disk lazily. The DataNode is free
+   * to ignore this hint.
+   */
+  optional bool allowLazyPersist = 13 [default = false];
 }
   
 message OpTransferBlockProto {

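Note: tags 1-12 of OpWriteBlockProto already exist on the wire, so the new flag takes tag 13 as optional with [default = false]; a receiver built from this proto stays compatible with senders that never set it, which is what the Receiver change earlier in this commit encodes. A sketch of that read-side guard, assuming the generated protobuf classes; the LazyPersistCompat wrapper is illustrative only:

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;

    class LazyPersistCompat {                     // illustrative only
      // Mirrors the guard in Receiver#opWriteBlock: an optional proto field
      // is read through its has...() check so senders that predate tag 13
      // behave as if they sent the declared default (false).
      static boolean allowLazyPersist(OpWriteBlockProto proto) {
        return proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false;
      }
    }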
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8612590d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index bcb68e9..3586551 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -524,6 +524,6 @@ public class TestDataTransferProtocol {
         BlockTokenSecretManager.DUMMY_TOKEN, "cl",
         new DatanodeInfo[1], new StorageType[1], null, stage,
         0, block.getNumBytes(), block.getNumBytes(), newGS,
-        checksum, CachingStrategy.newDefaultStrategy());
+        checksum, CachingStrategy.newDefaultStrategy(), false);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8612590d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
index 4b5b6e1..f440bb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
@@ -152,7 +152,7 @@ public class TestDiskError {
         BlockTokenSecretManager.DUMMY_TOKEN, "",
         new DatanodeInfo[0], new StorageType[0], null,
         BlockConstructionStage.PIPELINE_SETUP_CREATE, 1, 0L, 0L, 0L,
-        checksum, CachingStrategy.newDefaultStrategy());
+        checksum, CachingStrategy.newDefaultStrategy(), false);
     out.flush();
 
     // close the connection before sending the content of the block


[18/22] git commit: Add HDFS-6879 to CHANGES.txt

Posted by ar...@apache.org.
Add HDFS-6879 to CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37549576
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37549576
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37549576

Branch: refs/heads/HDFS-6581
Commit: 37549576e7aca2fe3d0fe03ea2e82aeb953bca44
Parents: 6962510
Author: Colin Patrick Mccabe <cm...@cloudera.com>
Authored: Wed Aug 27 14:18:18 2014 -0700
Committer: Colin Patrick Mccabe <cm...@cloudera.com>
Committed: Wed Aug 27 14:18:18 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++
 1 file changed, 3 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37549576/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d5797e8..30664c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -517,6 +517,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-4486. Add log category for long-running DFSClient notices (Zhe Zhang
     via Colin Patrick McCabe)
 
+    HDFS-6879. Adding tracing to Hadoop RPC (Masatake Iwasaki via Colin Patrick
+    McCabe)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)


[10/22] git commit: HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion. Contributed by Juan Yu and Jing Zhao.

Posted by ar...@apache.org.
HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion. Contributed by Juan Yu and Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b441d22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b441d22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b441d22

Branch: refs/heads/HDFS-6581
Commit: 6b441d227a8806e87224106a81361bd61f0b3d0b
Parents: e2d0ff3
Author: Jing Zhao <ji...@apache.org>
Authored: Wed Aug 27 10:26:22 2014 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Wed Aug 27 10:26:22 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +
 .../snapshot/DirectoryWithSnapshotFeature.java  | 10 ++-
 .../namenode/snapshot/TestSnapshotDeletion.java | 77 +++++++++++++++++++-
 3 files changed, 85 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b441d22/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index fb3906a..63c434d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -642,6 +642,9 @@ Release 2.6.0 - UNRELEASED
 
     HDFS-4852. libhdfs documentation is out of date. (cnauroth)
 
+    HDFS-6908. Incorrect snapshot directory diff generated by snapshot deletion.
+    (Juan Yu and jing9 via jing9)
+
 Release 2.5.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b441d22/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 9893bba..9c9d435 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -722,6 +722,8 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
         counts.add(lastDiff.diff.destroyCreatedList(currentINode,
             collectedBlocks, removedINodes));
       }
+      counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
+          collectedBlocks, removedINodes, priorDeleted, countDiffChange));
     } else {
       // update prior
       prior = getDiffs().updatePrior(snapshot, prior);
@@ -739,7 +741,9 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
       
       counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior,
           currentINode, collectedBlocks, removedINodes, countDiffChange));
-      
+      counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
+          collectedBlocks, removedINodes, priorDeleted, countDiffChange));
+
       // check priorDiff again since it may be created during the diff deletion
       if (prior != Snapshot.NO_SNAPSHOT_ID) {
         DirectoryDiff priorDiff = this.getDiffs().getDiffById(prior);
@@ -778,9 +782,7 @@ public class DirectoryWithSnapshotFeature implements INode.Feature {
         }
       }
     }
-    counts.add(currentINode.cleanSubtreeRecursively(snapshot, prior,
-        collectedBlocks, removedINodes, priorDeleted, countDiffChange));
-    
+
     if (currentINode.isQuotaSet()) {
       currentINode.getDirectoryWithQuotaFeature().addSpaceConsumed2Cache(
           -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b441d22/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index 77fa2a2..1450a7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -558,7 +559,81 @@ public class TestSnapshotDeletion {
           + toDeleteFileInSnapshot.toString(), e);
     }
   }
-  
+
+  /**
+   * Delete a snapshot that is taken before a directory deletion,
+   * directory diff list should be combined correctly.
+   */
+  @Test (timeout=60000)
+  public void testDeleteSnapshot1() throws Exception {
+    final Path root = new Path("/");
+
+    Path dir = new Path("/dir1");
+    Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(root);
+    hdfs.createSnapshot(root, "s1");
+
+    Path file2 = new Path(dir, "file2");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.createSnapshot(root, "s2");
+
+    // delete file
+    hdfs.delete(file1, true);
+    hdfs.delete(file2, true);
+
+    // delete directory
+    assertTrue(hdfs.delete(dir, false));
+
+    // delete second snapshot
+    hdfs.deleteSnapshot(root, "s2");
+
+    NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
+    NameNodeAdapter.saveNamespace(cluster.getNameNode());
+
+    // restart NN
+    cluster.restartNameNodes();
+  }
+
+  /**
+   * Delete a snapshot that was taken before a recursive directory deletion;
+   * the directory diff list should be combined correctly.
+   */
+  @Test (timeout=60000)
+  public void testDeleteSnapshot2() throws Exception {
+    final Path root = new Path("/");
+
+    Path dir = new Path("/dir1");
+    Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+
+    hdfs.allowSnapshot(root);
+    hdfs.createSnapshot(root, "s1");
+
+    Path file2 = new Path(dir, "file2");
+    DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
+    INodeFile file2Node = fsdir.getINode(file2.toString()).asFile();
+    long file2NodeId = file2Node.getId();
+
+    hdfs.createSnapshot(root, "s2");
+
+    // delete directory recursively
+    assertTrue(hdfs.delete(dir, true));
+    assertNotNull(fsdir.getInode(file2NodeId));
+
+    // delete second snapshot
+    hdfs.deleteSnapshot(root, "s2");
+    assertNull(fsdir.getInode(file2NodeId));
+
+    NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
+    NameNodeAdapter.saveNamespace(cluster.getNameNode());
+
+    // restart NN
+    cluster.restartNameNodes();
+  }
+
   /**
    * Test deleting snapshots in a more complicated scenario: need to combine
    * snapshot diffs, but no need to handle diffs distributed in a dir tree


[21/22] git commit: HDFS-6922. Add LazyPersist flag to INodeFile, save it in FsImage and edit logs. (Arpit Agarwal)

Posted by ar...@apache.org.
HDFS-6922. Add LazyPersist flag to INodeFile, save it in FsImage and edit logs. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43ccda51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43ccda51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43ccda51

Branch: refs/heads/HDFS-6581
Commit: 43ccda51a19b0d904d2f36c6317f79b958f868cc
Parents: 5e81c4f
Author: arp <ar...@apache.org>
Authored: Wed Aug 27 15:12:19 2014 -0700
Committer: arp <ar...@apache.org>
Committed: Wed Aug 27 15:23:02 2014 -0700

----------------------------------------------------------------------
 .../hadoop-hdfs/CHANGES-HDFS-6581.txt           |  3 ++-
 .../server/blockmanagement/BlockCollection.java |  6 +++++
 .../hdfs/server/namenode/FSDirectory.java       | 17 ++++++++++-----
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |  2 ++
 .../hdfs/server/namenode/FSEditLogLoader.java   |  3 ++-
 .../hdfs/server/namenode/FSEditLogOp.java       | 23 ++++++++++++++++++++
 .../hdfs/server/namenode/FSImageFormat.java     |  9 ++++++--
 .../server/namenode/FSImageFormatPBINode.java   |  6 +++--
 .../server/namenode/FSImageSerialization.java   |  6 +++--
 .../hdfs/server/namenode/FSNamesystem.java      |  2 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 22 ++++++++++++++-----
 .../server/namenode/INodeFileAttributes.java    | 11 +++++++---
 .../server/namenode/NameNodeLayoutVersion.java  |  4 +++-
 .../snapshot/FSImageFormatPBSnapshot.java       |  4 +++-
 .../tools/offlineImageViewer/FSImageLoader.java |  1 +
 .../offlineImageViewer/PBImageXmlWriter.java    |  4 ++++
 .../hadoop-hdfs/src/main/proto/fsimage.proto    |  1 +
 .../hdfs/server/namenode/CreateEditsLog.java    |  4 ++--
 .../hdfs/server/namenode/TestEditLog.java       |  2 +-
 .../namenode/TestFSPermissionChecker.java       |  2 +-
 .../hdfs/server/namenode/TestINodeFile.java     |  8 +++----
 21 files changed, 107 insertions(+), 33 deletions(-)
----------------------------------------------------------------------
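
The core of this change is the repacking of the 64-bit INodeFile header: the old
layout, [16 bits replication][48 bits preferredBlockSize], becomes
[4 bits lazyPersist][12 bits replication][48 bits preferredBlockSize] (see the
INodeFile.HeaderFormat hunk below). A minimal standalone sketch of the packing,
assuming the combine/retrieve semantics of the LongBitFormat helper that
HeaderFormat delegates to:

    // Sketch only -- mirrors INodeFile.HeaderFormat after this commit,
    // without depending on o.a.h.hdfs.util.LongBitFormat.
    class HeaderFormatSketch {
      static final int BLOCK_SIZE_BITS  = 48; // bits 0..47:  preferredBlockSize
      static final int REPLICATION_BITS = 12; // bits 48..59: replication
                                              // bits 60..63: lazyPersist flag

      static long toLong(long preferredBlockSize, short replication,
                         boolean isLazyPersist) {
        long h = preferredBlockSize & ((1L << BLOCK_SIZE_BITS) - 1);
        h |= ((long) replication & ((1L << REPLICATION_BITS) - 1))
            << BLOCK_SIZE_BITS;
        h |= (isLazyPersist ? 1L : 0L)
            << (BLOCK_SIZE_BITS + REPLICATION_BITS);
        return h;
      }

      static boolean getLazyPersistFlag(long header) {
        return (header >>> (BLOCK_SIZE_BITS + REPLICATION_BITS)) != 0;
      }

      public static void main(String[] args) {
        long h = toLong(128 * 1024 * 1024, (short) 3, true);
        System.out.println(getLazyPersistFlag(h)); // prints: true
      }
    }

One consequence worth noting: shrinking REPLICATION from 16 to 12 bits caps the
representable replication factor at 4095, still far beyond any practical value.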


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
index fc6e0e0..1f2bf64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-6581.txt
@@ -4,5 +4,6 @@
 
     HDFS-6924. Add new RAM_DISK storage type. (Arpit Agarwal)
 
-
+    HDFS-6922. Add LazyPersist flag to INodeFile, save it in FsImage and
+    edit logs. (Arpit Agarwal)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index c1e0682..bd3a780 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -55,6 +55,12 @@ public interface BlockCollection {
   public long getPreferredBlockSize();
 
   /**
+   * Return true if the file was created with {@link CreateFlag#LAZY_PERSIST}.
+   * @return true if the lazyPersist flag is set
+   */
+  public boolean getLazyPersistFlag();
+
+  /**
    * Get block replication for the collection 
    * @return block replication value
    */
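
For context, a hypothetical sketch of how a client would eventually request this
behavior at create time, given a FileSystem fs, a Path path and a long blockSize
in scope (the client-side wiring lands in later HDFS-6581 subtasks, so the exact
call path is an assumption; the FileSystem#create overload shown does exist):

    // Hypothetical usage sketch, not part of this commit: request
    // reduced-durability placement via CreateFlag.LAZY_PERSIST.
    EnumSet<CreateFlag> flags =
        EnumSet.of(CreateFlag.CREATE, CreateFlag.LAZY_PERSIST);
    FSDataOutputStream out = fs.create(path,
        FsPermission.getFileDefault(), flags,
        4096 /* bufferSize */, (short) 1 /* replication */,
        blockSize, null /* progress */);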

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index d03a4e5..afe9d97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -278,6 +278,7 @@ public class FSDirectory implements Closeable {
    */
   INodeFile addFile(String path, PermissionStatus permissions,
                     short replication, long preferredBlockSize,
+                    boolean isLazyPersist,
                     String clientName, String clientMachine)
     throws FileAlreadyExistsException, QuotaExceededException,
       UnresolvedLinkException, SnapshotAccessControlException, AclException {
@@ -285,7 +286,7 @@ public class FSDirectory implements Closeable {
     long modTime = now();
     INodeFile newNode = new INodeFile(namesystem.allocateNewInodeId(), null,
         permissions, modTime, modTime, BlockInfo.EMPTY_ARRAY, replication,
-        preferredBlockSize);
+        preferredBlockSize, isLazyPersist);
     newNode.toUnderConstruction(clientName, clientMachine);
 
     boolean added = false;
@@ -315,6 +316,7 @@ public class FSDirectory implements Closeable {
                             long modificationTime,
                             long atime,
                             long preferredBlockSize,
+                            boolean isLazyPersist,
                             boolean underConstruction,
                             String clientName,
                             String clientMachine) {
@@ -323,12 +325,12 @@ public class FSDirectory implements Closeable {
     if (underConstruction) {
       newNode = new INodeFile(id, null, permissions, modificationTime,
           modificationTime, BlockInfo.EMPTY_ARRAY, replication,
-          preferredBlockSize);
+          preferredBlockSize, isLazyPersist);
       newNode.toUnderConstruction(clientName, clientMachine);
 
     } else {
       newNode = new INodeFile(id, null, permissions, modificationTime, atime,
-          BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize);
+          BlockInfo.EMPTY_ARRAY, replication, preferredBlockSize, isLazyPersist);
     }
 
     try {
@@ -2283,11 +2285,13 @@ public class FSDirectory implements Closeable {
      long size = 0;     // length is zero for directories
      short replication = 0;
      long blocksize = 0;
+     boolean isLazyPersist = false;
      if (node.isFile()) {
        final INodeFile fileNode = node.asFile();
        size = fileNode.computeFileSize(snapshot);
        replication = fileNode.getFileReplication(snapshot);
        blocksize = fileNode.getPreferredBlockSize();
+       isLazyPersist = fileNode.getLazyPersistFlag();
      }
      int childrenNum = node.isDirectory() ? 
          node.asDirectory().getChildrenNum(snapshot) : 0;
@@ -2300,7 +2304,7 @@ public class FSDirectory implements Closeable {
         node.isDirectory(), 
         replication, 
         blocksize,
-        false,
+        isLazyPersist,
         node.getModificationTime(snapshot),
         node.getAccessTime(snapshot),
         getPermissionForFileStatus(node, snapshot),
@@ -2322,6 +2326,7 @@ public class FSDirectory implements Closeable {
     long size = 0; // length is zero for directories
     short replication = 0;
     long blocksize = 0;
+    boolean isLazyPersist = false;
     LocatedBlocks loc = null;
     final FileEncryptionInfo feInfo = isRawPath ? null :
         getFileEncryptionInfo(node, snapshot);
@@ -2329,7 +2334,8 @@
       final INodeFile fileNode = node.asFile();
       size = fileNode.computeFileSize(snapshot);
       replication = fileNode.getFileReplication(snapshot);
-      blocksize = fileNode.getPreferredBlockSize();
+      blocksize = fileNode.getPreferredBlockSize();
+      isLazyPersist = fileNode.getLazyPersistFlag();
 
       final boolean inSnapshot = snapshot != Snapshot.CURRENT_STATE_ID; 
       final boolean isUc = !inSnapshot && fileNode.isUnderConstruction();
@@ -2348,7 +2353,7 @@ public class FSDirectory implements Closeable {
 
     HdfsLocatedFileStatus status =
         new HdfsLocatedFileStatus(size, node.isDirectory(), replication,
-          blocksize, false, node.getModificationTime(snapshot),
+          blocksize, isLazyPersist, node.getModificationTime(snapshot),
           node.getAccessTime(snapshot),
           getPermissionForFileStatus(node, snapshot),
           node.getUserName(snapshot), node.getGroupName(snapshot),

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index b2adcd4..bf3767b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -697,6 +697,7 @@ public class FSEditLog implements LogsPurgeable {
       .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
+      .setLazyPersistFlag(newNode.getLazyPersistFlag())
       .setBlocks(newNode.getBlocks())
       .setPermissionStatus(permissions)
       .setClientName(newNode.getFileUnderConstructionFeature().getClientName())
@@ -727,6 +728,7 @@ public class FSEditLog implements LogsPurgeable {
       .setModificationTime(newNode.getModificationTime())
       .setAccessTime(newNode.getAccessTime())
       .setBlockSize(newNode.getPreferredBlockSize())
+      .setLazyPersistFlag(newNode.getLazyPersistFlag())
       .setBlocks(newNode.getBlocks())
       .setPermissionStatus(newNode.getPermissionStatus());
     

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index d522e51..951f3e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -357,7 +357,8 @@ public class FSEditLogLoader {
             path, addCloseOp.permissions, addCloseOp.aclEntries,
             addCloseOp.xAttrs,
             replication, addCloseOp.mtime, addCloseOp.atime,
-            addCloseOp.blockSize, true, addCloseOp.clientName,
+            addCloseOp.blockSize, addCloseOp.isLazyPersist,
+            true, addCloseOp.clientName,
             addCloseOp.clientMachine);
         fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 5543e0c..94c287c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -401,6 +401,7 @@ public abstract class FSEditLogOp {
     long mtime;
     long atime;
     long blockSize;
+    boolean isLazyPersist;
     Block[] blocks;
     PermissionStatus permissions;
     List<AclEntry> aclEntries;
@@ -448,6 +449,11 @@ public abstract class FSEditLogOp {
       return (T)this;
     }
 
+    <T extends AddCloseOp> T setLazyPersistFlag(boolean isLazyPersist) {
+      this.isLazyPersist = isLazyPersist;
+      return (T)this;
+    }
+
     <T extends AddCloseOp> T setBlocks(Block[] blocks) {
       if (blocks.length > MAX_BLOCKS) {
         throw new RuntimeException("Can't have more than " + MAX_BLOCKS +
@@ -495,6 +501,7 @@ public abstract class FSEditLogOp {
       FSImageSerialization.writeLong(mtime, out);
       FSImageSerialization.writeLong(atime, out);
       FSImageSerialization.writeLong(blockSize, out);
+      FSImageSerialization.writeInt((isLazyPersist ? 1 : 0), out);
       new ArrayWritable(Block.class, blocks).write(out);
       permissions.write(out);
 
@@ -562,6 +569,13 @@ public abstract class FSEditLogOp {
         this.blockSize = readLong(in);
       }
 
+      if (NameNodeLayoutVersion.supports(
+          NameNodeLayoutVersion.Feature.LAZY_PERSIST_FILES, logVersion)) {
+        this.isLazyPersist = (FSImageSerialization.readInt(in) != 0);
+      } else {
+        this.isLazyPersist = false;
+      }
+
       this.blocks = readBlocks(in, logVersion);
       this.permissions = PermissionStatus.read(in);
 
@@ -615,6 +629,8 @@ public abstract class FSEditLogOp {
       builder.append(atime);
       builder.append(", blockSize=");
       builder.append(blockSize);
+      builder.append(", lazyPersist");
+      builder.append(isLazyPersist);
       builder.append(", blocks=");
       builder.append(Arrays.toString(blocks));
       builder.append(", permissions=");
@@ -651,6 +667,8 @@ public abstract class FSEditLogOp {
           Long.toString(atime));
       XMLUtils.addSaxString(contentHandler, "BLOCKSIZE",
           Long.toString(blockSize));
+      XMLUtils.addSaxString(contentHandler, "LAZY_PERSIST",
+          Boolean.toString(isLazyPersist));
       XMLUtils.addSaxString(contentHandler, "CLIENT_NAME", clientName);
       XMLUtils.addSaxString(contentHandler, "CLIENT_MACHINE", clientMachine);
       for (Block b : blocks) {
@@ -674,6 +692,11 @@ public abstract class FSEditLogOp {
       this.mtime = Long.parseLong(st.getValue("MTIME"));
       this.atime = Long.parseLong(st.getValue("ATIME"));
       this.blockSize = Long.parseLong(st.getValue("BLOCKSIZE"));
+
+      String lazyPersistString = st.getValueOrNull("LAZY_PERSIST");
+      this.isLazyPersist =
+          lazyPersistString != null && Boolean.parseBoolean(lazyPersistString);
+
       this.clientName = st.getValue("CLIENT_NAME");
       this.clientMachine = st.getValue("CLIENT_MACHINE");
       if (st.hasChildren("BLOCK")) {
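
For illustration, the toXml/fromXml changes above place the new element between
BLOCKSIZE and CLIENT_NAME in offline edits viewer output; an abridged,
hypothetical OP_ADD record would look like:

    <RECORD>
      <OPCODE>OP_ADD</OPCODE>
      <DATA>
        ...
        <BLOCKSIZE>134217728</BLOCKSIZE>
        <LAZY_PERSIST>true</LAZY_PERSIST>
        <CLIENT_NAME>DFSClient_NONMAPREDUCE_1</CLIENT_NAME>
        ...
      </DATA>
    </RECORD>

Reading LAZY_PERSIST via getValueOrNull() rather than getValue() keeps the
parser compatible with XML produced before this change.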

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 5b6d269..76b6c80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -783,8 +783,11 @@ public class FSImageFormat {
       if (counter != null) {
         counter.increment();
       }
+
+      // Images in the old format will not have the lazyPersist flag, so it
+      // is safe to always pass false.
       final INodeFile file = new INodeFile(inodeId, localName, permissions,
-          modificationTime, atime, blocks, replication, blockSize);
+          modificationTime, atime, blocks, replication, blockSize, false);
       if (underConstruction) {
         file.toUnderConstruction(clientName, clientMachine);
       }
@@ -884,8 +887,10 @@ public class FSImageFormat {
           in.readShort());
       final long preferredBlockSize = in.readLong();
 
+      // The lazyPersist flag is not present in old image formats, so it
+      // can safely be set to false.
       return new INodeFileAttributes.SnapshotCopy(name, permissions, null, modificationTime,
-          accessTime, replication, preferredBlockSize, null);
+          accessTime, replication, preferredBlockSize, false, null);
     }
 
     public INodeDirectoryAttributes loadINodeDirectoryAttributes(DataInput in)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
index feff704..51e297d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java
@@ -283,7 +283,8 @@ public final class FSImageFormatPBINode {
 
       final INodeFile file = new INodeFile(n.getId(),
           n.getName().toByteArray(), permissions, f.getModificationTime(),
-          f.getAccessTime(), blocks, replication, f.getPreferredBlockSize());
+          f.getAccessTime(), blocks, replication, f.getPreferredBlockSize(),
+          f.hasIsLazyPersist() ? f.getIsLazyPersist() : false);
 
       if (f.hasAcl()) {
         file.addAclFeature(new AclFeature(loadAclEntries(f.getAcl(),
@@ -391,7 +392,8 @@ public final class FSImageFormatPBINode {
           .setModificationTime(file.getModificationTime())
           .setPermission(buildPermissionStatus(file, state.getStringMap()))
           .setPreferredBlockSize(file.getPreferredBlockSize())
-          .setReplication(file.getFileReplication());
+          .setReplication(file.getFileReplication())
+          .setIsLazyPersist(file.getLazyPersistFlag());
 
       AclFeature f = file.getAclFeature();
       if (f != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index eb8354d..e369575 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -146,14 +146,16 @@ public class FSImageSerialization {
     int numLocs = in.readInt();
     assert numLocs == 0 : "Unexpected block locations";
 
+    // Images in the pre-protobuf format will not have the lazyPersist flag,
+    // so it is safe to pass false always.
     INodeFile file = new INodeFile(inodeId, name, perm, modificationTime,
-        modificationTime, blocks, blockReplication, preferredBlockSize);
+        modificationTime, blocks, blockReplication, preferredBlockSize, false);
     file.toUnderConstruction(clientName, clientMachine);
     return file;
   }
 
   // Helper function that writes an INodeUnderConstruction
-  // into the input stream
+  // into the output stream
   //
   static void writeINodeUnderConstruction(DataOutputStream out, INodeFile cons,
       String path) throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6f1f969..8054b79 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2616,7 +2616,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       if (parent != null && mkdirsRecursively(parent.toString(),
               permissions, true, now())) {
         newNode = dir.addFile(src, permissions, replication, blockSize,
-                              holder, clientMachine);
+                              isLazyPersist, holder, clientMachine);
       }
 
       if (newNode == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 94fa686..a254f3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -74,7 +74,9 @@ public class INodeFile extends INodeWithAdditionalFields
-  /** Format: [16 bits for replication][48 bits for PreferredBlockSize] */
+  /** Format: [4 bits LazyPersist][12 bits replication][48 bits PreferredBlockSize] */
   static enum HeaderFormat {
     PREFERRED_BLOCK_SIZE(null, 48, 1),
-    REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 16, 1);
+    REPLICATION(PREFERRED_BLOCK_SIZE.BITS, 12, 1),
+    LAZY_PERSIST(REPLICATION.BITS, 4, 0);
+
 
     private final LongBitFormat BITS;
 
@@ -90,12 +92,18 @@ public class INodeFile extends INodeWithAdditionalFields
       return PREFERRED_BLOCK_SIZE.BITS.retrieve(header);
     }
 
-    static long toLong(long preferredBlockSize, short replication) {
+    static boolean getLazyPersistFlag(long header) {
+      return LAZY_PERSIST.BITS.retrieve(header) != 0;
+    }
+
+    static long toLong(long preferredBlockSize, short replication, boolean isLazyPersist) {
       long h = 0;
       h = PREFERRED_BLOCK_SIZE.BITS.combine(preferredBlockSize, h);
       h = REPLICATION.BITS.combine(replication, h);
+      h = LAZY_PERSIST.BITS.combine(isLazyPersist ? 1 : 0, h);
       return h;
     }
+
   }
 
   private long header = 0L;
@@ -104,9 +112,9 @@ public class INodeFile extends INodeWithAdditionalFields
 
   INodeFile(long id, byte[] name, PermissionStatus permissions, long mtime,
       long atime, BlockInfo[] blklist, short replication,
-      long preferredBlockSize) {
+      long preferredBlockSize, boolean isLazyPersist) {
     super(id, name, permissions, mtime, atime);
-    header = HeaderFormat.toLong(preferredBlockSize, replication);
+    header = HeaderFormat.toLong(preferredBlockSize, replication, isLazyPersist);
     this.blocks = blklist;
   }
   
@@ -160,7 +168,6 @@ public class INodeFile extends INodeWithAdditionalFields
     return getFileUnderConstructionFeature() != null;
   }
 
-  /** Convert this file to an {@link INodeFileUnderConstruction}. */
   INodeFile toUnderConstruction(String clientName, String clientMachine) {
     Preconditions.checkState(!isUnderConstruction(),
         "file is already under construction");
@@ -356,6 +363,11 @@ public class INodeFile extends INodeWithAdditionalFields
   }
 
   @Override
+  public boolean getLazyPersistFlag() {
+    return HeaderFormat.getLazyPersistFlag(header);
+  }
+
+  @Override
   public long getHeaderLong() {
     return header;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
index 47b76b7..64ee1fc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileAttributes.java
@@ -21,7 +21,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile.HeaderFormat;
 import org.apache.hadoop.hdfs.server.namenode.XAttrFeature;
-
 /**
  * The attributes of a file.
  */
@@ -32,6 +31,8 @@ public interface INodeFileAttributes extends INodeAttributes {
 
   /** @return preferred block size in bytes */
   public long getPreferredBlockSize();
+
+  public boolean getLazyPersistFlag();
   
   /** @return the header as a long. */
   public long getHeaderLong();
@@ -45,10 +46,11 @@ public interface INodeFileAttributes extends INodeAttributes {
 
     public SnapshotCopy(byte[] name, PermissionStatus permissions,
         AclFeature aclFeature, long modificationTime, long accessTime,
-        short replication, long preferredBlockSize, XAttrFeature xAttrsFeature) {
+        short replication, long preferredBlockSize,
+        boolean isLazyPersist, XAttrFeature xAttrsFeature) {
       super(name, permissions, aclFeature, modificationTime, accessTime, 
           xAttrsFeature);
-      header = HeaderFormat.toLong(preferredBlockSize, replication);
+      header = HeaderFormat.toLong(preferredBlockSize, replication, isLazyPersist);
     }
 
     public SnapshotCopy(INodeFile file) {
@@ -67,6 +69,9 @@ public interface INodeFileAttributes extends INodeAttributes {
     }
 
     @Override
+    public boolean getLazyPersistFlag() { return HeaderFormat.getLazyPersistFlag(header); }
+
+    @Override
     public long getHeaderLong() {
       return header;
     }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
index 6ae2806..16d55fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeLayoutVersion.java
@@ -65,7 +65,9 @@ public class NameNodeLayoutVersion {
   public static enum Feature implements LayoutFeature {
     ROLLING_UPGRADE(-55, -53, "Support rolling upgrade", false),
     EDITLOG_LENGTH(-56, "Add length field to every edit log op"),
-    XATTRS(-57, "Extended attributes");
+    XATTRS(-57, "Extended attributes"),
+    LAZY_PERSIST_FILES(-58, "Support for optional lazy persistence of "
+        + " files with reduced durability guarantees");
     
     private final FeatureInfo info;
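
The new layout version is what gates the extra field when replaying edits (see
the AddCloseOp.readFields hunk above). NameNode layout versions are negative
and decrease as features are added, so a feature introduced at -58 is supported
by any log whose version is at or below -58. A simplified sketch of the check
(the real NameNodeLayoutVersion.supports() consults the feature map built by
LayoutVersion rather than comparing integers directly):

    // Simplified sketch of the gate used in AddCloseOp.readFields().
    static final int LAZY_PERSIST_FILES_LV = -58;

    static boolean supportsLazyPersistFiles(int logVersion) {
      // More negative == newer; older logs (version > -58) lack the field.
      return logVersion <= LAZY_PERSIST_FILES_LV;
    }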
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
index 3f4cda5..6e00c17 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
@@ -220,7 +220,9 @@ public class FSImageFormatPBSnapshot {
           copy = new INodeFileAttributes.SnapshotCopy(pbf.getName()
               .toByteArray(), permission, acl, fileInPb.getModificationTime(),
               fileInPb.getAccessTime(), (short) fileInPb.getReplication(),
-              fileInPb.getPreferredBlockSize(), xAttrs);
+              fileInPb.getPreferredBlockSize(),
+              fileInPb.hasIsLazyPersist() ? fileInPb.getIsLazyPersist() : false,
+              xAttrs);
         }
 
         FileDiff diff = new FileDiff(pbf.getSnapshotId(), copy, null,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
index bab83a1..7ad1c59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
@@ -391,6 +391,7 @@ class FSImageLoader {
             f.getPermission(), stringTable);
         map.put("accessTime", f.getAccessTime());
         map.put("blockSize", f.getPreferredBlockSize());
+        map.put("lazyPersist", f.getIsLazyPersist());
         map.put("group", p.getGroupName());
         map.put("length", getFileSize(f));
         map.put("modificationTime", f.getModificationTime());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index 99617b8..744fc75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -247,6 +247,10 @@ public final class PBImageXmlWriter {
         .o("perferredBlockSize", f.getPreferredBlockSize())
         .o("permission", dumpPermission(f.getPermission()));
 
+    if (f.hasIsLazyPersist()) {
+      o("lazyPersist", f.getIsLazyPersist());
+    }
+
     if (f.getBlocksCount() > 0) {
       out.print("<blocks>");
       for (BlockProto b : f.getBlocksList()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
index 1c8edfa..63674de 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/fsimage.proto
@@ -134,6 +134,7 @@ message INodeSection {
     optional FileUnderConstructionFeature fileUC = 7;
     optional AclFeatureProto acl = 8;
     optional XAttrFeatureProto xAttrs = 9;
+    optional bool isLazyPersist = 10 [default = false];
   }
 
   message INodeDirectory {
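
Because isLazyPersist is a proto2 optional field with an explicit default,
images written before this change deserialize with getIsLazyPersist() == false
even though hasIsLazyPersist() is false, so the explicit hasIsLazyPersist()
guards in the FSImageFormatPBINode and FSImageFormatPBSnapshot hunks above are
belt-and-braces rather than strictly required. A sketch of the semantics,
assuming the generated FsImageProto classes:

    // Sketch: proto2 semantics for the new field when loading an old image.
    INodeSection.INodeFile f = INodeSection.INodeFile.newBuilder()
        .setReplication(3)
        .build();                    // isLazyPersist never written
    assert !f.hasIsLazyPersist();    // field is absent in old images
    assert !f.getIsLazyPersist();    // getter falls back to the default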

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
index a5e2edf..7f193f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
@@ -82,7 +82,7 @@ public class CreateEditsLog {
       }
 
       final INodeFile inode = new INodeFile(inodeId.nextValue(), null,
-          p, 0L, 0L, blocks, replication, blockSize);
+          p, 0L, 0L, blocks, replication, blockSize, false);
       inode.toUnderConstruction("", "");
 
      // Append path to filename with information about blockIDs 
@@ -97,7 +97,7 @@ public class CreateEditsLog {
         editLog.logMkDir(currentDir, dirInode);
       }
       INodeFile fileUc = new INodeFile(inodeId.nextValue(), null,
-          p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
+          p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, false);
       fileUc.toUnderConstruction("", "");
       editLog.logOpenFile(filePath, fileUc, false);
       editLog.logCloseFile(filePath, inode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 8074a68..762969e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -194,7 +194,7 @@ public class TestEditLog {
 
       for (int i = 0; i < numTransactions; i++) {
         INodeFile inode = new INodeFile(namesystem.allocateNewInodeId(), null,
-            p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
+            p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize, false);
         inode.toUnderConstruction("", "");
 
         editLog.logOpenFile("/filename" + (startIndex + i), inode, false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
index b1c5ca7..ad51445 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
@@ -423,7 +423,7 @@ public class TestFSPermissionChecker {
       FsPermission.createImmutable(perm));
     INodeFile inodeFile = new INodeFile(INodeId.GRANDFATHER_INODE_ID,
       name.getBytes("UTF-8"), permStatus, 0L, 0L, null, REPLICATION,
-      PREFERRED_BLOCK_SIZE);
+      PREFERRED_BLOCK_SIZE, false);
     parent.addChild(inodeFile);
     return inodeFile;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43ccda51/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index a739b7a..6d669bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -82,7 +82,7 @@ public class TestINodeFile {
 
   INodeFile createINodeFile(short replication, long preferredBlockSize) {
     return new INodeFile(INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L,
-        null, replication, preferredBlockSize);
+        null, replication, preferredBlockSize, false);
   }
   /**
    * Test for the Replication value. Sets a value and checks if it was set
@@ -259,7 +259,7 @@ public class TestINodeFile {
     INodeFile[] iNodes = new INodeFile[nCount];
     for (int i = 0; i < nCount; i++) {
       iNodes[i] = new INodeFile(i, null, perm, 0L, 0L, null, replication,
-          preferredBlockSize);
+          preferredBlockSize, false);
       iNodes[i].setLocalName(DFSUtil.string2Bytes(fileNamePrefix + i));
       BlockInfo newblock = new BlockInfo(replication);
       iNodes[i].addBlock(newblock);
@@ -316,7 +316,7 @@ public class TestINodeFile {
 
     {//cast from INodeFileUnderConstruction
       final INode from = new INodeFile(
-          INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication, 1024L);
+          INodeId.GRANDFATHER_INODE_ID, null, perm, 0L, 0L, null, replication, 1024L, false);
       from.asFile().toUnderConstruction("client", "machine");
     
-      //cast to INodeFile, should success
+      //cast to INodeFile, should succeed
@@ -1079,7 +1079,7 @@ public class TestINodeFile {
   public void testFileUnderConstruction() {
     replication = 3;
     final INodeFile file = new INodeFile(INodeId.GRANDFATHER_INODE_ID, null,
-        perm, 0L, 0L, null, replication, 1024L);
+        perm, 0L, 0L, null, replication, 1024L, false);
     assertFalse(file.isUnderConstruction());
 
     final String clientName = "client";