Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2015/03/19 02:44:31 UTC

hadoop git commit: HDFS-7943. Append cannot handle the last block with length greater than the preferred block size. Contributed by Jing Zhao.

Repository: hadoop
Updated Branches:
  refs/heads/trunk 8234fd0e1 -> bee5a6a64


HDFS-7943. Append cannot handle the last block with length greater than the preferred block size. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bee5a6a6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bee5a6a6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bee5a6a6

Branch: refs/heads/trunk
Commit: bee5a6a64a1c037308fa4d52249be39c82791590
Parents: 8234fd0
Author: Jing Zhao <ji...@apache.org>
Authored: Wed Mar 18 18:40:59 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Wed Mar 18 18:40:59 2015 -0700
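
Why the check matters: concat() splices a source file's blocks into the target
as-is, so a source written with a larger preferred block size can leave the
target's last block longer than the target's own preferred block size, and a
later append() cannot handle such a block. A minimal sketch of the pre-fix
hazard, reusing the helpers already imported by TestHDFSConcat (the paths and
sizes are illustrative, not from the patch):

    // Target created with the default preferred block size.
    Path trg = new Path("/dir/trg");
    DFSTestUtil.createFile(dfs, trg, 1024, (short) 3, 0L);

    // Source written with twice the target's preferred block size, so its
    // last block may be longer than the target's preferred block size.
    Path src = new Path("/dir/src");
    long srcBlockSize = dfs.getDefaultBlockSize(trg) * 2;
    DFSTestUtil.createFile(dfs, src, 1024, srcBlockSize, srcBlockSize,
        (short) 3, 0L);

    // Before this patch the concat succeeded, leaving trg with a last block
    // longer than its preferred block size; a later append() could not
    // extend that block. The patch rejects the concat up front instead.
    dfs.concat(trg, new Path[] { src });
    dfs.append(trg).close();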

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../hdfs/server/namenode/FSDirConcatOp.java     | 26 ++++++++++++++++++--
 .../hdfs/server/namenode/TestHDFSConcat.java    | 17 +++++++++++++
 3 files changed, 44 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bee5a6a6/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 385c39b..d9d9e1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1199,6 +1199,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7945. The WebHdfs system on DN does not honor the length parameter.
     (wheat9)
 
+    HDFS-7943. Append cannot handle the last block with length greater than
+    the preferred block size. (jing9)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bee5a6a6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 5ccd3ea..31a6af7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -34,6 +34,16 @@ import java.util.List;
 
 import static org.apache.hadoop.util.Time.now;
 
+/**
+ * Restrictions for a concat operation:
+ * <pre>
+ * 1. the src files and the target file are in the same dir
+ * 2. no source file can be in a snapshot
+ * 3. no source file can be the same as the target file
+ * 4. source files cannot be under construction or empty
+ * 5. a source file's preferred block size cannot exceed the target file's
+ * </pre>
+ */
 class FSDirConcatOp {
 
   static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
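
Taken together, the restrictions in the javadoc above describe the only shape
of call the validation below accepts. A hedged sketch of a valid invocation
(paths illustrative; the files are assumed closed, non-empty, unsnapshotted,
in the same directory, and written with preferred block sizes no larger than
the target's):

    // Illustrative only: a concat that satisfies every restriction above.
    Path trg = new Path("/dir/trg");
    Path[] srcs = { new Path("/dir/a"), new Path("/dir/b") };
    dfs.concat(trg, srcs);  // the blocks of a and b are moved onto trg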
@@ -123,14 +133,25 @@ class FSDirConcatOp {
         throw new SnapshotException("Concat: the source file " + src
             + " is referred by some other reference in some snapshot.");
       }
+      // a source file cannot be the same as the target file
       if (srcINode == targetINode) {
         throw new HadoopIllegalArgumentException("concat: the src file " + src
             + " is the same with the target file " + targetIIP.getPath());
       }
+      // source file cannot be under construction or empty
       if(srcINodeFile.isUnderConstruction() || srcINodeFile.numBlocks() == 0) {
         throw new HadoopIllegalArgumentException("concat: source file " + src
             + " is invalid or empty or underConstruction");
       }
+      // a source file's preferred block size cannot be greater than the
+      // target file's
+      if (srcINodeFile.getPreferredBlockSize() >
+          targetINode.getPreferredBlockSize()) {
+        throw new HadoopIllegalArgumentException("concat: source file " + src
+            + " has preferred block size " + srcINodeFile.getPreferredBlockSize()
+            + " which is greater than the target file's preferred block size "
+            + targetINode.getPreferredBlockSize());
+      }
       si.add(srcINodeFile);
     }
 
@@ -143,9 +164,10 @@ class FSDirConcatOp {
     return si.toArray(new INodeFile[si.size()]);
   }
 
-  private static QuotaCounts computeQuotaDeltas(FSDirectory fsd, INodeFile target, INodeFile[] srcList) {
+  private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
+      INodeFile target, INodeFile[] srcList) {
     QuotaCounts deltas = new QuotaCounts.Builder().build();
-    short targetRepl = target.getBlockReplication();
+    final short targetRepl = target.getBlockReplication();
     for (INodeFile src : srcList) {
       short srcRepl = src.getBlockReplication();
       long fileSize = src.computeFileSize();
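
How the new check surfaces to callers: the NameNode rejects the concat before
any block is moved, using the message built above. A hedged client-side
sketch, mirroring the assertion in the new test (the bigBlockSrc name is
illustrative):

    try {
      // bigBlockSrc was created with a larger preferred block size than trg.
      dfs.concat(trg, new Path[] { bigBlockSrc });
      Assert.fail("expected concat to be rejected");
    } catch (Exception e) {
      // The message includes both preferred block sizes, as formatted by
      // the check added above.
      GenericTestUtils.assertExceptionContains("preferred block size", e);
    }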

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bee5a6a6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index ddf5a3e..e1c3c0f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -388,6 +389,22 @@ public class TestHDFSConcat {
     } catch (Exception e) {
       // expected
     }
+
+    // a source file's preferred block size cannot be greater than the target's
+    {
+      final Path src1 = new Path(parentDir, "src1");
+      DFSTestUtil.createFile(dfs, src1, fileLen, REPL_FACTOR, 0L);
+      final Path src2 = new Path(parentDir, "src2");
+      // create a file whose preferred block size is greater than the target
+      DFSTestUtil.createFile(dfs, src2, 1024, fileLen,
+          dfs.getDefaultBlockSize(trg) * 2, REPL_FACTOR, 0L);
+      try {
+        dfs.concat(trg, new Path[] {src1, src2});
+        fail("didn't fail for src with greater preferred block size");
+      } catch (Exception e) {
+        GenericTestUtils.assertExceptionContains("preferred block size", e);
+      }
+    }
   }
 
   /**
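
A note on the test pattern: the earlier negative cases in this file swallow
any exception ("// expected"), which cannot distinguish the intended rejection
from an unrelated failure. The new case instead pins the failure to this
patch's check by asserting on the message. A sketch of the two idioms side by
side (the badSrcs array is illustrative):

    // Older pattern: any exception passes the test.
    try {
      dfs.concat(trg, badSrcs);
      Assert.fail("expected concat to fail");
    } catch (Exception e) {
      // expected, but the cause goes unverified
    }

    // Pattern used by the new case: the failure must come from the new check.
    try {
      dfs.concat(trg, badSrcs);
      Assert.fail("didn't fail for src with greater preferred block size");
    } catch (Exception e) {
      GenericTestUtils.assertExceptionContains("preferred block size", e);
    }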