Posted to common-commits@hadoop.apache.org by aa...@apache.org on 2020/04/27 21:31:04 UTC

[hadoop] branch branch-3.1 updated: HDFS-15286. Concat on a same file deleting the file. Contributed by hemanthboyina.

This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new 74ec3c0  HDFS-15286. Concat on a same file deleting the file. Contributed by hemanthboyina.
74ec3c0 is described below

commit 74ec3c001def5fe38347fa41890c67cda275cbc0
Author: Akira Ajisaka <aa...@apache.org>
AuthorDate: Tue Apr 28 06:16:42 2020 +0900

    HDFS-15286. Concat on a same file deleting the file. Contributed by hemanthboyina.
    
    (cherry picked from commit 5e0eda5d5f696aba7fc209874d232baf2a50d547)
---
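
For context: renaming a file out of a snapshotted directory leaves it
represented by an INodeReference, so when the NameNode resolves the source
and target paths of a self-concat it can end up holding two distinct
reference objects that wrap the same inode. The identity check (==)
therefore missed the case and concat went on to delete the file; the
id-based INode#equals catches it. Below is a minimal, self-contained
sketch of that distinction (Node and NodeRef are illustrative stand-ins,
not Hadoop's actual INode classes):

    // Two wrappers around one node: different objects, equal by id.
    public class SelfConcatCheck {

      // Stand-in for INode: equality is defined by the inode id.
      static class Node {
        private final long id;
        Node(long id) { this.id = id; }
        long getId() { return id; }
        @Override public boolean equals(Object o) {
          return o instanceof Node && ((Node) o).getId() == getId();
        }
        @Override public int hashCode() { return Long.hashCode(getId()); }
      }

      // Stand-in for INodeReference: delegates its id to the referred node.
      static class NodeRef extends Node {
        private final Node referred;
        NodeRef(Node referred) { super(-1); this.referred = referred; }
        @Override long getId() { return referred.getId(); }
      }

      public static void main(String[] args) {
        Node inode = new Node(16385L);          // the file's underlying inode
        Node src = new NodeRef(inode);          // resolved for the source path
        Node target = new NodeRef(inode);       // resolved for the target path
        System.out.println(src == target);      // false: distinct wrappers
        System.out.println(src.equals(target)); // true: same inode id
      }
    }
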
 .../hadoop/hdfs/server/namenode/FSDirConcatOp.java |  2 +-
 .../hdfs/server/namenode/TestHDFSConcat.java       | 39 ++++++++++++++++++++++
 2 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index b92c414..ebd7b60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -150,7 +150,7 @@ class FSDirConcatOp {
             + " is referred by some other reference in some snapshot.");
       }
       // source file cannot be the same with the target file
-      if (srcINode == targetINode) {
+      if (srcINode.equals(targetINode)) {
         throw new HadoopIllegalArgumentException("concat: the src file " + src
             + " is the same with the target file " + targetIIP.getPath());
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index b5e0efe..cbb5164 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -524,4 +525,42 @@ public class TestHDFSConcat {
       GenericTestUtils.assertExceptionContains(errMsg, e);
     }
   }
+
+  /**
+   * Test concat on the same source and target file, which is an INode reference.
+   */
+  @Test
+  public void testConcatOnSameFile() throws Exception {
+    String dir = "/dir1";
+    Path trgDir = new Path(dir);
+    dfs.mkdirs(new Path(dir));
+
+    // create a source file
+    String dir2 = "/dir2";
+    Path srcDir = new Path(dir2);
+    dfs.mkdirs(srcDir);
+    dfs.allowSnapshot(srcDir);
+    Path src = new Path(srcDir, "file1");
+    DFSTestUtil.createFile(dfs, src, 512, (short) 2, 0);
+
+    // make the file an INode reference via rename, then delete the snapshot
+    dfs.createSnapshot(srcDir, "s1");
+    dfs.rename(src, trgDir);
+    dfs.deleteSnapshot(srcDir, "s1");
+    Path[] srcs = new Path[1];
+    srcs[0] = new Path(dir, "file1");
+
+    // perform concat
+    LambdaTestUtils.intercept(RemoteException.class,
+        "concat: the src file /dir1/file1 is the same with the target"
+            + " file /dir1/file1",
+        () -> dfs.concat(srcs[0], srcs));
+
+    // the file should still exist and be readable
+    byte[] buff = new byte[1080];
+    FSDataInputStream stream = dfs.open(srcs[0]);
+    stream.readFully(0, buff, 0, 512);
+
+    assertEquals(1, dfs.getContentSummary(new Path(dir)).getFileCount());
+  }
 }
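
The new test drives this end to end inside a MiniDFSCluster. The same
scenario can be sketched against a live cluster through the public client
API; this is a hedged reproduction sketch, not part of the patch (the
hdfs://localhost:8020 URI and the /dir1, /dir2 paths are assumptions, and
allowSnapshot requires admin privileges):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class SelfConcatRepro {
      public static void main(String[] args) throws Exception {
        // Assumed NameNode address; adjust for your cluster.
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem
            .get(URI.create("hdfs://localhost:8020"), new Configuration());

        Path srcDir = new Path("/dir2");
        Path trgDir = new Path("/dir1");
        dfs.mkdirs(srcDir);
        dfs.mkdirs(trgDir);
        dfs.allowSnapshot(srcDir);          // admin-only operation

        Path file = new Path(srcDir, "file1");
        try (FSDataOutputStream out = dfs.create(file)) {
          out.write(new byte[512]);         // a small file is enough
        }

        dfs.createSnapshot(srcDir, "s1");   // snapshot pins the inode
        dfs.rename(file, trgDir);           // file becomes an INodeReference
        dfs.deleteSnapshot(srcDir, "s1");

        // Self-concat: before this fix the == check let it through and the
        // file was deleted; with the fix the NameNode throws
        // HadoopIllegalArgumentException, surfaced as a RemoteException.
        Path moved = new Path(trgDir, "file1");
        dfs.concat(moved, new Path[] { moved });
      }
    }

With the patch applied, the concat call fails fast and /dir1/file1 stays
readable, matching the assertions in the test above.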

