Posted to common-commits@hadoop.apache.org by ay...@apache.org on 2019/12/17 16:36:10 UTC

[hadoop] branch branch-2.10 updated: HDFS-14519. NameQuota is not updated after the concat operation, so the name quota is wrong. Contributed by Ranith Sardar.

This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch branch-2.10
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.10 by this push:
     new 0927041  HDFS-14519. NameQuota is not updated after the concat operation, so the name quota is wrong. Contributed by Ranith Sardar.
0927041 is described below

commit 0927041589203f7f8f3db9a15e26d960b14531eb
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Tue Dec 17 21:55:58 2019 +0530

    HDFS-14519. NameQuota is not updated after the concat operation, so the name quota is wrong. Contributed by Ranith Sardar.
---
 .../hadoop/hdfs/server/namenode/FSDirConcatOp.java |  1 +
 .../hadoop/hdfs/server/namenode/TestINodeFile.java | 35 +++++++++++++++++++++-
 2 files changed, 35 insertions(+), 1 deletion(-)
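
For context on the one-line change in FSDirConcatOp below: concat folds the blocks of every source file into the target and then removes the source inodes, so the directory's namespace (name quota) usage must drop by the number of sources. Before this patch the quota delta computed for concat did not account for those removed names, which left the cached name count too high. A minimal sketch of the intended accounting, in plain Java with made-up names rather than the HDFS QuotaCounts internals:

  // Illustrative only: models how the name (namespace) count charged to a
  // directory should change when N source files are concatenated into a
  // target file. Class and method names here are hypothetical.
  public class ConcatNamespaceSketch {

    /** Files and directories currently charged against the name quota. */
    private long namespaceCount = 0;

    void createFile() {
      namespaceCount++;                       // each new file uses one name
    }

    void concat(int numSources) {
      // The target keeps its single name; every source inode is deleted,
      // so usage must shrink by the number of sources. This is what the
      // new deltas.addNameSpace(-srcList.length) line accounts for.
      namespaceCount -= numSources;
    }

    long getNamespaceCount() {
      return namespaceCount;
    }

    public static void main(String[] args) {
      ConcatNamespaceSketch sketch = new ConcatNamespaceSketch();
      sketch.createFile();                    // the target file
      for (int i = 0; i < 4; i++) {
        sketch.createFile();                  // four sources, as in the test
      }
      sketch.concat(4);
      System.out.println(sketch.getNamespaceCount()); // prints 1
    }
  }

The new test below drives the same scenario through a MiniDFSCluster and checks that ContentSummary and QuotaUsage agree on the file-and-directory count after the concat.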

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 14df725..b3b45ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -208,6 +208,7 @@ class FSDirConcatOp {
         }
       }
     }
+    deltas.addNameSpace(-srcList.length);
     return deltas;
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index cbf834b..e11e342 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -31,6 +31,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
@@ -42,6 +43,7 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
+import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -1115,4 +1117,35 @@ public class TestINodeFile {
     toBeCleared.clearBlocks();
     assertTrue(toBeCleared.getBlocks().length == 0);
   }
-}
+
+  @Test
+  public void testConcat() throws IOException {
+    Configuration conf = new Configuration();
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
+      cluster.waitActive();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      String dir = "/testConcat";
+      dfs.mkdirs(new Path(dir), FsPermission.getDirDefault());
+      dfs.setQuota(new Path(dir), 100L, HdfsConstants.QUOTA_DONT_SET);
+
+      // Create a target file and 4 source files
+      Path trg = new Path(dir + "/file");
+      DFSTestUtil.createFile(dfs, trg, 512, (short) 1, 0);
+      Path[] srcs = new Path[4];
+      for (int i = 0; i < 4; i++) {
+        srcs[i] = new Path(dir + "/file" + i);
+        DFSTestUtil.createFile(dfs, srcs[i], 512, (short) 1, 0);
+      }
+
+      // Concat file0 through file3 into the target file
+      dfs.concat(trg, srcs);
+
+      // Check the file and directory count and consumed space
+      ContentSummary cs = dfs.getContentSummary(new Path(dir));
+      QuotaUsage qu = dfs.getQuotaUsage(new Path(dir));
+
+      assertEquals(cs.getFileCount() + cs.getDirectoryCount(),
+          qu.getFileAndDirectoryCount());
+    }
+  }
+}
\ No newline at end of file

