Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2019/09/05 23:51:47 UTC

[hadoop] 01/02: Revert "HDFS-14633. The StorageType quota and consume in QuotaFeature is not handled for rename. Contributed by Jinglun."

This is an automated email from the ASF dual-hosted git repository.

xyao pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 53d1bfe6808fd1b719caea8f567e52f6e11bdc97
Author: Xiaoyu Yao <xy...@apache.org>
AuthorDate: Fri Aug 30 16:46:04 2019 -0700

    Revert "HDFS-14633. The StorageType quota and consume in QuotaFeature is not handled for rename. Contributed by Jinglun."
    
    This reverts commit 259c6bb0c7fc814ba463a7bf3a786670dccd4c73.
---
 .../hadoop/hdfs/server/namenode/FSDirRenameOp.java |  7 +--
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  5 +-
 .../java/org/apache/hadoop/hdfs/TestQuota.java     | 62 ----------------------
 3 files changed, 2 insertions(+), 72 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index 6ca6568..68cf3e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -76,12 +76,7 @@ class FSDirRenameOp {
     while(src.getINode(i) == dst.getINode(i)) { i++; }
     // src[i - 1] is the last common ancestor.
     BlockStoragePolicySuite bsps = fsd.getBlockStoragePolicySuite();
-    // Assume dstParent existence check done by callers.
-    INode dstParent = dst.getINode(-2);
-    // Use the destination parent's storage policy for quota delta verify.
-    final QuotaCounts delta = src.getLastINode()
-        .computeQuotaUsage(bsps, dstParent.getStoragePolicyID(), false,
-            Snapshot.CURRENT_STATE_ID);
+    final QuotaCounts delta = src.getLastINode().computeQuotaUsage(bsps);
 
     // Reduce the required quota by dst that is being removed
     final INode dstINode = dst.getLastINode();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index d0ffb3b..c49e00f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -1299,9 +1298,7 @@ public class FSDirectory implements Closeable {
     // always verify inode name
     verifyINodeName(inode.getLocalNameBytes());
 
-    final QuotaCounts counts = inode
-        .computeQuotaUsage(getBlockStoragePolicySuite(),
-            parent.getStoragePolicyID(), false, Snapshot.CURRENT_STATE_ID);
+    final QuotaCounts counts = inode.computeQuotaUsage(getBlockStoragePolicySuite());
     updateCount(existing, pos, counts, checkQuota);
 
     boolean isRename = (inode.getParent() != null);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index b63813b..1c4855f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -1566,57 +1566,6 @@ public class TestQuota {
   }
 
   @Test
-  public void testClrQuotaOnRoot() throws Exception {
-    long orignalQuota = dfs.getQuotaUsage(new Path("/")).getQuota();
-    DFSAdmin admin = new DFSAdmin(conf);
-    String[] args;
-    args = new String[] {"-setQuota", "3K", "/"};
-    runCommand(admin, args, false);
-    assertEquals(3 * 1024, dfs.getQuotaUsage(new Path("/")).getQuota());
-    args = new String[] {"-clrQuota", "/"};
-    runCommand(admin, args, false);
-    assertEquals(orignalQuota, dfs.getQuotaUsage(new Path("/")).getQuota());
-  }
-
-  @Test
-  public void testRename() throws Exception {
-    int fileLen = 1024;
-    short replication = 3;
-
-    final Path parent = new Path(PathUtils.getTestDir(getClass()).getPath(),
-        GenericTestUtils.getMethodName());
-    assertTrue(dfs.mkdirs(parent));
-
-    final Path srcDir = new Path(parent, "src-dir");
-    Path file = new Path(srcDir, "file1");
-    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0);
-    dfs.setStoragePolicy(srcDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
-
-    final Path dstDir = new Path(parent, "dst-dir");
-    assertTrue(dfs.mkdirs(dstDir));
-    dfs.setStoragePolicy(dstDir, HdfsConstants.ALLSSD_STORAGE_POLICY_NAME);
-
-    dfs.setQuota(srcDir, 100000, 100000);
-    dfs.setQuota(dstDir, 100000, 100000);
-
-    Path dstFile = new Path(dstDir, "file1");
-    // Test quota check of rename. Expect a QuotaExceedException.
-    dfs.setQuotaByStorageType(dstDir, StorageType.SSD, 10);
-    try {
-      dfs.rename(file, dstFile);
-      fail("Expect QuotaExceedException.");
-    } catch (QuotaExceededException qe) {
-    }
-
-    // Set enough quota, expect a successful rename.
-    dfs.setQuotaByStorageType(dstDir, StorageType.SSD, fileLen * replication);
-    dfs.rename(file, dstFile);
-    // Verify the storage type usage is properly updated on source and dst.
-    checkQuotaAndCount(dfs, srcDir);
-    checkQuotaAndCount(dfs, dstDir);
-  }
-
-  @Test
   public void testSpaceQuotaExceptionOnAppend() throws Exception {
     GenericTestUtils.setLogLevel(DFSOutputStream.LOG, Level.TRACE);
     GenericTestUtils.setLogLevel(DataStreamer.LOG, Level.TRACE);
@@ -1686,15 +1635,4 @@ public class TestQuota {
     }
     scanner.close();
   }
-
-  // quota and count should match.
-  private void checkQuotaAndCount(DistributedFileSystem fs, Path path)
-      throws IOException {
-    QuotaUsage qu = fs.getQuotaUsage(path);
-    ContentSummary cs = fs.getContentSummary(path);
-    for (StorageType st : StorageType.values()) {
-      // it will fail here, because the quota and consume is not handled right.
-      assertEquals(qu.getTypeConsumed(st), cs.getTypeConsumed(st));
-    }
-  }
 }
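
The reverted change made the rename quota check compute the moved subtree's usage against the destination parent's storage policy, which the removed testRename exercised end to end. Below is a minimal sketch of that scenario, assuming a running MiniDFSCluster with SSD-capable DataNodes and a DistributedFileSystem handle; the class name, paths, and sizes are illustrative only and not taken from the patch:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    import org.apache.hadoop.hdfs.protocol.QuotaExceededException;

    public class RenameStorageTypeQuotaSketch {
      // Moves a HOT (DISK) file into an ALL_SSD directory whose SSD quota is
      // too small, then retries after raising the quota.
      public static void run(DistributedFileSystem dfs) throws Exception {
        final Path srcDir = new Path("/quota-demo/src-dir");
        final Path dstDir = new Path("/quota-demo/dst-dir");
        dfs.mkdirs(srcDir);
        dfs.mkdirs(dstDir);
        dfs.setStoragePolicy(srcDir, HdfsConstants.HOT_STORAGE_POLICY_NAME);
        dfs.setStoragePolicy(dstDir, HdfsConstants.ALLSSD_STORAGE_POLICY_NAME);

        // 1 KB file at replication 3: 3 KB of DISK usage under srcDir.
        final int fileLen = 1024;
        final short replication = 3;
        final Path file = new Path(srcDir, "file1");
        DFSTestUtil.createFile(dfs, file, fileLen, replication, 0);

        // Only 10 bytes of SSD allowed under dstDir; with HDFS-14633 in place
        // the rename is verified against the destination's ALL_SSD policy and
        // rejected, which is what the removed test asserted.
        dfs.setQuotaByStorageType(dstDir, StorageType.SSD, 10);
        try {
          dfs.rename(file, new Path(dstDir, "file1"));
        } catch (QuotaExceededException expected) {
          // fileLen * replication bytes of SSD would exceed the 10-byte quota
        }

        // Raise the SSD quota to exactly what the file needs and rename again.
        dfs.setQuotaByStorageType(dstDir, StorageType.SSD, fileLen * replication);
        dfs.rename(file, new Path(dstDir, "file1"));
      }
    }

With this revert applied, the quota delta for rename is again computed from the source inode's existing usage (computeQuotaUsage(bsps)) rather than the destination parent's storage policy, as shown in the FSDirRenameOp.java and FSDirectory.java hunks above.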

