Posted to common-commits@hadoop.apache.org by he...@apache.org on 2020/08/17 07:26:43 UTC

[hadoop] branch trunk updated: HDFS-15483. Ordered snapshot deletion: Disallow rename between two snapshottable directories. (#2172)

This is an automated email from the ASF dual-hosted git repository.

hemanthboyina pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/trunk by this push:
     new 092bfe7  HDFS-15483. Ordered snapshot deletion: Disallow rename between two snapshottable directories. (#2172)
092bfe7 is described below

commit 092bfe7c8e9736efb0dbe3d39af77c4bfffa6ef2
Author: bshashikant <sh...@apache.org>
AuthorDate: Mon Aug 17 12:56:13 2020 +0530

    HDFS-15483. Ordered snapshot deletion: Disallow rename between two snapshottable directories. (#2172)
    
    * HDFS-15483. Ordered snapshot deletion: Disallow rename between two snapshottable directories.
    
    * Addressed review comments.
    
    * Rebased to latest trunk and added snapshotTrashRoot config check.
    
    * Addressed review comments.
    
    * Removed empty line added in SnapshotManager.java.
    
    * Addressed whitespace issues.
---
 .../hadoop/hdfs/server/namenode/FSDirRenameOp.java |  27 ++++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  |  16 ++-
 .../server/namenode/snapshot/SnapshotManager.java  |  34 +++++--
 .../TestRenameWithOrderedSnapshotDeletion.java     | 108 +++++++++++++++++++++
 4 files changed, 170 insertions(+), 15 deletions(-)
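
For context before the diff itself, here is a minimal sketch of the
user-visible behavior this patch introduces, condensed from the new
TestRenameWithOrderedSnapshotDeletion below ("cluster" stands for a running
MiniDFSCluster with both dfs.namenode.snapshot.deletion.ordered and
dfs.namenode.snapshot.trashroot.enabled set to true; path names are
illustrative):

    DistributedFileSystem hdfs = cluster.getFileSystem();
    Path snapDir = new Path("/snapDir");
    Path plainDir = new Path("/plainDir");
    hdfs.mkdirs(snapDir);
    hdfs.mkdirs(plainDir);
    hdfs.allowSnapshot(snapDir);
    DFSTestUtil.createFile(hdfs, new Path(snapDir, "f"), 0, (short) 1, 0);
    // rename within the same snapshot root still works
    hdfs.rename(new Path(snapDir, "f"), new Path(snapDir, "g"));
    try {
      // rename out of the snapshot root is now rejected
      hdfs.rename(new Path(snapDir, "g"), new Path(plainDir, "g"));
    } catch (IOException ioe) {
      // message ends with "... are not under the same snapshot root."
    }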

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index a0c1e3a..7396519 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.Time;
@@ -193,7 +194,7 @@ class FSDirRenameOp {
     }
 
     validateNestSnapshot(fsd, src, dstParent.asDirectory(), snapshottableDirs);
-
+    checkUnderSameSnapshottableRoot(fsd, srcIIP, dstIIP);
     fsd.ezManager.checkMoveValidity(srcIIP, dstIIP);
     // Ensure dst has quota to accommodate rename
     verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
@@ -407,6 +408,7 @@ class FSDirRenameOp {
 
     validateNestSnapshot(fsd, src,
             dstParent.asDirectory(), srcSnapshottableDirs);
+    checkUnderSameSnapshottableRoot(fsd, srcIIP, dstIIP);
 
     // Ensure dst has quota to accommodate rename
     verifyFsLimitsForRename(fsd, srcIIP, dstIIP);
@@ -821,6 +823,29 @@ class FSDirRenameOp {
     }
   }
 
+  private static void checkUnderSameSnapshottableRoot(
+      FSDirectory fsd, INodesInPath srcIIP, INodesInPath dstIIP)
+      throws IOException {
+    // Disallow rename across snapshottable roots when the ordered snapshot
+    // deletion feature and the snapshot trash root are both enabled
+    SnapshotManager snapshotManager = fsd.getFSNamesystem().
+        getSnapshotManager();
+    if (snapshotManager.isSnapshotDeletionOrdered() && fsd.getFSNamesystem()
+        .isSnapshotTrashRootEnabled()) {
+      INodeDirectory srcRoot = snapshotManager.
+          getSnapshottableAncestorDir(srcIIP);
+      INodeDirectory dstRoot = snapshotManager.
+          getSnapshottableAncestorDir(dstIIP);
+      // Ensure the snapshottable roots for both src and dst are the same.
+      if (srcRoot != dstRoot) {
+        String errMsg = "Source " + srcIIP.getPath() +
+            " and dest " + dstIIP.getPath() + " are not under " +
+            "the same snapshot root.";
+        throw new SnapshotException(errMsg);
+      }
+    }
+  }
+
   private static RenameResult createRenameResult(FSDirectory fsd,
       INodesInPath dst, boolean filesDeleted,
       BlocksMapUpdateInfo collectedBlocks) throws IOException {
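
Restated in isolation, the rule the new check enforces is roughly the
following (a self-contained sketch with hypothetical names, not code from
the patch; the real method resolves both roots through
SnapshotManager.getSnapshottableAncestorDir and compares the resulting
INodeDirectory references):

    /** Hypothetical restatement of checkUnderSameSnapshottableRoot. */
    final class RenameRuleSketch {
      /**
       * @param srcRoot snapshottable ancestor of src, or null if there is none
       * @param dstRoot snapshottable ancestor of dst, or null if there is none
       */
      static boolean renameAllowed(boolean orderedDeletion,
          boolean trashRootEnabled, Object srcRoot, Object dstRoot) {
        if (!(orderedDeletion && trashRootEnabled)) {
          return true;  // the check is inactive unless both features are on
        }
        // reference comparison, as in the patch: two paths with no snapshot
        // root at all (both null) also count as "under the same root"
        return srcRoot == dstRoot;
      }
    }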
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0a273f0..15bf6b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -384,9 +384,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       .getLogger(FSNamesystem.class.getName());
 
   // The following are private configurations
-  static final String DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED =
+  public static final String DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED =
       "dfs.namenode.snapshot.trashroot.enabled";
-  static final boolean DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED_DEFAULT = false;
+  public static final boolean DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED_DEFAULT
+      = false;
 
   private final MetricsRegistry registry = new MetricsRegistry("FSNamesystem");
   @Metric final MutableRatesWithAggregation detailedLockHoldTimeMetrics =
@@ -468,6 +469,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   private final UserGroupInformation fsOwner;
   private final String supergroup;
   private final boolean standbyShouldCheckpoint;
+  private final boolean isSnapshotTrashRootEnabled;
   private final int snapshotDiffReportLimit;
   private final int blockDeletionIncrement;
 
@@ -882,6 +884,9 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       // Get the checksum type from config
       String checksumTypeStr = conf.get(DFS_CHECKSUM_TYPE_KEY,
           DFS_CHECKSUM_TYPE_DEFAULT);
+      this.isSnapshotTrashRootEnabled = conf.getBoolean(
+          DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED,
+          DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED_DEFAULT);
       DataChecksum.Type checksumType;
       try {
          checksumType = DataChecksum.Type.valueOf(checksumTypeStr);
@@ -909,8 +914,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
               CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
               ""),
           blockManager.getStoragePolicySuite().getDefaultPolicy().getId(),
-          conf.getBoolean(DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED,
-              DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED_DEFAULT));
+          isSnapshotTrashRootEnabled);
 
       this.maxFsObjects = conf.getLong(DFS_NAMENODE_MAX_OBJECTS_KEY, 
                                        DFS_NAMENODE_MAX_OBJECTS_DEFAULT);
@@ -1054,6 +1058,10 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     return maxListOpenFilesResponses;
   }
 
+  boolean isSnapshotTrashRootEnabled() {
+    return isSnapshotTrashRootEnabled;
+  }
+
   void lockRetryCache() {
     if (retryCache != null) {
       retryCache.lock();
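
Note that both flags must be on for the new rename restriction to take
effect. A minimal sketch of enabling them programmatically, as the new test
does via static imports (DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED lives in
SnapshotManager; the trash-root key is the one made public in this file):

    Configuration conf = new Configuration();
    conf.setBoolean(SnapshotManager.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
    conf.setBoolean(FSNamesystem.DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED, true);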
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
index 3866125..b5b0971 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
@@ -363,21 +363,35 @@ public class SnapshotManager implements SnapshotStatsMXBean {
    * @param iip INodesInPath for the directory to get snapshot root.
    * @return the snapshot root INodeDirectory
    */
+  public INodeDirectory checkAndGetSnapshottableAncestorDir(
+      final INodesInPath iip) throws IOException {
+    final INodeDirectory dir = getSnapshottableAncestorDir(iip);
+    if (dir == null) {
+      throw new SnapshotException("Directory is neither snapshottable nor" +
+          " under a snap root!");
+    }
+    return dir;
+  }
+
   public INodeDirectory getSnapshottableAncestorDir(final INodesInPath iip)
       throws IOException {
     final String path = iip.getPath();
-    final INodeDirectory dir = INodeDirectory.valueOf(iip.getLastINode(), path);
+    final INode inode = iip.getLastINode();
+    final INodeDirectory dir;
+    if (inode instanceof INodeDirectory) {
+      dir = INodeDirectory.valueOf(inode, path);
+    } else {
+      dir = INodeDirectory.valueOf(iip.getINode(-2), iip.getParentPath());
+    }
     if (dir.isSnapshottable()) {
       return dir;
-    } else {
-      for (INodeDirectory snapRoot : this.snapshottables.values()) {
-        if (dir.isAncestorDirectory(snapRoot)) {
-          return snapRoot;
-        }
+    }
+    for (INodeDirectory snapRoot : this.snapshottables.values()) {
+      if (dir.isAncestorDirectory(snapRoot)) {
+        return snapRoot;
       }
-      throw new SnapshotException("Directory is neither snapshottable nor" +
-          " under a snap root!");
     }
+    return null;
   }
 
   public boolean isDescendantOfSnapshotRoot(INodeDirectory dir) {
@@ -641,7 +655,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
     // All the check for path has been included in the valueOf method.
     INodeDirectory snapshotRootDir;
     if (this.snapshotDiffAllowSnapRootDescendant) {
-      snapshotRootDir = getSnapshottableAncestorDir(iip);
+      snapshotRootDir = checkAndGetSnapshottableAncestorDir(iip);
     } else {
       snapshotRootDir = getSnapshottableRoot(iip);
     }
@@ -674,7 +688,7 @@ public class SnapshotManager implements SnapshotStatsMXBean {
     // All the check for path has been included in the valueOf method.
     INodeDirectory snapshotRootDir;
     if (this.snapshotDiffAllowSnapRootDescendant) {
-      snapshotRootDir = getSnapshottableAncestorDir(iip);
+      snapshotRootDir = checkAndGetSnapshottableAncestorDir(iip);
     } else {
       snapshotRootDir = getSnapshottableRoot(iip);
     }
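
The SnapshotManager change splits the lookup into a null-returning
primitive and a throwing wrapper. A hedged sketch of the resulting caller
pattern (variable names are illustrative; sm is a SnapshotManager and iip
an INodesInPath, inside code that already declares throws IOException):

    // where "no snapshot root" is a legal answer, e.g. the new rename check:
    INodeDirectory root = sm.getSnapshottableAncestorDir(iip);
    if (root == null) {
      // path is under no snapshottable directory; the rename check treats
      // two such paths as compatible (null == null)
    }
    // where "no snapshot root" is an error, e.g. snapshot diff reports:
    INodeDirectory mustExist = sm.checkAndGetSnapshottableAncestorDir(iip);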
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithOrderedSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithOrderedSnapshotDeletion.java
new file mode 100644
index 0000000..052610b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithOrderedSnapshotDeletion.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+
+import java.io.IOException;
+
+import static org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager.DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED;
+import static org.apache.hadoop.hdfs.server.namenode.FSNamesystem.DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED;
+/**
+ * Test Rename with ordered snapshot deletion.
+ */
+public class TestRenameWithOrderedSnapshotDeletion {
+  private final Path snapshottableDir
+      = new Path("/" + getClass().getSimpleName());
+  private DistributedFileSystem hdfs;
+  private MiniDFSCluster cluster;
+
+  @Before
+  public void setUp() throws Exception {
+    final Configuration conf = new Configuration();
+    conf.setBoolean(DFS_NAMENODE_SNAPSHOT_DELETION_ORDERED, true);
+    conf.setBoolean(DFS_NAMENODE_SNAPSHOT_TRASHROOT_ENABLED, true);
+
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    cluster.waitActive();
+    hdfs = cluster.getFileSystem();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @Test(timeout = 60000)
+  public void testRename() throws Exception {
+    final Path dir1 = new Path("/dir1");
+    final Path dir2 = new Path("/dir2");
+    final Path sub0 = new Path(snapshottableDir, "sub0");
+    final Path sub1 = new Path(snapshottableDir, "sub1");
+    final Path file1 = new Path(dir1, "file1");
+    final Path file2 = new Path(sub0, "file2");
+    hdfs.mkdirs(snapshottableDir);
+    hdfs.mkdirs(dir1);
+    hdfs.mkdirs(dir2);
+    hdfs.mkdirs(sub0);
+    DFSTestUtil.createFile(hdfs, file1, 0, (short) 1, 0);
+    DFSTestUtil.createFile(hdfs, file2, 0, (short) 1, 0);
+    hdfs.allowSnapshot(snapshottableDir);
+    // rename from non snapshottable dir to snapshottable dir should fail
+    validateRename(file1, sub0);
+    hdfs.createSnapshot(snapshottableDir, "s0");
+    validateRename(file1, sub0);
+    // rename across non snapshottable dirs should work
+    hdfs.rename(file1, dir2);
+    // rename out of the snapshottable root should fail
+    validateRename(file2, dir1);
+    // rename within snapshottable root should work
+    hdfs.rename(file2, snapshottableDir);
+
+    // rename between dirs outside the snapshottable root should work
+    hdfs.rename(dir2, dir1);
+    // rename dir into snapshottable root should fail
+    validateRename(dir1, snapshottableDir);
+    // rename dir out of the snapshottable root should fail
+    validateRename(sub0, dir2);
+    // rename dir within snapshottable root should work
+    hdfs.rename(sub0, sub1);
+  }
+
+  private void validateRename(Path src, Path dest) {
+    try {
+      hdfs.rename(src, dest);
+      Assert.fail("Expected exception not thrown.");
+    } catch (IOException ioe) {
+      Assert.assertTrue(ioe.getMessage().contains("are not under the" +
+          " same snapshot root."));
+    }
+  }
+}
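
Assuming the usual Maven/Surefire setup, the new test can be run on its own
from hadoop-hdfs-project/hadoop-hdfs with:

    mvn test -Dtest=TestRenameWithOrderedSnapshotDeletion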


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org