Posted to hdfs-commits@hadoop.apache.org by ji...@apache.org on 2013/07/12 02:27:50 UTC

svn commit: r1502403 - in /hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/ src/test/java/org/apache/hadoop/hdfs/ser...

Author: jing9
Date: Fri Jul 12 00:27:50 2013
New Revision: 1502403

URL: http://svn.apache.org/r1502403
Log:
HDFS-4978. Merge r1502402 from branch-2.

Modified:
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
    hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1502403&r1=1502402&r2=1502403&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Jul 12 00:27:50 2013
@@ -958,6 +958,8 @@ Release 2.1.0-beta - 2013-07-02
     HDFS-4797. BlockScanInfo does not override equals(..) and hashCode()
     consistently.  (szetszwo)
 
+    HDFS-4978. Make disallowSnapshot idempotent. (jing9)
+
 Release 2.0.5-alpha - 06/06/2013
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java?rev=1502403&r1=1502402&r2=1502403&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java Fri Jul 12 00:27:50 2013
@@ -1041,6 +1041,7 @@ public interface ClientProtocol {
    * @param snapshotRoot the directory to be snapped
    * @throws IOException on error
    */
+  @Idempotent
   public void allowSnapshot(String snapshotRoot)
       throws IOException;
     
@@ -1049,6 +1050,7 @@ public interface ClientProtocol {
    * @param snapshotRoot the directory to disallow snapshot
    * @throws IOException on error
    */
+  @Idempotent
   public void disallowSnapshot(String snapshotRoot)
       throws IOException;
   
@@ -1067,6 +1069,7 @@ public interface ClientProtocol {
    * @return The difference report represented as a {@link SnapshotDiffReport}.
    * @throws IOException on error
    */
+  @Idempotent
   public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
       String fromSnapshot, String toSnapshot) throws IOException;
 }
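
The @Idempotent annotation marks these RPCs as safe for the client-side retry
machinery to re-issue, e.g. after a timeout or an HA failover, because repeating
the call leaves the namespace in the same state. A minimal client-side sketch of
the resulting behavior, assuming a running cluster and an existing
DistributedFileSystem handle (the class and method names below are illustrative,
not part of this commit):

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    class SnapshotFlagExample {
      /** Illustrative only: repeated calls are now no-ops, not errors. */
      static void toggleSnapshottable(DistributedFileSystem hdfs, Path dir)
          throws IOException {
        hdfs.allowSnapshot(dir);     // make dir snapshottable
        hdfs.allowSnapshot(dir);     // second call succeeds with no effect
        hdfs.disallowSnapshot(dir);  // back to non-snapshottable
        hdfs.disallowSnapshot(dir);  // previously could throw SnapshotException,
                                     // now a no-op
      }
    }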

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java?rev=1502403&r1=1502402&r2=1502403&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java Fri Jul 12 00:27:50 2013
@@ -38,8 +38,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SnapshotDiffInfo;
 
-import com.google.common.base.Preconditions;
-
 /**
  * Manage snapshottable directories and their snapshots.
  * 
@@ -128,8 +126,7 @@ public class SnapshotManager implements 
 
   /** Remove the given snapshottable directory from {@link #snapshottables}. */
   private void removeSnapshottable(INodeDirectorySnapshottable s) {
-    final INodeDirectorySnapshottable removed = snapshottables.remove(s.getId());
-    Preconditions.checkState(s == removed);
+    snapshottables.remove(s.getId());
   }
   
   /** Remove snapshottable directories from {@link #snapshottables} */
@@ -148,17 +145,18 @@ public class SnapshotManager implements 
    */
   public void resetSnapshottable(final String path) throws IOException {
     final INodesInPath iip = fsdir.getINodesInPath4Write(path);
-    final INodeDirectorySnapshottable s = INodeDirectorySnapshottable.valueOf(
-        iip.getLastINode(), path);
+    final INodeDirectory d = INodeDirectory.valueOf(iip.getLastINode(), path);
+    if (!d.isSnapshottable()) {
+      // the directory is already non-snapshottable
+      return;
+    }
+    final INodeDirectorySnapshottable s = (INodeDirectorySnapshottable) d;
     if (s.getNumSnapshots() > 0) {
       throw new SnapshotException("The directory " + path + " has snapshot(s). "
           + "Please redo the operation after removing all the snapshots.");
     }
 
     if (s == fsdir.getRoot()) {
-      if (s.getSnapshotQuota() == 0) {
-        throw new SnapshotException("Root is not a snapshottable directory");
-      }
       s.setSnapshotQuota(0); 
     } else {
       s.replaceSelf(iip.getLatestSnapshot(), fsdir.getINodeMap());
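
The key change above is the early return: resetSnapshottable() now treats a
directory that is already non-snapshottable as a successful no-op instead of
failing inside INodeDirectorySnapshottable.valueOf(), and for the root it simply
resets the snapshot quota to 0 rather than throwing. The same
make-it-idempotent pattern in a self-contained sketch (an illustrative registry
class, not the actual SnapshotManager code):

    import java.util.HashMap;
    import java.util.Map;

    /** Illustrative registry loosely mirroring SnapshotManager#snapshottables. */
    class SnapshottableRegistry {
      private final Map<Long, String> snapshottables =
          new HashMap<Long, String>();

      void allow(long inodeId, String path) {
        // putting the same entry twice is harmless, so allow() is idempotent
        snapshottables.put(inodeId, path);
      }

      void disallow(long inodeId) {
        if (!snapshottables.containsKey(inodeId)) {
          return;  // already non-snapshottable: nothing to do, no exception
        }
        snapshottables.remove(inodeId);
      }
    }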

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java?rev=1502403&r1=1502402&r2=1502403&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java Fri Jul 12 00:27:50 2013
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.Random;
@@ -41,7 +40,6 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -132,13 +130,6 @@ public class TestNestedSnapshots {
     print("delete snapshot " + rootSnapshot);
     hdfs.disallowSnapshot(rootPath);
     print("disallow snapshot " + rootStr);
-    try {
-      hdfs.disallowSnapshot(rootPath);
-      fail("Expect snapshot exception when disallowing snapshot on root again");
-    } catch (SnapshotException e) {
-      GenericTestUtils.assertExceptionContains(
-          "Root is not a snapshottable directory", e);
-    }
     
     //change foo to non-snapshottable
     hdfs.deleteSnapshot(foo, s1name);
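
The removed block asserted that a second disallowSnapshot on the root fails with
"Root is not a snapshottable directory". With the SnapshotManager change above
that call is now a no-op (the root stays snapshottable, with its snapshot quota
reset to 0), so the negative test is dropped; the positive coverage moves to
testAllowAndDisallowSnapshot in TestSnapshot.java below. Roughly, the new
expectation in this spot would be (sketch, reusing the surrounding test's hdfs
and rootPath):

    // after the first disallowSnapshot(rootPath) call above:
    hdfs.disallowSnapshot(rootPath);   // second call: no SnapshotException
    // rootPath is still snapshottable, but with a snapshot quota of 0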

Modified: hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java?rev=1502403&r1=1502402&r2=1502403&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java (original)
+++ hadoop/common/branches/branch-2.1-beta/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java Fri Jul 12 00:27:50 2013
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node;
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer;
@@ -374,6 +376,71 @@ public class TestSnapshot {
           "Directory is not a snapshottable directory: " + dir, e);
     }
   }
+  
+  /**
+   * Test multiple calls of allowSnapshot and disallowSnapshot, to make sure 
+   * they are idempotent
+   */
+  @Test
+  public void testAllowAndDisallowSnapshot() throws Exception {
+    final Path dir = new Path("/dir");
+    final Path file0 = new Path(dir, "file0");
+    final Path file1 = new Path(dir, "file1");
+    DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
+    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
+    
+    INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertFalse(dirNode.isSnapshottable());
+    
+    hdfs.allowSnapshot(dir);
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertTrue(dirNode.isSnapshottable());
+    // call allowSnapshot again
+    hdfs.allowSnapshot(dir);
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertTrue(dirNode.isSnapshottable());
+    
+    // disallowSnapshot on dir
+    hdfs.disallowSnapshot(dir);
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertFalse(dirNode.isSnapshottable());
+    // do it again
+    hdfs.disallowSnapshot(dir);
+    dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
+    assertFalse(dirNode.isSnapshottable());
+    
+    // same process on root
+    
+    final Path root = new Path("/");
+    INodeDirectory rootNode = fsdir.getINode4Write(root.toString())
+        .asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    // root is snapshottable dir, but with 0 snapshot quota
+    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    
+    hdfs.allowSnapshot(root);
+    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    assertEquals(INodeDirectorySnapshottable.SNAPSHOT_LIMIT,
+        ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    // call allowSnapshot again
+    hdfs.allowSnapshot(root);
+    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    assertEquals(INodeDirectorySnapshottable.SNAPSHOT_LIMIT,
+        ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    
+    // disallowSnapshot on root
+    hdfs.disallowSnapshot(root);
+    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+    // do it again
+    hdfs.disallowSnapshot(root);
+    rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
+    assertTrue(rootNode.isSnapshottable());
+    assertEquals(0, ((INodeDirectorySnapshottable) rootNode).getSnapshotQuota());
+  }
 
   /**
    * Prepare a list of modifications. A modification may be a file creation,