Posted to hdfs-commits@hadoop.apache.org by ji...@apache.org on 2014/08/20 20:13:03 UTC

svn commit: r1619192 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java

Author: jing9
Date: Wed Aug 20 18:13:03 2014
New Revision: 1619192

URL: http://svn.apache.org/r1619192
Log:
HDFS-6870. Blocks and INodes could leak for Rename with overwrite flag. Contributed by Yi Liu.

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
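[Editor's context] The leak fixed here is triggered by the overwrite variant of rename, i.e. a rename issued with Options.Rename.OVERWRITE onto an existing destination file. A minimal client-side sketch of the call that exercises this path (a sketch only, not part of the commit: it assumes fs.defaultFS points at a running HDFS cluster, and the /foo/src and /foo/dst paths simply mirror the new test below):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Options.Rename;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  public class RenameOverwriteExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Assumes fs.defaultFS is an hdfs:// URI, so the returned FileSystem
      // is a DistributedFileSystem, which exposes rename(Path, Path, Rename...).
      DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

      Path src = new Path("/foo/src");  // existing source file
      Path dst = new Path("/foo/dst");  // existing destination file to be replaced

      // Rename src onto dst, replacing dst. Before this fix, the blocks and
      // INode of the overwritten dst could remain referenced by the NameNode.
      dfs.rename(src, dst, Rename.OVERWRITE);

      dfs.close();
    }
  }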

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1619192&r1=1619191&r2=1619192&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Aug 20 18:13:03 2014
@@ -523,6 +523,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6868. portmap and nfs3 are documented as hadoop commands instead of hdfs
     (brandonli)
 
+    HDFS-6870. Blocks and INodes could leak for Rename with overwrite flag. (Yi
+    Liu via jing9)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1619192&r1=1619191&r2=1619192&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Wed Aug 20 18:13:03 2014
@@ -644,15 +644,20 @@ public class FSDirectory implements Clos
         tx.updateMtimeAndLease(timestamp);
 
         // Collect the blocks and remove the lease for previous dst
-        long filesDeleted = -1;
+        boolean filesDeleted = false;
         if (removedDst != null) {
           undoRemoveDst = false;
           if (removedNum > 0) {
             BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
             List<INode> removedINodes = new ChunkedArrayList<INode>();
-            filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID,
-                dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes,
-                true).get(Quota.NAMESPACE);
+            if (!removedDst.isInLatestSnapshot(dstIIP.getLatestSnapshotId())) {
+              removedDst.destroyAndCollectBlocks(collectedBlocks, removedINodes);
+              filesDeleted = true;
+            } else {
+              filesDeleted = removedDst.cleanSubtree(Snapshot.CURRENT_STATE_ID,
+                  dstIIP.getLatestSnapshotId(), collectedBlocks, removedINodes,
+                  true).get(Quota.NAMESPACE) >= 0;
+            }
             getFSNamesystem().removePathAndBlocks(src, collectedBlocks,
                 removedINodes, false);
           }
@@ -665,7 +670,7 @@ public class FSDirectory implements Clos
         }
 
         tx.updateQuotasInSourceTree();
-        return filesDeleted >= 0;
+        return filesDeleted;
       }
     } finally {
       if (undoRemoveSrc) {
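
[Editor's note] The core of the change above: when the destination INode replaced by the rename is not referenced by the latest snapshot, it is now destroyed outright via destroyAndCollectBlocks(), so its blocks and INodes are collected and handed to removePathAndBlocks() for removal from the blocks map. Previously cleanSubtree() was used unconditionally, which, per the commit log, could leave the overwritten file's blocks and INodes behind. The method's result also changes from a namespace-quota count (filesDeleted >= 0) to an explicit boolean.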

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java?rev=1619192&r1=1619191&r2=1619192&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRename.java Wed Aug 20 18:13:03 2014
@@ -27,6 +27,9 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.junit.Test;
 
@@ -125,4 +128,45 @@ public class TestDFSRename {
       if (cluster != null) {cluster.shutdown();}
     }
   }
+  
+  /**
+   * Check the blocks of dst file are cleaned after rename with overwrite
+   */
+  @Test(timeout = 120000)
+  public void testRenameWithOverwrite() throws Exception {
+    final short replFactor = 2;
+    final long blockSize = 512;
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).
+        numDataNodes(replFactor).build();
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    try {
+      
+      long fileLen = blockSize*3;
+      String src = "/foo/src";
+      String dst = "/foo/dst";
+      Path srcPath = new Path(src);
+      Path dstPath = new Path(dst);
+      
+      DFSTestUtil.createFile(dfs, srcPath, fileLen, replFactor, 1);
+      DFSTestUtil.createFile(dfs, dstPath, fileLen, replFactor, 1);
+      
+      LocatedBlocks lbs = NameNodeAdapter.getBlockLocations(
+          cluster.getNameNode(), dst, 0, fileLen);
+      BlockManager bm = NameNodeAdapter.getNamesystem(cluster.getNameNode()).
+          getBlockManager();
+      assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().
+          getLocalBlock()) != null);
+      dfs.rename(srcPath, dstPath, Rename.OVERWRITE);
+      assertTrue(bm.getStoredBlock(lbs.getLocatedBlocks().get(0).getBlock().
+          getLocalBlock()) == null);
+    } finally {
+      if (dfs != null) {
+        dfs.close();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
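
[Editor's note] The new test verifies the fix end to end on a MiniDFSCluster: it creates both /foo/src and /foo/dst, looks up one of dst's blocks through NameNodeAdapter.getBlockLocations(), and asserts that BlockManager.getStoredBlock() returns that block before the overwrite rename but returns null afterwards, i.e. the overwritten file's blocks have actually been removed from the blocks map.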