Posted to common-commits@hadoop.apache.org by ma...@apache.org on 2018/01/08 23:34:05 UTC

hadoop git commit: HDFS-12985. NameNode crashes during restart after an OpenForWrite file present in the Snapshot got deleted.

Repository: hadoop
Updated Branches:
  refs/heads/trunk 2ee0d64ac -> 73ff09b79


HDFS-12985. NameNode crashes during restart after an OpenForWrite file present in the Snapshot got deleted.
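
The hunks below fix the crash by routing every "record a removed
under-construction file" site through one private helper and, crucially,
calling that helper from the snapshot-cleanup branch of cleanSubtree(),
which previously did not record the open file's inode id in
reclaimContext.removedUCFiles. Since removedUCFiles is what the delete
paths use to release leases, the open file's lease could outlive its
inode, and reconciling that stale lease crashed the NameNode on restart.
A condensed sketch of the failing sequence, assuming a running
MiniDFSCluster named "cluster" and its DistributedFileSystem "fs" (names
assumed here for illustration; the new test further down is the
authoritative reproduction):

  Path snapRootDir = new Path("/level_0_A/test");
  Path openFile = new Path(snapRootDir, "hbase.log");

  FSDataOutputStream out = fs.create(openFile); // file is under construction
  out.write(new byte[4096]);
  out.hsync();                                  // persist data, keep stream open

  fs.allowSnapshot(snapRootDir);
  fs.createSnapshot(snapRootDir, "snap_1");     // snapshot captures the open file

  fs.delete(openFile, true);                    // delete the open file...
  fs.deleteSnapshot(snapRootDir, "snap_1");     // ...then its only snapshot

  // Before this fix, the lease for the deleted under-construction file
  // survived, and the restart below crashed the NameNode.
  cluster.restartNameNode();
  cluster.waitActive();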


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73ff09b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73ff09b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73ff09b7

Branch: refs/heads/trunk
Commit: 73ff09b79a5cf9932edc21c58f3a730f7379086b
Parents: 2ee0d64
Author: Manoj Govindassamy <ma...@apache.org>
Authored: Mon Jan 8 15:34:00 2018 -0800
Committer: Manoj Govindassamy <ma...@apache.org>
Committed: Mon Jan 8 15:34:00 2018 -0800

----------------------------------------------------------------------
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 16 ++++---
 .../snapshot/TestOpenFilesWithSnapshot.java     | 45 ++++++++++++++++++++
 2 files changed, 55 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73ff09b7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 906a940..90659f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -727,6 +727,13 @@ public class INodeFile extends INodeWithAdditionalFields
     this.blocks = BlockInfo.EMPTY_ARRAY;
   }
 
+  private void updateRemovedUnderConstructionFiles(
+      ReclaimContext reclaimContext) {
+    if (isUnderConstruction() && reclaimContext.removedUCFiles != null) {
+      reclaimContext.removedUCFiles.add(getId());
+    }
+  }
+
   @Override
   public void cleanSubtree(ReclaimContext reclaimContext,
       final int snapshot, int priorSnapshotId) {
@@ -735,6 +742,7 @@ public class INodeFile extends INodeWithAdditionalFields
       // TODO: avoid calling getStoragePolicyID
       sf.cleanFile(reclaimContext, this, snapshot, priorSnapshotId,
           getStoragePolicyID());
+      updateRemovedUnderConstructionFiles(reclaimContext);
     } else {
       if (snapshot == CURRENT_STATE_ID) {
         if (priorSnapshotId == NO_SNAPSHOT_ID) {
@@ -747,9 +755,7 @@ public class INodeFile extends INodeWithAdditionalFields
           // clean the 0-sized block if the file is UC
           if (uc != null) {
             uc.cleanZeroSizeBlock(this, reclaimContext.collectedBlocks);
-            if (reclaimContext.removedUCFiles != null) {
-              reclaimContext.removedUCFiles.add(getId());
-            }
+            updateRemovedUnderConstructionFiles(reclaimContext);
           }
         }
       }
@@ -768,9 +774,7 @@ public class INodeFile extends INodeWithAdditionalFields
           reclaimContext.collectedBlocks);
       sf.clearDiffs();
     }
-    if (isUnderConstruction() && reclaimContext.removedUCFiles != null) {
-      reclaimContext.removedUCFiles.add(getId());
-    }
+    updateRemovedUnderConstructionFiles(reclaimContext);
   }
 
   public void clearFile(ReclaimContext reclaimContext) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73ff09b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
index 17082a1..38cd5f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
@@ -630,6 +630,51 @@ public class TestOpenFilesWithSnapshot {
   }
 
   /**
+   * Verify that the NameNode can restart properly after an OpenForWrite
+   * file and the only snapshot it was present in were deleted.
+   *
+   * @throws Exception
+   */
+  @Test (timeout = 600000)
+  public void testOpenFileDeletionAndNNRestart() throws Exception {
+    // Construct the directory tree
+    final Path snapRootDir = new Path("/level_0_A/test");
+    final String hbaseFileName = "hbase.log";
+    final String snap1Name = "snap_1";
+
+    // Create a file with a few blocks. Get its output stream
+    // for append.
+    final Path hbaseFile = new Path(snapRootDir, hbaseFileName);
+    createFile(hbaseFile);
+    FSDataOutputStream hbaseOutputStream = fs.append(hbaseFile);
+
+    int newWriteLength = (int) (BLOCKSIZE * 1.5);
+    byte[] buf = new byte[newWriteLength];
+    Random random = new Random();
+    random.nextBytes(buf);
+
+    // Write more data to the file
+    writeToStream(hbaseOutputStream, buf);
+
+    // Take a snapshot while the file is open for write
+    final Path snap1Dir = SnapshotTestHelper.createSnapshot(
+        fs, snapRootDir, snap1Name);
+    LOG.info("Open file status in snap: " +
+        fs.getFileStatus(new Path(snap1Dir, hbaseFileName)));
+
+    // Delete the open file and the snapshot while
+    // its output stream is still open.
+    fs.delete(hbaseFile, true);
+    fs.deleteSnapshot(snapRootDir, snap1Name);
+    Assert.assertFalse(fs.exists(hbaseFile));
+
+    // Verify file existence after the NameNode restart
+    cluster.restartNameNode();
+    cluster.waitActive();
+    Assert.assertFalse(fs.exists(hbaseFile));
+  }
+
+  /**
    * Test client writing to open files are not interrupted when snapshots
    * that captured open files get deleted.
    */


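To run just the new test, a typical Surefire invocation from the
hadoop-hdfs module (the module path shown in the diff headers above)
should work:

  cd hadoop-hdfs-project/hadoop-hdfs
  mvn test -Dtest=TestOpenFilesWithSnapshot#testOpenFileDeletionAndNNRestart
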