Posted to common-commits@hadoop.apache.org by zj...@apache.org on 2015/03/18 04:33:58 UTC
[06/50] hadoop git commit: HDFS-7903. Cannot recover block after truncate and delete snapshot. Contributed by Plamen Jeliazkov.
HDFS-7903. Cannot recover block after truncate and delete snapshot. Contributed by Plamen Jeliazkov.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6acb7f21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6acb7f21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6acb7f21
Branch: refs/heads/YARN-2928
Commit: 6acb7f2110897264241df44d564db2f85260348f
Parents: d324164
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Fri Mar 13 12:39:01 2015 -0700
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Fri Mar 13 13:12:51 2015 -0700
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../server/namenode/snapshot/FileDiffList.java | 19 +++++++++++--
.../hdfs/server/namenode/TestFileTruncate.java | 30 ++++++++++++++++++++
3 files changed, 49 insertions(+), 3 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6acb7f21/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ac7e096..a149f18 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1148,6 +1148,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7926. NameNode implementation of ClientProtocol.truncate(..) is not
     idempotent (Tsz Wo Nicholas Sze via brandonli)
 
+    HDFS-7903. Cannot recover block after truncate and delete snapshot.
+    (Plamen Jeliazkov via shv)
+
     BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
       HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6acb7f21/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index 0c94554..5c9e121 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot;
 import java.util.Collections;
 import java.util.List;
 
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -125,9 +128,19 @@ public class FileDiffList extends
         continue;
       break;
     }
-    // Collect the remaining blocks of the file
-    while(i < removedBlocks.length) {
-      collectedBlocks.addDeleteBlock(removedBlocks[i++]);
+    // Check if last block is part of truncate recovery
+    BlockInfoContiguous lastBlock = file.getLastBlock();
+    Block dontRemoveBlock = null;
+    if(lastBlock != null && lastBlock.getBlockUCState().equals(
+        HdfsServerConstants.BlockUCState.UNDER_RECOVERY)) {
+      dontRemoveBlock = ((BlockInfoContiguousUnderConstruction) lastBlock)
+          .getTruncateBlock();
+    }
+    // Collect the remaining blocks of the file, ignoring truncate block
+    for(;i < removedBlocks.length; i++) {
+      if(dontRemoveBlock == null || !removedBlocks[i].equals(dontRemoveBlock)) {
+        collectedBlocks.addDeleteBlock(removedBlocks[i]);
+      }
     }
   }
 }
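Why the guard works (context, not part of the patch): while a truncate recovery is in flight, the file's last block is in the UNDER_RECOVERY state, and getTruncateBlock() on the under-construction block returns the block the DataNodes are still shortening. Snapshot-diff cleanup must therefore delete every removed block except that one. Here is a self-contained sketch of the same filtering pattern; the class and names are hypothetical, and block IDs are modeled as plain strings rather than Hadoop's Block type to keep it runnable on its own.

import java.util.ArrayList;
import java.util.List;

// Hedged sketch of the filtering pattern in the patch, not Hadoop source.
public class TruncateBlockFilter {

  // Returns every removed block except the one still referenced by an
  // in-flight truncate recovery (blockUnderRecovery is null when no
  // recovery is running).
  static List<String> blocksToDelete(String[] removedBlocks,
                                     String blockUnderRecovery) {
    List<String> toDelete = new ArrayList<>();
    for (String block : removedBlocks) {
      // Deleting the recovery target would leave the truncate unfinishable.
      if (blockUnderRecovery == null || !block.equals(blockUnderRecovery)) {
        toDelete.add(block);
      }
    }
    return toDelete;
  }

  public static void main(String[] args) {
    String[] removed = {"blk_1001", "blk_1002", "blk_1003"};
    // blk_1003 is the truncate target, so it is kept alive.
    System.out.println(blocksToDelete(removed, "blk_1003"));
    // prints: [blk_1001, blk_1002]
  }
}

When no recovery is running, dontRemoveBlock stays null in the patched code and the loop degenerates to the old behavior of deleting every removed block.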
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6acb7f21/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index 260d8bb..3b6e107 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -178,6 +178,36 @@ public class TestFileTruncate {
     fs.delete(dir, true);
   }
 
+  /** Truncate a snapshotted file, then delete the snapshot. */
+  @Test
+  public void testSnapshotTruncateThenDeleteSnapshot() throws IOException {
+    Path dir = new Path("/testSnapshotTruncateThenDeleteSnapshot");
+    fs.mkdirs(dir);
+    fs.allowSnapshot(dir);
+    final Path p = new Path(dir, "file");
+    final byte[] data = new byte[BLOCK_SIZE];
+    DFSUtil.getRandom().nextBytes(data);
+    writeContents(data, data.length, p);
+    final String snapshot = "s0";
+    fs.createSnapshot(dir, snapshot);
+    Block lastBlock = getLocatedBlocks(p).getLastLocatedBlock()
+        .getBlock().getLocalBlock();
+    final int newLength = data.length - 1;
+    assert newLength % BLOCK_SIZE != 0 :
+        "newLength must not be multiple of BLOCK_SIZE";
+    final boolean isReady = fs.truncate(p, newLength);
+    LOG.info("newLength=" + newLength + ", isReady=" + isReady);
+    assertEquals("File must be closed for truncating at the block boundary",
+        isReady, newLength % BLOCK_SIZE == 0);
+    fs.deleteSnapshot(dir, snapshot);
+    if (!isReady) {
+      checkBlockRecovery(p);
+    }
+    checkFullFile(p, newLength, data);
+    assertBlockNotPresent(lastBlock);
+    fs.delete(dir, true);
+  }
+
   /**
    * Truncate files and then run other operations such as
    * rename, set replication, set permission, etc.