Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2017/12/02 02:38:21 UTC
[08/50] [abbrv] hadoop git commit: HDFS-12638. Delete copy-on-truncate block along with the original block, when deleting a file being truncated. Contributed by Konstantin Shvachko.
HDFS-12638. Delete copy-on-truncate block along with the original block, when deleting a file being truncated. Contributed by Konstantin Shvachko.
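For context: HDFS truncate has two schemes. In-place truncate shrinks the last block of the file directly, while copy-on-truncate allocates a replacement last block and recovers the surviving bytes into it; the copy scheme is used, for example, while a rolling upgrade is in progress, so that the original block stays intact for a possible rollback. Before this change, deleting a file whose copy-on-truncate was still in flight left the extra block behind. A minimal sketch of the client-visible sequence (illustrative only; it mirrors the regression test added below, and the class and helper names are made up):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

public class CopyOnTruncateSketch {
  // Assumes a MiniDFSCluster is already running and p points at a
  // freshly written file.
  static void truncateThenDelete(MiniDFSCluster cluster, Path p)
      throws Exception {
    DistributedFileSystem dfs = cluster.getFileSystem();
    DFSAdmin dfsadmin = new DFSAdmin(cluster.getConfiguration(0));

    // A rolling upgrade is prepared from safe mode; while it is in
    // progress, truncate uses the copy-on-truncate scheme.
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    dfsadmin.run(new String[]{"-rollingUpgrade", "prepare"});
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

    // false: the new length is not in effect yet, because the
    // replacement (copy-on-truncate) block is still being produced.
    boolean isReady = dfs.truncate(p, 2);

    // Deleting the file now must collect both the original block and
    // the copy-on-truncate block; collecting the latter is the fix.
    dfs.delete(p, true);

    dfsadmin.run(new String[]{"-rollingUpgrade", "finalize"});
  }
}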
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60fd0d7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60fd0d7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60fd0d7f
Branch: refs/heads/HDFS-9806
Commit: 60fd0d7fd73198fd610e59d1a4cd007c5fcc7205
Parents: a63d19d
Author: Konstantin V Shvachko <sh...@apache.org>
Authored: Thu Nov 30 18:18:09 2017 -0800
Committer: Konstantin V Shvachko <sh...@apache.org>
Committed: Thu Nov 30 18:18:28 2017 -0800
----------------------------------------------------------------------
.../hadoop/hdfs/server/namenode/INode.java | 14 +++++++
.../hdfs/server/namenode/TestFileTruncate.java | 41 ++++++++++++++++++++
2 files changed, 55 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/60fd0d7f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 34bfe10..1682a30 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -33,9 +33,11 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
@@ -1058,6 +1060,18 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
       assert toDelete != null : "toDelete is null";
       toDelete.delete();
       toDeleteList.add(toDelete);
+      // If the file is being truncated
+      // the copy-on-truncate block should also be collected for deletion
+      BlockUnderConstructionFeature uc = toDelete.getUnderConstructionFeature();
+      if(uc == null) {
+        return;
+      }
+      Block truncateBlock = uc.getTruncateBlock();
+      if(truncateBlock == null || truncateBlock.equals(toDelete)) {
+        return;
+      }
+      assert truncateBlock instanceof BlockInfo : "should be BlockInfo";
+      addDeleteBlock((BlockInfo) truncateBlock);
     }
 
     public void addUpdateReplicationFactor(BlockInfo block, short targetRepl) {
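The two early returns in the new hunk deserve a gloss. A block with no BlockUnderConstructionFeature is not part of an in-flight truncate, so there is nothing extra to collect. When a truncate block is recorded but equals toDelete (Block equality is by block ID), the file was being truncated in place: the recorded entry is a recovery stub for the same block and, as the assert hints, need not be a BlockInfo at all, so it must not reach the cast. Only a distinct recorded block, i.e. the one created by copy-on-truncate, is added to the delete list. A compact restatement of the same control flow (illustrative, not part of the commit):

BlockUnderConstructionFeature uc = toDelete.getUnderConstructionFeature();
if (uc != null) {
  Block truncateBlock = uc.getTruncateBlock();
  // Only a distinct recorded block is a copy-on-truncate block that
  // the delete would otherwise orphan.
  if (truncateBlock != null && !truncateBlock.equals(toDelete)) {
    addDeleteBlock((BlockInfo) truncateBlock);
  }
}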
http://git-wip-us.apache.org/repos/asf/hadoop/blob/60fd0d7f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index d4215e8..51a94e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -1155,6 +1156,46 @@ public class TestFileTruncate {
     fs.delete(parent, true);
   }
 
+  /**
+   * While rolling upgrade is in-progress the test truncates a file
+   * such that copy-on-truncate is triggered, then deletes the file,
+   * and makes sure that no blocks involved in truncate are hanging around.
+   */
+  @Test
+  public void testTruncateWithRollingUpgrade() throws Exception {
+    final DFSAdmin dfsadmin = new DFSAdmin(cluster.getConfiguration(0));
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    //start rolling upgrade
+    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    int status = dfsadmin.run(new String[]{"-rollingUpgrade", "prepare"});
+    assertEquals("could not prepare for rolling upgrade", 0, status);
+    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+    Path dir = new Path("/testTruncateWithRollingUpgrade");
+    fs.mkdirs(dir);
+    final Path p = new Path(dir, "file");
+    final byte[] data = new byte[3];
+    ThreadLocalRandom.current().nextBytes(data);
+    writeContents(data, data.length, p);
+
+    assertEquals("block num should 1", 1,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+
+    final boolean isReady = fs.truncate(p, 2);
+    assertFalse("should be copy-on-truncate", isReady);
+    assertEquals("block num should 2", 2,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+    fs.delete(p, true);
+
+    assertEquals("block num should 0", 0,
+        cluster.getNamesystem().getFSDirectory().getBlockManager()
+            .getTotalBlocks());
+    status = dfsadmin.run(new String[]{"-rollingUpgrade", "finalize"});
+    assertEquals("could not finalize rolling upgrade", 0, status);
+  }
+
   static void writeContents(byte[] contents, int fileLength, Path p)
       throws IOException {
     FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE, REPLICATION,
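The new test reads as a straight regression check on block counts: one block after the initial write, two once truncate has triggered copy-on-truncate (the assertFalse on the truncate return value confirms the copy scheme was taken rather than in-place truncation), and zero after the delete, which only holds with the INode.java change above; without it the copy-on-truncate block would linger in the blocks map. Finalizing the rolling upgrade at the end returns the shared cluster to its normal state for the remaining tests.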