Posted to common-commits@hadoop.apache.org by ki...@apache.org on 2015/02/03 21:46:01 UTC
hadoop git commit: HDFS-7707. Edit log corruption due to delayed block removal again. Contributed by Yongjun Zhang
Repository: hadoop
Updated Branches:
refs/heads/trunk 21d80b3dd -> 843806d03
HDFS-7707. Edit log corruption due to delayed block removal again. Contributed by Yongjun Zhang
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/843806d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/843806d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/843806d0
Branch: refs/heads/trunk
Commit: 843806d03ab1a24f191782f42eb817505228eb9f
Parents: 21d80b3
Author: Kihwal Lee <ki...@apache.org>
Authored: Tue Feb 3 14:45:15 2015 -0600
Committer: Kihwal Lee <ki...@apache.org>
Committed: Tue Feb 3 14:45:15 2015 -0600
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++
.../hdfs/server/namenode/FSNamesystem.java | 14 +++++++--
.../TestCommitBlockSynchronization.java | 4 ++-
.../hdfs/server/namenode/TestDeleteRace.java | 32 +++++++++++++++-----
4 files changed, 43 insertions(+), 10 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/843806d0/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index d6283b9..7a0fdb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -830,6 +830,9 @@ Release 2.7.0 - UNRELEASED
HDFS-6651. Deletion failure can leak inodes permanently.
(Jing Zhao via wheat9)
+ HDFS-7707. Edit log corruption due to delayed block removal again.
+ (Yongjun Zhang via kihwal)
+
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/843806d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ebdec1b..e5ecb53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -227,6 +227,7 @@ import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
import org.apache.hadoop.hdfs.server.namenode.ha.StandbyCheckpointer;
import org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotManager;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
@@ -6012,13 +6013,22 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
INode tmpChild = file;
INodeDirectory tmpParent = file.getParent();
while (true) {
- if (tmpParent == null ||
- tmpParent.searchChildren(tmpChild.getLocalNameBytes()) < 0) {
+ if (tmpParent == null) {
return true;
}
+
+ INode childINode = tmpParent.getChild(tmpChild.getLocalNameBytes(),
+ Snapshot.CURRENT_STATE_ID);
+ if (childINode == null || !childINode.equals(tmpChild)) {
+ // a newly created INode with the same name as an already deleted one
+ // would be a different INode than the deleted one
+ return true;
+ }
+
if (tmpParent.isRoot()) {
break;
}
+
tmpChild = tmpParent;
tmpParent = tmpParent.getParent();
}
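Note on the hunk above: the old code only asked whether the parent still had some child with the matching name (searchChildren), so deleting a directory and immediately recreating one with the same name could make a deleted file look live, and a late commitBlockSynchronization would then write an edit for it, corrupting the edit log. The patch instead fetches the child by name and verifies it is the same INode that was walked up from. Below is a minimal, self-contained sketch of that identity check; the Node class is hypothetical and stands in for the real INode/INodeDirectory types (the real code compares with equals() rather than ==):

import java.util.HashMap;
import java.util.Map;

class Node {
    final String name;
    Node parent;
    final Map<String, Node> children = new HashMap<>();

    Node(String name) { this.name = name; }

    Node addChild(Node child) {
        children.put(child.name, child);
        child.parent = this;
        return child;
    }

    void removeChild(Node child) { children.remove(child.name); }
}

public class IsDeletedSketch {
    // Returns true if 'file' is no longer reachable from 'root'.
    // Key point from HDFS-7707: compare the child found by name against
    // the node we walked up from, so a recreated node with the same name
    // does not mask the deletion of the original.
    static boolean isDeleted(Node root, Node file) {
        Node child = file;
        Node parent = file.parent;
        while (true) {
            if (parent == null) {
                return true; // detached from the tree entirely
            }
            Node found = parent.children.get(child.name);
            if (found == null || found != child) {
                // same name, different node => the original was deleted
                return true;
            }
            if (parent == root) {
                return false; // every link up to the root is intact
            }
            child = parent;
            parent = parent.parent;
        }
    }

    public static void main(String[] args) {
        Node root = new Node("/");
        Node dir = root.addChild(new Node("testdir"));
        Node file = dir.addChild(new Node("test-file"));
        System.out.println(isDeleted(root, file)); // false

        // Delete the directory, then recreate one with the same name.
        root.removeChild(dir);
        root.addChild(new Node("testdir"));

        // A name-only check would find "testdir" under root and wrongly
        // report the file as live; the identity check catches the swap.
        System.out.println(isDeleted(root, file)); // true
    }
}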
http://git-wip-us.apache.org/repos/asf/hadoop/blob/843806d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
index eae65cc..d88227a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
@@ -54,7 +54,9 @@ public class TestCommitBlockSynchronization {
// set file's parent as root and put the file to inodeMap, so
// FSNamesystem's isFileDeleted() method will return false on this file
if (file.getParent() == null) {
- INodeDirectory parent = mock(INodeDirectory.class);
+ INodeDirectory mparent = mock(INodeDirectory.class);
+ INodeDirectory parent = new INodeDirectory(mparent.getId(), new byte[0],
+ mparent.getPermissionStatus(), mparent.getAccessTime());
parent.setLocalName(new byte[0]);
parent.addChild(file);
file.setParent(parent);
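Note on the hunk above: the test previously used a bare Mockito mock as the parent, which was fine while isFileDeleted() only called searchChildren(). Once the fix made it call getChild(), the unstubbed mock returned null and the file always looked deleted, so the test now builds a real INodeDirectory (seeded from the mock's default id, permissions, and access time). A hypothetical illustration of that Mockito default-answer pitfall, using a made-up Directory interface rather than the real INodeDirectory:

import static org.mockito.Mockito.mock;
import java.util.HashMap;
import java.util.Map;

interface Directory {
    Object getChild(String name);
}

class RealDirectory implements Directory {
    private final Map<String, Object> children = new HashMap<>();
    void addChild(String name, Object node) { children.put(name, node); }
    @Override public Object getChild(String name) { return children.get(name); }
}

public class MockPitfall {
    public static void main(String[] args) {
        // Unstubbed methods on a mock return null for object types,
        // so any child lookup through a mocked parent "fails".
        Directory mocked = mock(Directory.class);
        System.out.println(mocked.getChild("test-file")); // null

        // A real directory actually tracks its children.
        RealDirectory real = new RealDirectory();
        real.addChild("test-file", new Object());
        System.out.println(real.getChild("test-file") != null); // true
    }
}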
http://git-wip-us.apache.org/repos/asf/hadoop/blob/843806d0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
index 267821f..7d4eb31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.io.FileNotFoundException;
+import java.util.AbstractMap;
+import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -226,10 +228,19 @@ public class TestDeleteRace {
private void testDeleteAndCommitBlockSynchronizationRace(boolean hasSnapshot)
throws Exception {
LOG.info("Start testing, hasSnapshot: " + hasSnapshot);
- final String testPaths[] = {
- "/test-file",
- "/testdir/testdir1/test-file"
- };
+ ArrayList<AbstractMap.SimpleImmutableEntry<String, Boolean>> testList =
+ new ArrayList<AbstractMap.SimpleImmutableEntry<String, Boolean>> ();
+ testList.add(
+ new AbstractMap.SimpleImmutableEntry<String, Boolean>("/test-file", false));
+ testList.add(
+ new AbstractMap.SimpleImmutableEntry<String, Boolean>("/test-file1", true));
+ testList.add(
+ new AbstractMap.SimpleImmutableEntry<String, Boolean>(
+ "/testdir/testdir1/test-file", false));
+ testList.add(
+ new AbstractMap.SimpleImmutableEntry<String, Boolean>(
+ "/testdir/testdir1/test-file1", true));
+
final Path rootPath = new Path("/");
final Configuration conf = new Configuration();
// Disable permissions so that another user can recover the lease.
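Note on the hunk above: instead of a flat array of paths, each test case is now a (path, mkSameDir) pair, where the boolean says whether to recreate the deleted ancestor directory during the race window. The test uses java.util.AbstractMap.SimpleImmutableEntry as a stock pair type; a minimal sketch of that pattern:

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.ArrayList;
import java.util.List;

public class PairListSketch {
    public static void main(String[] args) {
        // Each entry pairs a test path with whether to recreate the
        // deleted ancestor directory under the same name.
        List<SimpleImmutableEntry<String, Boolean>> cases = new ArrayList<>();
        cases.add(new SimpleImmutableEntry<>("/test-file", false));
        cases.add(new SimpleImmutableEntry<>("/testdir/testdir1/test-file1", true));

        for (SimpleImmutableEntry<String, Boolean> c : cases) {
            System.out.println(c.getKey() + " mkSameDir=" + c.getValue());
        }
    }
}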
@@ -247,8 +258,11 @@ public class TestDeleteRace {
DistributedFileSystem fs = cluster.getFileSystem();
int stId = 0;
- for (String testPath : testPaths) {
- LOG.info("test on " + testPath + " snapshot: " + hasSnapshot);
+ for(AbstractMap.SimpleImmutableEntry<String, Boolean> stest : testList) {
+ String testPath = stest.getKey();
+ Boolean mkSameDir = stest.getValue();
+ LOG.info("test on " + testPath + " mkSameDir: " + mkSameDir
+ + " snapshot: " + hasSnapshot);
Path fPath = new Path(testPath);
//find grandest non-root parent
Path grandestNonRootParent = fPath;
@@ -304,7 +318,11 @@ public class TestDeleteRace {
LOG.info("Deleting recursively " + grandestNonRootParent);
fs.delete(grandestNonRootParent, true);
-
+ if (mkSameDir && !grandestNonRootParent.toString().equals(testPath)) {
+ LOG.info("Recreate dir " + grandestNonRootParent + " testpath: "
+ + testPath);
+ fs.mkdirs(grandestNonRootParent);
+ }
delayer.proceed();
LOG.info("Now wait for result");
delayer.waitForResult();
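Note on the hunk above: the test holds the NameNode's delayed path at a chosen point (via the delayer), deletes the ancestor directory, optionally recreates it under the same name, and only then lets the held operation proceed, which is exactly the window in which the old name-only check went wrong. The snippet below is a minimal latch-based stand-in for that DelayAnswer-style coordination, not the real GenericTestUtils helper:

import java.util.concurrent.CountDownLatch;

public class DelayerSketch {
    static final CountDownLatch reached = new CountDownLatch(1);
    static final CountDownLatch mayProceed = new CountDownLatch(1);

    // Stands in for the delayed commitBlockSynchronization path.
    static void delayedOperation() throws InterruptedException {
        reached.countDown();   // signal: we are inside the race window
        mayProceed.await();    // block until the test calls proceed()
        System.out.println("operation resumed after delete/recreate");
    }

    public static void main(String[] args) throws Exception {
        Thread worker = new Thread(() -> {
            try { delayedOperation(); } catch (InterruptedException ignored) {}
        });
        worker.start();

        reached.await();        // wait until the worker hits the window
        System.out.println("delete and recreate the directory here");
        mayProceed.countDown(); // analogous to delayer.proceed()
        worker.join();          // analogous to delayer.waitForResult()
    }
}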