Posted to common-commits@hadoop.apache.org by xk...@apache.org on 2019/08/07 20:47:52 UTC

[hadoop] branch branch-2.9 updated: HDFS-14631. The DirectoryScanner doesn't fix the wrongly placed replica. Contributed by Jinglun.

This is an automated email from the ASF dual-hosted git repository.

xkrogen pushed a commit to branch branch-2.9
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-2.9 by this push:
     new f2bb57a  HDFS-14631. The DirectoryScanner doesn't fix the wrongly placed replica. Contributed by Jinglun.
f2bb57a is described below

commit f2bb57adf4ca061bc080478ed24fa9a4da8b1230
Author: Erik Krogen <ek...@linkedin.com>
AuthorDate: Wed Aug 7 13:46:56 2019 -0700

    HDFS-14631. The DirectoryScanner doesn't fix the wrongly placed replica. Contributed by Jinglun.
    
    (cherry picked from 97600cd9160eef9391ae23c79509e6e8853cab32)
---
 .../hadoop/hdfs/server/datanode/ReplicaInfo.java   | 15 +++--
 .../hdfs/server/datanode/TestDirectoryScanner.java | 68 ++++++++++++++++++++++
 2 files changed, 79 insertions(+), 4 deletions(-)
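
For context on the patch below: a DataNode stores each finalized replica two
directory levels below the finalized/ base, and both "subdirN" components are
derived from bits of the block ID, so a replica's correct directory is fully
determined by its ID. Here is a minimal sketch of that mapping, mirroring
DatanodeUtil.idToBlockDir (the shift/mask constants are copied from the Hadoop
source as best I recall; the class name is mine, for illustration only):

    import java.io.File;

    // Both subdir levels are a pure function of the block ID; a replica
    // found under any other pair of subdirs is "wrongly placed".
    public class IdToBlockDirSketch {
      static File idToBlockDir(File root, long blockId) {
        int d1 = (int) ((blockId >> 16) & 0x1F); // first level: subdir0..subdir31
        int d2 = (int) ((blockId >> 8) & 0x1F);  // second level: subdir0..subdir31
        return new File(root, "subdir" + d1 + File.separator + "subdir" + d2);
      }

      public static void main(String[] args) {
        long blkId = (5L << 16) | (7L << 8); // maps to subdir5/subdir7
        System.out.println(idToBlockDir(new File("/data/current/finalized"), blkId));
      }
    }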

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
index f3f6db1..9a42242 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
@@ -174,7 +174,7 @@ abstract public class ReplicaInfo extends Block
       return;
     }
 
-    ReplicaDirInfo dirInfo = parseBaseDir(dir);
+    ReplicaDirInfo dirInfo = parseBaseDir(dir, getBlockId());
     this.hasSubdirs = dirInfo.hasSubidrs;
     
     synchronized (internedBaseDirs) {
@@ -200,7 +200,7 @@ abstract public class ReplicaInfo extends Block
   }
   
   @VisibleForTesting
-  public static ReplicaDirInfo parseBaseDir(File dir) {
+  public static ReplicaDirInfo parseBaseDir(File dir, long blockId) {
     
     File currentDir = dir;
     boolean hasSubdirs = false;
@@ -208,8 +208,15 @@ abstract public class ReplicaInfo extends Block
       hasSubdirs = true;
       currentDir = currentDir.getParentFile();
     }
-    
-    return new ReplicaDirInfo(currentDir.getAbsolutePath(), hasSubdirs);
+
+    if (hasSubdirs) {
+      // set baseDir to currentDir only if dir matches the id-derived layout (idToBlockDir).
+      File idToBlockDir = DatanodeUtil.idToBlockDir(currentDir, blockId);
+      if (idToBlockDir.equals(dir)) {
+        return new ReplicaDirInfo(currentDir.getAbsolutePath(), true);
+      }
+    }
+    return new ReplicaDirInfo(dir.getAbsolutePath(), false);
   }
 
   /**
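
The net effect of the ReplicaInfo change above, as a simplified,
self-contained re-implementation (a sketch for illustration only; the
authoritative logic is the patched parseBaseDir, and idToBlockDir is as
sketched after the diffstat):

    import java.io.File;

    // Model of the patched rule: strip trailing "subdir" components only
    // when the original dir is exactly the id-derived location.
    public class ParseBaseDirSketch {
      static File idToBlockDir(File root, long blockId) {
        int d1 = (int) ((blockId >> 16) & 0x1F);
        int d2 = (int) ((blockId >> 8) & 0x1F);
        return new File(root, "subdir" + d1 + File.separator + "subdir" + d2);
      }

      /** Returns the base dir for a standard layout, else dir itself. */
      static File parseBaseDir(File dir, long blockId) {
        File current = dir;
        boolean hasSubdirs = false;
        while (current.getName().startsWith("subdir")) { // BLOCK_SUBDIR_PREFIX
          hasSubdirs = true;
          current = current.getParentFile();
        }
        if (hasSubdirs && idToBlockDir(current, blockId).equals(dir)) {
          return current; // standard layout: intern the base, hasSubdirs=true
        }
        return dir;       // anything else (e.g. misplaced replica): keep as-is
      }

      public static void main(String[] args) {
        long blkId = (5L << 16) | (7L << 8); // id-derived dir: subdir5/subdir7
        File base = new File("/data/current/finalized");
        // Matches the layout -> parses back to the base dir.
        System.out.println(parseBaseDir(idToBlockDir(base, blkId), blkId));
        // Misplaced replica -> the actual dir is kept, so file lookups
        // resolve to where the block really is.
        System.out.println(parseBaseDir(new File(base, "subdir0/subdir1"), blkId));
      }
    }

The design point: the old code trusted any trailing subdir pair, re-interned
the base, and later rebuilt the block file path from the ID, so a misplaced
replica resolved to the expected-but-empty directory. The new code keeps the
actual directory unless the path is exactly the id-derived one.
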
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index 655f75e..d4ff643 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -25,6 +25,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.apache.hadoop.hdfs.protocol.Block.BLOCK_FILE_PREFIX;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
@@ -66,6 +67,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.LazyPersistTestCase;
+import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo.ReplicaDirInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.AutoCloseableLock;
@@ -1124,6 +1126,72 @@ public class TestDirectoryScanner {
     }
   }
 
+  private static final String SEP = System.getProperty("file.separator");
+
+  /**
+   * Test parsing LocalReplica. We should be able to find the replica's path
+   * even if the replica's dir doesn't match the idToBlockDir.
+   */
+  @Test(timeout = 3000)
+  public void testLocalReplicaParsing() {
+    String baseDir = GenericTestUtils.getRandomizedTempPath();
+    long blkId = getRandomBlockId();
+    File blockDir = DatanodeUtil.idToBlockDir(new File(baseDir), blkId);
+    String subdir1 = new File(blockDir.getParent()).getName();
+
+    // test parsing a dir with no subdir levels.
+    ReplicaDirInfo info = ReplicaInfo.parseBaseDir(new File(baseDir), blkId);
+    assertEquals(baseDir, info.baseDirPath);
+    assertEquals(false, info.hasSubidrs);
+
+    // test when the path has a single subdir level, not matching idToBlockDir.
+    String pathWithOneSubdir = baseDir + SEP + subdir1;
+    info = ReplicaInfo.parseBaseDir(new File(pathWithOneSubdir), blkId);
+    assertEquals(pathWithOneSubdir, info.baseDirPath);
+    assertEquals(false, info.hasSubidrs);
+
+    // test when the deepest subdir doesn't match the idToBlockDir.
+    String badPath = baseDir + SEP + subdir1 + SEP + "subdir-not-exist";
+    info = ReplicaInfo.parseBaseDir(new File(badPath), blkId);
+    assertEquals(badPath, info.baseDirPath);
+    assertEquals(false, info.hasSubidrs);
+
+    // test when path matches the idToBlockDir.
+    info = ReplicaInfo.parseBaseDir(blockDir, blkId);
+    assertEquals(baseDir, info.baseDirPath);
+    assertEquals(true, info.hasSubidrs);
+  }
+
+  /**
+   * Test whether LocalReplica#setDir() can correct the wrongly
+   * recorded replica location.
+   */
+  @Test(timeout = 3000)
+  public void testLocalReplicaUpdateWithReplica() throws Exception {
+    String baseDir = GenericTestUtils.getRandomizedTempPath();
+    long blkId = getRandomBlockId();
+    File blockDir = DatanodeUtil.idToBlockDir(new File(baseDir), blkId);
+    String subdir2 = blockDir.getName();
+    String subdir1 = new File(blockDir.getParent()).getName();
+    String diskSub = subdir2.equals("subdir0") ? "subdir1" : "subdir0";
+
+    // the block file on disk.
+    File diskBlockDir = new File(baseDir + SEP + subdir1 + SEP + diskSub);
+    File realBlkFile = new File(diskBlockDir, BLOCK_FILE_PREFIX + blkId);
+    // the block file in mem.
+    File memBlockDir = blockDir;
+    ReplicaInfo localReplica =
+        new FinalizedReplica(blkId, 0, 0, null, memBlockDir);
+
+    // DirectoryScanner finds the inconsistent file and tries to make it right.
+    localReplica.setDir(diskBlockDir);
+    assertEquals(realBlkFile, localReplica.getBlockFile());
+  }
+
+  public long getRandomBlockId() {
+    return new Random().nextLong() & Long.MAX_VALUE; // abs(MIN_VALUE) stays negative
+  }
+
   private void writeFile(FileSystem fs, int numFiles) throws IOException {
     final String fileName = "/" + GenericTestUtils.getMethodName();
     final Path filePath = new Path(fileName);


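To spell out the scanner flow the new test exercises: the in-memory record
points at the id-derived directory (memBlockDir) while the file actually sits
in a sibling subdir (diskBlockDir). After setDir(diskBlockDir), the patched
parse keeps the directory that was actually set instead of recomputing one
from the block ID, so getBlockFile() resolves to realBlkFile and the
DirectoryScanner's correction finally sticks; before this change the
assertion would have failed against the expected-but-empty location.
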
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org