You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by we...@apache.org on 2019/06/21 04:11:03 UTC
[hadoop] branch branch-2 updated: HDFS-14303. check block directory
logic not correct when there is only meta file,
print no meaning warn log. Contributed by qiang Liu.
This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch branch-2
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-2 by this push:
new bf63475 HDFS-14303. check block directory logic not correct when there is only meta file, print no meaning warn log. Contributed by qiang Liu.
bf63475 is described below
commit bf63475e7a72ff7346226a7dc8dcb4d6fc534163
Author: Wei-Chiu Chuang <we...@apache.org>
AuthorDate: Thu Jun 20 21:06:06 2019 -0700
HDFS-14303. check block directory logic not correct when there is only meta file, print no meaning warn log. Contributed by qiang Liu.
---
.../hdfs/server/datanode/DirectoryScanner.java | 2 +-
.../hdfs/server/datanode/TestDirectoryScanner.java | 69 ++++++++++++++++++++++
2 files changed, 70 insertions(+), 1 deletion(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 72888ee..e8e8d0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -881,7 +881,7 @@ public class DirectoryScanner implements Runnable {
if (!Block.isBlockFilename(file)) {
if (isBlockMetaFile(Block.BLOCK_FILE_PREFIX, file.getName())) {
long blockId = Block.getBlockId(file.getName());
- verifyFileLocation(file.getParentFile(), bpFinalizedDir,
+ verifyFileLocation(file, bpFinalizedDir,
blockId);
report.add(new ScanInfo(blockId, null, file, vol));
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
index fdcee6c..655f75e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
@@ -26,6 +26,7 @@ import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
+import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
@@ -69,6 +70,9 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.AutoCloseableLock;
import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
+import org.apache.log4j.SimpleLayout;
+import org.apache.log4j.WriterAppender;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
@@ -377,6 +381,71 @@ public class TestDirectoryScanner {
}
}
+ /**
+ * Test that scanning a directory containing only a meta file does NOT generate the wrong-folder-structure warn log.
+ */
+ @Test(timeout=600000)
+ public void testScanDirectoryStructureWarn() throws Exception {
+
+ //add a logger stream to check what has been printed to the log
+ ByteArrayOutputStream loggerStream = new ByteArrayOutputStream();
+ org.apache.log4j.Logger rootLogger =
+ org.apache.log4j.Logger.getRootLogger();
+ rootLogger.setLevel(Level.INFO);
+ WriterAppender writerAppender =
+ new WriterAppender(new SimpleLayout(), loggerStream);
+ rootLogger.addAppender(writerAppender);
+
+ cluster = new MiniDFSCluster
+ .Builder(CONF)
+ .storageTypes(new StorageType[] {
+ StorageType.RAM_DISK, StorageType.DEFAULT })
+ .numDataNodes(1)
+ .build();
+ try {
+ cluster.waitActive();
+ bpid = cluster.getNamesystem().getBlockPoolId();
+ fds = DataNodeTestUtils.getFSDataset(cluster.getDataNodes().get(0));
+ client = cluster.getFileSystem().getClient();
+ DataNode dataNode = cluster.getDataNodes().get(0);
+ scanner = new DirectoryScanner(dataNode, fds, CONF);
+ scanner.setRetainDiffs(true);
+ FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
+
+ // Create a file on RAM_DISK
+ createFile(GenericTestUtils.getMethodName(), BLOCK_LENGTH, true);
+
+ // Ensure no difference between volumeMap and disk.
+ scan(1, 0, 0, 0, 0, 0);
+
+ //delete the block file, leave the meta file alone
+ deleteBlockFile();
+
+ //scan to ensure the warn log is not printed
+ scan(1, 1, 0, 1, 0, 0, 0);
+
+ //ensure the warn log does not appear and the missing-block log does appear
+ String logContent = new String(loggerStream.toByteArray());
+ String missingBlockWarn = "Deleted a metadata file" +
+ " for the deleted block";
+ String dirStructureWarnLog = " found in invalid directory." +
+ " Expected directory: ";
+ assertFalse("directory check print meaningless warning message",
+ logContent.contains(dirStructureWarnLog));
+ assertTrue("missing block warn log not appear",
+ logContent.contains(missingBlockWarn));
+ LOG.info("check pass");
+
+ } finally {
+ if (scanner != null) {
+ scanner.shutdown();
+ scanner = null;
+ }
+ cluster.shutdown();
+ cluster = null;
+ }
+ }
+
@Test (timeout=300000)
public void testDeleteBlockOnTransientStorage() throws Exception {
cluster = new MiniDFSCluster
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org