Posted to common-commits@hadoop.apache.org by we...@apache.org on 2019/08/21 16:01:25 UTC

[hadoop] branch branch-3.1 updated (3547261 -> 2ce0914)

This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a change to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git.


    from 3547261  YARN-9765. SLS runner crashes when run with metrics turned off. Contributed by Abhishek Modi.
     new c2aeeb0  HDFS-14582. Failed to start DN with ArithmeticException when NULL checksum used. Contributed by Surendra Singh Lilhore.
     new 2ce0914  Revert "HDFS-14476. lock too long when fix inconsistent blocks between disk and in-memory. Contributed by Sean Chow."

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../hdfs/server/datanode/DirectoryScanner.java     | 14 -------
 .../datanode/fsdataset/impl/BlockPoolSlice.java    |  5 +++
 .../org/apache/hadoop/hdfs/TestDFSInputStream.java | 43 ++++++++++++++++++++++
 3 files changed, 48 insertions(+), 14 deletions(-)




[hadoop] 02/02: Revert "HDFS-14476. lock too long when fix inconsistent blocks between disk and in-memory. Contributed by Sean Chow."

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 2ce0914176596378575c340e9c30db80953ed033
Author: Wei-Chiu Chuang <we...@apache.org>
AuthorDate: Wed Aug 21 09:00:59 2019 -0700

    Revert "HDFS-14476. lock too long when fix inconsistent blocks between disk and in-memory. Contributed by Sean Chow."
    
    This reverts commit e978c6c9ed83301fcf8359c35e74ed68045c1a61.
---
 .../hadoop/hdfs/server/datanode/DirectoryScanner.java      | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index 5a2f7c2..ab9743c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -67,7 +67,6 @@ public class DirectoryScanner implements Runnable {
       + " starting at %s with interval of %dms";
   private static final String START_MESSAGE_WITH_THROTTLE = START_MESSAGE
       + " and throttle limit of %dms/s";
-  private static final int RECONCILE_BLOCKS_BATCH_SIZE = 1000;
 
   private final FsDatasetSpi<?> dataset;
   private final ExecutorService reportCompileThreadPool;
@@ -373,11 +372,7 @@ public class DirectoryScanner implements Runnable {
    */
   @VisibleForTesting
   public void reconcile() throws IOException {
-    LOG.debug("reconcile start DirectoryScanning");
     scan();
-    // HDFS-14476: run checkAndUpdate with batch to avoid holding the lock too
-    // long
-    int loopCount = 0;
     for (Entry<String, LinkedList<ScanInfo>> entry : diffs.entrySet()) {
       String bpid = entry.getKey();
       LinkedList<ScanInfo> diff = entry.getValue();
@@ -385,15 +380,6 @@ public class DirectoryScanner implements Runnable {
       for (ScanInfo info : diff) {
         dataset.checkAndUpdate(bpid, info);
       }
-
-      if (loopCount % RECONCILE_BLOCKS_BATCH_SIZE == 0) {
-        try {
-          Thread.sleep(2000);
-        } catch (InterruptedException e) {
-          // do nothing
-        }
-      }
-      loopCount++;
     }
     if (!retainDiffs) clear();
   }
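For readers skimming the revert: the removed lines implemented a simple throttling pattern, sleeping periodically inside a long-running loop so the dataset lock is released between batches. Note from the hunk above that loopCount was incremented once per block-pool entry, after the inner per-block loop, so the 2-second pause appears to have fired once per block pool (and immediately on the first pass, when loopCount == 0) rather than once per batch of blocks. Below is a minimal, self-contained sketch of the per-item form of the pattern; all names are illustrative and this is not Hadoop source.

    import java.util.List;
    import java.util.concurrent.TimeUnit;

    /**
     * Sketch of batched throttling: pause after every BATCH_SIZE items so a
     * shared lock is not held continuously (cf. RECONCILE_BLOCKS_BATCH_SIZE).
     */
    public class BatchedThrottleSketch {
      private static final int BATCH_SIZE = 1000;

      static void processWithThrottle(List<String> items)
          throws InterruptedException {
        int count = 0;
        for (String item : items) {
          process(item); // per-item work, e.g. a checkAndUpdate-style call
          count++;
          // Pause after every full batch, not before the first item.
          if (count % BATCH_SIZE == 0) {
            TimeUnit.SECONDS.sleep(2);
          }
        }
      }

      private static void process(String item) {
        // placeholder for the real per-item work
      }

      public static void main(String[] args) throws InterruptedException {
        processWithThrottle(List.of("block-1", "block-2", "block-3"));
      }
    }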




[hadoop] 01/02: HDFS-14582. Failed to start DN with ArithmeticException when NULL checksum used. Contributed by Surendra Singh Lilhore.

Posted by we...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit c2aeeb01fa6ba2c6518b0647ba0274e70e6d6ca8
Author: Surendra Singh Lilhore <su...@apache.org>
AuthorDate: Tue Aug 20 15:53:53 2019 -0700

    HDFS-14582. Failed to start DN with ArithmeticException when NULL checksum used. Contributed by Surendra Singh Lilhore.
    
    Signed-off-by: Wei-Chiu Chuang <we...@apache.org>
    (cherry picked from commit f95988113da3f06f6d975f99f1ee51d88a793537)
    (cherry picked from commit 03c62c7989f818c49d5afca0ac94c7e72a091066)
---
 .../datanode/fsdataset/impl/BlockPoolSlice.java    |  5 +++
 .../org/apache/hadoop/hdfs/TestDFSInputStream.java | 43 ++++++++++++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
index e725834..2d17ae2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.ShutdownHookManager;
@@ -796,6 +797,10 @@ class BlockPoolSlice {
         // read and handle the common header here. For now just a version
         final DataChecksum checksum = BlockMetadataHeader.readDataChecksum(
             checksumIn, metaFile);
+        if (Type.NULL.equals(checksum.getChecksumType())) {
+          // in case of NULL checksum type consider full file as valid
+          return blockFileLen;
+        }
         int bytesPerChecksum = checksum.getBytesPerChecksum();
         int checksumSize = checksum.getChecksumSize();
         long numChunks = Math.min(
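For context on the fix: DataChecksum.Type.NULL has a checksum size of 0, and the chunk-count arithmetic that begins just below the added lines (truncated in the hunk) divides by these values, so with a NULL checksum the DataNode hit an integer division by zero — the ArithmeticException in the subject. A minimal sketch of the failure mode and the guard; the names are illustrative and the real method also accounts for the metadata header length:

    /**
     * Sketch (not the Hadoop source) of why the early return is needed:
     * a NULL checksum has checksumSize == 0, so computing the number of
     * checksum chunks would divide by zero during DataNode startup.
     */
    public class NullChecksumGuardSketch {
      static long validLength(long blockFileLen, long metaFileLen,
                              int bytesPerChecksum, int checksumSize) {
        if (checksumSize == 0) {
          // NULL checksum: nothing to verify, treat the whole file as valid.
          return blockFileLen;
        }
        long numChunks = Math.min(
            (blockFileLen + bytesPerChecksum - 1) / bytesPerChecksum,
            metaFileLen / checksumSize); // divide-by-zero without the guard
        return Math.min(blockFileLen, numChunks * (long) bytesPerChecksum);
      }

      public static void main(String[] args) {
        System.out.println(validLength(1024, 8, 512, 4));  // 1024 (CRC-style)
        System.out.println(validLength(1024, 0, 512, 0));  // 1024 (NULL checksum)
      }
    }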
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
index eb4f124..0d322da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSInputStream.java
@@ -20,10 +20,13 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 import static org.hamcrest.CoreMatchers.equalTo;
 
 import java.io.File;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
@@ -31,6 +34,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -176,4 +180,43 @@ public class TestDFSInputStream {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testNullCheckSumWhenDNRestarted()
+      throws IOException, InterruptedException {
+    Configuration conf = new Configuration();
+    conf.set(HdfsClientConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
+        .build();
+    cluster.waitActive();
+    try {
+      DistributedFileSystem fs = cluster.getFileSystem();
+
+      int chunkSize = 512;
+      Random r = new Random(12345L);
+      byte[] data = new byte[chunkSize];
+      r.nextBytes(data);
+
+      Path file = new Path("/testfile");
+      try (FSDataOutputStream fout = fs.create(file)) {
+        fout.write(data);
+        fout.hflush();
+        cluster.restartDataNode(0, true, true);
+      }
+
+      // wait for block to load
+      Thread.sleep(1000);
+
+      // fetch live DN
+      final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+      cluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().fetchDatanodes(live, null, false);
+      assertTrue("DN start should be success and live dn should be 2",
+          live.size() == 2);
+      assertTrue("File size should be " + chunkSize,
+          fs.getFileStatus(file).getLen() == chunkSize);
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
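A side note on the test: the fixed Thread.sleep(1000) gives the restarted DataNode time to re-register with the NameNode before the liveness assertion. A common, more robust alternative is to poll for the condition with a timeout. A small self-contained sketch in plain Java (illustrative, not a Hadoop test utility):

    import java.util.function.BooleanSupplier;

    final class WaitUtil {
      /** Poll a condition until it holds or the timeout expires, instead of
       *  sleeping for a fixed interval. */
      static void waitFor(BooleanSupplier condition, long timeoutMs)
          throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new IllegalStateException(
                "condition not met within " + timeoutMs + "ms");
          }
          Thread.sleep(100); // re-check periodically
        }
      }
    }

In the test above, the polled condition would be that the DatanodeManager reports two live datanodes.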


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org