Posted to common-commits@hadoop.apache.org by so...@apache.org on 2020/09/30 08:49:16 UTC

[hadoop] branch branch-3.1 updated: HDFS-15415. Reduce locking in Datanode DirectoryScanner. Contributed by Stephen O'Donnell

This is an automated email from the ASF dual-hosted git repository.

sodonnell pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new 4a8b59d  HDFS-15415. Reduce locking in Datanode DirectoryScanner. Contributed by Stephen O'Donnell
4a8b59d is described below

commit 4a8b59df595e0ddf55cb14904107ca4e71e52661
Author: S O'Donnell <so...@cloudera.com>
AuthorDate: Wed Sep 30 09:39:34 2020 +0100

    HDFS-15415. Reduce locking in Datanode DirectoryScanner. Contributed by Stephen O'Donnell
---
 .../hdfs/server/datanode/DirectoryScanner.java     | 143 ++++++++++-----------
 1 file changed, 70 insertions(+), 73 deletions(-)
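
For context on where the locking reduction comes from: the hunk below removes
the AutoCloseableLock that previously wrapped the entire reconcile loop, and
instead relies on dataset.getSortedFinalizedBlocks(bpid) returning a snapshot
that the scanner can walk without holding the dataset lock. A minimal sketch
of that copy-under-lock pattern (the class and field names here are
illustrative, not the actual FsDatasetImpl implementation):

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.List;
    import java.util.concurrent.locks.ReentrantLock;

    /** Illustrative only: copy under a short lock so callers iterate lock-free. */
    class ReplicaSnapshotExample {
      private final ReentrantLock datasetLock = new ReentrantLock();
      private final List<Long> finalizedBlockIds = new ArrayList<>();

      /**
       * Hold the lock only long enough to copy and sort the replica list.
       * The caller's long-running scan then never blocks writers.
       */
      List<Long> getSortedFinalizedBlocks() {
        datasetLock.lock();
        try {
          List<Long> snapshot = new ArrayList<>(finalizedBlockIds);
          Collections.sort(snapshot);
          return snapshot;
        } finally {
          datasetLock.unlock();
        }
      }
    }

The trade-off is that the scan works against a slightly stale view of the
block map, which the reconcile logic below tolerates by re-checking
conditions such as dataset.isDeletingBlock() before recording a difference.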

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
index dc941a5..4e0d247 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
@@ -42,7 +42,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.AutoCloseableLock;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -405,88 +404,86 @@ public class DirectoryScanner implements Runnable {
     clear();
     Map<String, ScanInfo[]> diskReport = getDiskReport();
 
-    // Hold FSDataset lock to prevent further changes to the block map
-    try(AutoCloseableLock lock = dataset.acquireDatasetLock()) {
-      for (Entry<String, ScanInfo[]> entry : diskReport.entrySet()) {
-        String bpid = entry.getKey();
-        ScanInfo[] blockpoolReport = entry.getValue();
+    for (Entry<String, ScanInfo[]> entry : diskReport.entrySet()) {
+      String bpid = entry.getKey();
+      ScanInfo[] blockpoolReport = entry.getValue();
         
-        Stats statsRecord = new Stats(bpid);
-        stats.put(bpid, statsRecord);
-        LinkedList<ScanInfo> diffRecord = new LinkedList<ScanInfo>();
-        synchronized(diffs) {
-          diffs.put(bpid, diffRecord);
-        }
+      Stats statsRecord = new Stats(bpid);
+      stats.put(bpid, statsRecord);
+      LinkedList<ScanInfo> diffRecord = new LinkedList<ScanInfo>();
+      synchronized(diffs) {
+        diffs.put(bpid, diffRecord);
+      }
         
-        statsRecord.totalBlocks = blockpoolReport.length;
-        final List<ReplicaInfo> bl = dataset.getSortedFinalizedBlocks(bpid);
-
-        int d = 0; // index for blockpoolReport
-        int m = 0; // index for memReport
-        while (m < bl.size() && d < blockpoolReport.length) {
-          ReplicaInfo memBlock = bl.get(m);
-          ScanInfo info = blockpoolReport[d];
-          if (info.getBlockId() < memBlock.getBlockId()) {
-            if (!dataset.isDeletingBlock(bpid, info.getBlockId())) {
-              // Block is missing in memory
-              statsRecord.missingMemoryBlocks++;
-              addDifference(diffRecord, statsRecord, info);
-            }
-            d++;
-            continue;
-          }
-          if (info.getBlockId() > memBlock.getBlockId()) {
-            // Block is missing on the disk
-            addDifference(diffRecord, statsRecord,
-                          memBlock.getBlockId(), info.getVolume());
-            m++;
-            continue;
-          }
-          // Block file and/or metadata file exists on the disk
-          // Block exists in memory
-          if (info.getVolume().getStorageType() != StorageType.PROVIDED &&
-              info.getBlockFile() == null) {
-            // Block metadata file exists and block file is missing
-            addDifference(diffRecord, statsRecord, info);
-          } else if (info.getGenStamp() != memBlock.getGenerationStamp()
-              || info.getBlockLength() != memBlock.getNumBytes()) {
-            // Block metadata file is missing or has wrong generation stamp,
-            // or block file length is different than expected
-            statsRecord.mismatchBlocks++;
-            addDifference(diffRecord, statsRecord, info);
-          } else if (memBlock.compareWith(info) != 0) {
-            // volumeMap record and on-disk files don't match.
-            statsRecord.duplicateBlocks++;
+      statsRecord.totalBlocks = blockpoolReport.length;
+      final List<ReplicaInfo> bl = dataset.getSortedFinalizedBlocks(bpid);
+
+      int d = 0; // index for blockpoolReport
+      int m = 0; // index for memReport
+      while (m < bl.size() && d < blockpoolReport.length) {
+        ReplicaInfo memBlock = bl.get(m);
+        ScanInfo info = blockpoolReport[d];
+        if (info.getBlockId() < memBlock.getBlockId()) {
+          if (!dataset.isDeletingBlock(bpid, info.getBlockId())) {
+            // Block is missing in memory
+            statsRecord.missingMemoryBlocks++;
             addDifference(diffRecord, statsRecord, info);
           }
           d++;
-
-          if (d < blockpoolReport.length) {
-            // There may be multiple on-disk records for the same block, don't increment
-            // the memory record pointer if so.
-            ScanInfo nextInfo = blockpoolReport[Math.min(d, blockpoolReport.length - 1)];
-            if (nextInfo.getBlockId() != info.getBlockId()) {
-              ++m;
-            }
-          } else {
-            ++m;
-          }
+          continue;
         }
-        while (m < bl.size()) {
-          ReplicaInfo current = bl.get(m++);
+        if (info.getBlockId() > memBlock.getBlockId()) {
+          // Block is missing on the disk
           addDifference(diffRecord, statsRecord,
-                        current.getBlockId(), current.getVolume());
+                        memBlock.getBlockId(), info.getVolume());
+          m++;
+          continue;
         }
-        while (d < blockpoolReport.length) {
-          if (!dataset.isDeletingBlock(bpid, blockpoolReport[d].getBlockId())) {
-            statsRecord.missingMemoryBlocks++;
-            addDifference(diffRecord, statsRecord, blockpoolReport[d]);
+        // Block file and/or metadata file exists on the disk
+        // Block exists in memory
+        if (info.getVolume().getStorageType() != StorageType.PROVIDED &&
+            info.getBlockFile() == null) {
+          // Block metadata file exists and block file is missing
+          addDifference(diffRecord, statsRecord, info);
+        } else if (info.getGenStamp() != memBlock.getGenerationStamp()
+            || info.getBlockLength() != memBlock.getNumBytes()) {
+          // Block metadata file is missing or has wrong generation stamp,
+          // or block file length is different than expected
+          statsRecord.mismatchBlocks++;
+          addDifference(diffRecord, statsRecord, info);
+        } else if (memBlock.compareWith(info) != 0) {
+          // volumeMap record and on-disk files don't match.
+          statsRecord.duplicateBlocks++;
+          addDifference(diffRecord, statsRecord, info);
+        }
+        d++;
+
+        if (d < blockpoolReport.length) {
+          // There may be multiple on-disk records for the same block,
+          // don't increment the memory record pointer if so.
+          ScanInfo nextInfo
+              = blockpoolReport[Math.min(d, blockpoolReport.length - 1)];
+          if (nextInfo.getBlockId() != info.getBlockId()) {
+            ++m;
           }
-          d++;
+        } else {
+          ++m;
+        }
+      }
+      while (m < bl.size()) {
+        ReplicaInfo current = bl.get(m++);
+        addDifference(diffRecord, statsRecord,
+                      current.getBlockId(), current.getVolume());
+      }
+      while (d < blockpoolReport.length) {
+        if (!dataset.isDeletingBlock(bpid, blockpoolReport[d].getBlockId())) {
+          statsRecord.missingMemoryBlocks++;
+          addDifference(diffRecord, statsRecord, blockpoolReport[d]);
         }
-        LOG.info(statsRecord.toString());
-      } //end for
-    } //end synchronized
+        d++;
+      }
+      LOG.info(statsRecord.toString());
+    } //end for
   }
 
   /**
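
The restructured loop above is a classic two-pointer merge over two sequences
sorted by block id: the on-disk scan report and the in-memory replica list.
When the ids diverge, one side is recorded as a difference and only that
pointer advances; matching ids advance both (with the extra guard above for
duplicate on-disk records of the same block). A stripped-down sketch of the
technique, assuming both inputs are already sorted (the names are
illustrative, not the DirectoryScanner API):

    import java.util.ArrayList;
    import java.util.List;

    /** Illustrative two-pointer reconciliation over sorted block-id arrays. */
    class ReconcileExample {
      static List<String> diff(long[] diskIds, long[] memIds) {
        List<String> differences = new ArrayList<>();
        int d = 0; // index into the on-disk report
        int m = 0; // index into the in-memory report
        while (d < diskIds.length && m < memIds.length) {
          if (diskIds[d] < memIds[m]) {
            differences.add("missing in memory: " + diskIds[d++]);
          } else if (diskIds[d] > memIds[m]) {
            differences.add("missing on disk: " + memIds[m++]);
          } else {
            d++; // ids match: advance both sides
            m++;
          }
        }
        while (d < diskIds.length) { // disk-only tail
          differences.add("missing in memory: " + diskIds[d++]);
        }
        while (m < memIds.length) { // memory-only tail
          differences.add("missing on disk: " + memIds[m++]);
        }
        return differences;
      }
    }

Run against, say, diskIds = {1, 2, 4} and memIds = {2, 3, 4}, this reports
block 1 as missing in memory and block 3 as missing on disk, mirroring how
addDifference() is driven in the loop above.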


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org