Posted to common-commits@hadoop.apache.org by vv...@apache.org on 2015/07/01 12:45:46 UTC

[15/50] hadoop git commit: HDFS-8681. BlockScanner is incorrectly disabled by default. (Contributed by Arpit Agarwal)

HDFS-8681. BlockScanner is incorrectly disabled by default. (Contributed by Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6793dd8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6793dd8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6793dd8

Branch: refs/heads/YARN-2139
Commit: c6793dd8cc69ea994eb23c3e1349efe4b9feca9a
Parents: 3dfa816
Author: Arpit Agarwal <ar...@apache.org>
Authored: Sun Jun 28 14:51:17 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Sun Jun 28 14:51:36 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 ++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +-
 .../hdfs/server/datanode/BlockScanner.java      | 36 ++++++++++++++++----
 .../src/main/resources/hdfs-default.xml         |  9 +++--
 .../fsdataset/impl/TestFsDatasetImpl.java       |  1 +
 5 files changed, 41 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6793dd8/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e287ea4..94477fe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1113,6 +1113,9 @@ Release 2.7.1 - UNRELEASED
     HDFS-8656. Preserve compatibility of ClientProtocol#rollingUpgrade after
     finalization. (wang)
 
+    HDFS-8681. BlockScanner is incorrectly disabled by default.
+    (Arpit Agarwal)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6793dd8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index ebd668f..0e569f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -378,7 +378,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_MAX_RECEIVER_THREADS_KEY = "dfs.datanode.max.transfer.threads";
   public static final int     DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;
   public static final String  DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours";
-  public static final int     DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
+  public static final int     DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 21 * 24;  // 3 weeks.
   public static final String  DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND = "dfs.block.scanner.volume.bytes.per.second";
   public static final long    DFS_BLOCK_SCANNER_VOLUME_BYTES_PER_SECOND_DEFAULT = 1048576L;
   public static final String  DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed";

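The new default works out to exactly three weeks. A quick sketch of the
arithmetic (not part of this patch), using only java.util.concurrent.TimeUnit:

    import java.util.concurrent.TimeUnit;

    public class ScanPeriodDefault {
      public static void main(String[] args) {
        // 21 days * 24 hours/day = 504 hours, converted to milliseconds.
        long defaultMs = TimeUnit.MILLISECONDS.convert(21 * 24, TimeUnit.HOURS);
        System.out.println(defaultMs);  // 1814400000 ms = 3 weeks
      }
    }
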
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6793dd8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
index b0248c5..9c4dd10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java
@@ -115,6 +115,34 @@ public class BlockScanner {
       }
     }
 
+    /**
+     * Determine the configured block scanner interval.
+     *
+     * For compatibility with prior releases of HDFS, if the
+     * configured value is zero then the scan period is
+     * set to 3 weeks.
+     *
+     * If the configured value is less than zero then the scanner
+     * is disabled.
+     *
+     * @param conf Configuration object.
+     * @return block scan period in milliseconds.
+     */
+    private static long getConfiguredScanPeriodMs(Configuration conf) {
+      long tempScanPeriodMs = getUnitTestLong(
+          conf, INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS,
+              TimeUnit.MILLISECONDS.convert(conf.getLong(
+                  DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
+                  DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT), TimeUnit.HOURS));
+
+      if (tempScanPeriodMs == 0) {
+        tempScanPeriodMs = TimeUnit.MILLISECONDS.convert(
+            DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT, TimeUnit.HOURS);
+      }
+
+      return tempScanPeriodMs;
+    }
+
     @SuppressWarnings("unchecked")
     Conf(Configuration conf) {
       this.targetBytesPerSec = Math.max(0L, conf.getLong(
@@ -123,11 +151,7 @@ public class BlockScanner {
       this.maxStalenessMs = Math.max(0L, getUnitTestLong(conf,
           INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS,
           INTERNAL_DFS_BLOCK_SCANNER_MAX_STALENESS_MS_DEFAULT));
-      this.scanPeriodMs = Math.max(0L,
-          getUnitTestLong(conf, INTERNAL_DFS_DATANODE_SCAN_PERIOD_MS,
-              TimeUnit.MILLISECONDS.convert(conf.getLong(
-                  DFS_DATANODE_SCAN_PERIOD_HOURS_KEY,
-                  DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT), TimeUnit.HOURS)));
+      this.scanPeriodMs = getConfiguredScanPeriodMs(conf);
       this.cursorSaveMs = Math.max(0L, getUnitTestLong(conf,
           INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS,
           INTERNAL_DFS_BLOCK_SCANNER_CURSOR_SAVE_INTERVAL_MS_DEFAULT));
@@ -159,7 +183,7 @@ public class BlockScanner {
    * no threads will start.
    */
   public boolean isEnabled() {
-    return (conf.scanPeriodMs) > 0 && (conf.targetBytesPerSec > 0);
+    return (conf.scanPeriodMs > 0) && (conf.targetBytesPerSec > 0);
   }
 
  /**

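To make the new semantics concrete, here is a standalone sketch (hypothetical;
it mirrors the private getConfiguredScanPeriodMs logic above rather than
calling it) of how a configured hours value maps to the effective period:

    import java.util.concurrent.TimeUnit;

    public class ScanPeriodSemantics {
      // The new default from DFSConfigKeys: 21 * 24 hours = 3 weeks.
      static final long DEFAULT_HOURS = 21 * 24;

      // Mirrors getConfiguredScanPeriodMs(): zero falls back to the
      // default; negative values stay negative and disable the scanner.
      static long effectivePeriodMs(long configuredHours) {
        long periodMs =
            TimeUnit.MILLISECONDS.convert(configuredHours, TimeUnit.HOURS);
        if (periodMs == 0) {
          periodMs =
              TimeUnit.MILLISECONDS.convert(DEFAULT_HOURS, TimeUnit.HOURS);
        }
        return periodMs;
      }

      public static void main(String[] args) {
        System.out.println(effectivePeriodMs(-1) > 0);   // false: disabled
        System.out.println(effectivePeriodMs(0) > 0);    // true: 3-week default
        System.out.println(effectivePeriodMs(504) > 0);  // true: explicit period
      }
    }

Note that isEnabled() also requires targetBytesPerSec > 0, so the volume
bytes-per-second key can independently disable the scanner.
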
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6793dd8/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 76161a5..8cb7d5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1071,11 +1071,14 @@
 
 <property>
   <name>dfs.datanode.scan.period.hours</name>
-  <value>0</value>
+  <value>504</value>
   <description>
-        If this is 0 or negative, the DataNode's block scanner will be
-        disabled.  If this is positive, the DataNode will not scan any
+        If this is positive, the DataNode will not scan any
         individual block more than once in the specified scan period.
+        If this is negative, the block scanner is disabled.
+        If this is set to zero, then the default value of 504 hours
+        or 3 weeks is used. Prior versions of HDFS incorrectly documented
+        that setting this key to zero will disable the block scanner.
   </description>
 </property>
 

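Since the scanner is now on by default, anyone who relied on the old
zero-means-disabled behavior must set an explicitly negative value. A minimal
sketch, assuming a standard HdfsConfiguration (the key constant is from the
diff above; the class wrapper is illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DisableBlockScanner {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Any negative value disables the block scanner; zero no longer does.
        conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
        System.out.println(conf.getLong(
            DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 21 * 24));  // -1
      }
    }

The equivalent hdfs-site.xml override is dfs.datanode.scan.period.hours = -1,
which the test change below also uses.
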
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6793dd8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
index 59c7ade..d03fa2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
@@ -274,6 +274,7 @@ public class TestFsDatasetImpl {
   public void testChangeVolumeWithRunningCheckDirs() throws IOException {
     RoundRobinVolumeChoosingPolicy<FsVolumeImpl> blockChooser =
         new RoundRobinVolumeChoosingPolicy<>();
+    conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
     final BlockScanner blockScanner = new BlockScanner(datanode, conf);
     final FsVolumeList volumeList = new FsVolumeList(
         Collections.<VolumeFailureInfo>emptyList(), blockScanner, blockChooser);