Posted to common-commits@hadoop.apache.org by cm...@apache.org on 2014/10/24 22:17:24 UTC

git commit: HDFS-6988. Improve HDFS-6581 eviction configuration (Xiaoyu Yao via Colin P. McCabe)

Repository: hadoop
Updated Branches:
  refs/heads/trunk 501e49f2e -> a52eb4bc5


HDFS-6988. Improve HDFS-6581 eviction configuration (Xiaoyu Yao via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a52eb4bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a52eb4bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a52eb4bc

Branch: refs/heads/trunk
Commit: a52eb4bc5fb21574859f779001ea9d95bf5207fe
Parents: 501e49f
Author: Colin Patrick McCabe <cm...@cloudera.com>
Authored: Fri Oct 24 13:08:59 2014 -0700
Committer: Colin Patrick McCabe <cm...@cloudera.com>
Committed: Fri Oct 24 13:08:59 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  3 +++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  6 ++---
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 24 ++++++++------------
 .../hdfs/server/balancer/TestBalancer.java      |  2 +-
 .../fsdataset/impl/TestLazyPersistFiles.java    |  4 ++--
 5 files changed, 19 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52eb4bc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2897f40..f793981 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -631,6 +631,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6904. YARN unable to renew delegation token fetched via webhdfs 
     due to incorrect service port. (jitendra)
 
+    HDFS-6988. Improve HDFS-6581 eviction configuration (Xiaoyu Yao via Colin
+    P. McCabe)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52eb4bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index eb0a735..5d7d252 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -135,9 +135,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_KEY = "dfs.datanode.ram.disk.replica.tracker";
   public static final Class<RamDiskReplicaLruTracker>  DFS_DATANODE_RAM_DISK_REPLICA_TRACKER_DEFAULT = RamDiskReplicaLruTracker.class;
   public static final String  DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT = "dfs.datanode.ram.disk.low.watermark.percent";
-  public static final int     DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT = 10;
-  public static final String  DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS = "dfs.datanode.ram.disk.low.watermark.replicas";
-  public static final int     DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS_DEFAULT = 3;
+  public static final float   DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT = 10.0f;
+  public static final String  DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES = "dfs.datanode.ram.disk.low.watermark.bytes";
+  public static final long    DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES_DEFAULT = DFS_BLOCK_SIZE_DEFAULT;
 
   // This setting is for testing/internal use only.
   public static final String  DFS_DATANODE_DUPLICATE_REPLICA_DELETION = "dfs.datanode.duplicate.replica.deletion";
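
The net effect of this hunk: the replica-count key
dfs.datanode.ram.disk.low.watermark.replicas is removed in favor of the
byte-denominated dfs.datanode.ram.disk.low.watermark.bytes (defaulting to one
default block size), and the percent watermark default widens from int to
float. A minimal sketch of overriding both keys, using only the constants
defined above; the class name and values are made up for illustration, not
recommendations:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    class EvictionConfigSketch {
      static Configuration sketch() {
        Configuration conf = new HdfsConfiguration();
        // Evict once free RAM disk space drops below 15 percent...
        conf.setFloat(
            DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT, 15.0f);
        // ...or below 256 MB, whichever watermark is crossed first.
        conf.setLong(
            DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES, 256L << 20);
        return conf;
      }
    }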

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52eb4bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 5851c61..350788c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -45,7 +45,6 @@ import javax.management.StandardMBean;
 
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
-import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -2397,24 +2396,20 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   class LazyWriter implements Runnable {
     private volatile boolean shouldRun = true;
     final int checkpointerInterval;
-    final long estimateBlockSize;
-    final int lowWatermarkFreeSpacePercentage;
-    final int lowWatermarkFreeSpaceReplicas;
+    final float lowWatermarkFreeSpacePercentage;
+    final long lowWatermarkFreeSpaceBytes;
 
 
     public LazyWriter(Configuration conf) {
       this.checkpointerInterval = conf.getInt(
           DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
           DFSConfigKeys.DFS_DATANODE_LAZY_WRITER_INTERVAL_DEFAULT_SEC);
-      this.estimateBlockSize = conf.getLongBytes(
-          DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
-          DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
-      this.lowWatermarkFreeSpacePercentage = conf.getInt(
+      this.lowWatermarkFreeSpacePercentage = conf.getFloat(
           DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT,
           DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_PERCENT_DEFAULT);
-      this.lowWatermarkFreeSpaceReplicas = conf.getInt(
-          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS,
-          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS_DEFAULT);
+      this.lowWatermarkFreeSpaceBytes = conf.getLong(
+          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
+          DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES_DEFAULT);
     }
 
     /**
@@ -2477,6 +2472,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     private boolean transientFreeSpaceBelowThreshold() throws IOException {
       long free = 0;
       long capacity = 0;
+      float percentFree = 0.0f;
 
       // Don't worry about fragmentation for now. We don't expect more than one
       // transient volume per DN.
@@ -2491,9 +2487,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         return false;
       }
 
-      int percentFree = (int) (free * 100 / capacity);
-      return percentFree < lowWatermarkFreeSpacePercentage ||
-             free < (estimateBlockSize * lowWatermarkFreeSpaceReplicas);
+      percentFree = (float) ((double)free * 100 / capacity);
+      return (percentFree < lowWatermarkFreeSpacePercentage) ||
+          (free < lowWatermarkFreeSpaceBytes);
     }
 
     /**
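
The rewritten check is equivalent to the standalone sketch below (the class
and helper names are made up, and the zero-capacity guard stands in for the
method's early return when no transient volume is found). The free percentage
is now fractional rather than the old truncated int, and a flat byte floor
replaces the old estimateBlockSize * replicas product:

    class WatermarkSketch {
      static boolean belowWatermark(long free, long capacity,
                                    float watermarkPercent, long watermarkBytes) {
        if (capacity == 0) {
          return false; // stand-in for the method's early return
        }
        // Divide in double so free * 100 cannot overflow for huge volumes,
        // then narrow to float for the comparison.
        float percentFree = (float) ((double) free * 100 / capacity);
        return percentFree < watermarkPercent || free < watermarkBytes;
      }

      public static void main(String[] args) {
        // 64 MB free of a 1 GB RAM disk is 6.25% free: below the 10% default
        // percent watermark and below a 128 MB byte floor, so evict.
        System.out.println(
            belowWatermark(64L << 20, 1L << 30, 10.0f, 128L << 20));
      }
    }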

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52eb4bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 751f186..793675f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -114,7 +114,7 @@ public class TestBalancer {
     conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
     conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC, 1);
-    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS, 1);
+    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES, DEFAULT_RAM_DISK_BLOCK_SIZE);
   }
 
   /* create a file with a length of <code>fileLen</code> */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a52eb4bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index 9f1d50a..444afed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -766,8 +766,8 @@ public class TestLazyPersistFiles {
                 HEARTBEAT_RECHECK_INTERVAL_MSEC);
     conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
                 LAZY_WRITER_INTERVAL_SEC);
-    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_REPLICAS,
-                EVICTION_LOW_WATERMARK);
+    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
+                EVICTION_LOW_WATERMARK * BLOCK_SIZE);
 
     conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, useSCR);
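
Both test hunks apply the same mechanical conversion: an old replica-count
watermark becomes replicas * blockSize bytes. A hypothetical helper for that
conversion, not part of the commit; it uses setLong rather than the tests'
setInt so the product is computed and stored as a long, avoiding int overflow
for large block sizes:

    // Hypothetical migration helper for replicas -> bytes watermark configs.
    static void setWatermarkFromReplicas(Configuration conf,
                                         int replicas, long blockSizeBytes) {
      // replicas is widened to long before the multiply.
      conf.setLong(DFSConfigKeys.DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
                   replicas * blockSizeBytes);
    }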