Posted to common-commits@hadoop.apache.org by sh...@apache.org on 2018/05/02 19:22:22 UTC

[47/50] [abbrv] hadoop git commit: HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.

HDFS-13283. Percentage based Reserved Space Calculation for DataNode. Contributed by Lukas Majercak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb3414a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb3414a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb3414a2

Branch: refs/heads/YARN-8200
Commit: cb3414a27944b5878bfd8134a086276e454b3db0
Parents: c0c788a
Author: Inigo Goiri <in...@apache.org>
Authored: Mon Apr 30 15:24:21 2018 -0700
Committer: Inigo Goiri <in...@apache.org>
Committed: Mon Apr 30 15:24:21 2018 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +
 .../datanode/fsdataset/impl/FsVolumeImpl.java   |  36 +--
 .../fsdataset/impl/ReservedSpaceCalculator.java | 227 +++++++++++++++++++
 .../src/main/resources/hdfs-default.xml         |  28 +++
 .../fsdataset/impl/TestFsVolumeList.java        |  59 ++++-
 .../impl/TestReservedSpaceCalculator.java       | 171 ++++++++++++++
 6 files changed, 516 insertions(+), 14 deletions(-)
----------------------------------------------------------------------
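
This commit makes the non-HDFS reserved space on a DataNode volume configurable
as a percentage of the volume's capacity (or a combination of percentage and
absolute bytes) instead of only a fixed byte count. As a minimal sketch of
opting in, with key and class names taken from the diff below (the standalone
harness itself is illustrative only):

    import org.apache.hadoop.conf.Configuration;

    public class PercentageReservationExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Select the percentage-based calculator introduced by this commit.
        conf.set("dfs.datanode.du.reserved.calculator",
            "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl."
                + "ReservedSpaceCalculator$ReservedSpaceCalculatorPercentage");
        // Reserve 10% of each volume's capacity for non-HDFS data.
        conf.setLong("dfs.datanode.du.reserved.pct", 10);
      }
    }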


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index ec50448..c128a8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator;
 import org.apache.hadoop.http.HttpConfig;
 
 /** 
@@ -540,8 +541,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_DNS_INTERFACE_DEFAULT = "default";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_KEY = "dfs.datanode.dns.nameserver";
   public static final String  DFS_DATANODE_DNS_NAMESERVER_DEFAULT = "default";
+  public static final String DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY =
+      "dfs.datanode.du.reserved.calculator";
+  public static final Class<? extends ReservedSpaceCalculator>
+      DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT =
+          ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute.class;
   public static final String  DFS_DATANODE_DU_RESERVED_KEY = "dfs.datanode.du.reserved";
   public static final long    DFS_DATANODE_DU_RESERVED_DEFAULT = 0;
+  public static final String  DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY =
+      "dfs.datanode.du.reserved.pct";
+  public static final int     DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT = 0;
   public static final String  DFS_DATANODE_HANDLER_COUNT_KEY = "dfs.datanode.handler.count";
   public static final int     DFS_DATANODE_HANDLER_COUNT_DEFAULT = 10;
   public static final String  DFS_DATANODE_HTTP_ADDRESS_KEY = "dfs.datanode.http.address";
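
As with the pre-existing dfs.datanode.du.reserved key, both new keys accept a
lower-cased storage-type suffix that takes precedence over the plain key (see
getReservedFromConf() in the new ReservedSpaceCalculator class below). A sketch
of that lookup order, with illustrative values:

    import org.apache.hadoop.conf.Configuration;

    public class PerStorageTypeKeyExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setLong("dfs.datanode.du.reserved.pct", 15);      // global default
        conf.setLong("dfs.datanode.du.reserved.pct.ssd", 50);  // SSD-only override
        // Mirrors getReservedFromConf(): read the suffixed key first and
        // fall back to the un-suffixed one.
        long ssdPct = conf.getLong("dfs.datanode.du.reserved.pct.ssd",
            conf.getLong("dfs.datanode.du.reserved.pct", 0));
        long diskPct = conf.getLong("dfs.datanode.du.reserved.pct.disk",
            conf.getLong("dfs.datanode.du.reserved.pct", 0));
        System.out.println("ssd=" + ssdPct + ", disk=" + diskPct); // ssd=50, disk=15
      }
    }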

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 71d93ae..4c8accf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.CloseableReferenceCount;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Timer;
 import org.codehaus.jackson.annotate.JsonProperty;
@@ -99,7 +98,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
       = new ConcurrentHashMap<String, BlockPoolSlice>();
   private final File currentDir;    // <StorageDirectory>/current
   private final DF usage;
-  private final long reserved;
+  private final ReservedSpaceCalculator reserved;
   private CloseableReferenceCount reference = new CloseableReferenceCount();
 
   // Disk space reserved for blocks (RBW or Re-replicating) open for write.
@@ -123,24 +122,33 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   FsVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
       Configuration conf, StorageType storageType) throws IOException {
+    // Outside of tests, usage is null and the DF is created in the delegated constructor.
+    this(dataset, storageID, currentDir, conf, storageType, null);
+  }
+
+  FsVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
+      Configuration conf, StorageType storageType, DF usage)
+      throws IOException {
     this.dataset = dataset;
     this.storageID = storageID;
-    this.reserved = conf.getLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY
-        + "." + StringUtils.toLowerCase(storageType.toString()), conf.getLong(
-        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
-        DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT));
     this.reservedForReplicas = new AtomicLong(0L);
     this.currentDir = currentDir;
     File parent = currentDir.getParentFile();
-    this.usage = new DF(parent, conf);
     this.storageType = storageType;
     this.configuredCapacity = -1;
+
+    if (usage == null) {
+      usage = new DF(parent, conf);
+    }
+    this.usage = usage;
     // dataset.datanode may be null in some tests.
     this.fileIoProvider = dataset.datanode != null ?
         dataset.datanode.getFileIoProvider() :
         new FileIoProvider(conf, dataset.datanode);
     cacheExecutor = initializeCacheExecutor(parent);
     this.metrics = DataNodeVolumeMetrics.create(conf, parent.getAbsolutePath());
+    this.reserved = new ReservedSpaceCalculator.Builder(conf)
+        .setUsage(usage).setStorageType(storageType).build();
   }
 
   protected ThreadPoolExecutor initializeCacheExecutor(File parent) {
@@ -370,7 +378,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   @VisibleForTesting
   public long getCapacity() {
     if (configuredCapacity < 0) {
-      long remaining = usage.getCapacity() - reserved;
+      long remaining = usage.getCapacity() - getReserved();
       return remaining > 0 ? remaining : 0;
     }
 
@@ -410,8 +418,9 @@ public class FsVolumeImpl implements FsVolumeSpi {
 
   private long getRemainingReserved() throws IOException {
     long actualNonDfsUsed = getActualNonDfsUsed();
-    if (actualNonDfsUsed < reserved) {
-      return reserved - actualNonDfsUsed;
+    long actualReserved = getReserved();
+    if (actualNonDfsUsed < actualReserved) {
+      return actualReserved - actualNonDfsUsed;
     }
     return 0L;
   }
@@ -424,10 +433,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
    */
   public long getNonDfsUsed() throws IOException {
     long actualNonDfsUsed = getActualNonDfsUsed();
-    if (actualNonDfsUsed < reserved) {
+    long actualReserved = getReserved();
+    if (actualNonDfsUsed < actualReserved) {
       return 0L;
     }
-    return actualNonDfsUsed - reserved;
+    return actualNonDfsUsed - actualReserved;
   }
 
   @VisibleForTesting
@@ -446,7 +456,7 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
 
   long getReserved(){
-    return reserved;
+    return reserved.getReserved();
   }
 
   BlockPoolSlice getBlockPoolSlice(String bpid) throws IOException {
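
Concretely, with the percentage calculator configured at 15% and
usage.getCapacity() returning 4000 (the same numbers as the new
TestFsVolumeList case below), getReserved() yields 4000 * 15 / 100 = 600 and
getCapacity() therefore reports 4000 - 600 = 3400.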

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java
new file mode 100644
index 0000000..5523cfd
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/ReservedSpaceCalculator.java
@@ -0,0 +1,227 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.util.StringUtils;
+
+import java.lang.reflect.Constructor;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY;
+
+/**
+ * Used for calculating file system space reserved for non-HDFS data.
+ */
+public abstract class ReservedSpaceCalculator {
+
+  /**
+   * Used for creating instances of ReservedSpaceCalculator.
+   */
+  public static class Builder {
+
+    private final Configuration conf;
+
+    private DF usage;
+    private StorageType storageType;
+
+    public Builder(Configuration conf) {
+      this.conf = conf;
+    }
+
+    public Builder setUsage(DF newUsage) {
+      this.usage = newUsage;
+      return this;
+    }
+
+    public Builder setStorageType(
+        StorageType newStorageType) {
+      this.storageType = newStorageType;
+      return this;
+    }
+
+    ReservedSpaceCalculator build() {
+      try {
+        Class<? extends ReservedSpaceCalculator> clazz = conf.getClass(
+            DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+            DFS_DATANODE_DU_RESERVED_CALCULATOR_DEFAULT,
+            ReservedSpaceCalculator.class);
+
+        Constructor constructor = clazz.getConstructor(
+            Configuration.class, DF.class, StorageType.class);
+
+        return (ReservedSpaceCalculator) constructor.newInstance(
+            conf, usage, storageType);
+      } catch (Exception e) {
+        throw new IllegalStateException(
+            "Error instantiating ReservedSpaceCalculator", e);
+      }
+    }
+  }
+
+  private final DF usage;
+  private final Configuration conf;
+  private final StorageType storageType;
+
+  ReservedSpaceCalculator(Configuration conf, DF usage,
+      StorageType storageType) {
+    this.usage = usage;
+    this.conf = conf;
+    this.storageType = storageType;
+  }
+
+  DF getUsage() {
+    return usage;
+  }
+
+  long getReservedFromConf(String key, long defaultValue) {
+    return conf.getLong(key + "." + StringUtils.toLowerCase(
+        storageType.toString()), conf.getLong(key, defaultValue));
+  }
+
+  /**
+   * Return the file system space reserved for non-HDFS data.
+   *
+   * @return the number of bytes reserved for non-HDFS.
+   */
+  abstract long getReserved();
+
+
+  /**
+   * Based on absolute number of reserved bytes.
+   */
+  public static class ReservedSpaceCalculatorAbsolute extends
+      ReservedSpaceCalculator {
+
+    private final long reservedBytes;
+
+    public ReservedSpaceCalculatorAbsolute(Configuration conf, DF usage,
+        StorageType storageType) {
+      super(conf, usage, storageType);
+      this.reservedBytes = getReservedFromConf(DFS_DATANODE_DU_RESERVED_KEY,
+          DFS_DATANODE_DU_RESERVED_DEFAULT);
+    }
+
+    @Override
+    long getReserved() {
+      return reservedBytes;
+    }
+  }
+
+  /**
+   * Based on percentage of total capacity in the storage.
+   */
+  public static class ReservedSpaceCalculatorPercentage extends
+      ReservedSpaceCalculator {
+
+    private final long reservedPct;
+
+    public ReservedSpaceCalculatorPercentage(Configuration conf, DF usage,
+        StorageType storageType) {
+      super(conf, usage, storageType);
+      this.reservedPct = getReservedFromConf(
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY,
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT);
+    }
+
+    @Override
+    long getReserved() {
+      return getPercentage(getUsage().getCapacity(), reservedPct);
+    }
+  }
+
+  /**
+   * Calculates absolute and percentage based reserved space and
+   * picks the one that will yield more reserved space.
+   */
+  public static class ReservedSpaceCalculatorConservative extends
+      ReservedSpaceCalculator {
+
+    private final long reservedBytes;
+    private final long reservedPct;
+
+    public ReservedSpaceCalculatorConservative(Configuration conf, DF usage,
+        StorageType storageType) {
+      super(conf, usage, storageType);
+      this.reservedBytes = getReservedFromConf(DFS_DATANODE_DU_RESERVED_KEY,
+          DFS_DATANODE_DU_RESERVED_DEFAULT);
+      this.reservedPct = getReservedFromConf(
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY,
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT);
+    }
+
+    long getReservedBytes() {
+      return reservedBytes;
+    }
+
+    long getReservedPct() {
+      return reservedPct;
+    }
+
+    @Override
+    long getReserved() {
+      return Math.max(getReservedBytes(),
+          getPercentage(getUsage().getCapacity(), getReservedPct()));
+    }
+  }
+
+  /**
+   * Calculates absolute and percentage based reserved space and
+   * picks the one that will yield less reserved space.
+   */
+  public static class ReservedSpaceCalculatorAggressive extends
+      ReservedSpaceCalculator {
+
+    private final long reservedBytes;
+    private final long reservedPct;
+
+    public ReservedSpaceCalculatorAggressive(Configuration conf, DF usage,
+        StorageType storageType) {
+      super(conf, usage, storageType);
+      this.reservedBytes = getReservedFromConf(DFS_DATANODE_DU_RESERVED_KEY,
+          DFS_DATANODE_DU_RESERVED_DEFAULT);
+      this.reservedPct = getReservedFromConf(
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY,
+          DFS_DATANODE_DU_RESERVED_PERCENTAGE_DEFAULT);
+    }
+
+    long getReservedBytes() {
+      return reservedBytes;
+    }
+
+    long getReservedPct() {
+      return reservedPct;
+    }
+
+    @Override
+    long getReserved() {
+      return Math.min(getReservedBytes(),
+          getPercentage(getUsage().getCapacity(), getReservedPct()));
+    }
+  }
+
+  private static long getPercentage(long total, long percentage) {
+    return (total * percentage) / 100;
+  }
+}
\ No newline at end of file
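
To make the four policies concrete, here is a sketch of what each one returns
for a 10000-byte volume with 900 reserved bytes and a 10 percent reservation
configured. The harness is illustrative only, and must sit in the same package
as the calculators because getReserved() is package-private:

    package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.DF;
    import org.apache.hadoop.fs.StorageType;
    import org.mockito.Mockito;

    public class ReservedPolicyDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setLong("dfs.datanode.du.reserved", 900);     // absolute bytes
        conf.setLong("dfs.datanode.du.reserved.pct", 10);  // percentage
        DF usage = Mockito.mock(DF.class);
        Mockito.when(usage.getCapacity()).thenReturn(10000L);
        StorageType t = StorageType.DISK;

        // Absolute: 900. Percentage: 10000 * 10 / 100 = 1000.
        // Conservative: max(900, 1000) = 1000. Aggressive: min(900, 1000) = 900.
        System.out.println(new ReservedSpaceCalculator
            .ReservedSpaceCalculatorAbsolute(conf, usage, t).getReserved());
        System.out.println(new ReservedSpaceCalculator
            .ReservedSpaceCalculatorPercentage(conf, usage, t).getReserved());
        System.out.println(new ReservedSpaceCalculator
            .ReservedSpaceCalculatorConservative(conf, usage, t).getReserved());
        System.out.println(new ReservedSpaceCalculator
            .ReservedSpaceCalculatorAggressive(conf, usage, t).getReserved());
      }
    }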

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index ba98257..edd4c5f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -327,6 +327,20 @@
 </property>
 
+<property>
+  <name>dfs.datanode.du.reserved.calculator</name>
+  <value>org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator$ReservedSpaceCalculatorAbsolute</value>
+  <description>Determines the class of ReservedSpaceCalculator to be used for
+    calculating disk space reserved for non-HDFS data. The default calculator is
+    ReservedSpaceCalculatorAbsolute, which uses dfs.datanode.du.reserved
+    for a static reserved number of bytes. ReservedSpaceCalculatorPercentage
+    uses dfs.datanode.du.reserved.pct to calculate the reserved number
+    of bytes based on the size of the storage. ReservedSpaceCalculatorConservative
+    and ReservedSpaceCalculatorAggressive combine the two: Conservative takes the
+    maximum, Aggressive the minimum. For more details see ReservedSpaceCalculator.
+  </description>
+</property>
+
+<property>
   <name>dfs.datanode.du.reserved</name>
   <value>0</value>
   <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
@@ -339,6 +353,20 @@
 </property>
 
+<property>
+  <name>dfs.datanode.du.reserved.pct</name>
+  <value>0</value>
+  <description>Reserved space as a percentage. See dfs.datanode.du.reserved.calculator
+    for when this takes effect. The actual number of bytes reserved is calculated from
+    the total capacity of the data directory in question. Specific storage type based
+    reservation is also supported: the property can be suffixed with a storage type
+    ([ssd]/[disk]/[archive]/[ram_disk]) for clusters with heterogeneous storage.
+    For example, the reserved percentage for RAM_DISK storage can be configured using
+    the property 'dfs.datanode.du.reserved.pct.ram_disk'. If a specific storage type
+    reservation is not configured, then dfs.datanode.du.reserved.pct will be used.
+  </description>
+</property>
+
+<property>
   <name>dfs.namenode.name.dir</name>
   <value>file://${hadoop.tmp.dir}/dfs/name</value>
   <description>Determines where on the local filesystem the DFS name node
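
As a usage sketch built on the two descriptions above (byte values illustrative
only), the Conservative calculator lets an operator reserve whichever is
larger, a fixed floor or a fraction of the volume:

    import org.apache.hadoop.conf.Configuration;

    public class ConservativeReservationExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("dfs.datanode.du.reserved.calculator",
            "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl."
                + "ReservedSpaceCalculator$ReservedSpaceCalculatorConservative");
        conf.setLong("dfs.datanode.du.reserved", 10L * 1024 * 1024 * 1024); // 10 GB floor
        conf.setLong("dfs.datanode.du.reserved.pct", 5);                    // or 5%, if larger
        // On a 100 GB volume: max(10 GB, 5 GB)   = 10 GB reserved.
        // On a 1 TB volume:   max(10 GB, ~51 GB) = ~51 GB reserved.
      }
    }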

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index f511dc6..147b2cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -38,15 +39,18 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 public class TestFsVolumeList {
 
-  private final Configuration conf = new Configuration();
+  private Configuration conf;
   private VolumeChoosingPolicy<FsVolumeImpl> blockChooser =
       new RoundRobinVolumeChoosingPolicy<>();
   private FsDatasetImpl dataset = null;
@@ -61,6 +65,7 @@ public class TestFsVolumeList {
     blockScannerConf.setInt(DFSConfigKeys.
         DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1);
     blockScanner = new BlockScanner(null, blockScannerConf);
+    conf = new Configuration();
   }
 
   @Test(timeout=30000)
@@ -185,4 +190,56 @@ public class TestFsVolumeList {
         actualNonDfsUsage - duReserved;
     assertEquals(expectedNonDfsUsage, spyVolume.getNonDfsUsed());
   }
+
+  @Test
+  public void testDfsReservedPercentageForDifferentStorageTypes()
+      throws IOException {
+    conf.setClass(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculator.ReservedSpaceCalculatorPercentage.class,
+        ReservedSpaceCalculator.class);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 15);
+
+    File volDir = new File(baseDir, "volume-0");
+    volDir.mkdirs();
+
+    DF usage = mock(DF.class);
+    when(usage.getCapacity()).thenReturn(4000L);
+    when(usage.getAvailable()).thenReturn(1000L);
+
+    // when storage type reserved is not configured, should consider
+    // dfs.datanode.du.reserved.pct
+    FsVolumeImpl volume = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.RAM_DISK, usage);
+
+    assertEquals(600, volume.getReserved());
+    assertEquals(3400, volume.getCapacity());
+    assertEquals(400, volume.getAvailable());
+
+    // when storage type reserved is configured.
+    conf.setLong(
+        DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + "."
+            + StringUtils.toLowerCase(StorageType.RAM_DISK.toString()), 10);
+    conf.setLong(
+        DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + "."
+            + StringUtils.toLowerCase(StorageType.SSD.toString()), 50);
+    FsVolumeImpl volume1 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.RAM_DISK, usage);
+    assertEquals(400, volume1.getReserved());
+    assertEquals(3600, volume1.getCapacity());
+    assertEquals(600, volume1.getAvailable());
+
+    FsVolumeImpl volume2 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.SSD, usage);
+    assertEquals(2000, volume2.getReserved());
+    assertEquals(2000, volume2.getCapacity());
+    assertEquals(0, volume2.getAvailable());
+
+    FsVolumeImpl volume3 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.DISK, usage);
+    assertEquals(600, volume3.getReserved());
+
+    FsVolumeImpl volume4 = new FsVolumeImpl(dataset, "storage-id", volDir,
+        conf, StorageType.ARCHIVE, usage);
+    assertEquals(600, volume4.getReserved());
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb3414a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java
new file mode 100644
index 0000000..e04a239
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestReservedSpaceCalculator.java
@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.fs.StorageType;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY;
+import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorAbsolute;
+import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorAggressive;
+import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorConservative;
+import static org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.ReservedSpaceCalculator.ReservedSpaceCalculatorPercentage;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.when;
+
+/**
+ * Unit testing for different types of ReservedSpace calculators.
+ */
+public class TestReservedSpaceCalculator {
+
+  private Configuration conf;
+  private DF usage;
+  private ReservedSpaceCalculator reserved;
+
+  @Before
+  public void setUp() {
+    conf = new Configuration();
+    usage = Mockito.mock(DF.class);
+  }
+
+  @Test
+  public void testReservedSpaceAbsolute() {
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorAbsolute.class,
+        ReservedSpaceCalculator.class);
+
+    // Test both using global configuration
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY, 900);
+
+    checkReserved(StorageType.DISK, 10000, 900);
+    checkReserved(StorageType.SSD, 10000, 900);
+    checkReserved(StorageType.ARCHIVE, 10000, 900);
+  }
+
+  @Test
+  public void testReservedSpaceAbsolutePerStorageType() {
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorAbsolute.class,
+        ReservedSpaceCalculator.class);
+
+    // Test DISK
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".disk", 500);
+    checkReserved(StorageType.DISK, 2300, 500);
+
+    // Test SSD
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".ssd", 750);
+    checkReserved(StorageType.SSD, 1550, 750);
+  }
+
+  @Test
+  public void testReservedSpacePercentage() {
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorPercentage.class,
+        ReservedSpaceCalculator.class);
+
+    // Test both using global configuration
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 10);
+    checkReserved(StorageType.DISK, 10000, 1000);
+    checkReserved(StorageType.SSD, 10000, 1000);
+    checkReserved(StorageType.ARCHIVE, 10000, 1000);
+
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY, 50);
+    checkReserved(StorageType.DISK, 4000, 2000);
+    checkReserved(StorageType.SSD, 4000, 2000);
+    checkReserved(StorageType.ARCHIVE, 4000, 2000);
+  }
+
+  @Test
+  public void testReservedSpacePercentagePerStorageType() {
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorPercentage.class,
+        ReservedSpaceCalculator.class);
+
+    // Test DISK
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".disk", 20);
+    checkReserved(StorageType.DISK, 1600, 320);
+
+    // Test SSD
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".ssd", 50);
+    checkReserved(StorageType.SSD, 8001, 4000);
+  }
+
+  @Test
+  public void testReservedSpaceConservativePerStorageType() {
+    // This policy should take the maximum of the two
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorConservative.class,
+        ReservedSpaceCalculator.class);
+
+    // Test DISK + taking the reserved bytes over percentage,
+    // as that gives more reserved space
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".disk", 800);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".disk", 20);
+    checkReserved(StorageType.DISK, 1600, 800);
+
+    // Test ARCHIVE + taking reserved space based on the percentage,
+    // as that gives more reserved space
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".archive", 1300);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".archive", 50);
+    checkReserved(StorageType.ARCHIVE, 6200, 3100);
+  }
+
+  @Test
+  public void testReservedSpaceAggressivePerStorageType() {
+    // This policy should take the minimum of the two
+    conf.setClass(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY,
+        ReservedSpaceCalculatorAggressive.class,
+        ReservedSpaceCalculator.class);
+
+    // Test RAM_DISK + taking the reserved bytes over percentage,
+    // as that gives less reserved space
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".ram_disk", 100);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".ram_disk", 10);
+    checkReserved(StorageType.RAM_DISK, 1600, 100);
+
+    // Test ARCHIVE + taking reserved space based on the percentage,
+    // as that gives less reserved space
+    conf.setLong(DFS_DATANODE_DU_RESERVED_KEY + ".archive", 20000);
+    conf.setLong(DFS_DATANODE_DU_RESERVED_PERCENTAGE_KEY + ".archive", 5);
+    checkReserved(StorageType.ARCHIVE, 100000, 5000);
+  }
+
+  @Test(expected = IllegalStateException.class)
+  public void testInvalidCalculator() {
+    conf.set(DFS_DATANODE_DU_RESERVED_CALCULATOR_KEY, "INVALIDTYPE");
+    reserved = new ReservedSpaceCalculator.Builder(conf)
+        .setUsage(usage)
+        .setStorageType(StorageType.DISK)
+        .build();
+  }
+
+  private void checkReserved(StorageType storageType,
+      long totalCapacity, long reservedExpected) {
+    when(usage.getCapacity()).thenReturn(totalCapacity);
+
+    reserved = new ReservedSpaceCalculator.Builder(conf).setUsage(usage)
+        .setStorageType(storageType).build();
+    assertEquals(reservedExpected, reserved.getReserved());
+  }
+}
\ No newline at end of file


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org