Posted to common-commits@hadoop.apache.org by ar...@apache.org on 2015/04/20 01:13:08 UTC

[2/2] hadoop git commit: HDFS-8152. Refactoring of lazy persist storage cases. (Arpit Agarwal)

HDFS-8152. Refactoring of lazy persist storage cases. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/490fac39
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/490fac39
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/490fac39

Branch: refs/heads/branch-2
Commit: 490fac391c25893ba5d9efaefaea0f8fd999dde6
Parents: 890238b
Author: Arpit Agarwal <ar...@apache.org>
Authored: Sun Apr 19 16:09:06 2015 -0700
Committer: Arpit Agarwal <ar...@apache.org>
Committed: Sun Apr 19 16:09:12 2015 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |   2 +
 .../fsdataset/impl/LazyPersistTestCase.java     | 152 +++++++++++--------
 .../fsdataset/impl/TestLazyPersistFiles.java    |  20 +--
 .../fsdataset/impl/TestLazyPersistPolicy.java   |   8 +-
 .../impl/TestLazyPersistReplicaPlacement.java   |  12 +-
 .../impl/TestLazyPersistReplicaRecovery.java    |   4 +-
 .../datanode/fsdataset/impl/TestLazyWriter.java |  14 +-
 .../fsdataset/impl/TestScrLazyPersistFiles.java |  48 ++++--
 8 files changed, 152 insertions(+), 108 deletions(-)
----------------------------------------------------------------------
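
Note for readers skimming the patch: the change collapses the positional-argument startUpCluster(...) overloads in LazyPersistTestCase into a single fluent ClusterWithRamDiskBuilder, so call sites like startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, false) become self-describing chains. Below is a minimal, self-contained sketch of the pattern; the builder and setter names mirror the diff, but ClusterSketch and its println body are stand-ins (the real build() starts a MiniDFSCluster via the consolidated private startUpCluster).

public class ClusterSketch {
  static class ClusterWithRamDiskBuilder {
    // Defaults mirror the fields at the bottom of the new inner class.
    private int numDatanodes = 1;            // stands in for REPL_FACTOR
    private int ramDiskReplicaCapacity = -1; // < 0 means "not capped"
    private boolean hasTransientStorage = true;
    private boolean useScr = false;

    ClusterWithRamDiskBuilder setNumDatanodes(int numDatanodes) {
      this.numDatanodes = numDatanodes;
      return this;                           // fluent: every setter returns this
    }

    ClusterWithRamDiskBuilder setRamDiskReplicaCapacity(int capacity) {
      this.ramDiskReplicaCapacity = capacity;
      return this;
    }

    ClusterWithRamDiskBuilder setUseScr(boolean useScr) {
      this.useScr = useScr;
      return this;
    }

    void build() {
      // The real method hands every field to startUpCluster(...) in one call.
      System.out.printf("cluster: datanodes=%d ramDiskCap=%d scr=%b transient=%b%n",
          numDatanodes, ramDiskReplicaCapacity, useScr, hasTransientStorage);
    }
  }

  public static void main(String[] args) {
    // Reads like the rewritten tests, e.g.
    //   getClusterBuilder().setRamDiskReplicaCapacity(2).build();
    new ClusterWithRamDiskBuilder().setRamDiskReplicaCapacity(2).setUseScr(true).build();
  }
}
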


http://git-wip-us.apache.org/repos/asf/hadoop/blob/490fac39/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 119ec11..1ef60be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -130,6 +130,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8165. Move GRANDFATHER_GENERATION_STAMP and GRANDFATER_INODE_ID to
     hdfs-client. (wheat9)
 
+    HDFS-8152. Refactoring of lazy persist storage cases. (Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/490fac39/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index 6adec20..d46964b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+
 import static org.apache.hadoop.fs.CreateFlag.CREATE;
 import static org.apache.hadoop.fs.CreateFlag.LAZY_PERSIST;
 import static org.apache.hadoop.fs.StorageType.DEFAULT;
@@ -45,6 +47,7 @@ import java.util.List;
 import java.util.Set;
 import java.util.UUID;
 
+import com.google.common.base.Preconditions;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -228,11 +231,15 @@ public abstract class LazyPersistTestCase {
    * If ramDiskStorageLimit is >=0, then RAM_DISK capacity is artificially
    * capped. If ramDiskStorageLimit < 0 then it is ignored.
    */
-  protected final void startUpCluster(boolean hasTransientStorage,
-                                      final int ramDiskReplicaCapacity,
-                                      final boolean useSCR,
-                                      final boolean useLegacyBlockReaderLocal)
-      throws IOException {
+  protected final void startUpCluster(
+      int numDatanodes,
+      boolean hasTransientStorage,
+      StorageType[] storageTypes,
+      int ramDiskReplicaCapacity,
+      long ramDiskStorageLimit,
+      long evictionLowWatermarkReplicas,
+      boolean useSCR,
+      boolean useLegacyBlockReaderLocal) throws IOException {
 
     Configuration conf = new Configuration();
     conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -243,17 +250,17 @@ public abstract class LazyPersistTestCase {
                 HEARTBEAT_RECHECK_INTERVAL_MSEC);
     conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
                 LAZY_WRITER_INTERVAL_SEC);
-    conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
-                EVICTION_LOW_WATERMARK * BLOCK_SIZE);
+    conf.setLong(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
+                evictionLowWatermarkReplicas * BLOCK_SIZE);
 
     if (useSCR) {
       conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
       // Do not share a client context across tests.
       conf.set(DFS_CLIENT_CONTEXT, UUID.randomUUID().toString());
+      conf.set(DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
+          UserGroupInformation.getCurrentUser().getShortUserName());
       if (useLegacyBlockReaderLocal) {
         conf.setBoolean(DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
-        conf.set(DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
-            UserGroupInformation.getCurrentUser().getShortUserName());
       } else {
         sockDir = new TemporarySocketDirectory();
         conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
@@ -261,22 +268,29 @@ public abstract class LazyPersistTestCase {
       }
     }
 
-    long[] capacities = null;
+    Preconditions.checkState(
+        ramDiskReplicaCapacity < 0 || ramDiskStorageLimit < 0,
+        "Cannot specify non-default values for both ramDiskReplicaCapacity "
+            + "and ramDiskStorageLimit");
+
+    long[] capacities;
     if (hasTransientStorage && ramDiskReplicaCapacity >= 0) {
       // Convert replica count to byte count, add some delta for .meta and
       // VERSION files.
-      long ramDiskStorageLimit = ((long) ramDiskReplicaCapacity * BLOCK_SIZE) +
+      ramDiskStorageLimit = ((long) ramDiskReplicaCapacity * BLOCK_SIZE) +
           (BLOCK_SIZE - 1);
-      capacities = new long[] { ramDiskStorageLimit, -1 };
     }
+    capacities = new long[] { ramDiskStorageLimit, -1 };
 
     cluster = new MiniDFSCluster
         .Builder(conf)
-        .numDataNodes(REPL_FACTOR)
+        .numDataNodes(numDatanodes)
         .storageCapacities(capacities)
-        .storageTypes(hasTransientStorage ?
-            new StorageType[]{ RAM_DISK, DEFAULT } : null)
+        .storageTypes(storageTypes != null ? storageTypes :
+                          (hasTransientStorage ? new StorageType[]{RAM_DISK, DEFAULT} : null))
         .build();
+    cluster.waitActive();
+
     fs = cluster.getFileSystem();
     client = fs.getClient();
     try {
@@ -287,65 +301,77 @@ public abstract class LazyPersistTestCase {
     LOG.info("Cluster startup complete");
   }
 
+  ClusterWithRamDiskBuilder getClusterBuilder() {
+    return new ClusterWithRamDiskBuilder();
+  }
+
   /**
-   * If ramDiskStorageLimit is >=0, then RAM_DISK capacity is artificially
-   * capped. If ramDiskStorageLimit < 0 then it is ignored.
+   * Builder class that allows controlling RAM disk-specific properties for a
+   * MiniDFSCluster.
    */
-  protected final void startUpCluster(final int numDataNodes,
-                                      final StorageType[] storageTypes,
-                                      final long ramDiskStorageLimit,
-                                      final boolean useSCR)
-    throws IOException {
+  class ClusterWithRamDiskBuilder {
+    public ClusterWithRamDiskBuilder setNumDatanodes(
+        int numDatanodes) {
+      this.numDatanodes = numDatanodes;
+      return this;
+    }
 
-    Configuration conf = new Configuration();
-    conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
-    conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC,
-      LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC);
-    conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL_SEC);
-    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
-      HEARTBEAT_RECHECK_INTERVAL_MSEC);
-    conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
-      LAZY_WRITER_INTERVAL_SEC);
+    public ClusterWithRamDiskBuilder setStorageTypes(
+        StorageType[] storageTypes) {
+      this.storageTypes = storageTypes;
+      return this;
+    }
 
-    if (useSCR)
-    {
-      conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,useSCR);
-      conf.set(DFS_CLIENT_CONTEXT, UUID.randomUUID().toString());
-      sockDir = new TemporarySocketDirectory();
-      conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
-          this.getClass().getSimpleName() + "._PORT.sock").getAbsolutePath());
-      conf.set(DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
-        UserGroupInformation.getCurrentUser().getShortUserName());
+    public ClusterWithRamDiskBuilder setRamDiskReplicaCapacity(
+        int ramDiskReplicaCapacity) {
+      this.ramDiskReplicaCapacity = ramDiskReplicaCapacity;
+      return this;
     }
 
-    cluster = new MiniDFSCluster
-      .Builder(conf)
-      .numDataNodes(numDataNodes)
-      .storageTypes(storageTypes != null ?
-          storageTypes : new StorageType[] { DEFAULT, DEFAULT })
-      .build();
-    fs = cluster.getFileSystem();
-    client = fs.getClient();
+    public ClusterWithRamDiskBuilder setRamDiskStorageLimit(
+        long ramDiskStorageLimit) {
+      this.ramDiskStorageLimit = ramDiskStorageLimit;
+      return this;
+    }
 
-    // Artificially cap the storage capacity of the RAM_DISK volume.
-    if (ramDiskStorageLimit >= 0) {
-      List<? extends FsVolumeSpi> volumes =
-        cluster.getDataNodes().get(0).getFSDataset().getVolumes();
+    public ClusterWithRamDiskBuilder setUseScr(boolean useScr) {
+      this.useScr = useScr;
+      return this;
+    }
 
-      for (FsVolumeSpi volume : volumes) {
-        if (volume.getStorageType() == RAM_DISK) {
-          ((FsVolumeImpl) volume).setCapacityForTesting(ramDiskStorageLimit);
-        }
-      }
+    public ClusterWithRamDiskBuilder setHasTransientStorage(
+        boolean hasTransientStorage) {
+      this.hasTransientStorage = hasTransientStorage;
+      return this;
     }
 
-    LOG.info("Cluster startup complete");
-  }
+    public ClusterWithRamDiskBuilder setUseLegacyBlockReaderLocal(
+        boolean useLegacyBlockReaderLocal) {
+      this.useLegacyBlockReaderLocal = useLegacyBlockReaderLocal;
+      return this;
+    }
+
+    public ClusterWithRamDiskBuilder setEvictionLowWatermarkReplicas(
+        long evictionLowWatermarkReplicas) {
+      this.evictionLowWatermarkReplicas = evictionLowWatermarkReplicas;
+      return this;
+    }
+
+    public void build() throws IOException {
+      LazyPersistTestCase.this.startUpCluster(
+          numDatanodes, hasTransientStorage, storageTypes, ramDiskReplicaCapacity,
+          ramDiskStorageLimit, evictionLowWatermarkReplicas,
+          useScr, useLegacyBlockReaderLocal);
+    }
 
-  protected final void startUpCluster(boolean hasTransientStorage,
-                                      final int ramDiskReplicaCapacity)
-      throws IOException {
-    startUpCluster(hasTransientStorage, ramDiskReplicaCapacity, false, false);
+    private int numDatanodes = REPL_FACTOR;
+    private StorageType[] storageTypes = null;
+    private int ramDiskReplicaCapacity = -1;
+    private long ramDiskStorageLimit = -1;
+    private boolean hasTransientStorage = true;
+    private boolean useScr = false;
+    private boolean useLegacyBlockReaderLocal = false;
+    private long evictionLowWatermarkReplicas = EVICTION_LOW_WATERMARK;
   }
 
   protected final void triggerBlockReport()
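
Two details of the consolidated startUpCluster above are easy to miss: ramDiskReplicaCapacity and ramDiskStorageLimit are now mutually exclusive (the Preconditions.checkState fails fast with an IllegalStateException if both are set to non-default values), and the builder defaults make the common old two-argument call startUpCluster(true, -1) equivalent to a bare build(). A hedged sketch of the mapping, assuming code inside a test that extends LazyPersistTestCase; the old signatures come from the removed overloads above:

// Old: startUpCluster(true /* hasTransientStorage */, -1 /* replica capacity */);
getClusterBuilder().build();   // same defaults, now spelled out in one place

// Old: startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, false);
getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
                   .setUseScr(true)
                   .setUseLegacyBlockReaderLocal(false)
                   .build();

// New failure mode: setting both a replica capacity and a byte storage
// limit trips the Preconditions.checkState before the cluster starts.
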

http://git-wip-us.apache.org/repos/asf/hadoop/blob/490fac39/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index 30e5d26..41398c8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
@@ -19,11 +19,6 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Test;
@@ -34,17 +29,12 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.ArrayList;
-import java.util.Collections;
 
-import static org.apache.hadoop.fs.StorageType.DEFAULT;
 import static org.apache.hadoop.fs.StorageType.RAM_DISK;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 import static org.hamcrest.core.Is.is;
-import static org.hamcrest.core.IsNot.not;
 import static org.junit.Assert.assertThat;
-import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 public class TestLazyPersistFiles extends LazyPersistTestCase {
@@ -56,7 +46,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
    */
   @Test
   public void testAppendIsDenied() throws IOException {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 
@@ -77,7 +67,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
    */
   @Test
   public void testTruncateIsDenied() throws IOException {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 
@@ -99,7 +89,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
   @Test
   public void testCorruptFilesAreDiscarded()
       throws IOException, InterruptedException {
-    startUpCluster(true, 2);
+    getClusterBuilder().setRamDiskReplicaCapacity(2).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
 
@@ -136,7 +126,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
   @Test
   public void testConcurrentRead()
     throws Exception {
-    startUpCluster(true, 2);
+    getClusterBuilder().setRamDiskReplicaCapacity(2).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     final Path path1 = new Path("/" + METHOD_NAME + ".dat");
 
@@ -187,7 +177,7 @@ public class TestLazyPersistFiles extends LazyPersistTestCase {
   @Test
   public void testConcurrentWrites()
     throws IOException, InterruptedException {
-    startUpCluster(true, 9);
+    getClusterBuilder().setRamDiskReplicaCapacity(9).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     final int SEED = 0xFADED;
     final int NUM_WRITERS = 4;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/490fac39/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java
index 873e2b0..6b198f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistPolicy.java
@@ -34,7 +34,7 @@ import static org.junit.Assert.assertThat;
 public class TestLazyPersistPolicy extends LazyPersistTestCase {
   @Test
   public void testPolicyNotSetByDefault() throws IOException {
-    startUpCluster(false, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 
@@ -47,7 +47,7 @@ public class TestLazyPersistPolicy extends LazyPersistTestCase {
 
   @Test
   public void testPolicyPropagation() throws IOException {
-    startUpCluster(false, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 
@@ -59,7 +59,7 @@ public class TestLazyPersistPolicy extends LazyPersistTestCase {
 
   @Test
   public void testPolicyPersistenceInEditLog() throws IOException {
-    startUpCluster(false, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 
@@ -73,7 +73,7 @@ public class TestLazyPersistPolicy extends LazyPersistTestCase {
 
   @Test
   public void testPolicyPersistenceInFsImage() throws IOException {
-    startUpCluster(false, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/490fac39/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java
index 4e1a893..018eaba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaPlacement.java
@@ -33,7 +33,7 @@ import static org.junit.Assert.fail;
 public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
   @Test
   public void testPlacementOnRamDisk() throws IOException {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 
@@ -43,7 +43,7 @@ public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
 
   @Test
   public void testPlacementOnSizeLimitedRamDisk() throws IOException {
-    startUpCluster(true, 3);
+    getClusterBuilder().setRamDiskReplicaCapacity(3).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
     Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
@@ -62,7 +62,7 @@ public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
    */
   @Test
   public void testFallbackToDisk() throws IOException {
-    startUpCluster(false, -1);
+    getClusterBuilder().setHasTransientStorage(false).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 
@@ -76,7 +76,7 @@ public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
    */
   @Test
   public void testFallbackToDiskFull() throws Exception {
-    startUpCluster(false, 0);
+    getClusterBuilder().setRamDiskReplicaCapacity(0).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 
@@ -95,7 +95,7 @@ public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
   @Test
   public void testFallbackToDiskPartial()
       throws IOException, InterruptedException {
-    startUpCluster(true, 2);
+    getClusterBuilder().setRamDiskReplicaCapacity(2).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 
@@ -134,7 +134,7 @@ public class TestLazyPersistReplicaPlacement extends LazyPersistTestCase {
    */
   @Test
   public void testRamDiskNotChosenByDefault() throws IOException {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/490fac39/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java
index b8b2653..231353a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistReplicaRecovery.java
@@ -32,7 +32,7 @@ public class TestLazyPersistReplicaRecovery extends LazyPersistTestCase {
   public void testDnRestartWithSavedReplicas()
       throws IOException, InterruptedException {
 
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
 
@@ -57,7 +57,7 @@ public class TestLazyPersistReplicaRecovery extends LazyPersistTestCase {
   public void testDnRestartWithUnsavedReplicas()
       throws IOException, InterruptedException {
 
-    startUpCluster(true, 1);
+    getClusterBuilder().build();
     FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
 
     final String METHOD_NAME = GenericTestUtils.getMethodName();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/490fac39/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
index 23203c7..ee8aaf0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyWriter.java
@@ -39,7 +39,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
   @Test
   public void testLazyPersistBlocksAreSaved()
       throws IOException, InterruptedException {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 
@@ -63,7 +63,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
    */
   @Test
   public void testRamDiskEviction() throws Exception {
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
     Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
@@ -99,7 +99,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
   @Test
   public void testRamDiskEvictionBeforePersist()
       throws IOException, InterruptedException {
-    startUpCluster(true, 1);
+    getClusterBuilder().setRamDiskReplicaCapacity(1).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
     Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
@@ -133,7 +133,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
   public void testRamDiskEvictionIsLru()
       throws Exception {
     final int NUM_PATHS = 5;
-    startUpCluster(true, NUM_PATHS + EVICTION_LOW_WATERMARK);
+    getClusterBuilder().setRamDiskReplicaCapacity(NUM_PATHS + EVICTION_LOW_WATERMARK).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path paths[] = new Path[NUM_PATHS * 2];
 
@@ -194,7 +194,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
   @Test
   public void testDeleteBeforePersist()
       throws Exception {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     FsDatasetTestUtil.stopLazyWriter(cluster.getDataNodes().get(0));
 
@@ -221,7 +221,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
   @Test
   public void testDeleteAfterPersist()
       throws Exception {
-    startUpCluster(true, -1);
+    getClusterBuilder().build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 
@@ -249,7 +249,7 @@ public class TestLazyWriter extends LazyPersistTestCase {
   @Test
   public void testDfsUsageCreateDelete()
       throws IOException, InterruptedException {
-    startUpCluster(true, 4);
+    getClusterBuilder().setRamDiskReplicaCapacity(4).build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path = new Path("/" + METHOD_NAME + ".dat");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/490fac39/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
index d54e5a4..7c7ba64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java
@@ -70,9 +70,11 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
   @Test
   public void testRamDiskShortCircuitRead()
     throws IOException, InterruptedException {
-    startUpCluster(REPL_FACTOR,
-      new StorageType[]{RAM_DISK, DEFAULT},
-      2 * BLOCK_SIZE - 1, true);  // 1 replica + delta, SCR read
+    getClusterBuilder().setNumDatanodes(REPL_FACTOR)
+                       .setStorageTypes(new StorageType[]{RAM_DISK, DEFAULT})
+                       .setRamDiskStorageLimit(2 * BLOCK_SIZE - 1)
+                       .setUseScr(true)
+                       .build();
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     final int SEED = 0xFADED;
     Path path = new Path("/" + METHOD_NAME + ".dat");
@@ -111,8 +113,14 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
   @Test
   public void testRamDiskEvictionWithShortCircuitReadHandle()
     throws IOException, InterruptedException {
-    startUpCluster(REPL_FACTOR, new StorageType[] { RAM_DISK, DEFAULT },
-      (6 * BLOCK_SIZE -1), true);  // 5 replica + delta, SCR.
+    // 5 replica + delta, SCR.
+    getClusterBuilder().setNumDatanodes(REPL_FACTOR)
+                       .setStorageTypes(new StorageType[]{RAM_DISK, DEFAULT})
+                       .setRamDiskStorageLimit(6 * BLOCK_SIZE - 1)
+                       .setEvictionLowWatermarkReplicas(3)
+                       .setUseScr(true)
+                       .build();
+
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path path1 = new Path("/" + METHOD_NAME + ".01.dat");
     Path path2 = new Path("/" + METHOD_NAME + ".02.dat");
@@ -156,14 +164,20 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
   public void testShortCircuitReadAfterEviction()
       throws IOException, InterruptedException {
     Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, false);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
+                       .setUseScr(true)
+                       .setUseLegacyBlockReaderLocal(false)
+                       .build();
     doShortCircuitReadAfterEvictionTest();
   }
 
   @Test
   public void testLegacyShortCircuitReadAfterEviction()
       throws IOException, InterruptedException {
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, true);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
+                       .setUseScr(true)
+                       .setUseLegacyBlockReaderLocal(true)
+                       .build();
     doShortCircuitReadAfterEvictionTest();
   }
 
@@ -220,14 +234,20 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
   public void testShortCircuitReadBlockFileCorruption() throws IOException,
       InterruptedException {
     Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, false);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
+                       .setUseScr(true)
+                       .setUseLegacyBlockReaderLocal(false)
+                       .build();
     doShortCircuitReadBlockFileCorruptionTest();
   }
 
   @Test
   public void testLegacyShortCircuitReadBlockFileCorruption() throws IOException,
       InterruptedException {
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, true);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
+                       .setUseScr(true)
+                       .setUseLegacyBlockReaderLocal(true)
+                       .build();
     doShortCircuitReadBlockFileCorruptionTest();
   }
 
@@ -260,14 +280,20 @@ public class TestScrLazyPersistFiles extends LazyPersistTestCase {
   public void testShortCircuitReadMetaFileCorruption() throws IOException,
       InterruptedException {
     Assume.assumeThat(DomainSocket.getLoadingFailureReason(), equalTo(null));
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, false);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
+                       .setUseScr(true)
+                       .setUseLegacyBlockReaderLocal(false)
+                       .build();
     doShortCircuitReadMetaFileCorruptionTest();
   }
 
   @Test
   public void testLegacyShortCircuitReadMetaFileCorruption() throws IOException,
       InterruptedException {
-    startUpCluster(true, 1 + EVICTION_LOW_WATERMARK, true, true);
+    getClusterBuilder().setRamDiskReplicaCapacity(1 + EVICTION_LOW_WATERMARK)
+                       .setUseScr(true)
+                       .setUseLegacyBlockReaderLocal(true)
+                       .build();
     doShortCircuitReadMetaFileCorruptionTest();
   }