Posted to hdfs-commits@hadoop.apache.org by st...@apache.org on 2011/10/07 17:31:15 UTC

svn commit: r1180078 - in /hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs: ./ src/test/java/org/apache/hadoop/hdfs/ src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/ src/test/java/org/apache/hadoop/hdfs/server/datanode/ s...

Author: stevel
Date: Fri Oct  7 15:31:14 2011
New Revision: 1180078

URL: http://svn.apache.org/viewvc?rev=1180078&view=rev
Log:
HDFS-2209. Make MiniDFS easier to embed in other apps.
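
For context, a minimal sketch of how an application might embed the mini cluster after this change. It relies on the HDFS_MINIDFS_BASEDIR configuration option added below; the class name and the base path "/tmp/embedded-hdfs" are hypothetical examples, not part of this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class EmbeddedHdfsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Keep all MiniDFS state under an application-chosen directory
        // instead of depending on the test.build.data system property.
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/tmp/embedded-hdfs");
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          FileSystem fs = cluster.getFileSystem();
          System.out.println("Embedded HDFS is up at " + fs.getUri());
        } finally {
          cluster.shutdown();
        }
      }
    }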

Added:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
Modified:
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1180078&r1=1180077&r2=1180078&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Oct  7 15:31:14 2011
@@ -692,6 +692,8 @@ Release 0.23.0 - Unreleased
     HDFS-2363. Move datanodes size printing from FSNamesystem.metasave(..)
     to BlockManager.  (Uma Maheswara Rao G via szetszwo)
 
+    HDFS-2209. Make MiniDFS easier to embed in other apps. (stevel)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1180078&r1=1180077&r2=1180078&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Oct  7 15:31:14 2011
@@ -86,6 +86,10 @@ public class MiniDFSCluster {
 
   private static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
   private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);
+  /** System property to set the data dir: {@value} */
+  public static final String PROP_TEST_BUILD_DATA = "test.build.data";
+  /** Configuration option to set the data dir: {@value} */
+  public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";
 
   static { DefaultMetricsSystem.setMiniClusterMode(true); }
 
@@ -495,7 +499,7 @@ public class MiniDFSCluster {
       boolean waitSafeMode, boolean setupHostsFile, boolean federation) 
   throws IOException {
     this.conf = conf;
-    base_dir = new File(getBaseDirectory());
+    base_dir = new File(determineDfsBaseDir());
     data_dir = new File(base_dir, "data");
     this.federation = federation;
     this.waitSafeMode = waitSafeMode;
@@ -504,7 +508,7 @@ public class MiniDFSCluster {
     String rpcEngineName = System.getProperty("hdfs.rpc.engine");
     if (rpcEngineName != null && !"".equals(rpcEngineName)) {
       
-      System.out.println("HDFS using RPCEngine: "+rpcEngineName);
+      LOG.info("HDFS using RPCEngine: " + rpcEngineName);
       try {
         Class<?> rpcEngine = conf.getClassByName(rpcEngineName);
         setRpcEngine(conf, NamenodeProtocols.class, rpcEngine);
@@ -856,8 +860,8 @@ public class MiniDFSCluster {
       // Set up datanode address
       setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
       if (manageDfsDirs) {
-        File dir1 = getStorageDir(i, 0);
-        File dir2 = getStorageDir(i, 1);
+        File dir1 = getInstanceStorageDir(i, 0);
+        File dir2 = getInstanceStorageDir(i, 1);
         dir1.mkdirs();
         dir2.mkdirs();
         if (!dir1.isDirectory() || !dir2.isDirectory()) { 
@@ -873,17 +877,17 @@ public class MiniDFSCluster {
         dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
             simulatedCapacities[i-curDatanodesNum]);
       }
-      System.out.println("Starting DataNode " + i + " with "
+      LOG.info("Starting DataNode " + i + " with "
                          + DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
                          + dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
       if (hosts != null) {
         dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
-        System.out.println("Starting DataNode " + i + " with hostname set to: " 
+        LOG.info("Starting DataNode " + i + " with hostname set to: "
                            + dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
       }
       if (racks != null) {
         String name = hosts[i - curDatanodesNum];
-        System.out.println("Adding node with hostname : " + name + " to rack "+
+        LOG.info("Adding node with hostname : " + name + " to rack " +
                             racks[i-curDatanodesNum]);
         StaticMapping.addNodeToRack(name,
                                     racks[i-curDatanodesNum]);
@@ -901,7 +905,7 @@ public class MiniDFSCluster {
       String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
       if (racks != null) {
         int port = dn.getSelfAddr().getPort();
-        System.out.println("Adding node with IP:port : " + ipAddr + ":" + port+
+        LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +
                             " to rack " + racks[i-curDatanodesNum]);
         StaticMapping.addNodeToRack(ipAddr + ":" + port,
                                   racks[i-curDatanodesNum]);
@@ -1097,7 +1101,7 @@ public class MiniDFSCluster {
    * Shutdown all the nodes in the cluster.
    */
   public void shutdown() {
-    System.out.println("Shutting down the Mini HDFS Cluster");
+    LOG.info("Shutting down the Mini HDFS Cluster");
     shutdownDataNodes();
     for (NameNodeInfo nnInfo : nameNodes) {
       NameNode nameNode = nnInfo.nameNode;
@@ -1137,7 +1141,7 @@ public class MiniDFSCluster {
   public synchronized void shutdownNameNode(int nnIndex) {
     NameNode nn = nameNodes[nnIndex].nameNode;
     if (nn != null) {
-      System.out.println("Shutting down the namenode");
+      LOG.info("Shutting down the namenode");
       nn.stop();
       nn.join();
       Configuration conf = nameNodes[nnIndex].conf;
@@ -1181,9 +1185,9 @@ public class MiniDFSCluster {
     nameNodes[nnIndex] = new NameNodeInfo(nn, conf);
     if (waitActive) {
       waitClusterUp();
-      System.out.println("Restarted the namenode");
+      LOG.info("Restarted the namenode");
       waitActive();
-      System.out.println("Cluster is active");
+      LOG.info("Cluster is active");
     }
   }
 
@@ -1259,7 +1263,7 @@ public class MiniDFSCluster {
     }
     DataNodeProperties dnprop = dataNodes.remove(i);
     DataNode dn = dnprop.datanode;
-    System.out.println("MiniDFSCluster Stopping DataNode " + 
+    LOG.info("MiniDFSCluster Stopping DataNode " +
                        dn.getMachineName() +
                        " from a total of " + (dataNodes.size() + 1) + 
                        " datanodes.");
@@ -1348,7 +1352,7 @@ public class MiniDFSCluster {
     for (int i = dataNodes.size() - 1; i >= 0; i--) {
       if (!restartDataNode(i, keepPort))
         return false;
-      System.out.println("Restarted DataNode " + i);
+      LOG.info("Restarted DataNode " + i);
     }
     return true;
   }
@@ -1375,8 +1379,8 @@ public class MiniDFSCluster {
     } catch (IOException ioe) {
       // This method above should never throw.
       // It only throws IOE since it is exposed via RPC
-      throw new AssertionError("Unexpected IOE thrown: "
-          + StringUtils.stringifyException(ioe));
+      throw (AssertionError)(new AssertionError("Unexpected IOE thrown: "
+          + StringUtils.stringifyException(ioe)).initCause(ioe));
     }
     boolean isUp = false;
     synchronized (this) {
@@ -1522,7 +1526,7 @@ public class MiniDFSCluster {
           failedCount++;
           // Cached RPC connection to namenode, if any, is expected to fail once
           if (failedCount > 1) {
-            System.out.println("Tried waitActive() " + failedCount
+            LOG.warn("Tried waitActive() " + failedCount
                 + " time(s) and failed, giving up.  "
                 + StringUtils.stringifyException(e));
             throw e;
@@ -1574,7 +1578,7 @@ public class MiniDFSCluster {
   }
 
   public void formatDataNodeDirs() throws IOException {
-    base_dir = new File(getBaseDirectory());
+    base_dir = new File(determineDfsBaseDir());
     data_dir = new File(base_dir, "data");
     if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
       throw new IOException("Cannot remove data directory: " + data_dir);
@@ -1695,8 +1699,49 @@ public class MiniDFSCluster {
     return data_dir.getAbsolutePath();
   }
 
+  /**
+   * Get the base directory for this MiniDFS instance.
+   * <p/>
+   * Within the MiniDFSCluster class and any subclasses, this method should be
+   * used instead of {@link #getBaseDirectory()} which doesn't support
+   * configuration-specific base directories.
+   * <p/>
+   * First the Configuration property {@link #HDFS_MINIDFS_BASEDIR} is fetched.
+   * If non-null, this is returned.
+   * If this is null, then {@link #getBaseDirectory()} is called.
+   * @return the base directory for this instance.
+   */
+  protected String determineDfsBaseDir() {
+    String dfsdir = conf.get(HDFS_MINIDFS_BASEDIR, null);
+    if (dfsdir == null) {
+      dfsdir = getBaseDirectory();
+    }
+    return dfsdir;
+  }
+
+  /**
+   * Get the base directory for any DFS cluster whose configuration does
+   * not explicitly set it. This is done by retrieving the system property
+   * {@link #PROP_TEST_BUILD_DATA} (defaulting to "build/test/data"),
+   * and returning that directory with a /dfs subdirectory appended.
+   * @return a directory for use as a miniDFS filesystem.
+   */
   public static String getBaseDirectory() {
-    return System.getProperty("test.build.data", "build/test/data") + "/dfs/";
+    return System.getProperty(PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
+  }
+
+  /**
+   * Get a storage directory for a datanode in this specific instance of
+   * a MiniCluster.
+   *
+   * @param dnIndex datanode index (starts from 0)
+   * @param dirIndex directory index (0 or 1). Index 0 provides access to the
+   *          first storage directory. Index 1 provides access to the second
+   *          storage directory.
+   * @return Storage directory
+   */
+  public File getInstanceStorageDir(int dnIndex, int dirIndex) {
+    return new File(base_dir, getStorageDirPath(dnIndex, dirIndex));
   }
 
   /**
@@ -1714,13 +1759,25 @@ public class MiniDFSCluster {
    * @return Storage directory
    */
   public static File getStorageDir(int dnIndex, int dirIndex) {
-    return new File(getBaseDirectory() + "data/data" + (2*dnIndex + 1 + dirIndex));
+    return new File(getBaseDirectory(), getStorageDirPath(dnIndex, dirIndex));
   }
-  
+
   /**
-   * Get current directory corresponding to the datanode
-   * @param storageDir
-   * @return current directory
+   * Calculate the DN instance-specific path to append to the base dir
+   * to determine the location of a DN instance's storage in the mini cluster.
+   * @param dnIndex datanode index
+   * @param dirIndex directory index (0 or 1).
+   * @return the storage directory path, relative to the base dir
+   */
+  private static String getStorageDirPath(int dnIndex, int dirIndex) {
+    return "data/data" + (2 * dnIndex + 1 + dirIndex);
+  }
+
+  /**
+   * Get the current directory corresponding to the datanode, as defined in
+   * {@link Storage#STORAGE_DIR_CURRENT}.
+   * @param storageDir the storage directory of a datanode.
+   * @return the datanode current directory
    */
   public static String getDNCurrentDir(File storageDir) {
     return storageDir + "/" + Storage.STORAGE_DIR_CURRENT + "/";
@@ -1728,8 +1785,8 @@ public class MiniDFSCluster {
   
   /**
    * Get directory corresponding to block pool directory in the datanode
-   * @param storageDir
-   * @return current directory
+   * @param storageDir the storage directory of a datanode.
+   * @return the block pool directory
    */
   public static String getBPDir(File storageDir, String bpid) {
     return getDNCurrentDir(storageDir) + bpid + "/";
@@ -1775,6 +1832,16 @@ public class MiniDFSCluster {
     return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), 
         blk.getBlockName());
   }
+
+  /**
+   * Shut down a cluster if it is not null
+   * @param cluster cluster reference or null
+   */
+  public static void shutdownCluster(MiniDFSCluster cluster) {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
   
   /**
    * Get all files related to a block from all the datanodes
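
A short sketch of the base-dir resolution the new javadoc above describes (configuration option first, then the test.build.data system property), together with the per-instance storage dir accessor; the class name and the path "/var/tmp/cluster-a" are hypothetical examples:

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class BaseDirSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // When HDFS_MINIDFS_BASEDIR is set, determineDfsBaseDir() returns it;
        // otherwise the cluster falls back to getBaseDirectory(), i.e. the
        // test.build.data system property (default "build/test/data") + "/dfs/".
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, "/var/tmp/cluster-a");
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          // Instance storage dirs resolve under this cluster's own base dir,
          // here /var/tmp/cluster-a/data/data1 and /var/tmp/cluster-a/data/data2.
          File dir0 = cluster.getInstanceStorageDir(0, 0);
          File dir1 = cluster.getInstanceStorageDir(0, 1);
          System.out.println(dir0 + " " + dir1);
        } finally {
          cluster.shutdown();
        }
      }
    }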

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java?rev=1180078&r1=1180077&r2=1180078&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java Fri Oct  7 15:31:14 2011
@@ -83,7 +83,7 @@ public class TestCrcCorruption {
      // file prevents this Datanode from sending data to another datanode.
      // However, a client is allowed access to this block.
       //
-      File storageDir = MiniDFSCluster.getStorageDir(0, 1);
+      File storageDir = cluster.getInstanceStorageDir(0, 1);
       String bpid = cluster.getNamesystem().getBlockPoolId();
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
@@ -142,7 +142,7 @@ public class TestCrcCorruption {
       // Now deliberately corrupt all meta blocks from the second
       // directory of the first datanode
       //
-      storageDir = MiniDFSCluster.getStorageDir(0, 1);
+      storageDir = cluster.getInstanceStorageDir(0, 1);
       data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
       blocks = data_dir.listFiles();

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1180078&r1=1180077&r2=1180078&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java Fri Oct  7 15:31:14 2011
@@ -65,7 +65,7 @@ public class TestFileCorruption extends 
       FileSystem fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
       // Now deliberately remove the blocks
-      File storageDir = MiniDFSCluster.getStorageDir(2, 0);
+      File storageDir = cluster.getInstanceStorageDir(2, 0);
       String bpid = cluster.getNamesystem().getBlockPoolId();
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
@@ -127,11 +127,11 @@ public class TestFileCorruption extends 
       
       // get the block
       final String bpid = cluster.getNamesystem().getBlockPoolId();
-      File storageDir = MiniDFSCluster.getStorageDir(0, 0);
+      File storageDir = cluster.getInstanceStorageDir(0, 0);
       File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       ExtendedBlock blk = getBlock(bpid, dataDir);
       if (blk == null) {
-        storageDir = MiniDFSCluster.getStorageDir(0, 1);
+        storageDir = cluster.getInstanceStorageDir(0, 1);
         dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
         blk = getBlock(bpid, dataDir);
       }

Added: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java?rev=1180078&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java (added)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java Fri Oct  7 15:31:14 2011
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import junit.framework.Assert;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+
+/**
+ * Tests MiniDFS cluster setup/teardown and isolation.
+ * Every instance is brought up with a new data dir, to ensure that
+ * shutdown work in background threads doesn't interfere with bringing up
+ * the new cluster.
+ */
+public class TestMiniDFSCluster {
+
+  private static final String CLUSTER_1 = "cluster1";
+  private static final String CLUSTER_2 = "cluster2";
+  private static final String CLUSTER_3 = "cluster3";
+  protected String testDataPath;
+  protected File testDataDir;
+
+  @Before
+  public void setUp() {
+    testDataPath = System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
+    testDataDir = new File(new File(testDataPath).getParentFile(),
+                           "miniclusters");
+  }
+
+  @After
+  public void tearDown() {
+    System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, testDataPath);
+  }
+
+  /**
+   * Verify that the cluster still comes up without the test.build.data
+   * system property, provided the base directory is set in the configuration.
+   *
+   * @throws Throwable on a failure
+   */
+  @Test
+  public void testClusterWithoutSystemProperties() throws Throwable {
+    System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
+    Configuration conf = new HdfsConfiguration();
+    File testDataCluster1 = new File(testDataPath, CLUSTER_1);
+    String c1Path = testDataCluster1.getAbsolutePath();
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    try {
+      Assert.assertEquals(c1Path + "/data", cluster.getDataDirectory());
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Bring up two clusters and assert that they are in different directories.
+   * @throws Throwable on a failure
+   */
+  @Test
+  public void testDualClusters() throws Throwable {
+    File testDataCluster2 = new File(testDataPath, CLUSTER_2);
+    File testDataCluster3 = new File(testDataPath, CLUSTER_3);
+    Configuration conf = new HdfsConfiguration();
+    String c2Path = testDataCluster2.getAbsolutePath();
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c2Path);
+    MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build();
+    MiniDFSCluster cluster3 = null;
+    try {
+      String dataDir2 = cluster2.getDataDirectory();
+      Assert.assertEquals(c2Path + "/data", dataDir2);
+      //change the data dir
+      conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
+               testDataCluster3.getAbsolutePath());
+      MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
+      cluster3 = builder.build();
+      String dataDir3 = cluster3.getDataDirectory();
+      Assert.assertFalse("Clusters are bound to the same directory: " + dataDir2,
+                         dataDir2.equals(dataDir3));
+    } finally {
+      MiniDFSCluster.shutdownCluster(cluster3);
+      MiniDFSCluster.shutdownCluster(cluster2);
+    }
+  }
+
+
+}
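
The new null-safe shutdownCluster() helper supports the teardown idiom used in testDualClusters() above, where a later cluster may never have been assigned. A sketch of the pattern under a hypothetical class name, with the test body elided:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class NullSafeTeardownSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = null;
        try {
          cluster = new MiniDFSCluster.Builder(conf).build();
          // ... exercise the cluster ...
        } finally {
          // A no-op when build() threw before cluster was assigned.
          MiniDFSCluster.shutdownCluster(cluster);
        }
      }
    }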

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1180078&r1=1180077&r2=1180078&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Fri Oct  7 15:31:14 2011
@@ -63,7 +63,7 @@ public class TestOverReplicatedBlocks ex
       DataNodeProperties dnProps = cluster.stopDataNode(0);
       // remove block scanner log to trigger block scanning
       File scanLog = new File(MiniDFSCluster.getFinalizedDir(
-          MiniDFSCluster.getStorageDir(0, 0),
+          cluster.getInstanceStorageDir(0, 0),
           cluster.getNamesystem().getBlockPoolId()).getParent().toString()
           + "/../dncp_block_verification.log.prev");
       //wait for one minute for deletion to succeed;

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1180078&r1=1180077&r2=1180078&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Fri Oct  7 15:31:14 2011
@@ -324,7 +324,7 @@ public class TestDataNodeVolumeFailure {
     final String bpid = cluster.getNamesystem().getBlockPoolId();
     for(int i=0; i<dn_num; i++) {
       for(int j=0; j<=1; j++) {
-        File storageDir = MiniDFSCluster.getStorageDir(i, j);
+        File storageDir = cluster.getInstanceStorageDir(i, j);
         File dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
         if(dir == null) {
           System.out.println("dir is null for dn=" + i + " and data_dir=" + j);

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java?rev=1180078&r1=1180077&r2=1180078&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java Fri Oct  7 15:31:14 2011
@@ -234,8 +234,8 @@ public class TestDataNodeVolumeFailureTo
     // Fail the current directory since invalid storage directory perms
     // get fixed up automatically on datanode startup.
     File[] dirs = {
-        new File(MiniDFSCluster.getStorageDir(dnIndex, 0), "current"),
-        new File(MiniDFSCluster.getStorageDir(dnIndex, 1), "current") };
+        new File(cluster.getInstanceStorageDir(dnIndex, 0), "current"),
+        new File(cluster.getInstanceStorageDir(dnIndex, 1), "current") };
 
     try {
       for (int i = 0; i < volumesFailed; i++) {
@@ -274,7 +274,7 @@ public class TestDataNodeVolumeFailureTo
     final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
     ).getDatanodeManager();
     long origCapacity = DFSTestUtil.getLiveDatanodeCapacity(dm);
-    File dir = new File(MiniDFSCluster.getStorageDir(0, 0), "current");
+    File dir = new File(cluster.getInstanceStorageDir(0, 0), "current");
 
     try {
       prepareDirToFail(dir);

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java?rev=1180078&r1=1180077&r2=1180078&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java Fri Oct  7 15:31:14 2011
@@ -64,10 +64,10 @@ public class TestDeleteBlockPool {
       String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
       String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
 
-      File dn1StorageDir1 = MiniDFSCluster.getStorageDir(0, 0);
-      File dn1StorageDir2 = MiniDFSCluster.getStorageDir(0, 1);
-      File dn2StorageDir1 = MiniDFSCluster.getStorageDir(1, 0);
-      File dn2StorageDir2 = MiniDFSCluster.getStorageDir(1, 1);
+      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
+      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
+      File dn2StorageDir1 = cluster.getInstanceStorageDir(1, 0);
+      File dn2StorageDir2 = cluster.getInstanceStorageDir(1, 1);
 
       // Although namenode is shutdown, the bp offerservice is still running
       try {
@@ -171,8 +171,8 @@ public class TestDeleteBlockPool {
       String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
       String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
       
-      File dn1StorageDir1 = MiniDFSCluster.getStorageDir(0, 0);
-      File dn1StorageDir2 = MiniDFSCluster.getStorageDir(0, 1);
+      File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
+      File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
       
       Configuration nn1Conf = cluster.getConfiguration(0);
       nn1Conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "namesServerId1");

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=1180078&r1=1180077&r2=1180078&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Fri Oct  7 15:31:14 2011
@@ -86,9 +86,9 @@ public class TestDiskError {
     cluster.waitActive();
     final int dnIndex = 0;
     String bpid = cluster.getNamesystem().getBlockPoolId();
-    File storageDir = MiniDFSCluster.getStorageDir(dnIndex, 0);
+    File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
     File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
-    storageDir = MiniDFSCluster.getStorageDir(dnIndex, 1);
+    storageDir = cluster.getInstanceStorageDir(dnIndex, 1);
     File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
     try {
       // make the data directory of the first datanode to be readonly
@@ -155,9 +155,9 @@ public class TestDiskError {
 
     // the temporary block & meta files should be deleted
     String bpid = cluster.getNamesystem().getBlockPoolId();
-    File storageDir = MiniDFSCluster.getStorageDir(sndNode, 0);
+    File storageDir = cluster.getInstanceStorageDir(sndNode, 0);
     File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
-    storageDir = MiniDFSCluster.getStorageDir(sndNode, 1);
+    storageDir = cluster.getInstanceStorageDir(sndNode, 1);
     File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
     while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
       Thread.sleep(100);

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1180078&r1=1180077&r2=1180078&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Fri Oct  7 15:31:14 2011
@@ -466,7 +466,7 @@ public class TestFsck extends TestCase {
       final String bpid = cluster.getNamesystem().getBlockPoolId();
       for (int i=0; i<4; i++) {
         for (int j=0; j<=1; j++) {
-          File storageDir = MiniDFSCluster.getStorageDir(i, j);
+          File storageDir = cluster.getInstanceStorageDir(i, j);
           File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
           File[] blocks = data_dir.listFiles();
           if (blocks == null)

Modified: hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java?rev=1180078&r1=1180077&r2=1180078&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java (original)
+++ hadoop/common/branches/branch-0.23/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java Fri Oct  7 15:31:14 2011
@@ -80,7 +80,7 @@ public class TestListCorruptFileBlocks {
 
       // Now deliberately corrupt one block
       String bpid = cluster.getNamesystem().getBlockPoolId();
-      File storageDir = MiniDFSCluster.getStorageDir(0, 1);
+      File storageDir = cluster.getInstanceStorageDir(0, 1);
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
       assertTrue("data directory does not exist", data_dir.exists());
       File[] blocks = data_dir.listFiles();
@@ -163,7 +163,7 @@ public class TestListCorruptFileBlocks {
           + " corrupt files. Expecting None.", badFiles.size() == 0);
 
       // Now deliberately corrupt one block
-      File storageDir = MiniDFSCluster.getStorageDir(0, 0);
+      File storageDir = cluster.getInstanceStorageDir(0, 0);
       File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, 
           cluster.getNamesystem().getBlockPoolId());
       assertTrue("data directory does not exist", data_dir.exists());
@@ -284,7 +284,7 @@ public class TestListCorruptFileBlocks {
       String bpid = cluster.getNamesystem().getBlockPoolId();
       for (int i = 0; i < 4; i++) {
         for (int j = 0; j <= 1; j++) {
-          File storageDir = MiniDFSCluster.getStorageDir(i, j);
+          File storageDir = cluster.getInstanceStorageDir(i, j);
           File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
           File[] blocks = data_dir.listFiles();
           if (blocks == null)
@@ -391,7 +391,7 @@ public class TestListCorruptFileBlocks {
       String bpid = cluster.getNamesystem().getBlockPoolId();
       // For loop through number of datadirectories per datanode (2)
       for (int i = 0; i < 2; i++) {
-        File storageDir = MiniDFSCluster.getStorageDir(0, i);
+        File storageDir = cluster.getInstanceStorageDir(0, i);
         File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
         File[] blocks = data_dir.listFiles();
         if (blocks == null)
@@ -466,7 +466,7 @@ public class TestListCorruptFileBlocks {
       final String bpid = cluster.getNamesystem().getBlockPoolId();
       for (int i=0; i<4; i++) {
         for (int j=0; j<=1; j++) {
-          File storageDir = MiniDFSCluster.getStorageDir(i, j);
+          File storageDir = cluster.getInstanceStorageDir(i, j);
           File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
           LOG.info("Removing files from " + data_dir);
           File[] blocks = data_dir.listFiles();