Posted to common-commits@hadoop.apache.org by to...@apache.org on 2007/04/06 23:09:29 UTC

svn commit: r526281 - in /lucene/hadoop/trunk: ./ src/contrib/hbase/src/test/org/apache/hadoop/hbase/ src/java/org/apache/hadoop/dfs/ src/test/org/apache/hadoop/dfs/

Author: tomwhite
Date: Fri Apr  6 14:09:28 2007
New Revision: 526281

URL: http://svn.apache.org/viewvc?view=rev&rev=526281
Log:
HADOOP-1203.  Change UpgradeUtilities used by DFS tests to use MiniDFSCluster to start and stop NameNode/DataNodes.  Contributed by Nigel Daley.
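
A minimal sketch of the test lifecycle this change enables (illustrative only, not part of the patch; it assumes dfs.name.dir and dfs.data.dir are already set in the conf, as the new three-argument MiniDFSCluster constructor requires):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.dfs.FSConstants.StartupOption;

    Configuration conf = new Configuration();
    // Start a NameNode only; the caller manages the storage directories.
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
    try {
      // Add one DataNode against the running NameNode.
      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
      // ... exercise the cluster ...
    } finally {
      cluster.shutdown();  // stops the DataNodes, then the NameNode
    }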

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=526281&r1=526280&r2=526281
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Fri Apr  6 14:09:28 2007
@@ -115,6 +115,10 @@
 36. HADOOP-988.  Change namenode to use a single map of blocks to metadata.
     (Raghu Angadi via tomwhite)
 
+37. HADOOP-1203.  Change UpgradeUtilities used by DFS tests to use
+    MiniDFSCluster to start and stop NameNode/DataNodes.
+    (Nigel Daley via tomwhite)
+
 
 Release 0.12.3 - 2007-04-06
 

Modified: lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java?view=diff&rev=526281&r1=526280&r2=526281
==============================================================================
--- lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (original)
+++ lucene/hadoop/trunk/src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java Fri Apr  6 14:09:28 2007
@@ -109,7 +109,7 @@
         Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
       }
       
-      cluster = new MiniDFSCluster(65312, conf, 2, false);
+      cluster = new MiniDFSCluster(conf, 2, true, null);
       fs = cluster.getFileSystem();
       parentdir = new Path("/hbase");
       fs.mkdirs(parentdir);
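
Note that the replaced boolean changes meaning here: the deprecated four-argument form was (namenodePort, conf, nDatanodes, dataNodeFirst), while its replacement is (conf, numDataNodes, format, racks), so the call above now requests a formatted two-DataNode cluster on free ports. As a standalone sketch:

    Configuration conf = new Configuration();
    // format=true: clear and format storage; racks=null: default placement
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    FileSystem fs = cluster.getFileSystem();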

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java?view=diff&rev=526281&r1=526280&r2=526281
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DataNode.java Fri Apr  6 14:09:28 2007
@@ -123,6 +123,7 @@
     private DataNodeMetrics myMetrics = new DataNodeMetrics();
     private static InetSocketAddress nameNodeAddr;
     private static DataNode datanodeObject = null;
+    private static Thread dataNodeThread = null;
     String machineName;
 
     private class DataNodeMetrics implements Updater {
@@ -187,15 +188,6 @@
         blocksRemoved += nblocks;
       }
     }
-
-    /**
-     * @deprecated
-     * TODO: only MiniDFSCluster needs it, should be removed
-     */
-    DataNode( Configuration conf, String networkLoc, String[] dataDirs ) throws IOException {
-      // networkLoc is ignored since it is already in the conf
-      this( conf, Storage.makeListOfFiles( dataDirs ) );
-    }
     
     /**
      * Create the DataNode given a configuration and an array of dataDirs.
@@ -389,6 +381,13 @@
           } catch (IOException ie) {
           }
         }
+        if (dataNodeThread != null) {
+          dataNodeThread.interrupt();
+          try {
+            dataNodeThread.join();
+          } catch (InterruptedException ie) {
+          }
+        }
     }
 
     void handleDiskError( String errMsgr ) {
@@ -1112,36 +1111,20 @@
         
         LOG.info("Finishing DataNode in: "+data);
     }
-
-    private static ArrayList<DataNode> dataNodeList = new ArrayList<DataNode>();
-    private static ArrayList<Thread> dataNodeThreadList = new ArrayList<Thread>();
     
     /** Start datanode daemon.
      */
-    public static void run(Configuration conf) throws IOException {
+    public static DataNode run(Configuration conf) throws IOException {
         String[] dataDirs = conf.getStrings("dfs.data.dir");
         DataNode dn = makeInstance(dataDirs, conf);
         if (dn != null) {
-          dataNodeList.add(dn);
-          Thread t = new Thread(dn, "DataNode: [" +
+          dataNodeThread = new Thread(dn, "DataNode: [" +
               StringUtils.arrayToString(dataDirs) + "]");
-          t.setDaemon(true); // needed for JUnit testing
-          t.start();
-          dataNodeThreadList.add(t);
+          dataNodeThread.setDaemon(true); // needed for JUnit testing
+          dataNodeThread.start();
         }
+        return dn;
     }
-    
-  /**
-   * Shut down all datanodes that where started via the 
-   * run(conf,networkLoc) method.
-   * Returns only after shutdown is complete.
-   */
-  public static void shutdownAll(){
-    while (!dataNodeList.isEmpty()) {
-      dataNodeList.remove(0).shutdown();
-      dataNodeThreadList.remove(0).interrupt();
-    }
-  }
 
   /** Start a single datanode daemon and wait for it to finish.
    *  If this thread is specifically interrupted, it will stop waiting.
@@ -1154,15 +1137,13 @@
       printUsage();
       return null;
     }
-    run(conf);
-    return (DataNode)dataNodeList.get(0);
+    return run(conf);
   }
 
   void join() {
-    if (dataNodeThreadList.size() > 0) {
-      Thread t = (Thread) dataNodeThreadList.remove(dataNodeThreadList.size()-1);
+    if (dataNodeThread != null) {
       try {
-        t.join();
+        dataNodeThread.join();
       } catch (InterruptedException e) {}
     }
   }
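
With the static thread list collapsed to a single dataNodeThread, run() now returns the DataNode it started, so callers such as createDataNode() can hold the instance directly. A minimal sketch against the signatures above:

    DataNode dn = DataNode.run(conf);  // returns the started DataNode, or null
    if (dn != null) {
      // ... use the running node ...
      dn.shutdown();  // shutdown() now also interrupts and joins dataNodeThread
    }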

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?view=diff&rev=526281&r1=526280&r2=526281
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Fri Apr  6 14:09:28 2007
@@ -816,7 +816,7 @@
     }
 
     static NameNode createNameNode( String argv[], 
-                                    Configuration conf ) throws Exception {
+                                    Configuration conf ) throws IOException {
       if( conf == null )
         conf = new Configuration();
       StartupOption startOpt = parseArguments( argv, conf );

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java?view=diff&rev=526281&r1=526280&r2=526281
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/Storage.java Fri Apr  6 14:09:28 2007
@@ -523,16 +523,4 @@
            + "-" + Integer.toString( storage.getLayoutVersion() )
            + "-" + Long.toString( storage.getCTime() );
   }
-  
-  /**
-   * @deprecated
-   * Provides conversion for deprecated DataNode constructor, should be removed
-   */
-  static AbstractList<File> makeListOfFiles( String[] dirs ) {
-    AbstractList<File> list = new ArrayList<File>( dirs.length );
-    for (int idx = 0; idx < dirs.length; idx++) {
-      list.add(new File(dirs[idx]));
-    }
-    return list;
-  }
 }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java?view=diff&rev=526281&r1=526280&r2=526281
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java Fri Apr  6 14:09:28 2007
@@ -19,389 +19,275 @@
 
 import java.io.*;
 import java.net.*;
+import java.util.ArrayList;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.dfs.FSConstants.StartupOption;
 import org.apache.hadoop.fs.*;
-import org.apache.hadoop.net.NetworkTopology;
 
 /**
  * This class creates a single-process DFS cluster for junit testing.
- * One thread is created for each server.
 * The data directories for DFS are under the testing directory.
- * @author Owen O'Malley
  */
 public class MiniDFSCluster {
 
   private Configuration conf;
-  int nDatanodes;
-  private Thread nameNodeThread;
-  private Thread dataNodeThreads[];
-  private NameNodeRunner nameNode;
-  private DataNodeRunner dataNodes[];
-
-  private int nameNodePort = 0;
-  private int nameNodeInfoPort = 0;
+  private NameNode nameNode;
+  private ArrayList<DataNode> dataNodes = new ArrayList<DataNode>();
+  private File base_dir;
+  private File data_dir;
+  
+  /**
+   * Modify the config and start up the servers with the given operation.
+   * Servers will be started on free ports.
+   * <p>
+   * The caller must manage the creation of NameNode and DataNode directories
+   * and have already set dfs.name.dir and dfs.data.dir in the given conf.
+   * 
+   * @param conf the base configuration to use in starting the servers.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param operation the operation with which to start the servers.  If null
+   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+   */
+  public MiniDFSCluster(Configuration conf,
+                        int numDataNodes,
+                        StartupOption nameNodeOperation) throws IOException {
+    this(0, conf, numDataNodes, false, false, nameNodeOperation, null);
+  }
   
   /**
-   * An inner class that runs a name node.
+   * Modify the config and start up the servers.  The rpc and info ports for
+   * servers are guaranteed to use free ports.
+   * <p>
+   * NameNode and DataNode directory creation and configuration will be
+   * managed by this class.
+   *
+   * @param conf the base configuration to use in starting the servers.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param format if true, format the NameNode and DataNodes before starting up
+   * @param racks array of strings indicating the rack that each DataNode is on
    */
-  class NameNodeRunner implements Runnable {
-    private NameNode node;
-    private volatile boolean isInitialized = false;
-    private boolean isCrashed = false;
-    private boolean isRunning = true;
+  public MiniDFSCluster(Configuration conf,
+                        int numDataNodes,
+                        boolean format,
+                        String[] racks) throws IOException {
+    this(0, conf, numDataNodes, format, true, null, racks);
+  }
 
-    public InetSocketAddress getAddress() {
-      return node.getNameNodeAddress();
-    }
+  /**
+   * NOTE: if possible, the other constructors should be used as they will
+   * ensure that the servers use free ports.
+   * <p>
+   * Modify the config and start up the servers.  
+   * 
+   * @param nameNodePort suggestion for which rpc port to use.  caller should
+   *          use getNameNodePort() to get the actual port used.
+   * @param conf the base configuration to use in starting the servers.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param format if true, format the NameNode and DataNodes before starting up
+   * @param manageDfsDirs if true, the data directories for servers will be
+   *          created and dfs.name.dir and dfs.data.dir will be set in the conf
+   * @param operation the operation with which to start the servers.  If null
+   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+   * @param racks array of strings indicating the rack that each DataNode is on
+   */
+  public MiniDFSCluster(int nameNodePort, 
+                        Configuration conf,
+                        int numDataNodes,
+                        boolean format,
+                        boolean manageDfsDirs,
+                        StartupOption operation,
+                        String[] racks) throws IOException {
+    this.conf = conf;
+    base_dir = new File(System.getProperty("test.build.data"), "dfs/");
+    data_dir = new File(base_dir, "data");
     
-    synchronized public boolean isInitialized() {
-      return isInitialized;
+    // Setup the NameNode configuration
+    conf.set("fs.default.name", "localhost:"+ Integer.toString(nameNodePort));
+    conf.setInt("dfs.info.port", 0);
+    if (manageDfsDirs) {
+    conf.set("dfs.name.dir", new File(base_dir, "name1").getPath()+","+
+        new File(base_dir, "name2").getPath());
     }
+    conf.setInt("dfs.replication", Math.min(3, numDataNodes));
+    conf.setInt("dfs.safemode.extension", 0);
     
-    synchronized public boolean isCrashed() {
-      return isCrashed;
-    }
-
-    public boolean isUp() {
-      if (node == null) {
-        return false;
-      }
-      try {
-        long[] sizes = node.getStats();
-        boolean isUp = false;
-        synchronized (this) {
-          isUp = (isInitialized && !node.isInSafeMode() && sizes[0] != 0);
+    // Format and clean out DataNode directories
+    if (format) {
+      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
+        throw new IOException("Cannot remove data directory: " + data_dir);
+      }
+      NameNode.format(conf); 
+    }
+    
+    // Start the NameNode
+    String[] args = (operation == null ||
+      operation == StartupOption.FORMAT ||
+      operation == StartupOption.REGULAR) ?
+        new String[] {} : new String[] {"-"+operation.toString()};
+    nameNode = NameNode.createNameNode(args, conf);
+    
+    // Start the DataNodes
+    startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks);
+    
+    if (numDataNodes > 0) {
+      while (!isClusterUp()) {
+        try {
+          System.err.println("Waiting for the Mini HDFS Cluster to start...");
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
         }
-        return isUp;
-      } catch (IOException ie) {
-        return false;
-      }
-    }
-    
-    /**
-     * Create the name node and run it.
-     */
-    public void run() {
-      try {
-        synchronized( this ) {
-          if( isRunning ) {
-            node = new NameNode(conf);
-          }
-          isInitialized = true;
-        }
-      } catch (Throwable e) {
-        shutdown();
-        System.err.println("Name node crashed:");
-        e.printStackTrace();
-        synchronized (this) {
-          isCrashed = true;
-        }
-      }
-    }
-    
-    /**
-     * Shutdown the name node and wait for it to finish.
-     */
-    public synchronized void shutdown() {
-      isRunning = false;
-      if (node != null) {
-        node.stop();
-        node.join();
-        node = null;
       }
     }
   }
   
   /**
-   * An inner class to run the data node.
-   */
-  class DataNodeRunner implements Runnable {
-    private DataNode node;
-    Configuration conf = null;
-    private boolean isRunning = true;
-    
-    public DataNodeRunner(Configuration conf, File dataDir, int index) {
-      this.conf = new Configuration(conf);
-      this.conf.set("dfs.data.dir",
-          new File(dataDir, "data"+(2*index+1)).getPath()+","+
-          new File(dataDir, "data"+(2*index+2)).getPath());
-    }
-    
-    public DataNodeRunner(Configuration conf, File dataDir, 
-        String networkLoc, int index) {
-        this(conf, dataDir, index);
-        this.conf.set("dfs.datanode.rack", networkLoc);
+   * Modify the config and start up the DataNodes.  The info port for
+   * DataNodes is guaranteed to use a free port.
+   *
+   * @param conf the base configuration to use in starting the DataNodes.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param manageDfsDirs if true, the data directories for DataNodes will be
+   *          created and dfs.data.dir will be set in the conf
+   * @param operation the operation with which to start the DataNodes.  If null
+   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+   * @param racks array of strings indicating the rack that each DataNode is on
+   *
+   * @throws IllegalStateException if NameNode has been shutdown
+   */
+  public void startDataNodes(Configuration conf, int numDataNodes, 
+                             boolean manageDfsDirs, StartupOption operation, 
+                             String[] racks) throws IOException {
+    if (nameNode == null) {
+      throw new IllegalStateException("NameNode is not running");
     }
+
+    // Set up the right ports for the datanodes
+    conf.setInt("dfs.datanode.info.port", 0);
+    InetSocketAddress nnAddr = nameNode.getNameNodeAddress(); 
+    int nameNodePort = nnAddr.getPort(); 
+    conf.set("fs.default.name", 
+      nnAddr.getHostName()+ ":" + Integer.toString(nameNodePort));
+    
+    String[] args = (operation == null ||
+      operation == StartupOption.FORMAT ||
+      operation == StartupOption.REGULAR) ?
+        new String[] {} : new String[] {"-"+operation.toString()};
         
-    /**
-     * Create and run the data node.
-     */
-    public void run() {
-      try {
-        String[] dirs = conf.getStrings("dfs.data.dir");
-        for (int idx = 0; idx < dirs.length; idx++) {
-          File dataDir = new File(dirs[idx]);
-          synchronized (DataNodeRunner.class) {
-            if (!dataDir.mkdirs()) {
-              if (!dataDir.isDirectory()) {
-                throw new RuntimeException("Mkdirs failed to create directory " +
-                    dataDir.toString());
-              }
-            }
-          }
-        }
-        synchronized (this){
-          if (isRunning) {
-            node = new DataNode(conf, conf.get("dfs.datanode.rack", 
-                NetworkTopology.DEFAULT_RACK), dirs);
-          }
+    for (int i = 0; i < numDataNodes; i++) {
+      Configuration dnConf = new Configuration(conf);
+      if (manageDfsDirs) {
+        File dir1 = new File(data_dir, "data"+(2*i+1));
+        File dir2 = new File(data_dir, "data"+(2*i+2));
+        dir1.mkdirs();
+        dir2.mkdirs();
+        if (!dir1.isDirectory() || !dir2.isDirectory()) { 
+          throw new IOException("Mkdirs failed to create directory for DataNode "
+            + i + ": " + dir1 + " or " + dir2);
         }
-        node.run();
-      } catch (Throwable e) {
-        shutdown();
-        System.err.println("Data node crashed:");
-        e.printStackTrace();
+        dnConf.set("dfs.data.dir", dir1.getPath() + "," + dir2.getPath()); 
       }
-    }
-
-    /**    
-     * Shut down the server and wait for it to finish.
-     */
-    public synchronized void shutdown() {
-      isRunning = false;
-      if (node != null) {
-        node.shutdown();
-        node = null;
+      if (racks != null && i < racks.length) {
+        dnConf.set("dfs.datanode.rack", racks[i]);
       }
+      System.out.println("Starting DataNode " + i + " with dfs.data.dir: " 
+        + dnConf.get("dfs.data.dir"));
+      dataNodes.add(DataNode.createDataNode(args, dnConf));
     }
   }
-
-  public MiniDFSCluster(Configuration conf,
-          int nDatanodes,
-          boolean formatNamenode,
-          String[] racks) throws IOException {
-    this(0, conf, nDatanodes, formatNamenode, racks);
-  }
   
   /**
-   * Create the config and start up the servers.  If either the rpc or info port is already 
-   * in use, we will try new ports.
-   * @param namenodePort suggestion for which rpc port to use.  caller should use 
-   *                     getNameNodePort() to get the actual port used.
-   * @param dataNodeFirst should the datanode be brought up before the namenode?
-   * @deprecated use {@link #MiniDFSCluster(Configuration, int, boolean, String[])}
-   */
-  public MiniDFSCluster(int namenodePort, 
-                        Configuration conf,
-                        boolean dataNodeFirst) throws IOException {
-    this(namenodePort, conf, 1, dataNodeFirst, true, null);
+   * If the NameNode is running, attempt to finalize a previous upgrade.
+   * When this method returns, the NameNode should be finalized, but
+   * DataNodes may not be, since that occurs asynchronously.
+   *
+   * @throws IllegalStateException if the NameNode is not running.
+   */
+  public void finalizeCluster(Configuration conf) throws Exception {
+    if (nameNode == null) {
+      throw new IllegalStateException("Attempting to finalize "
+        + "Namenode but it is not running");
+    }
+    new DFSAdmin().doMain(conf, new String[] {"-finalizeUpgrade"});
   }
-
+  
   /**
-   * Create the config and start up the only the namenode.  If either the rpc or info port is already 
-   * in use, we will try new ports.
-   * @param namenodePort suggestion for which rpc port to use.  caller should use 
-   *                     getNameNodePort() to get the actual port used.
-   * @deprecated use {@link #MiniDFSCluster(Configuration, int, boolean, String[])}                     
+   * Gets the started NameNode.  May be null.
    */
-  public MiniDFSCluster(int namenodePort, 
-                        Configuration conf,
-                        int numRetries,
-                        int numRetriesPerPort) throws IOException {
-    this(namenodePort, conf, 0, false, false, null);  
+  public NameNode getNameNode() {
+    return nameNode;
   }
-
+  
   /**
-   * Create the config and start up the servers.  If either the rpc or info port is already 
-   * in use, we will try new ports.
-   * @param namenodePort suggestion for which rpc port to use.  caller should use 
-   *                     getNameNodePort() to get the actual port used.
-   * @param nDatanodes Number of datanodes   
-   * @param dataNodeFirst should the datanode be brought up before the namenode?
-   * @deprecated use {@link #MiniDFSCluster(Configuration, int, boolean, String[])}
+   * Gets a list of the started DataNodes.  May be empty.
    */
-  public MiniDFSCluster(int namenodePort, 
-                        Configuration conf,
-                        int nDatanodes,
-                        boolean dataNodeFirst) throws IOException {
-    this(namenodePort, conf, nDatanodes, dataNodeFirst, true, null);
+  public ArrayList<DataNode> getDataNodes() {
+    return dataNodes;
   }
   
   /**
-   * Create the config and start up the servers.  If either the rpc or info port is already 
-   * in use, we will try new ports.
-   * @param namenodePort suggestion for which rpc port to use.  caller should use 
-   *                     getNameNodePort() to get the actual port used.
-   * @param nDatanodes Number of datanodes   
-   * @param dataNodeFirst should the datanode be brought up before the namenode?
-   * @param formatNamenode should the namenode be formatted before starting up ?
-   * @deprecated use {@link #MiniDFSCluster(Configuration, int, boolean, String[])}
-   */
-  public MiniDFSCluster(int namenodePort, 
-                        Configuration conf,
-                        int nDatanodes,
-                        boolean dataNodeFirst,
-                        boolean formatNamenode ) throws IOException {
-    this(namenodePort, conf, nDatanodes, dataNodeFirst, formatNamenode, null);
+   * Gets the rpc port used by the NameNode, because the
+   * caller-supplied port is not necessarily the actual port used.
+   */     
+  public int getNameNodePort() {
+    return nameNode.getNameNodeAddress().getPort();
   }
-
+    
   /**
-   * Create the config and start up the servers.  If either the rpc or info port is already 
-   * in use, we will try new ports.
-   * @param namenodePort suggestion for which rpc port to use.  caller should use 
-   *                     getNameNodePort() to get the actual port used.
-   * @param nDatanodes Number of datanodes   
-   * @param dataNodeFirst should the datanode be brought up before the namenode?
-   * @param formatNamenode should the namenode be formatted before starting up ?
-   * @param racks array of strings indicating racks that each datanode is on
-   * @deprecated use {@link #MiniDFSCluster(Configuration, int, String[])}
+   * Shut down the servers that are up.
    */
-  public MiniDFSCluster(int namenodePort, 
-                        Configuration conf,
-                        int nDatanodes,
-                        boolean dataNodeFirst,
-                        boolean formatNamenode,
-                        String[] racks) throws IOException {
-    this(namenodePort, conf, nDatanodes, 
-        ! cannotStartDataNodeFirst(dataNodeFirst) &&  
-        formatNamenode, racks);
+  public void shutdown() {
+    System.out.println("Shutting down the Mini HDFS Cluster");
+    shutdownDataNodes();
+    if (nameNode != null) {
+      nameNode.stop();
+      nameNode.join();
+      nameNode = null;
+    }
   }
-
+  
   /**
-   * NameNode should be always started first.
-   * Data-nodes need to handshake with the name-node before they can start.
-   * 
-   * @param dataNodeFirst should the datanode be brought up before the namenode?
-   * @return false if dataNodeFirst is false
-   * @throws IOException if dataNodeFirst is true
-   * 
-   * @deprecated should be removed when dataNodeFirst is gone.
+   * Shutdown all DataNodes started by this class.  The NameNode
+   * is left running so that new DataNodes may be started.
    */
-  private static boolean cannotStartDataNodeFirst( boolean dataNodeFirst 
-                                                  ) throws IOException {
-    if( dataNodeFirst )
-      throw new IOException( "NameNode should be always started first." );
-    return false;
+  public void shutdownDataNodes() {
+    for (int i = dataNodes.size()-1; i >= 0; i--) {
+      System.out.println("Shutting down DataNode " + i);
+      DataNode dn = dataNodes.remove(i);
+      dn.shutdown();
+    }
   }
-
+  
   /**
-   * Create the config and start up the servers.  If either the rpc or info port is already 
-   * in use, we will try new ports.
-   * @param namenodePort suggestion for which rpc port to use.  caller should use 
-   *                     getNameNodePort() to get the actual port used.
-   * @param nDatanodes Number of datanodes   
-   * @param formatNamenode should the namenode be formatted before starting up ?
-   * @param racks array of strings indicating racks that each datanode is on
+   * Returns true if the NameNode is running and is out of Safe Mode.
    */
-  public MiniDFSCluster(int namenodePort, 
-                        Configuration conf,
-                        int nDatanodes,
-                        boolean formatNamenode,
-                        String[] racks) throws IOException {
-    this.conf = conf;
-    
-    this.nDatanodes = nDatanodes;
-    this.nameNodePort = namenodePort;
-
-    this.conf.set("fs.default.name", "localhost:"+ Integer.toString(nameNodePort));
-    this.conf.setInt("dfs.info.port", nameNodeInfoPort);
-    this.conf.setInt("dfs.datanode.info.port", 0);
-
-    File base_dir = new File(System.getProperty("test.build.data"), "dfs/");
-    File data_dir = new File(base_dir, "data");
-    this.conf.set("dfs.name.dir", new File(base_dir, "name1").getPath()+","+
-        new File(base_dir, "name2").getPath());
-    this.conf.setInt("dfs.replication", Math.min(3, nDatanodes));
-    this.conf.setInt("dfs.safemode.extension", 0);
-
-    // Create the NameNode
-    StartupOption startOpt = 
-      formatNamenode ? StartupOption.FORMAT : StartupOption.REGULAR;
-    conf.setObject( "dfs.namenode.startup", startOpt );
-    conf.setObject( "dfs.datanode.startup", startOpt );
-    nameNode = new NameNodeRunner();
-    nameNodeThread = new Thread(nameNode);
-
-    //
-    // Start the MiniDFSCluster
-    //
-    // Start the namenode and wait for it to be initialized
-    nameNodeThread.start();
-    while (!nameNode.isCrashed() && !nameNode.isInitialized()) {
-      try {                                     // let daemons get started
-        System.err.println("Waiting for the NameNode to initialize...");
-        Thread.sleep(1000);
-      } catch(InterruptedException e) {
-      }
-      if (nameNode.isCrashed()) {
-        throw new RuntimeException("Namenode crashed");
-      }
+  public boolean isClusterUp() {
+    if (nameNode == null) {
+      return false;
     }
-    
-    // Set up the right ports for the datanodes
-    InetSocketAddress nnAddr = nameNode.getAddress(); 
-    nameNodePort = nnAddr.getPort(); 
-    this.conf.set("fs.default.name", nnAddr.getHostName()+ ":" + Integer.toString(nameNodePort));
-    
-    // Start the datanodes
-    startDataNodes(conf, racks, data_dir);
-    
-    while (!nameNode.isCrashed() && !nameNode.isUp()) {
-      try {                                     // let daemons get started
-        System.err.println("Waiting for the Mini HDFS Cluster to start...");
-        Thread.sleep(1000);
-      } catch(InterruptedException e) {
-      }
-    }
-    
-    if (nameNode.isCrashed()) {
-      throw new RuntimeException("Namenode crashed");
-    }
-  }
-
-  private void startDataNodes(Configuration conf, String[] racks, File data_dir) {
-    // Create the DataNodes & start them
-    dataNodes = new DataNodeRunner[nDatanodes];
-    dataNodeThreads = new Thread[nDatanodes];
-    for (int idx = 0; idx < nDatanodes; idx++) {
-      if( racks == null || idx >= racks.length) {
-        dataNodes[idx] = new DataNodeRunner(conf, data_dir, idx);
-      } else {
-        dataNodes[idx] = new DataNodeRunner(conf, data_dir, racks[idx], idx);          
-      }
-      dataNodeThreads[idx] = new Thread(dataNodes[idx]);
-      dataNodeThreads[idx].start();
+    try {
+      long[] sizes = nameNode.getStats();
+      boolean isUp = false;
+      synchronized (this) {
+        isUp = (!nameNode.isInSafeMode() && sizes[0] != 0);
+      }
+      return isUp;
+    } catch (IOException ie) {
+      return false;
     }
   }
   
   /**
-   * Returns the rpc port used by the mini cluster, because the caller supplied port is 
-   * not necessarily the actual port used.
-   */     
-  public int getNameNodePort() {
-    return nameNode.getAddress().getPort();
-  }
-    
-  /**
-   * Shut down the servers.
+   * Returns true if there is at least one DataNode running.
    */
-  public void shutdown() {
-    System.out.println("Shutting down the cluster");
-    for (int idx = 0; idx < nDatanodes; idx++) {
-      dataNodes[idx].shutdown();
-    }
-    nameNode.shutdown();
-    for (int idx = 0; idx < nDatanodes; idx++) {
-      try {
-        dataNodeThreads[idx].join();
-      } catch(InterruptedException e) {
-      }
-    }
-    try {
-      nameNodeThread.join();
-    } catch (InterruptedException e) {
+  public boolean isDataNodeUp() {
+    if (dataNodes == null || dataNodes.size() == 0) {
+      return false;
     }
+    return true;
   }
   
   /**
@@ -412,14 +298,14 @@
   }
 
   /**
-   * Get the directories where the namenode stores image
+   * Get the directories where the namenode stores its state.
    */
   public File[] getNameDirs() {
     return NameNode.getDirs(conf);
   }
 
    /**
-   * Wait till the cluster is active and running.
+   * Wait until the cluster is active and running.
    */
   public void waitActive() throws IOException {
     InetSocketAddress addr = new InetSocketAddress("localhost",
@@ -430,7 +316,7 @@
     // get initial state of datanodes
     //  
     DatanodeInfo[] oldinfo = client.datanodeReport();
-    while (oldinfo.length != nDatanodes) {
+    while (oldinfo.length != dataNodes.size()) {
       try {
         Thread.sleep(500);
       } catch (Exception e) {
@@ -448,7 +334,7 @@
       } catch (Exception e) {
       }
       DatanodeInfo[] info = client.datanodeReport();
-      if (info.length != nDatanodes) {
+      if (info.length != dataNodes.size()) {
         continue;
       }
       numdead = 0;
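
Taken together, the reworked class exposes a small lifecycle API; an end-to-end sketch (all names as defined in the class above, not a verbatim test):

    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    try {
      cluster.waitActive();                  // block until both DataNodes report in
      int port = cluster.getNameNodePort();  // actual rpc port, not the suggestion
      System.out.println("up=" + cluster.isClusterUp() + " port=" + port);
      cluster.shutdownDataNodes();           // NameNode stays up for new DataNodes
      cluster.startDataNodes(conf, 1, true, null, null);
    } finally {
      cluster.shutdown();
    }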

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java?view=diff&rev=526281&r1=526280&r2=526281
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java Fri Apr  6 14:09:28 2007
@@ -32,16 +32,15 @@
 /**
  * This test ensures the appropriate response from the system when 
  * the system is finalized.
- *
- * @author Nigel Daley
  */
 public class TestDFSFinalize extends TestCase {
  
   private static final Log LOG = LogFactory.getLog(
     "org.apache.hadoop.dfs.TestDFSFinalize");
-  Configuration conf;
+  private Configuration conf;
   private int testCounter = 0;
-  
+  private MiniDFSCluster cluster = null;
+    
   /**
    * Writes an INFO log message containing the parameters.
    */
@@ -94,22 +93,27 @@
       log("Finalize with existing previous dir",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.REGULAR,conf);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
-      UpgradeUtilities.startCluster(DATA_NODE,StartupOption.REGULAR,conf);
-      UpgradeUtilities.finalizeCluster(conf);
+      cluster = new MiniDFSCluster(conf,1,StartupOption.REGULAR);
+      cluster.finalizeCluster(conf);
       checkResult(nameNodeDirs, dataNodeDirs);
-      
+
       log("Finalize without existing previous dir",numDirs);
-      UpgradeUtilities.finalizeCluster(conf);
+      cluster.finalizeCluster(conf);
       checkResult(nameNodeDirs, dataNodeDirs);
-      UpgradeUtilities.stopCluster(null);
+
+      cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
     } // end numDir loop
   }
  
+  protected void tearDown() throws Exception {
+    LOG.info("Shutting down MiniDFSCluster");
+    if (cluster != null) cluster.shutdown();
+  }
+  
   public static void main(String[] args) throws Exception {
     new TestDFSFinalize().testFinalize();
   }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java?view=diff&rev=526281&r1=526280&r2=526281
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java Fri Apr  6 14:09:28 2007
@@ -27,21 +27,20 @@
 import static org.apache.hadoop.dfs.FSConstants.NodeType.NAME_NODE;
 import static org.apache.hadoop.dfs.FSConstants.NodeType.DATA_NODE;
 import org.apache.hadoop.dfs.FSConstants.StartupOption;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileUtil;
 
 /**
 * This test ensures the appropriate response (successful or failure) from
 * the system when the system is rolled back under various storage state and
 * version conditions.
-*
-* @author Nigel Daley
 */
 public class TestDFSRollback extends TestCase {
  
   private static final Log LOG = LogFactory.getLog(
     "org.apache.hadoop.dfs.TestDFSRollback");
-  Configuration conf;
+  private Configuration conf;
   private int testCounter = 0;
+  private MiniDFSCluster cluster = null;
   
   /**
    * Writes an INFO log message containing the parameters.
@@ -83,28 +82,29 @@
   }
  
   /**
-   * Starts the given nodeType with the given operation.  The remaining 
-   * parameters are used to verify the expected result.
-   * 
-   * @param nodeType must not be null
+   * Attempts to start a NameNode with the given operation.  Starting
+   * the NameNode should throw an exception.
    */
-  void runTest(NodeType nodeType, StartupOption operation, boolean shouldStart) 
-    throws Exception 
-  {
-    if (shouldStart) {
-      UpgradeUtilities.startCluster(nodeType, operation, conf);
-      UpgradeUtilities.stopCluster(nodeType);
-    } else {
-      try {
-        UpgradeUtilities.startCluster(nodeType, operation, conf); // should fail
-        throw new AssertionError("Cluster should have failed to start");
-      } catch (Exception expected) {
-        // expected
-        //expected.printStackTrace();
-        assertFalse(UpgradeUtilities.isNodeRunning(nodeType));
-      } finally {
-        UpgradeUtilities.stopCluster(nodeType);
-      }
+  void startNameNodeShouldFail(StartupOption operation) {
+    try {
+      cluster = new MiniDFSCluster(conf,0,operation); // should fail
+      throw new AssertionError("NameNode should have failed to start");
+    } catch (Exception expected) {
+      // expected
+    }
+  }
+  
+  /**
+   * Attempts to start a DataNode with the given operation.  Starting
+   * the DataNode should throw an exception.
+   */
+  void startDataNodeShouldFail(StartupOption operation) {
+    try {
+      cluster.startDataNodes(conf,1,false,operation,null); // should fail
+      throw new AssertionError("DataNode should have failed to start");
+    } catch (Exception expected) {
+      // expected
+      assertFalse(cluster.isDataNodeUp());
     }
   }
  
@@ -124,63 +124,64 @@
       log("Normal NameNode rollback",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      runTest(NAME_NODE, StartupOption.ROLLBACK, true);
+      cluster = new MiniDFSCluster(conf,0,StartupOption.ROLLBACK);
       checkResult(NAME_NODE, nameNodeDirs);
+      cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("Normal DataNode rollback",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.ROLLBACK,conf);
+      cluster = new MiniDFSCluster(conf,0,StartupOption.ROLLBACK);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
-      runTest(DATA_NODE, StartupOption.ROLLBACK, true);
+      cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
       checkResult(DATA_NODE, dataNodeDirs);
-      UpgradeUtilities.stopCluster(null);
+      cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
       log("NameNode rollback without existing previous dir",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      runTest(NAME_NODE, StartupOption.ROLLBACK, false);
+      startNameNodeShouldFail(StartupOption.ROLLBACK);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("DataNode rollback without existing previous dir",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.UPGRADE,conf);
+      cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
-      runTest(DATA_NODE, StartupOption.ROLLBACK, true);
-      UpgradeUtilities.stopCluster(null);
+      cluster.startDataNodes(conf,1,false,StartupOption.ROLLBACK,null);
+      cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
       log("DataNode rollback with future stored layout version in previous",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.ROLLBACK,conf);
+      cluster = new MiniDFSCluster(conf,0,StartupOption.ROLLBACK);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
       UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
         new StorageInfo(Integer.MIN_VALUE,
-                        UpgradeUtilities.getCurrentNamespaceID(),
-                        UpgradeUtilities.getCurrentFsscTime()));
-      runTest(DATA_NODE, StartupOption.ROLLBACK, false);
-      UpgradeUtilities.stopCluster(null);
+                        UpgradeUtilities.getCurrentNamespaceID(cluster),
+                        UpgradeUtilities.getCurrentFsscTime(cluster)));
+      startDataNodeShouldFail(StartupOption.ROLLBACK);
+      cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
       
       log("DataNode rollback with newer fsscTime in previous",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.ROLLBACK,conf);
+      cluster = new MiniDFSCluster(conf,0,StartupOption.ROLLBACK);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
       UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
         new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
-                        UpgradeUtilities.getCurrentNamespaceID(),
+                        UpgradeUtilities.getCurrentNamespaceID(cluster),
                         Long.MAX_VALUE));
-      runTest(DATA_NODE, StartupOption.ROLLBACK, false);
-      UpgradeUtilities.stopCluster(null);
+      startDataNodeShouldFail(StartupOption.ROLLBACK);
+      cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
@@ -188,18 +189,18 @@
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
       for (File f : baseDirs) { 
-        UpgradeUtilities.remove(new File(f,"edits"));
+        FileUtil.fullyDelete(new File(f,"edits"));
       }
-      runTest(NAME_NODE, StartupOption.ROLLBACK, false);
+      startNameNodeShouldFail(StartupOption.ROLLBACK);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("NameNode rollback with no image file",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
       for (File f : baseDirs) { 
-        UpgradeUtilities.remove(new File(f,"fsimage")); 
+        FileUtil.fullyDelete(new File(f,"fsimage")); 
       }
-      runTest(NAME_NODE, StartupOption.ROLLBACK, false);
+      startNameNodeShouldFail(StartupOption.ROLLBACK);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("NameNode rollback with corrupt version file",numDirs);
@@ -208,7 +209,7 @@
       for (File f : baseDirs) { 
         UpgradeUtilities.corruptFile(new File(f,"VERSION")); 
       }
-      runTest(NAME_NODE, StartupOption.ROLLBACK, false);
+      startNameNodeShouldFail(StartupOption.ROLLBACK);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("NameNode rollback with old layout version in previous",numDirs);
@@ -216,13 +217,18 @@
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
       UpgradeUtilities.createVersionFile(NAME_NODE,baseDirs,
         new StorageInfo(1,
-                        UpgradeUtilities.getCurrentNamespaceID(),
-                        UpgradeUtilities.getCurrentFsscTime()));
-      runTest(NAME_NODE, StartupOption.UPGRADE, false);
+                        UpgradeUtilities.getCurrentNamespaceID(null),
+                        UpgradeUtilities.getCurrentFsscTime(null)));
+      startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     } // end numDir loop
   }
  
+  protected void tearDown() throws Exception {
+    LOG.info("Shutting down MiniDFSCluster");
+    if (cluster != null) cluster.shutdown();
+  }
+  
   public static void main(String[] args) throws Exception {
     new TestDFSRollback().testRollback();
   }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java?view=diff&rev=526281&r1=526280&r2=526281
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java Fri Apr  6 14:09:28 2007
@@ -31,8 +31,6 @@
 /**
  * This test ensures the appropriate response (successful or failure) from 
  * a Datanode when the system is started with differing version combinations. 
- * 
- * @author Nigel Daley
  */
 public class TestDFSStartupVersions extends TestCase {
   
@@ -40,6 +38,7 @@
     "org.apache.hadoop.dfs.TestDFSStartupVersions");
   private static Path TEST_ROOT_DIR = new Path(
     System.getProperty("test.build.data","/tmp").toString().replace(' ', '+'));
+  private MiniDFSCluster cluster = null;
   
   /**
    * Writes an INFO log message containing the parameters.
@@ -69,10 +68,10 @@
     int layoutVersionOld = -3;
     int layoutVersionCur = UpgradeUtilities.getCurrentLayoutVersion();
     int layoutVersionNew = Integer.MIN_VALUE;
-    int namespaceIdCur = UpgradeUtilities.getCurrentNamespaceID();
+    int namespaceIdCur = UpgradeUtilities.getCurrentNamespaceID(null);
     int namespaceIdOld = Integer.MIN_VALUE;
     long fsscTimeOld = Long.MIN_VALUE;
-    long fsscTimeCur = UpgradeUtilities.getCurrentFsscTime();
+    long fsscTimeCur = UpgradeUtilities.getCurrentFsscTime(null);
     long fsscTimeNew = Long.MAX_VALUE;
     
     return new StorageInfo[] {
@@ -170,34 +169,35 @@
     StorageInfo[] versions = initializeVersions();
     UpgradeUtilities.createStorageDirs(
       NAME_NODE, conf.getStrings("dfs.name.dir"), "current");
-    UpgradeUtilities.startCluster(NAME_NODE,StartupOption.REGULAR,conf);
+    cluster = new MiniDFSCluster(conf,0,StartupOption.REGULAR);
     StorageInfo nameNodeVersion = new StorageInfo(
       UpgradeUtilities.getCurrentLayoutVersion(),
-      UpgradeUtilities.getCurrentNamespaceID(),
-      UpgradeUtilities.getCurrentFsscTime());
+      UpgradeUtilities.getCurrentNamespaceID(cluster),
+      UpgradeUtilities.getCurrentFsscTime(cluster));
     log("NameNode version info",NAME_NODE,null,nameNodeVersion);
-    try {
-      for (int i = 0; i < versions.length; i++) {
-        File[] storage = UpgradeUtilities.createStorageDirs(
-          DATA_NODE, conf.getStrings("dfs.data.dir"), "current");
-        log("DataNode version info",DATA_NODE,i,versions[i]);
-        UpgradeUtilities.createVersionFile(DATA_NODE, storage, versions[i]);
-        try {
-          UpgradeUtilities.startCluster(DATA_NODE,StartupOption.REGULAR,conf);
-        } catch (Exception ignore) {
-          // Ignore.  The asserts below will check for problems.
-          // ignore.printStackTrace();
-        }
-        assertTrue(UpgradeUtilities.isNodeRunning(NAME_NODE));
-        assertEquals(isVersionCompatible(nameNodeVersion, versions[i]),
-          UpgradeUtilities.isNodeRunning(DATA_NODE));
-        UpgradeUtilities.stopCluster(DATA_NODE);
+    for (int i = 0; i < versions.length; i++) {
+      File[] storage = UpgradeUtilities.createStorageDirs(
+        DATA_NODE, conf.getStrings("dfs.data.dir"), "current");
+      log("DataNode version info",DATA_NODE,i,versions[i]);
+      UpgradeUtilities.createVersionFile(DATA_NODE, storage, versions[i]);
+      try {
+        cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
+      } catch (Exception ignore) {
+        // Ignore.  The asserts below will check for problems.
+        // ignore.printStackTrace();
       }
-    } finally {
-      UpgradeUtilities.stopCluster(null);
+      assertTrue(cluster.getNameNode() != null);
+      assertEquals(isVersionCompatible(nameNodeVersion, versions[i]),
+        cluster.isDataNodeUp());
+      cluster.shutdownDataNodes();
     }
   }
-
+  
+  protected void tearDown() throws Exception {
+    LOG.info("Shutting down MiniDFSCluster");
+    if (cluster != null) cluster.shutdown();
+  }
+  
   public static void main(String[] args) throws Exception {
     new TestDFSStartupVersions().testVersions();
   }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java?view=diff&rev=526281&r1=526280&r2=526281
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java Fri Apr  6 14:09:28 2007
@@ -33,15 +33,14 @@
 * This test ensures the appropriate response (successful or failure) from
 * the system when the system is started under various storage state and
 * version conditions.
-*
-* @author Nigel Daley
 */
 public class TestDFSStorageStateRecovery extends TestCase {
  
   private static final Log LOG = LogFactory.getLog(
     "org.apache.hadoop.dfs.TestDFSStorageStateRecovery");
-  Configuration conf;
+  private Configuration conf;
   private int testCounter = 0;
+  private MiniDFSCluster cluster = null;
   
   /**
    * The test case table.  Each row represents a test case.  This table is
@@ -172,30 +171,6 @@
   }
  
   /**
-   * Does a regular start of the given nodeType.
-   * 
-   * @param nodeType must not be null
-   * @param indicates whether or not the node should start
-   */
-  void runTest(NodeType nodeType, boolean shouldStart) throws Exception {
-    if (shouldStart) {
-      UpgradeUtilities.startCluster(nodeType, StartupOption.REGULAR, conf);
-      UpgradeUtilities.stopCluster(nodeType);
-    } else {
-      try {
-        UpgradeUtilities.startCluster(nodeType, StartupOption.REGULAR, conf); // should fail
-        throw new AssertionError("Cluster should have failed to start");
-      } catch (Exception expected) {
-        // expected
-        //expected.printStackTrace();
-        assertFalse(UpgradeUtilities.isNodeRunning(nodeType));
-      } finally {
-        UpgradeUtilities.stopCluster(nodeType);
-      }
-    }
-  }
- 
-  /**
    * This test iterates over the testCases table and attempts
    * to startup the NameNode and DataNode normally.
    */
@@ -213,29 +188,49 @@
 
         log("NAME_NODE recovery",numDirs,i,testCase);
         baseDirs = createStorageState(NAME_NODE, testCase);
-        runTest(NAME_NODE, shouldRecover);
         if (shouldRecover) {
+          cluster = new MiniDFSCluster(conf,0,StartupOption.REGULAR);
           checkResult(NAME_NODE, baseDirs, curAfterRecover, prevAfterRecover);
+          cluster.shutdown();
+        } else {
+          try {
+            cluster = new MiniDFSCluster(conf,0,StartupOption.REGULAR);
+            throw new AssertionError("NameNode should have failed to start");
+          } catch (Exception expected) {
+            // expected
+          }
         }
         
         log("DATA_NODE recovery",numDirs,i,testCase);
         createStorageState(NAME_NODE, new boolean[] {true,true,false,false});
-        UpgradeUtilities.startCluster(NAME_NODE,StartupOption.REGULAR,conf);
+        cluster = new MiniDFSCluster(conf,0,StartupOption.REGULAR);
         baseDirs = createStorageState(DATA_NODE, testCase);
         if (!testCase[0] && !testCase[1] && !testCase[2] && !testCase[3]) {
           // DataNode will create and format current if no directories exist
-          runTest(DATA_NODE, true);
+          cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
         } else {
-          runTest(DATA_NODE, shouldRecover);
           if (shouldRecover) {
+            cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
             checkResult(DATA_NODE, baseDirs, curAfterRecover, prevAfterRecover);
+          } else {
+            try {
+              cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
+              throw new AssertionError("DataNode should have failed to start");
+            } catch (Exception expected) {
+              // expected
+            }
           }
         }
-        UpgradeUtilities.stopCluster(null);
+        cluster.shutdown();
       } // end testCases loop
     } // end numDirs loop
   }
  
+  protected void tearDown() throws Exception {
+    LOG.info("Shutting down MiniDFSCluster");
+    if (cluster != null) cluster.shutdown();
+  }
+  
   public static void main(String[] args) throws Exception {
     new TestDFSStorageStateRecovery().testStorageStates();
   }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java?view=diff&rev=526281&r1=526280&r2=526281
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java Fri Apr  6 14:09:28 2007
@@ -27,22 +27,21 @@
 import static org.apache.hadoop.dfs.FSConstants.NodeType.NAME_NODE;
 import static org.apache.hadoop.dfs.FSConstants.NodeType.DATA_NODE;
 import org.apache.hadoop.dfs.FSConstants.StartupOption;
-import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileUtil;
 
 /**
 * This test ensures the appropriate response (successful or failure) from
 * the system when the system is upgraded under various storage state and
 * version conditions.
-*
-* @author Nigel Daley
 */
 public class TestDFSUpgrade extends TestCase {
  
   private static final Log LOG = LogFactory.getLog(
     "org.apache.hadoop.dfs.TestDFSUpgrade");
-  Configuration conf;
+  private Configuration conf;
   private int testCounter = 0;
-  
+  private MiniDFSCluster cluster = null;
+    
   /**
    * Writes an INFO log message containing the parameters.
    */
@@ -89,28 +88,29 @@
   }
  
   /**
-   * Starts the given nodeType with the given operation.  The remaining 
-   * parameters are used to verify the expected result.
-   * 
-   * @param nodeType must not be null
+   * Attempts to start a NameNode with the given operation.  Starting
+   * the NameNode should throw an exception.
    */
-  void runTest(NodeType nodeType, StartupOption operation, boolean shouldStart) 
-    throws Exception 
-  {
-    if (shouldStart) {
-      UpgradeUtilities.startCluster(nodeType, operation, conf);
-      UpgradeUtilities.stopCluster(nodeType);
-    } else {
-      try {
-        UpgradeUtilities.startCluster(nodeType, operation, conf); // should fail
-        throw new AssertionError("Cluster should have failed to start");
-      } catch (Exception expected) {
-        // expected
-        //expected.printStackTrace();
-        assertFalse(UpgradeUtilities.isNodeRunning(nodeType));
-      } finally {
-        UpgradeUtilities.stopCluster(nodeType);
-      }
+  void startNameNodeShouldFail(StartupOption operation) {
+    try {
+      cluster = new MiniDFSCluster(conf,0,operation); // should fail
+      throw new AssertionError("NameNode should have failed to start");
+    } catch (Exception expected) {
+      // expected
+    }
+  }
+  
+  /**
+   * Attempts to start a DataNode with the given operation.  Starting
+   * the DataNode should throw an exception.
+   */
+  void startDataNodeShouldFail(StartupOption operation) {
+    try {
+      cluster.startDataNodes(conf,1,false,operation,null); // should fail
+      throw new AssertionError("DataNode should have failed to start");
+    } catch (Exception expected) {
+      // expected
+      assertFalse(cluster.isDataNodeUp());
     }
   }
  
@@ -129,77 +129,78 @@
       
       log("Normal NameNode upgrade",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      runTest(NAME_NODE, StartupOption.UPGRADE, true);
+      cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
       checkResult(NAME_NODE, nameNodeDirs);
+      cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("Normal DataNode upgrade",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.UPGRADE,conf);
+      cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
-      runTest(DATA_NODE, StartupOption.REGULAR, true);
+      cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
       checkResult(DATA_NODE, dataNodeDirs);
-      UpgradeUtilities.stopCluster(null);
+      cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
       
       log("NameNode upgrade with existing previous dir",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      runTest(NAME_NODE, StartupOption.UPGRADE, false);
+      startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("DataNode upgrade with existing previous dir",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.UPGRADE,conf);
+      cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
-      runTest(DATA_NODE, StartupOption.REGULAR, true);
+      cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
       checkResult(DATA_NODE, dataNodeDirs);
-      UpgradeUtilities.stopCluster(null);
+      cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
       log("DataNode upgrade with future stored layout version in current",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.UPGRADE,conf);
+      cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
         new StorageInfo(Integer.MIN_VALUE,
-                        UpgradeUtilities.getCurrentNamespaceID(),
-                        UpgradeUtilities.getCurrentFsscTime()));
-      runTest(DATA_NODE, StartupOption.REGULAR, false);
-      UpgradeUtilities.stopCluster(null);
+                        UpgradeUtilities.getCurrentNamespaceID(cluster),
+                        UpgradeUtilities.getCurrentFsscTime(cluster)));
+      startDataNodeShouldFail(StartupOption.REGULAR);
+      cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
       
       log("DataNode upgrade with newer fsscTime in current",numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.UPGRADE,conf);
+      cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
         new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
-                        UpgradeUtilities.getCurrentNamespaceID(),
+                        UpgradeUtilities.getCurrentNamespaceID(cluster),
                         Long.MAX_VALUE));
-      runTest(DATA_NODE, StartupOption.REGULAR, false);
-      UpgradeUtilities.stopCluster(null);
+      startDataNodeShouldFail(StartupOption.REGULAR);
+      cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
       log("NameNode upgrade with no edits file",numDirs);
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       for (File f : baseDirs) { 
-        UpgradeUtilities.remove(new File(f,"edits"));
+        FileUtil.fullyDelete(new File(f,"edits"));
       }
-      runTest(NAME_NODE, StartupOption.UPGRADE, false);
+      startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("NameNode upgrade with no image file",numDirs);
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       for (File f : baseDirs) { 
-        UpgradeUtilities.remove(new File(f,"fsimage")); 
+        FileUtil.fullyDelete(new File(f,"fsimage")); 
       }
-      runTest(NAME_NODE, StartupOption.UPGRADE, false);
+      startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("NameNode upgrade with corrupt version file",numDirs);
@@ -207,20 +208,25 @@
       for (File f : baseDirs) { 
         UpgradeUtilities.corruptFile(new File(f,"VERSION")); 
       }
-      runTest(NAME_NODE, StartupOption.UPGRADE, false);
+      startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("NameNode upgrade with future layout version in current",numDirs);
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createVersionFile(NAME_NODE,baseDirs,
         new StorageInfo(Integer.MIN_VALUE,
-                        UpgradeUtilities.getCurrentNamespaceID(),
-                        UpgradeUtilities.getCurrentFsscTime()));
-      runTest(NAME_NODE, StartupOption.UPGRADE, false);
+                        UpgradeUtilities.getCurrentNamespaceID(null),
+                        UpgradeUtilities.getCurrentFsscTime(null)));
+      startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
     } // end numDir loop
   }
  
+  protected void tearDown() throws Exception {
+    LOG.info("Shutting down MiniDFSCluster");
+    if (cluster != null) cluster.shutdown();
+  }
+    
   public static void main(String[] args) throws Exception {
     new TestDFSUpgrade().testUpgrade();
   }

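Taken together, the two helpers and the tearDown() override change how each scenario in testUpgrade() reads. A hedged sketch of one such scenario, composed only from calls visible in this diff (the method name and the use of conf.getStrings are assumptions, not part of the commit):

    // Sketch: one upgrade scenario written against the new helpers.
    // If the MiniDFSCluster constructor throws, the cluster field is
    // never assigned, and the null check in tearDown() handles cleanup.
    public void upgradeWithExistingPreviousDirShouldFail() throws Exception {
      conf = UpgradeUtilities.initializeStorageStateConf(1);
      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
      // An existing "previous" directory must make -upgrade refuse to run.
      startNameNodeShouldFail(StartupOption.UPGRADE);
      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
    }
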
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java?view=diff&rev=526281&r1=526280&r2=526281
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java Fri Apr  6 14:09:28 2007
@@ -47,13 +47,9 @@
  * block files).  The master directories are lazily created.  They are then
  * copied by the createStorageDirs() method to create new storage
  * directories of the appropriate type (Namenode or Datanode).
- * 
- * @author Nigel Daley
  */
 public class UpgradeUtilities {
 
-  // The fs.default.name configuration host:port value for the Namenode
-  private static final String NAMENODE_HOST = "localhost:0";
   // Root scratch directory on local filesystem 
   private static File TEST_ROOT_DIR = new File(
     System.getProperty("test.build.data","/tmp").toString().replace(' ', '+'));
@@ -69,25 +65,79 @@
   private static File datanodeStorage = new File(TEST_ROOT_DIR, "datanodeMaster");
   // A checksum of the contents in datanodeStorage directory
   private static long datanodeStorageChecksum;
-  // The NameNode started by this Utility class
-  private static NameNode namenode = null;
-  // The DataNode started by this Utility class
-  private static DataNode datanode = null;
   
   /**
    * Initialize the data structures used by this class.  
    * IMPORTANT NOTE: This method must be called once before calling 
    *                 any other public method on this class.  
+   * <p>
+   * Creates a singleton master storage directory, populated
+   * for a Namenode (edits, fsimage, version, and time files)
+   * and for a Datanode (version and block files).  This can be
+   * a lengthy operation.
    */
   public static void initialize() throws Exception {
     createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
-    initializeStorage();
+    Configuration config = new Configuration();
+    config.set("dfs.name.dir", namenodeStorage.toString());
+    config.set("dfs.data.dir", datanodeStorage.toString());
+    MiniDFSCluster cluster = null;
+    try {
+      // format data-node
+      createEmptyDirs(new String[] {datanodeStorage.toString()});
+      
+      // format and start NameNode and start DataNode
+      NameNode.format(config); 
+      cluster = new MiniDFSCluster(config,1,StartupOption.REGULAR);
+        
+      NameNode namenode = cluster.getNameNode();
+      namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
+      namenodeStorageFsscTime = namenode.versionRequest().getCTime();
+      
+      FileSystem fs = FileSystem.get(config);
+      Path baseDir = new Path("/TestUpgrade");
+      fs.mkdirs( baseDir );
+      
+      // write some files
+      int bufferSize = 4096;
+      byte[] buffer = new byte[bufferSize];
+      for( int i=0; i < bufferSize; i++ )
+        buffer[i] = (byte)('0' + i % 50);
+      writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
+      writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);
+      
+      // save image
+      namenode.getFSImage().saveFSImage();
+      namenode.getFSImage().getEditLog().open();
+      
+      // write more files
+      writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
+      writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
+    } finally {
+      // shutdown
+      if (cluster != null) cluster.shutdown();
+      FileUtil.fullyDelete(new File(namenodeStorage,"in_use.lock"));
+      FileUtil.fullyDelete(new File(datanodeStorage,"in_use.lock"));
+    }
+    namenodeStorageChecksum = checksumContents(
+      NAME_NODE, new File(namenodeStorage,"current"));
+    datanodeStorageChecksum = checksumContents(
+      DATA_NODE, new File(datanodeStorage,"current"));
+  }
+  
+  // Private helper method that writes a file to the given file system.
+  private static void writeFile(FileSystem fs, Path path, byte[] buffer,
+    int bufferSize ) throws IOException 
+  {
+    OutputStream out;
+    out = fs.create(path, true, bufferSize, (short) 1, 1024);
+    out.write( buffer, 0, bufferSize );
+    out.close();
   }
   
   /**
    * Initialize dfs.name.dir and dfs.data.dir with the specified number of
-   * directory entries. Also initialize fs.default.name and 
-   * dfs.blockreport.intervalMsec.
+   * directory entries. Also initialize dfs.blockreport.intervalMsec.
    */
   public static Configuration initializeStorageStateConf(int numDirs) {
     StringBuffer nameNodeDirs =
@@ -106,150 +156,14 @@
   }
   
   /**
-   * Starts the given type of node or all nodes.
-   *
-   * The UpgradeUtilities.initialize() method must be called once before
-   * calling this method.
-   *
-   * @param nodeType
-   *    The type of node to start.  If NAME_NODE, then one
-   *    Namenode is started.  If DATA_NODE, then one Datanode
-   *    is started.
-   * @param operation
-   *    The operation with which to startup the given type
-   *    node. FORMAT and null are treated as a REGULAR startup. If nodeType
-   *    if DATA_NODE, then UPGRADE is also treated as REGULAR.
-   * @param conf
-   *    The configuration to be used in starting the node.
-   *
-   * @throw IllegalStateException
-   *    If this method is called to start a
-   *    node that is already running.
-   */
-  public static void startCluster(NodeType nodeType, StartupOption operation, Configuration conf) throws Exception {
-    if (isNodeRunning(nodeType)) {
-      throw new IllegalStateException("Attempting to start "
-        + nodeType + " but it is already running");
-    }
-    if (nodeType == DATA_NODE && operation == StartupOption.UPGRADE) {
-      operation = StartupOption.REGULAR;
-    }
-    String[] args = (operation == null ||
-      operation == StartupOption.FORMAT ||
-      operation == StartupOption.REGULAR) ?
-        new String[] {} : new String[] {"-"+operation.toString()};
-    switch (nodeType) {
-      case NAME_NODE:
-        // Set up the right ports for the datanodes
-        conf.set("fs.default.name",NAMENODE_HOST);
-        namenode = NameNode.createNameNode(args, conf);
-        break;
-      case DATA_NODE:
-        if (namenode == null) {
-          throw new IllegalStateException("Attempting to start DATA_NODE "
-            + "but NAME_NODE is not running");
-        }
-        // Set up the right ports for the datanodes
-        InetSocketAddress nnAddr = namenode.getNameNodeAddress(); 
-        conf.set("fs.default.name", nnAddr.getHostName()+ ":" + nnAddr.getPort());
-        conf.setInt("dfs.info.port", 0);
-        conf.setInt("dfs.datanode.info.port", 0);
-        datanode = DataNode.createDataNode(args, conf);
-        break;
-    }
-  }
-  
-  /**
-   * Stops the given type of node or all nodes.
-   *
-   * The UpgradeUtilities.initialize() method must be called once before
-   * calling this method.
-   *
-   * @param nodeType
-   *    The type of node to stop if it is running. If null, then both
-   *    Namenode and Datanodes are stopped if they are running.
-   */
-  public static void stopCluster(NodeType nodeType) {
-    if (nodeType == NAME_NODE || nodeType == null) {
-      if (namenode != null) {
-        namenode.stop();
-      }
-      namenode = null;
-    }
-    if (nodeType == DATA_NODE || nodeType == null) {
-      if (datanode != null) {
-        datanode.shutdown(); 
-      }
-      DataNode.shutdownAll();
-      datanode = null;
-    }
-  }
-  
-  /**
-   * If the Namenode is running, attempt to finalize a previous upgrade.
-   * When this method return, the NameNode should be finalized, but
-   * DataNodes may not be since that occurs asynchronously.
-   *
-   * @throw IllegalStateException if the Namenode is not running.
-   */
-  public static void finalizeCluster(Configuration conf) throws Exception {
-    if (! isNodeRunning(NAME_NODE)) {
-      throw new IllegalStateException("Attempting to finalize "
-        + "Namenode but it is not running");
-    }
-    new DFSAdmin().doMain(conf, new String[] {"-finalizeUpgrade"});
-  }
-  
-  /**
-   * Determines if the given node type is currently running.
-   * If the node type is DATA_NODE, then all started Datanodes
-   * must be running in-order for this method to return
-   * <code>true</code>.
-   *
-   * The UpgradeUtilities.initialize() method must be called once before
-   * calling this method.
-   */
-  public static boolean isNodeRunning(NodeType nodeType) {
-    switch( nodeType ) {
-      case NAME_NODE:
-        return namenode != null;
-      case DATA_NODE:
-        return datanode != null;
-      default:
-        assert false : "Invalid node type: " + nodeType;
-    }
-    return false;
-  }
-  
-  /**
-   * Format the given directories.  This is equivalent to the Namenode
-   * formatting the given directories.  If a given directory already exists,
-   * it is first deleted; otherwise if it does not exist, it is first created.
-   *
-   * @throw IOException if unable to format one of the given dirs
-   */
-  public static void format(File... dirs) throws IOException {
-    String imageDirs = "";
-    for (int i = 0; i < dirs.length; i++) {
-      if( i == 0 )
-        imageDirs = dirs[i].getCanonicalPath();
-      else
-        imageDirs += "," + dirs[i].getCanonicalPath();
-    }
-    Configuration conf = new Configuration();
-    conf.set("dfs.name.dir", imageDirs);
-    NameNode.format(conf);
-  }
-  
-  /**
    * Create empty directories.  If a specified directory already exists
    * then it is first removed.
    */
-  public static void createEmptyDirs(String[] dirs) {
+  public static void createEmptyDirs(String[] dirs) throws IOException {
     for (String d : dirs) {
       File dir = new File(d);
       if (dir.exists()) {
-        remove(dir);
+        FileUtil.fullyDelete(dir);
       }
       dir.mkdirs();
     }
@@ -333,7 +247,21 @@
     for (int i = 0; i < parents.length; i++) {
       File newDir = new File(parents[i], dirName);
       createEmptyDirs(new String[] {newDir.toString()});
-      populateDir(nodeType, newDir);
+      LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
+      switch (nodeType) {
+        case NAME_NODE:
+          localFS.copyToLocalFile(
+            new Path(namenodeStorage.toString(), "current"),
+            new Path(newDir.toString()),
+            false);
+          break;
+        case DATA_NODE:
+          localFS.copyToLocalFile(
+            new Path(datanodeStorage.toString(), "current"),
+            new Path(newDir.toString()),
+            false);
+          break;
+      }
       retVal[i] = newDir;
     }
     return retVal;
@@ -343,8 +271,7 @@
    * Create a <code>version</code> file inside the specified parent
    * directory.  If such a file already exists, it will be overwritten.
    * The given version string will be written to the file as the layout
-   * version. If null, then the current layout version will be used.
-   * The parent and nodeType parameters must not be null.
+   * version. None of the parameters may be null.
    *
    * @param version
    *
@@ -353,20 +280,16 @@
   public static File[] createVersionFile(NodeType nodeType, File[] parent,
     StorageInfo version) throws IOException 
   {
-    if (version == null)
-      version = getCurrentNamespaceInfo();
     Storage storage = null;
     File[] versionFiles = new File[parent.length];
     for (int i = 0; i < parent.length; i++) {
       File versionFile = new File(parent[i], "VERSION");
-      remove(versionFile);
+      FileUtil.fullyDelete(versionFile);
       switch (nodeType) {
         case NAME_NODE:
-          System.out.println("HERE");
           storage = new FSImage( version );
           break;
         case DATA_NODE:
-                  System.out.println("HERE2");
           storage = new DataStorage( version, "doNotCare" );
           break;
       }
@@ -378,21 +301,6 @@
   }
   
   /**
-   * Remove the specified file.  If the given file is a directory,
-   * then the directory and all its contents will be removed.
-   */
-  public static boolean remove(File file) {
-    try {
-      boolean retVal = FileUtil.fullyDelete(file);
-      return retVal;
-    } catch (IOException ioe) {
-      // this should never happen
-      throw new IllegalStateException(
-        "WHAT? FileUtil.fullyDelete threw and IOException?",ioe);
-    }
-  }
-  
-  /**
    * Corrupt the specified file.  Some random bytes within the file
    * will be changed to some random values.
    *
@@ -416,15 +324,6 @@
   }
   
   /**
-   * Retrieve the current NamespaceInfo object from a running Namenode.
-   */
-  public static NamespaceInfo getCurrentNamespaceInfo() throws IOException {
-    if (isNodeRunning(NAME_NODE))
-      return namenode.versionRequest();
-    return null;
-  }
-  
-  /**
    * Return the layout version inherent in the current version
    * of the Namenode, whether it is running or not.
    */
@@ -440,9 +339,9 @@
    * The UpgradeUtilities.initialize() method must be called once before
    * calling this method.
    */
-  public static int getCurrentNamespaceID() throws IOException {
-    if (isNodeRunning(NAME_NODE)) {
-      return namenode.versionRequest().getNamespaceID();
+  public static int getCurrentNamespaceID(MiniDFSCluster cluster) throws IOException {
+    if (cluster != null) {
+      return cluster.getNameNode().versionRequest().getNamespaceID();
     }
     return namenodeStorageNamespaceID;
   }
@@ -455,120 +354,11 @@
    * The UpgradeUtilities.initialize() method must be called once before
    * calling this method.
    */
-  public static long getCurrentFsscTime() throws IOException {
-    if (isNodeRunning(NAME_NODE)) {
-      return namenode.versionRequest().getCTime();
+  public static long getCurrentFsscTime(MiniDFSCluster cluster) throws IOException {
+    if (cluster != null) {
+      return cluster.getNameNode().versionRequest().getCTime();
     }
     return namenodeStorageFsscTime;
   }
-  
-  /**********************************************************************
-   ********************* PRIVATE METHODS ********************************
-   *********************************************************************/
-  
-  /**
-   * Populates the given directory with valid version, edits, and fsimage
-   * files.  The version file will contain the current layout version.
-   *
-   * The UpgradeUtilities.initialize() method must be called once before
-   * calling this method.
-   *
-   * @throw IllegalArgumentException if dir does not already exist
-   */
-  private static void populateDir(NodeType nodeType, File dir) throws Exception {
-    if (!dir.exists()) {
-      throw new IllegalArgumentException(
-        "Given argument is not an existing directory:" + dir);
-    }
-    LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
-    switch (nodeType) {
-      case NAME_NODE:
-        localFS.copyToLocalFile(
-          new Path(namenodeStorage.toString(), "current"),
-          new Path(dir.toString()),
-          false);
-        break;
-      case DATA_NODE:
-        localFS.copyToLocalFile(
-          new Path(datanodeStorage.toString(), "current"),
-          new Path(dir.toString()),
-          false);
-        break;
-    }
-  }
-  
-  static void writeFile(FileSystem fs,
-    Path path,
-    byte[] buffer,
-    int bufferSize ) throws IOException {
-    OutputStream out;
-    out = fs.create(path, true, bufferSize, (short) 1, 1024);
-    out.write( buffer, 0, bufferSize );
-    out.close();
-  }
-  
-  /**
-   * Creates a singleton master populated storage
-   * directory for a Namenode (contains edits, fsimage,
-   * version, and time files) and a Datanode (contains version and
-   * block files).  This can be a lengthy operation.
-   *
-   * @param conf must not be null.  These properties will be set:
-   *    fs.default.name
-   *    dfs.name.dir
-   *    dfs.data.dir
-   */
-  private static void initializeStorage() throws Exception {
-    Configuration config = new Configuration();
-    config.set("fs.default.name",NAMENODE_HOST);
-    config.set("dfs.name.dir", namenodeStorage.toString());
-    config.set("dfs.data.dir", datanodeStorage.toString());
-
-    try {
-      // format data-node
-      createEmptyDirs(new String[] {datanodeStorage.toString()});
-
-      // format name-node
-      NameNode.format(config);
-      
-      // start name-node
-      startCluster(NAME_NODE, null, config);
-      namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
-      namenodeStorageFsscTime = namenode.versionRequest().getCTime();
-      
-      // start data-node
-      startCluster(DATA_NODE, null, config);
-      
-      FileSystem fs = FileSystem.get(config);
-      Path baseDir = new Path("/TestUpgrade");
-      fs.mkdirs( baseDir );
-      
-      // write some files
-      int bufferSize = 4096;
-      byte[] buffer = new byte[bufferSize];
-      for( int i=0; i < bufferSize; i++ )
-        buffer[i] = (byte)('0' + i % 50);
-      writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
-      writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);
-      
-      // save image
-      namenode.getFSImage().saveFSImage();
-      namenode.getFSImage().getEditLog().open();
-      
-      // write more files
-      writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
-      writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
-    } finally {
-      // shutdown
-      stopCluster(null);
-      remove(new File(namenodeStorage,"in_use.lock"));
-      remove(new File(datanodeStorage,"in_use.lock"));
-    }
-    namenodeStorageChecksum = checksumContents(
-      NAME_NODE, new File(namenodeStorage,"current"));
-    datanodeStorageChecksum = checksumContents(
-      DATA_NODE, new File(datanodeStorage,"current"));
-  }
-
 }
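
With startCluster/stopCluster and the node-tracking fields removed, UpgradeUtilities is now a pure storage-fixture library: callers own the cluster lifecycle and pass their MiniDFSCluster (or null) into the accessors. A short sketch of the resulting call pattern, assuming only the signatures shown above:

    // Sketch: querying namespace info through the new accessors.
    // Passing the live cluster asks the running NameNode; passing
    // null returns the values cached by UpgradeUtilities.initialize().
    UpgradeUtilities.initialize();
    Configuration conf = UpgradeUtilities.initializeStorageStateConf(1);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
    try {
      int liveID    = UpgradeUtilities.getCurrentNamespaceID(cluster);
      long liveTime = UpgradeUtilities.getCurrentFsscTime(cluster);
      // ... exercise an upgrade or rollback scenario here ...
    } finally {
      cluster.shutdown();
    }
    int cachedID = UpgradeUtilities.getCurrentNamespaceID(null);  // no cluster needed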