Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2007/04/03 23:39:27 UTC

svn commit: r525290 [3/3] - in /lucene/hadoop/trunk: ./ bin/ src/java/org/apache/hadoop/dfs/ src/java/org/apache/hadoop/fs/ src/test/org/apache/hadoop/dfs/

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FileUtil.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FileUtil.java?view=diff&rev=525290&r1=525289&r2=525290
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FileUtil.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/fs/FileUtil.java Tue Apr  3 14:39:25 2007
@@ -24,6 +24,7 @@
 import java.util.zip.ZipFile;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * A collection of file-processing util methods
@@ -318,6 +319,65 @@
     }
   }
   
+  /**
+   * Class for creating hardlinks.
+   * Supports Unix, Cygwin, Windows XP.
+   *  
+   * @author Konstantin Shvachko
+   */
+  public static class HardLink { 
+    enum OSType {
+      OS_TYPE_UNIX, 
+      OS_TYPE_WINXP; 
+    }
+  
+    private static String[] hardLinkCommand;
+    
+    static {
+      switch( getOSType() ) {
+      case OS_TYPE_WINXP:
+        hardLinkCommand = new String[] {"fsutil","hardlink","create",null,null};
+        break;
+      case OS_TYPE_UNIX:
+      default:
+        hardLinkCommand = new String[] {"ln",null,null};
+      }
+    }
+
+    static OSType getOSType() {
+      String osName = System.getProperty("os.name");
+      if( osName.indexOf( "Windows") >= 0 && 
+          (osName.indexOf( "XP") >= 0 || osName.indexOf( "2003") >= 0 ) )
+        return OSType.OS_TYPE_WINXP;
+      else
+        return OSType.OS_TYPE_UNIX;
+    }
+    
+    public static void createHardLink(File target, 
+                                      File linkName ) throws IOException {
+      int len = hardLinkCommand.length;
+      hardLinkCommand[len-2] = target.getCanonicalPath();
+      hardLinkCommand[len-1] = linkName.getCanonicalPath();
+      // execute shell command
+      Process process = Runtime.getRuntime().exec( hardLinkCommand );
+      try {
+        if (process.waitFor() != 0) {
+          String errMsg = new BufferedReader(new InputStreamReader(
+              process.getErrorStream())).readLine();
+          if( errMsg == null )  errMsg = "";
+          String outMsg = new BufferedReader(new InputStreamReader(
+              process.getInputStream())).readLine();
+          if( outMsg == null )  outMsg = "";
+          throw new IOException( errMsg + outMsg );
+        }
+      } catch (InterruptedException e) {
+        throw new IOException( StringUtils.stringifyException( e ));
+      } finally {
+        process.destroy();
+      }
+    }
+  }
+
   /**
    * Create a soft link between a src and destination
    * only on a local disk. HDFS does not support this

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java?view=diff&rev=525290&r1=525289&r2=525290
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java Tue Apr  3 14:39:25 2007
@@ -219,8 +219,8 @@
 
     int nameNodePort = 9000 + testCycleNumber++; // ToDo: settable base port
     String nameNodeSocketAddr = "localhost:" + nameNodePort;
-    NameNode nameNodeDaemon = new NameNode(new File[] { new File(nameFSDir) },
-        "localhost", nameNodePort, conf);
+    conf.set("dfs.name.dir", nameFSDir);
+    NameNode nameNodeDaemon = new NameNode("localhost", nameNodePort, conf);
     DFSClient dfsClient = null;
     try {
       //
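
The same conversion applies wherever a NameNode was previously handed an
explicit File[] of name directories: the directories now come from the
configuration. A minimal sketch of the new pattern (the path is illustrative):

    Configuration conf = new Configuration();
    conf.set("dfs.name.dir", "/tmp/hadoop/dfs/name");  // illustrative path
    NameNode nameNode = new NameNode("localhost", 9000, conf);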

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java?view=diff&rev=525290&r1=525289&r2=525290
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java Tue Apr  3 14:39:25 2007
@@ -352,8 +352,7 @@
 	
     NameNode.format(conf);
     
-    nameNodeDaemon = new NameNode(new File[] { new File(nameFSDir) },
-        "localhost", nameNodePort, conf);
+    nameNodeDaemon = new NameNode("localhost", nameNodePort, conf);
 
      //
       //        start DataNodes

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java?view=diff&rev=525290&r1=525289&r2=525290
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java Tue Apr  3 14:39:25 2007
@@ -20,6 +20,7 @@
 import java.io.*;
 import java.net.*;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.FSConstants.StartupOption;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.net.NetworkTopology;
 
@@ -180,7 +181,7 @@
           int nDatanodes,
           boolean formatNamenode,
           String[] racks) throws IOException {
-    this(0, conf, nDatanodes, false, formatNamenode, racks);
+    this(0, conf, nDatanodes, formatNamenode, racks);
   }
   
   /**
@@ -254,6 +255,7 @@
    * @param dataNodeFirst should the datanode be brought up before the namenode?
    * @param formatNamenode should the namenode be formatted before starting up ?
    * @param racks array of strings indicating racks that each datanode is on
+   * @deprecated use {@link #MiniDFSCluster(int, Configuration, int, boolean, String[])}
    */
   public MiniDFSCluster(int namenodePort, 
                         Configuration conf,
@@ -261,7 +263,42 @@
                         boolean dataNodeFirst,
                         boolean formatNamenode,
                         String[] racks) throws IOException {
+    this(namenodePort, conf, nDatanodes, 
+        ! cannotStartDataNodeFirst(dataNodeFirst) &&  
+        formatNamenode, racks);
+  }
+
+  /**
+   * The NameNode should always be started first.
+   * Data-nodes need to handshake with the name-node before they can start.
+   * 
+   * @param dataNodeFirst should the datanode be brought up before the namenode?
+   * @return false if dataNodeFirst is false
+   * @throws IOException if dataNodeFirst is true
+   * 
+   * @deprecated should be removed when dataNodeFirst is gone.
+   */
+  private static boolean cannotStartDataNodeFirst( boolean dataNodeFirst 
+                                                  ) throws IOException {
+    if( dataNodeFirst )
+      throw new IOException( "NameNode should always be started first." );
+    return false;
+  }
 
+  /**
+   * Create the config and start up the servers.  If either the rpc or info port is already 
+   * in use, we will try new ports.
+   * @param namenodePort suggestion for which rpc port to use.  caller should use 
+   *                     getNameNodePort() to get the actual port used.
+   * @param nDatanodes Number of datanodes   
+   * @param formatNamenode should the namenode be formatted before starting up ?
+   * @param racks array of strings indicating racks that each datanode is on
+   */
+  public MiniDFSCluster(int namenodePort, 
+                        Configuration conf,
+                        int nDatanodes,
+                        boolean formatNamenode,
+                        String[] racks) throws IOException {
     this.conf = conf;
     
     this.nDatanodes = nDatanodes;
@@ -279,18 +316,16 @@
     this.conf.setInt("dfs.safemode.extension", 0);
 
     // Create the NameNode
-    if (formatNamenode) { NameNode.format(conf); }
+    StartupOption startOpt = 
+      formatNamenode ? StartupOption.FORMAT : StartupOption.REGULAR;
+    conf.setObject( "dfs.namenode.startup", startOpt );
+    conf.setObject( "dfs.datanode.startup", startOpt );
     nameNode = new NameNodeRunner();
     nameNodeThread = new Thread(nameNode);
 
     //
     // Start the MiniDFSCluster
     //
-    
-    if (dataNodeFirst) {
-      startDataNodes(conf, racks, data_dir);
-    }
-    
     // Start the namenode and wait for it to be initialized
     nameNodeThread.start();
     while (!nameNode.isCrashed() && !nameNode.isInitialized()) {
@@ -310,9 +345,7 @@
     this.conf.set("fs.default.name", nnAddr.getHostName()+ ":" + Integer.toString(nameNodePort));
     
     // Start the datanodes
-    if (!dataNodeFirst) {
-      startDataNodes(conf, racks, data_dir);
-    }
+    startDataNodes(conf, racks, data_dir);
     
     while (!nameNode.isCrashed() && !nameNode.isUp()) {
       try {                                     // let daemons get started
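
A minimal sketch of starting a test cluster through the surviving constructor
(the port and datanode count are illustrative; shutdown() is the existing
MiniDFSCluster teardown method):

    Configuration conf = new Configuration();
    // The port is only a suggestion; call getNameNodePort() for the real one.
    MiniDFSCluster cluster = new MiniDFSCluster(65312, conf, 2, true, null);
    try {
      // formatNamenode=true is translated internally into StartupOption.FORMAT
      // via the "dfs.namenode.startup" and "dfs.datanode.startup" keys.
      // ... exercise the cluster here ...
    } finally {
      cluster.shutdown();
    }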

Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java?view=auto&rev=525290
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSFinalize.java Tue Apr  3 14:39:25 2007
@@ -0,0 +1,119 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.dfs;
+
+import java.io.File;
+import java.io.IOException;
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.FSConstants.NodeType;
+import static org.apache.hadoop.dfs.FSConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.dfs.FSConstants.NodeType.DATA_NODE;
+import org.apache.hadoop.dfs.FSConstants.StartupOption;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * This test ensures the appropriate response from the system when 
+ * the system is finalized.
+ *
+ * @author Nigel Daley
+ */
+public class TestDFSFinalize extends TestCase {
+ 
+  private static final Log LOG = LogFactory.getLog(
+    "org.apache.hadoop.dfs.TestDFSFinalize");
+  Configuration conf;
+  private int testCounter = 0;
+  
+  /**
+   * Writes an INFO log message containing the parameters.
+   */
+  void log(String label, int numDirs) {
+    LOG.info("============================================================");
+    LOG.info("***TEST " + (testCounter++) + "*** " 
+      + label + ":"
+      + " numDirs="+numDirs);
+  }
+  
+  /**
+   * Verify that the current directory exists and that the previous directory
+   * does not exist.  Verify that current hasn't been modified by comparing 
+   * the checksum of all of its files with their original checksums.
+   * Note that we do not check that previous is removed on the DataNode
+   * because its removal is asynchronous; therefore we have no reliable
+   * way to know when it will happen.  
+   */
+  void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws IOException {
+    for (int i = 0; i < nameNodeDirs.length; i++) {
+      assertTrue(new File(nameNodeDirs[i],"current").isDirectory());
+      assertTrue(new File(nameNodeDirs[i],"current/VERSION").isFile());
+      assertTrue(new File(nameNodeDirs[i],"current/edits").isFile());
+      assertTrue(new File(nameNodeDirs[i],"current/fsimage").isFile());
+      assertTrue(new File(nameNodeDirs[i],"current/fstime").isFile());
+    }
+    for (int i = 0; i < dataNodeDirs.length; i++) {
+      assertEquals(
+        UpgradeUtilities.checksumContents(
+          DATA_NODE, new File(dataNodeDirs[i],"current")),
+        UpgradeUtilities.checksumMasterContents(DATA_NODE));
+    }
+    for (int i = 0; i < nameNodeDirs.length; i++) {
+      assertFalse(new File(nameNodeDirs[i],"previous").isDirectory());
+    }
+  }
+ 
+  /**
+   * This test attempts to finalize the NameNode and DataNode.
+   */
+  public void testFinalize() throws Exception {
+    File[] baseDirs;
+    UpgradeUtilities.initialize();
+    
+    for (int numDirs = 1; numDirs <= 2; numDirs++) {
+      conf = UpgradeUtilities.initializeStorageStateConf(numDirs);
+      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
+      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
+      
+      log("Finalize with existing previous dir",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.REGULAR,conf);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
+      UpgradeUtilities.startCluster(DATA_NODE,StartupOption.REGULAR,conf);
+      UpgradeUtilities.finalizeCluster(conf);
+      checkResult(nameNodeDirs, dataNodeDirs);
+      
+      log("Finalize without existing previous dir",numDirs);
+      UpgradeUtilities.finalizeCluster(conf);
+      checkResult(nameNodeDirs, dataNodeDirs);
+      UpgradeUtilities.stopCluster(null);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+    } // end numDir loop
+  }
+ 
+  public static void main(String[] args) throws Exception {
+    new TestDFSFinalize().testFinalize();
+  }
+  
+}
+
+

Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java?view=auto&rev=525290
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSRollback.java Tue Apr  3 14:39:25 2007
@@ -0,0 +1,232 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.dfs;
+
+import java.io.File;
+import java.io.IOException;
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.FSConstants.NodeType;
+import static org.apache.hadoop.dfs.FSConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.dfs.FSConstants.NodeType.DATA_NODE;
+import org.apache.hadoop.dfs.FSConstants.StartupOption;
+import org.apache.hadoop.fs.Path;
+
+/**
+* This test ensures the appropriate response (successful or failure) from
+* the system when the system is rolled back under various storage state and
+* version conditions.
+*
+* @author Nigel Daley
+*/
+public class TestDFSRollback extends TestCase {
+ 
+  private static final Log LOG = LogFactory.getLog(
+    "org.apache.hadoop.dfs.TestDFSRollback");
+  Configuration conf;
+  private int testCounter = 0;
+  
+  /**
+   * Writes an INFO log message containing the parameters.
+   */
+  void log(String label, int numDirs) {
+    LOG.info("============================================================");
+    LOG.info("***TEST " + (testCounter++) + "*** " 
+      + label + ":"
+      + " numDirs="+numDirs);
+  }
+  
+  /**
+   * Verify that the new current directory is the old previous.  
+   * It is assumed that the server has recovered and rolled back.
+   */
+  void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
+    switch (nodeType) {
+      case NAME_NODE:
+        for (int i = 0; i < baseDirs.length; i++) {
+          assertTrue(new File(baseDirs[i],"current").isDirectory());
+          assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
+          assertTrue(new File(baseDirs[i],"current/edits").isFile());
+          assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
+          assertTrue(new File(baseDirs[i],"current/fstime").isFile());
+        }
+        break;
+      case DATA_NODE:
+        for (int i = 0; i < baseDirs.length; i++) {
+          assertEquals(
+            UpgradeUtilities.checksumContents(
+              nodeType, new File(baseDirs[i],"current")),
+            UpgradeUtilities.checksumMasterContents(nodeType));
+        }
+        break;
+    }
+    for (int i = 0; i < baseDirs.length; i++) {
+      assertFalse(new File(baseDirs[i],"previous").isDirectory());
+    }
+  }
+ 
+  /**
+   * Starts the given nodeType with the given operation.  The remaining 
+   * parameters are used to verify the expected result.
+   * 
+   * @param nodeType must not be null
+   */
+  void runTest(NodeType nodeType, StartupOption operation, boolean shouldStart) 
+    throws Exception 
+  {
+    if (shouldStart) {
+      UpgradeUtilities.startCluster(nodeType, operation, conf);
+      UpgradeUtilities.stopCluster(nodeType);
+    } else {
+      try {
+        UpgradeUtilities.startCluster(nodeType, operation, conf); // should fail
+        throw new AssertionError("Cluster should have failed to start");
+      } catch (Exception expected) {
+        // expected
+        //expected.printStackTrace();
+        assertFalse(UpgradeUtilities.isNodeRunning(nodeType));
+      } finally {
+        UpgradeUtilities.stopCluster(nodeType);
+      }
+    }
+  }
+ 
+  /**
+   * This test attempts to rollback the NameNode and DataNode under
+   * a number of valid and invalid conditions.
+   */
+  public void testRollback() throws Exception {
+    File[] baseDirs;
+    UpgradeUtilities.initialize();
+    
+    for (int numDirs = 1; numDirs <= 2; numDirs++) {
+      conf = UpgradeUtilities.initializeStorageStateConf(numDirs);
+      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
+      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
+      
+      log("Normal NameNode rollback",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      runTest(NAME_NODE, StartupOption.ROLLBACK, true);
+      checkResult(NAME_NODE, nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("Normal DataNode rollback",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.ROLLBACK,conf);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
+      runTest(DATA_NODE, StartupOption.ROLLBACK, true);
+      checkResult(DATA_NODE, dataNodeDirs);
+      UpgradeUtilities.stopCluster(null);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+      log("NameNode rollback without existing previous dir",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      runTest(NAME_NODE, StartupOption.ROLLBACK, false);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("DataNode rollback without existing previous dir",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.UPGRADE,conf);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      runTest(DATA_NODE, StartupOption.ROLLBACK, true);
+      UpgradeUtilities.stopCluster(null);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+      log("DataNode rollback with future stored layout version in previous",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.ROLLBACK,conf);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
+      UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
+        new StorageInfo(Integer.MIN_VALUE,
+                        UpgradeUtilities.getCurrentNamespaceID(),
+                        UpgradeUtilities.getCurrentFsscTime()));
+      runTest(DATA_NODE, StartupOption.ROLLBACK, false);
+      UpgradeUtilities.stopCluster(null);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+      
+      log("DataNode rollback with newer fsscTime in previous",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.ROLLBACK,conf);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
+      UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
+        new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
+                        UpgradeUtilities.getCurrentNamespaceID(),
+                        Long.MAX_VALUE));
+      runTest(DATA_NODE, StartupOption.ROLLBACK, false);
+      UpgradeUtilities.stopCluster(null);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+      log("NameNode rollback with no edits file",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      for (File f : baseDirs) { 
+        UpgradeUtilities.remove(new File(f,"edits"));
+      }
+      runTest(NAME_NODE, StartupOption.ROLLBACK, false);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode rollback with no image file",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      for (File f : baseDirs) { 
+        UpgradeUtilities.remove(new File(f,"fsimage")); 
+      }
+      runTest(NAME_NODE, StartupOption.ROLLBACK, false);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode rollback with corrupt version file",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      for (File f : baseDirs) { 
+        UpgradeUtilities.corruptFile(new File(f,"VERSION")); 
+      }
+      runTest(NAME_NODE, StartupOption.ROLLBACK, false);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode rollback with old layout version in previous",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      UpgradeUtilities.createVersionFile(NAME_NODE,baseDirs,
+        new StorageInfo(1,
+                        UpgradeUtilities.getCurrentNamespaceID(),
+                        UpgradeUtilities.getCurrentFsscTime()));
+      runTest(NAME_NODE, StartupOption.UPGRADE, false);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+    } // end numDir loop
+  }
+ 
+  public static void main(String[] args) throws Exception {
+    new TestDFSRollback().testRollback();
+  }
+  
+}
+
+

Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java?view=auto&rev=525290
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStartupVersions.java Tue Apr  3 14:39:25 2007
@@ -0,0 +1,206 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.dfs;
+
+import java.io.File;
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.FSConstants.NodeType;
+import static org.apache.hadoop.dfs.FSConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.dfs.FSConstants.NodeType.DATA_NODE;
+import org.apache.hadoop.dfs.FSConstants.StartupOption;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * This test ensures the appropriate response (successful or failure) from 
+ * a Datanode when the system is started with differing version combinations. 
+ * 
+ * @author Nigel Daley
+ */
+public class TestDFSStartupVersions extends TestCase {
+  
+  private static final Log LOG = LogFactory.getLog(
+    "org.apache.hadoop.dfs.TestDFSStartupVersions");
+  private static Path TEST_ROOT_DIR = new Path(
+    System.getProperty("test.build.data","/tmp").toString().replace(' ', '+'));
+  
+  /**
+   * Writes an INFO log message containing the parameters.
+   */
+  void log(String label, NodeType nodeType, Integer testCase, StorageInfo version) {
+    String testCaseLine = "";
+    if (testCase != null) {
+      testCaseLine = " testCase="+testCase;
+    }
+    LOG.info("============================================================");
+    LOG.info("***TEST*** " + label + ":"
+      + testCaseLine
+      + " nodeType="+nodeType
+      + " layoutVersion="+version.getLayoutVersion()
+      + " namespaceID="+version.getNamespaceID()
+      + " fsscTime="+version.getCTime());
+  }
+  
+  /**
+   * Initialize the versions array.  This array stores all combinations 
+   * of cross product:
+   *  {oldLayoutVersion,currentLayoutVersion,futureLayoutVersion} X
+   *    {currentNamespaceId,incorrectNamespaceId} X
+   *      {pastFsscTime,currentFsscTime,futureFsscTime}
+   */
+  private StorageInfo[] initializeVersions() throws Exception {
+    int layoutVersionOld = -3;
+    int layoutVersionCur = UpgradeUtilities.getCurrentLayoutVersion();
+    int layoutVersionNew = Integer.MIN_VALUE;
+    int namespaceIdCur = UpgradeUtilities.getCurrentNamespaceID();
+    int namespaceIdOld = Integer.MIN_VALUE;
+    long fsscTimeOld = Long.MIN_VALUE;
+    long fsscTimeCur = UpgradeUtilities.getCurrentFsscTime();
+    long fsscTimeNew = Long.MAX_VALUE;
+    
+    return new StorageInfo[] {
+      new StorageInfo(layoutVersionOld, namespaceIdCur, fsscTimeOld), // 0
+      new StorageInfo(layoutVersionOld, namespaceIdCur, fsscTimeCur), // 1
+      new StorageInfo(layoutVersionOld, namespaceIdCur, fsscTimeNew), // 2
+      new StorageInfo(layoutVersionOld, namespaceIdOld, fsscTimeOld), // 3
+      new StorageInfo(layoutVersionOld, namespaceIdOld, fsscTimeCur), // 4
+      new StorageInfo(layoutVersionOld, namespaceIdOld, fsscTimeNew), // 5
+      new StorageInfo(layoutVersionCur, namespaceIdCur, fsscTimeOld), // 6
+      new StorageInfo(layoutVersionCur, namespaceIdCur, fsscTimeCur), // 7
+      new StorageInfo(layoutVersionCur, namespaceIdCur, fsscTimeNew), // 8
+      new StorageInfo(layoutVersionCur, namespaceIdOld, fsscTimeOld), // 9
+      new StorageInfo(layoutVersionCur, namespaceIdOld, fsscTimeCur), // 10
+      new StorageInfo(layoutVersionCur, namespaceIdOld, fsscTimeNew), // 11
+      new StorageInfo(layoutVersionNew, namespaceIdCur, fsscTimeOld), // 12
+      new StorageInfo(layoutVersionNew, namespaceIdCur, fsscTimeCur), // 13
+      new StorageInfo(layoutVersionNew, namespaceIdCur, fsscTimeNew), // 14
+      new StorageInfo(layoutVersionNew, namespaceIdOld, fsscTimeOld), // 15
+      new StorageInfo(layoutVersionNew, namespaceIdOld, fsscTimeCur), // 16
+      new StorageInfo(layoutVersionNew, namespaceIdOld, fsscTimeNew), // 17
+    };
+  }
+  
+  /**
+   * Determines if the given Namenode version and Datanode version
+   * are compatible with each other. Compatibility in this case means
+   * that the Namenode and Datanode will successfully start up and
+   * will work together. The rules for compatibility,
+   * taken from the DFS Upgrade Design, are as follows:
+   * <pre>
+   * 1. The data-node does regular startup (no matter which options 
+   *    it is started with) if
+   *       softwareLV == storedLV AND 
+   *       DataNode.FSSCTime == NameNode.FSSCTime
+   * 2. The data-node performs an upgrade if it is started without any 
+   *    options and
+   *       |softwareLV| > |storedLV| OR 
+   *       (softwareLV == storedLV AND
+   *        DataNode.FSSCTime < NameNode.FSSCTime)
+   * 3. NOT TESTED: The data-node rolls back if it is started with
+   *    the -rollback option and
+   *       |softwareLV| >= |previous.storedLV| AND 
+   *       DataNode.previous.FSSCTime <= NameNode.FSSCTime
+   * 4. In all other cases the startup fails.
+   * </pre>
+   */
+  boolean isVersionCompatible(StorageInfo namenodeVer, StorageInfo datanodeVer) {
+    // check #0
+    if (namenodeVer.getNamespaceID() != datanodeVer.getNamespaceID()) {
+      LOG.info("namespaceIDs are not equal: isVersionCompatible=false");
+      return false;
+    }
+    // check #1
+    int softwareLV = FSConstants.LAYOUT_VERSION;  // will also be Namenode's LV
+    int storedLV = datanodeVer.getLayoutVersion();
+    if (softwareLV == storedLV &&  
+        datanodeVer.getCTime() == namenodeVer.getCTime()) 
+    {
+      LOG.info("layoutVersions and cTimes are equal: isVersionCompatible=true");
+      return true;
+    }
+    // check #2
+    long absSoftwareLV = Math.abs((long)softwareLV);
+    long absStoredLV = Math.abs((long)storedLV);
+    if (absSoftwareLV > absStoredLV ||
+        (softwareLV == storedLV &&
+         datanodeVer.getCTime() < namenodeVer.getCTime())) 
+    {
+      LOG.info("softwareLayoutVersion is newer OR namenode cTime is newer: isVersionCompatible=true");
+      return true;
+    }
+    // check #4
+    LOG.info("default case: isVersionCompatible=false");
+    return false;
+  }
+  
+  /**
+   * This test ensures the appropriate response (successful or failure) from 
+   * a Datanode when the system is started with differing version combinations. 
+   * <pre>
+   * For each 3-tuple in the cross product
+   *   ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
+   *    {currentNamespaceId,incorrectNamespaceId},
+   *    {pastFsscTime,currentFsscTime,futureFsscTime})
+   *      1. Startup Namenode with version file containing 
+   *         (currentLayoutVersion,currentNamespaceId,currentFsscTime)
+   *      2. Attempt to startup Datanode with version file containing 
+   *         this iterations version 3-tuple
+   * </pre>
+   */
+  public void testVersions() throws Exception {
+    UpgradeUtilities.initialize();
+    Configuration conf = UpgradeUtilities.initializeStorageStateConf(1);
+    StorageInfo[] versions = initializeVersions();
+    UpgradeUtilities.createStorageDirs(
+      NAME_NODE, conf.getStrings("dfs.name.dir"), "current");
+    UpgradeUtilities.startCluster(NAME_NODE,StartupOption.REGULAR,conf);
+    StorageInfo nameNodeVersion = new StorageInfo(
+      UpgradeUtilities.getCurrentLayoutVersion(),
+      UpgradeUtilities.getCurrentNamespaceID(),
+      UpgradeUtilities.getCurrentFsscTime());
+    log("NameNode version info",NAME_NODE,null,nameNodeVersion);
+    try {
+      for (int i = 0; i < versions.length; i++) {
+        File[] storage = UpgradeUtilities.createStorageDirs(
+          DATA_NODE, conf.getStrings("dfs.data.dir"), "current");
+        log("DataNode version info",DATA_NODE,i,versions[i]);
+        UpgradeUtilities.createVersionFile(DATA_NODE, storage, versions[i]);
+        try {
+          UpgradeUtilities.startCluster(DATA_NODE,StartupOption.REGULAR,conf);
+        } catch (Exception ignore) {
+          // Ignore.  The asserts below will check for problems.
+          // ignore.printStackTrace();
+        }
+        assertTrue(UpgradeUtilities.isNodeRunning(NAME_NODE));
+        assertEquals(isVersionCompatible(nameNodeVersion, versions[i]),
+          UpgradeUtilities.isNodeRunning(DATA_NODE));
+        UpgradeUtilities.stopCluster(DATA_NODE);
+      }
+    } finally {
+      UpgradeUtilities.stopCluster(null);
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    new TestDFSStartupVersions().testVersions();
+  }
+  
+}
+
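
As a concrete reading of the rules implemented above (values illustrative):
with softwareLV = -7 and a stored datanode LV of -3, |softwareLV| > |storedLV|,
so rule 2 triggers an upgrade; with a stored LV of Integer.MIN_VALUE (the
"future" layout version in test cases 12-17), |storedLV| exceeds any software
version, so rule 4 applies and the datanode fails to start.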

Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java?view=auto&rev=525290
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java Tue Apr  3 14:39:25 2007
@@ -0,0 +1,245 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.dfs;
+
+import java.io.File;
+import java.io.IOException;
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.FSConstants.NodeType;
+import static org.apache.hadoop.dfs.FSConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.dfs.FSConstants.NodeType.DATA_NODE;
+import org.apache.hadoop.dfs.FSConstants.StartupOption;
+import org.apache.hadoop.fs.Path;
+
+/**
+* This test ensures the appropriate response (successful or failure) from
+* the system when the system is started under various storage state and
+* version conditions.
+*
+* @author Nigel Daley
+*/
+public class TestDFSStorageStateRecovery extends TestCase {
+ 
+  private static final Log LOG = LogFactory.getLog(
+    "org.apache.hadoop.dfs.TestDFSStorageStateRecovery");
+  Configuration conf;
+  private int testCounter = 0;
+  
+  /**
+   * The test case table.  Each row represents a test case.  This table is
+   * taken from the table in Appendix A of the HDFS Upgrade Test Plan
+   * (TestPlan-HdfsUpgrade.html) attached to
+   * http://issues.apache.org/jira/browse/HADOOP-702
+   * The column meanings are:
+   *  0) current directory exists
+   *  1) previous directory exists
+   *  2) previous.tmp directory exists
+   *  3) removed.tmp directory exists
+   *  4) node should recover and startup
+   *  5) current directory should exist after recovery but before startup
+   *  6) previous directory should exist after recovery but before startup
+   */
+  static boolean[][] testCases = new boolean[][] {
+    new boolean[] {true,  false, false, false, true,  true,  false}, // 1
+    new boolean[] {true,  true,  false, false, true,  true,  true }, // 2
+    new boolean[] {true,  false, true,  false, true,  true,  true }, // 3
+    new boolean[] {true,  true,  true,  true,  false, false, false }, // 4
+    new boolean[] {true,  true,  true,  false, false, false, false }, // 4
+    new boolean[] {false, true,  true,  true,  false, false, false }, // 4
+    new boolean[] {false, true,  true,  false, false, false, false }, // 4
+    new boolean[] {false, false, false, false, false, false, false }, // 5
+    new boolean[] {false, true,  false, false, false, false, false }, // 6
+    new boolean[] {false, false, true,  false, true,  true,  false}, // 7
+    new boolean[] {true,  false, false, true,  true,  true,  false}, // 8
+    new boolean[] {true,  true,  false, true,  false, false, false }, // 9
+    new boolean[] {true,  true,  true,  true,  false, false, false }, // 10
+    new boolean[] {true,  false, true,  true,  false, false, false }, // 10
+    new boolean[] {false, true,  true,  true,  false, false, false }, // 10
+    new boolean[] {false, false, true,  true,  false, false, false }, // 10
+    new boolean[] {false, false, false, true,  false, false, false }, // 11
+    new boolean[] {false, true,  false, true,  true,  true,  true }, // 12
+  };
+  
+  /**
+   * Writes an INFO log message containing the parameters. Only
+   * the first 4 elements of the state array are included in the message.
+   */
+  void log(String label, int numDirs, int testCaseNum, boolean[] state) {
+    LOG.info("============================================================");
+    LOG.info("***TEST " + (testCounter++) + "*** " 
+      + label + ":"
+      + " numDirs="+numDirs
+      + " testCase="+testCaseNum
+      + " current="+state[0]
+      + " previous="+state[1]
+      + " previous.tmp="+state[2]
+      + " removed.tmp="+state[3]);
+  }
+  
+  /**
+   * Sets up the storage directories for the given node type, either
+   * dfs.name.dir or dfs.data.dir. For each element in dfs.name.dir or
+   * dfs.data.dir, the subdirectories represented by the first four elements 
+   * of the <code>state</code> array will be created and populated.
+   * See UpgradeUtilities.createStorageDirs().
+   * 
+   * @param nodeType
+   *   the type of node that storage should be created for. Based on this
+   *   parameter either dfs.name.dir or dfs.data.dir is used from the global conf.
+   * @param state
+   *   a row from the testCases table which indicates which directories
+   *   to setup for the node
+   * @return file paths representing either dfs.name.dir or dfs.data.dir
+   *   directories
+   */
+  String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
+    String[] baseDirs = (nodeType == NAME_NODE ?
+      conf.getStrings("dfs.name.dir") :
+      conf.getStrings("dfs.data.dir"));
+    UpgradeUtilities.createEmptyDirs(baseDirs);
+    if (state[0])  // current
+      UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
+    if (state[1])  // previous
+      UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous");
+    if (state[2])  // previous.tmp
+      UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous.tmp");
+    if (state[3])  // removed.tmp
+      UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "removed.tmp");
+    return baseDirs;
+  }
+ 
+  /**
+   * Verify that the current and/or previous exist as indicated by 
+   * the method parameters.  If previous exists, verify that
+   * it hasn't been modified by comparing the checksum of all of its
+   * files with their original checksums.  It is assumed that
+   * the server has recovered.
+   */
+  void checkResult(NodeType nodeType, String[] baseDirs, 
+                   boolean currentShouldExist, boolean previousShouldExist) 
+                     throws IOException
+  {
+    switch (nodeType) {
+      case NAME_NODE:
+        if (currentShouldExist) {
+          for (int i = 0; i < baseDirs.length; i++) {
+            assertTrue(new File(baseDirs[i],"current").isDirectory());
+            assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
+            assertTrue(new File(baseDirs[i],"current/edits").isFile());
+            assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
+            assertTrue(new File(baseDirs[i],"current/fstime").isFile());
+          }
+        }
+        break;
+      case DATA_NODE:
+        if (currentShouldExist) {
+          for (int i = 0; i < baseDirs.length; i++) {
+            assertEquals(
+              UpgradeUtilities.checksumContents(
+                nodeType, new File(baseDirs[i],"current")),
+              UpgradeUtilities.checksumMasterContents(nodeType));
+          }
+        }
+        break;
+    }
+    if (previousShouldExist) {
+      for (int i = 0; i < baseDirs.length; i++) {
+        assertTrue(new File(baseDirs[i],"previous").isDirectory());
+        assertEquals(
+          UpgradeUtilities.checksumContents(
+            nodeType, new File(baseDirs[i],"previous")),
+          UpgradeUtilities.checksumMasterContents(nodeType));
+      }
+    }
+  }
+ 
+  /**
+   * Does a regular start of the given nodeType.
+   * 
+   * @param nodeType must not be null
+   * @param shouldStart indicates whether the node should start
+   */
+  void runTest(NodeType nodeType, boolean shouldStart) throws Exception {
+    if (shouldStart) {
+      UpgradeUtilities.startCluster(nodeType, StartupOption.REGULAR, conf);
+      UpgradeUtilities.stopCluster(nodeType);
+    } else {
+      try {
+        UpgradeUtilities.startCluster(nodeType, StartupOption.REGULAR, conf); // should fail
+        throw new AssertionError("Cluster should have failed to start");
+      } catch (Exception expected) {
+        // expected
+        //expected.printStackTrace();
+        assertFalse(UpgradeUtilities.isNodeRunning(nodeType));
+      } finally {
+        UpgradeUtilities.stopCluster(nodeType);
+      }
+    }
+  }
+ 
+  /**
+   * This test iterates over the testCases table and attempts
+   * to startup the NameNode and DataNode normally.
+   */
+  public void testStorageStates() throws Exception {
+    String[] baseDirs;
+    UpgradeUtilities.initialize();
+
+    for (int numDirs = 1; numDirs <= 2; numDirs++) {
+      conf = UpgradeUtilities.initializeStorageStateConf(numDirs);
+      for (int i = 0; i < testCases.length; i++) {
+        boolean[] testCase = testCases[i];
+        boolean shouldRecover = testCase[4];
+        boolean curAfterRecover = testCase[5];
+        boolean prevAfterRecover = testCase[6];
+
+        log("NAME_NODE recovery",numDirs,i,testCase);
+        baseDirs = createStorageState(NAME_NODE, testCase);
+        runTest(NAME_NODE, shouldRecover);
+        if (shouldRecover) {
+          checkResult(NAME_NODE, baseDirs, curAfterRecover, prevAfterRecover);
+        }
+        
+        log("DATA_NODE recovery",numDirs,i,testCase);
+        createStorageState(NAME_NODE, new boolean[] {true,true,false,false});
+        UpgradeUtilities.startCluster(NAME_NODE,StartupOption.REGULAR,conf);
+        baseDirs = createStorageState(DATA_NODE, testCase);
+        if (!testCase[0] && !testCase[1] && !testCase[2] && !testCase[3]) {
+          // DataNode will create and format current if no directories exist
+          runTest(DATA_NODE, true);
+        } else {
+          runTest(DATA_NODE, shouldRecover);
+          if (shouldRecover) {
+            checkResult(DATA_NODE, baseDirs, curAfterRecover, prevAfterRecover);
+          }
+        }
+        UpgradeUtilities.stopCluster(null);
+      } // end testCases loop
+    } // end numDirs loop
+  }
+ 
+  public static void main(String[] args) throws Exception {
+    new TestDFSStorageStateRecovery().testStorageStates();
+  }
+  
+}
+
+

Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java?view=auto&rev=525290
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java Tue Apr  3 14:39:25 2007
@@ -0,0 +1,230 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.dfs;
+
+import java.io.File;
+import java.io.IOException;
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.FSConstants.NodeType;
+import static org.apache.hadoop.dfs.FSConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.dfs.FSConstants.NodeType.DATA_NODE;
+import org.apache.hadoop.dfs.FSConstants.StartupOption;
+import org.apache.hadoop.fs.Path;
+
+/**
+* This test ensures the appropriate response (successful or failure) from
+* the system when the system is upgraded under various storage state and
+* version conditions.
+*
+* @author Nigel Daley
+*/
+public class TestDFSUpgrade extends TestCase {
+ 
+  private static final Log LOG = LogFactory.getLog(
+    "org.apache.hadoop.dfs.TestDFSUpgrade");
+  Configuration conf;
+  private int testCounter = 0;
+  
+  /**
+   * Writes an INFO log message containing the parameters.
+   */
+  void log(String label, int numDirs) {
+    LOG.info("============================================================");
+    LOG.info("***TEST " + (testCounter++) + "*** " 
+      + label + ":"
+      + " numDirs="+numDirs);
+  }
+  
+  /**
+   * Verify that the current and previous directories exist.  Verify that 
+   * previous hasn't been modified by comparing the checksum of all of its
+   * files with their original checksums.  It is assumed that
+   * the server has recovered and upgraded.
+   */
+  void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
+    switch (nodeType) {
+      case NAME_NODE:
+        for (int i = 0; i < baseDirs.length; i++) {
+          assertTrue(new File(baseDirs[i],"current").isDirectory());
+          assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
+          assertTrue(new File(baseDirs[i],"current/edits").isFile());
+          assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
+          assertTrue(new File(baseDirs[i],"current/fstime").isFile());
+        }
+        break;
+      case DATA_NODE:
+        for (int i = 0; i < baseDirs.length; i++) {
+          assertEquals(
+            UpgradeUtilities.checksumContents(
+              nodeType, new File(baseDirs[i],"current")),
+            UpgradeUtilities.checksumMasterContents(nodeType));
+        }
+        break;
+    }
+    for (int i = 0; i < baseDirs.length; i++) {
+      assertTrue(new File(baseDirs[i],"previous").isDirectory());
+      assertEquals(
+        UpgradeUtilities.checksumContents(
+          nodeType, new File(baseDirs[i],"previous")),
+        UpgradeUtilities.checksumMasterContents(nodeType));
+    }
+  }
+ 
+  /**
+   * Starts the given nodeType with the given operation.  The remaining 
+   * parameters are used to verify the expected result.
+   * 
+   * @param nodeType must not be null
+   */
+  void runTest(NodeType nodeType, StartupOption operation, boolean shouldStart) 
+    throws Exception 
+  {
+    if (shouldStart) {
+      UpgradeUtilities.startCluster(nodeType, operation, conf);
+      UpgradeUtilities.stopCluster(nodeType);
+    } else {
+      try {
+        UpgradeUtilities.startCluster(nodeType, operation, conf); // should fail
+        throw new AssertionError("Cluster should have failed to start");
+      } catch (Exception expected) {
+        // expected
+        //expected.printStackTrace();
+        assertFalse(UpgradeUtilities.isNodeRunning(nodeType));
+      } finally {
+        UpgradeUtilities.stopCluster(nodeType);
+      }
+    }
+  }
+ 
+  /**
+   * This test attempts to upgrade the NameNode and DataNode under
+   * a number of valid and invalid conditions.
+   */
+  public void testUpgrade() throws Exception {
+    File[] baseDirs;
+    UpgradeUtilities.initialize();
+    
+    for (int numDirs = 1; numDirs <= 2; numDirs++) {
+      conf = UpgradeUtilities.initializeStorageStateConf(numDirs);
+      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
+      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
+      
+      log("Normal NameNode upgrade",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      runTest(NAME_NODE, StartupOption.UPGRADE, true);
+      checkResult(NAME_NODE, nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("Normal DataNode upgrade",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.UPGRADE,conf);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      runTest(DATA_NODE, StartupOption.REGULAR, true);
+      checkResult(DATA_NODE, dataNodeDirs);
+      UpgradeUtilities.stopCluster(null);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+      
+      log("NameNode upgrade with existing previous dir",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      runTest(NAME_NODE, StartupOption.UPGRADE, false);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("DataNode upgrade with existing previous dir",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.UPGRADE,conf);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
+      runTest(DATA_NODE, StartupOption.REGULAR, true);
+      checkResult(DATA_NODE, dataNodeDirs);
+      UpgradeUtilities.stopCluster(null);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+      log("DataNode upgrade with future stored layout version in current",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.UPGRADE,conf);
+      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
+        new StorageInfo(Integer.MIN_VALUE,
+                        UpgradeUtilities.getCurrentNamespaceID(),
+                        UpgradeUtilities.getCurrentFsscTime()));
+      runTest(DATA_NODE, StartupOption.REGULAR, false);
+      UpgradeUtilities.stopCluster(null);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+      
+      log("DataNode upgrade with newer fsscTime in current",numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.startCluster(NAME_NODE,StartupOption.UPGRADE,conf);
+      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
+        new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
+                        UpgradeUtilities.getCurrentNamespaceID(),
+                        Long.MAX_VALUE));
+      runTest(DATA_NODE, StartupOption.REGULAR, false);
+      UpgradeUtilities.stopCluster(null);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+      log("NameNode upgrade with no edits file",numDirs);
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      for (File f : baseDirs) { 
+        UpgradeUtilities.remove(new File(f,"edits"));
+      }
+      runTest(NAME_NODE, StartupOption.UPGRADE, false);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode upgrade with no image file",numDirs);
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      for (File f : baseDirs) { 
+        UpgradeUtilities.remove(new File(f,"fsimage")); 
+      }
+      runTest(NAME_NODE, StartupOption.UPGRADE, false);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode upgrade with corrupt version file",numDirs);
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      for (File f : baseDirs) { 
+        UpgradeUtilities.corruptFile(new File(f,"VERSION")); 
+      }
+      runTest(NAME_NODE, StartupOption.UPGRADE, false);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode upgrade with future layout version in current",numDirs);
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createVersionFile(NAME_NODE,baseDirs,
+        new StorageInfo(Integer.MIN_VALUE,
+                        UpgradeUtilities.getCurrentNamespaceID(),
+                        UpgradeUtilities.getCurrentFsscTime()));
+      runTest(NAME_NODE, StartupOption.UPGRADE, false);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+    } // end numDir loop
+  }
+ 
+  public static void main(String[] args) throws Exception {
+    new TestDFSUpgrade().testUpgrade();
+  }
+  
+}
+
+

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java?view=diff&rev=525290&r1=525289&r2=525290
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java Tue Apr  3 14:39:25 2007
@@ -52,7 +52,7 @@
       util.createFiles(fs, "/srcdat");
       // Now deliberately remove the blocks
       File data_dir = new File(System.getProperty("test.build.data"),
-          "dfs/data/data5/data");
+          "dfs/data/data5/current");
       assertTrue("data directory does not exist", data_dir.exists());
       File[] blocks = data_dir.listFiles();
       assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));

Added: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java?view=auto&rev=525290
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java (added)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java Tue Apr  3 14:39:25 2007
@@ -0,0 +1,574 @@
+/*
+ * UpgradeUtilities.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.dfs;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.net.InetSocketAddress;
+import java.util.Random;
+import java.util.zip.CRC32;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.dfs.FSConstants.NodeType;
+import static org.apache.hadoop.dfs.FSConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.dfs.FSConstants.NodeType.DATA_NODE;
+import org.apache.hadoop.dfs.FSConstants.StartupOption;
+import org.apache.hadoop.dfs.Storage.StorageDirectory;
+
+/**
+ * This class defines a number of static helper methods used by the
+ * DFS Upgrade unit tests.  By default, a singleton master populated storage
+ * directory is created for a Namenode (contains edits, fsimage,
+ * version, and time files) and a Datanode (contains version and
+ * block files).  The master directories are lazily created.  They are then
+ * copied by the createStorageDirs() method to create new storage
+ * directories of the appropriate type (Namenode or Datanode).
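+ *
+ * A typical calling sequence, as a sketch (two storage directories
+ * assumed; see the individual methods below for exact semantics):
+ * <pre>
+ *   UpgradeUtilities.initialize();
+ *   Configuration conf = UpgradeUtilities.initializeStorageStateConf(2);
+ *   String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
+ *   UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+ *   UpgradeUtilities.startCluster(NAME_NODE, StartupOption.UPGRADE, conf);
+ *   UpgradeUtilities.stopCluster(NAME_NODE);
+ *   UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+ * </pre>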
+ * 
+ * @author Nigel Daley
+ */
+public class UpgradeUtilities {
+
+  // The fs.default.name configuration host:port value for the Namenode
+  private static final String NAMENODE_HOST = "localhost:0";
+  // Root scratch directory on local filesystem 
+  private static File TEST_ROOT_DIR = new File(
+    System.getProperty("test.build.data","/tmp").replace(' ', '+'));
+  // The singleton master storage directory for Namenode
+  private static File namenodeStorage = new File(TEST_ROOT_DIR, "namenodeMaster");
+  // A checksum of the contents in namenodeStorage directory
+  private static long namenodeStorageChecksum;
+  // The namespaceId of the namenodeStorage directory
+  private static int namenodeStorageNamespaceID;
+  // The fsscTime of the namenodeStorage directory
+  private static long namenodeStorageFsscTime;
+  // The singleton master storage directory for Datanode
+  private static File datanodeStorage = new File(TEST_ROOT_DIR, "datanodeMaster");
+  // A checksum of the contents in datanodeStorage directory
+  private static long datanodeStorageChecksum;
+  // The NameNode started by this Utility class
+  private static NameNode namenode = null;
+  // The DataNode started by this Utility class
+  private static DataNode datanode = null;
+  
+  /**
+   * Initialize the data structures used by this class.  
+   * IMPORTANT NOTE: This method must be called once before calling 
+   *                 any other public method on this class.  
+   */
+  public static void initialize() throws Exception {
+    createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
+    initializeStorage();
+  }
+  
+  /**
+   * Initialize dfs.name.dir and dfs.data.dir with the specified number of
+   * directory entries. Also initialize fs.default.name and 
+   * dfs.blockreport.intervalMsec.
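+   *
+   * For example, numDirs = 2 yields dfs.name.dir = "name1,name2" and
+   * dfs.data.dir = "data1,data2", with each entry rooted at the test
+   * scratch directory.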
+   */
+  public static Configuration initializeStorageStateConf(int numDirs) {
+    StringBuffer nameNodeDirs =
+      new StringBuffer(new File(TEST_ROOT_DIR, "name1").toString());
+    StringBuffer dataNodeDirs =
+      new StringBuffer(new File(TEST_ROOT_DIR, "data1").toString());
+    for (int i = 2; i <= numDirs; i++) {
+      nameNodeDirs.append("," + new File(TEST_ROOT_DIR, "name"+i));
+      dataNodeDirs.append("," + new File(TEST_ROOT_DIR, "data"+i));
+    }
+    Configuration conf = new Configuration();
+    conf.set("dfs.name.dir", nameNodeDirs.toString());
+    conf.set("dfs.data.dir", dataNodeDirs.toString());
+    conf.set("dfs.blockreport.intervalMsec", 10000);
+    return conf;
+  }
+  
+  /**
+   * Starts the given type of node or all nodes.
+   *
+   * The UpgradeUtilities.initialize() method must be called once before
+   * calling this method.
+   *
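+   * For example, a sketch of a typical upgrade-test startup
+   * (conf as returned by initializeStorageStateConf):
+   * <pre>
+   *   UpgradeUtilities.startCluster(NAME_NODE, StartupOption.UPGRADE, conf);
+   *   UpgradeUtilities.startCluster(DATA_NODE, StartupOption.REGULAR, conf);
+   * </pre>
+   *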
+   * @param nodeType
+   *    The type of node to start.  If NAME_NODE, then one
+   *    Namenode is started.  If DATA_NODE, then one Datanode
+   *    is started.
+   * @param operation
+   *    The operation with which to start up the given type of
+   *    node. FORMAT and null are treated as a REGULAR startup. If nodeType
+   *    is DATA_NODE, then UPGRADE is also treated as REGULAR.
+   * @param conf
+   *    The configuration to be used in starting the node.
+   *
+   * @throws IllegalStateException
+   *    If this method is called to start a
+   *    node that is already running.
+   */
+  public static void startCluster(NodeType nodeType, StartupOption operation, Configuration conf) throws Exception {
+    if (isNodeRunning(nodeType)) {
+      throw new IllegalStateException("Attempting to start "
+        + nodeType + " but it is already running");
+    }
+    if (nodeType == DATA_NODE && operation == StartupOption.UPGRADE) {
+      operation = StartupOption.REGULAR;
+    }
+    String[] args = (operation == null ||
+      operation == StartupOption.FORMAT ||
+      operation == StartupOption.REGULAR) ?
+        new String[] {} : new String[] {"-"+operation.toString()};
+    switch (nodeType) {
+      case NAME_NODE:
+        // Bind the Namenode to localhost with an ephemeral port
+        conf.set("fs.default.name",NAMENODE_HOST);
+        namenode = NameNode.createNameNode(args, conf);
+        break;
+      case DATA_NODE:
+        if (namenode == null) {
+          throw new IllegalStateException("Attempting to start DATA_NODE "
+            + "but NAME_NODE is not running");
+        }
+        // Set up the right ports for the datanodes
+        InetSocketAddress nnAddr = namenode.getNameNodeAddress(); 
+        conf.set("fs.default.name", nnAddr.getHostName()+ ":" + nnAddr.getPort());
+        conf.setInt("dfs.info.port", 0);
+        conf.setInt("dfs.datanode.info.port", 0);
+        datanode = DataNode.createDataNode(args, conf);
+        break;
+    }
+  }
+  
+  /**
+   * Stops the given type of node or all nodes.
+   *
+   * The UpgradeUtilities.initialize() method must be called once before
+   * calling this method.
+   *
+   * @param nodeType
+   *    The type of node to stop if it is running. If null, then both
+   *    Namenode and Datanodes are stopped if they are running.
+   */
+  public static void stopCluster(NodeType nodeType) {
+    if (nodeType == NAME_NODE || nodeType == null) {
+      if (namenode != null) {
+        namenode.stop();
+      }
+      namenode = null;
+    }
+    if (nodeType == DATA_NODE || nodeType == null) {
+      if (datanode != null) {
+        datanode.shutdown(); 
+      }
+      DataNode.shutdownAll();
+      datanode = null;
+    }
+  }
+  
+  /**
+   * If the Namenode is running, attempt to finalize a previous upgrade.
+   * When this method returns, the Namenode will be finalized, but
+   * Datanodes may not be yet, since they finalize asynchronously.
+   *
+   * @throws IllegalStateException if the Namenode is not running.
+   */
+  public static void finalizeCluster(Configuration conf) throws Exception {
+    if (! isNodeRunning(NAME_NODE)) {
+      throw new IllegalStateException("Attempting to finalize "
+        + "Namenode but it is not running");
+    }
+    new DFSAdmin().doMain(conf, new String[] {"-finalizeUpgrade"});
+  }
+  
+  /**
+   * Determines if the given node type is currently running.
+   * If the node type is DATA_NODE, then all started Datanodes
+   * must be running in order for this method to return
+   * <code>true</code>.
+   *
+   * The UpgradeUtilities.initialize() method must be called once before
+   * calling this method.
+   */
+  public static boolean isNodeRunning(NodeType nodeType) {
+    switch( nodeType ) {
+      case NAME_NODE:
+        return namenode != null;
+      case DATA_NODE:
+        return datanode != null;
+      default:
+        assert false : "Invalid node type: " + nodeType;
+    }
+    return false;
+  }
+  
+  /**
+   * Format the given directories.  This is equivalent to the Namenode
+   * formatting the given directories.  If a given directory already exists,
+   * it is first deleted; if it does not exist, it is created.
+   *
+   * @throws IOException if unable to format one of the given dirs
+   */
+  public static void format(File... dirs) throws IOException {
+    String imageDirs = "";
+    for (int i = 0; i < dirs.length; i++) {
+      if( i == 0 )
+        imageDirs = dirs[i].getCanonicalPath();
+      else
+        imageDirs += "," + dirs[i].getCanonicalPath();
+    }
+    Configuration conf = new Configuration();
+    conf.set("dfs.name.dir", imageDirs);
+    NameNode.format(conf);
+  }
+  
+  /**
+   * Create empty directories.  If a specified directory already exists
+   * then it is first removed.
+   */
+  public static void createEmptyDirs(String[] dirs) {
+    for (String d : dirs) {
+      File dir = new File(d);
+      if (dir.exists()) {
+        remove(dir);
+      }
+      dir.mkdirs();
+    }
+  }
+  
+  /**
+   * Return the checksum for the singleton master storage directory
+   * of the given node type.
+   */
+  public static long checksumMasterContents(NodeType nodeType) throws IOException {
+    if (nodeType == NAME_NODE) {
+      return namenodeStorageChecksum;
+    } else {
+      return datanodeStorageChecksum;
+    }
+  }
+  
+  /**
+   * Compute the checksum of all the files in the specified directory.
+   * The contents of subdirectories are not included. This method provides
+   * an easy way to ensure equality between the contents of two directories.
+   *
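+   * For example, a sketch of the comparison the upgrade tests perform,
+   * where curDir stands for some "current" storage directory:
+   * <pre>
+   *   assertEquals(UpgradeUtilities.checksumMasterContents(NAME_NODE),
+   *       UpgradeUtilities.checksumContents(NAME_NODE, curDir));
+   * </pre>
+   *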
+   * @param nodeType if DATA_NODE then any file named "VERSION" is ignored.
+   *    This is because this file is changed every time
+   *    the Datanode is started.
+   * @param dir must be a directory. Subdirectories are ignored.
+   *
+   * @throws IllegalArgumentException if the specified directory is not a directory
+   * @throws IOException if an IOException occurs while reading the files
+   * @return the computed checksum value
+   */
+  public static long checksumContents(NodeType nodeType, File dir) throws IOException {
+    if (!dir.isDirectory()) {
+      throw new IllegalArgumentException(
+        "Given argument is not a directory:" + dir);
+    }
+    File[] list = dir.listFiles();
+    CRC32 checksum = new CRC32();
+    for (int i = 0; i < list.length; i++) {
+      if (list[i].isFile()) {
+        // skip VERSION file for DataNodes
+        if (nodeType == DATA_NODE &&
+          list[i].getName().equals("VERSION")) 
+        {
+          continue; 
+        }
+        FileInputStream fis = new FileInputStream(list[i]);
+        byte[] buffer = new byte[1024];
+        int bytesRead;
+        while ((bytesRead = fis.read(buffer)) != -1) {
+          checksum.update(buffer,0,bytesRead);
+        }
+        fis.close();
+      }
+    }
+    return checksum.getValue();
+  }
+  
+  /**
+   * Simulate the <code>dfs.name.dir</code> or <code>dfs.data.dir</code>
+   * of a populated DFS filesystem.
+   *
+   * This method creates and populates the directory specified by
+   *  <code>parent/dirName</code>, for each parent directory.
+   * The contents of the new directories will be
+   * appropriate for the given node type.  If the directory does not
+   * exist, it will be created.  If the directory already exists, it
+   * will first be deleted.
+   *
+   * By default, a singleton master populated storage
+   * directory is created for a Namenode (contains edits, fsimage,
+   * version, and time files) and a Datanode (contains version and
+   * block files).  These directories are then
+   * copied by this method to create new storage
+   * directories of the appropriate type (Namenode or Datanode).
+   *
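+   * For example, a sketch mirroring the upgrade tests
+   * (nameNodeDirs as configured in dfs.name.dir):
+   * <pre>
+   *   File[] baseDirs = UpgradeUtilities.createStorageDirs(
+   *       NAME_NODE, nameNodeDirs, "current");
+   * </pre>
+   *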
+   * @return the array of created directories
+   */
+  public static File[] createStorageDirs(NodeType nodeType, String[] parents, String dirName) throws Exception {
+    File[] retVal = new File[parents.length];
+    for (int i = 0; i < parents.length; i++) {
+      File newDir = new File(parents[i], dirName);
+      createEmptyDirs(new String[] {newDir.toString()});
+      populateDir(nodeType, newDir);
+      retVal[i] = newDir;
+    }
+    return retVal;
+  }
+  
+  /**
+   * Create a <code>VERSION</code> file inside each of the specified parent
+   * directories.  If such a file already exists, it will be overwritten.
+   * The given storage info will be written to the file.  If it is null,
+   * the storage info of the currently running Namenode will be used.
+   * The parent and nodeType parameters must not be null.
+   *
+   * @param version the layout version, namespace ID, and fsscTime to write;
+   *    may be null as described above
+   *
+   * @return the created version files
+   */
+  public static File[] createVersionFile(NodeType nodeType, File[] parent,
+    StorageInfo version) throws IOException 
+  {
+    if (version == null)
+      version = getCurrentNamespaceInfo();
+    Storage storage = null;
+    File[] versionFiles = new File[parent.length];
+    for (int i = 0; i < parent.length; i++) {
+      File versionFile = new File(parent[i], "VERSION");
+      remove(versionFile);
+      switch (nodeType) {
+        case NAME_NODE:
+          storage = new FSImage( version );
+          break;
+        case DATA_NODE:
+          storage = new DataStorage( version, "doNotCare" );
+          break;
+      }
+      StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
+      sd.write(versionFile);
+      versionFiles[i] = versionFile;
+    }
+    return versionFiles;
+  }
+  
+  /**
+   * Remove the specified file.  If the given file is a directory,
+   * then the directory and all its contents will be removed.
+   */
+  public static boolean remove(File file) {
+    try {
+      return FileUtil.fullyDelete(file);
+    } catch (IOException ioe) {
+      // this should never happen
+      throw new IllegalStateException(
+        "WHAT? FileUtil.fullyDelete threw and IOException?",ioe);
+    }
+  }
+  
+  /**
+   * Corrupt the specified file.  Some random bytes within the file
+   * will be changed to some random values.
+   *
+   * @throws IllegalArgumentException if the given file is not a file
+   * @throws IOException if an IOException occurs while reading or writing the file
+   */
+  public static void corruptFile(File file) throws IOException {
+    if (!file.isFile()) {
+      throw new IllegalArgumentException(
+        "Given argument is not a file:" + file);
+    }
+    RandomAccessFile raf = new RandomAccessFile(file,"rws");
+    Random random = new Random();
+    for (long i = 0; i < raf.length(); i++) {
+      raf.seek(i);
+      if (random.nextBoolean()) {
+        raf.writeByte(random.nextInt());
+      }
+    }
+    raf.close();
+  }
+  
+  /**
+   * Retrieve the current NamespaceInfo object from a running Namenode.
+   */
+  public static NamespaceInfo getCurrentNamespaceInfo() throws IOException {
+    if (isNodeRunning(NAME_NODE))
+      return namenode.versionRequest();
+    return null;
+  }
+  
+  /**
+   * Return the layout version inherent in the current version
+   * of the Namenode, whether it is running or not.
+   */
+  public static int getCurrentLayoutVersion() {
+    return FSConstants.LAYOUT_VERSION;
+  }
+  
+  /**
+   * Return the namespace ID inherent in the currently running
+   * Namenode.  If no Namenode is running, return the namespace ID of
+   * the master Namenode storage directory.
+   *
+   * The UpgradeUtilities.initialize() method must be called once before
+   * calling this method.
+   */
+  public static int getCurrentNamespaceID() throws IOException {
+    if (isNodeRunning(NAME_NODE)) {
+      return namenode.versionRequest().getNamespaceID();
+    }
+    return namenodeStorageNamespaceID;
+  }
+  
+  /**
+   * Return the File System State Creation Timestamp (FSSCTime) inherent
+   * in the currently running Namenode.  If no Namenode is running,
+   * return the FSSCTime of the master Namenode storage directory.
+   *
+   * The UpgradeUtilities.initialize() method must be called once before
+   * calling this method.
+   */
+  public static long getCurrentFsscTime() throws IOException {
+    if (isNodeRunning(NAME_NODE)) {
+      return namenode.versionRequest().getCTime();
+    }
+    return namenodeStorageFsscTime;
+  }
+  
+  /**********************************************************************
+   ********************* PRIVATE METHODS ********************************
+   *********************************************************************/
+  
+  /**
+   * Populates the given directory with valid version, edits, and fsimage
+   * files.  The version file will contain the current layout version.
+   *
+   * The UpgradeUtilities.initialize() method must be called once before
+   * calling this method.
+   *
+   * @throws IllegalArgumentException if dir does not already exist
+   */
+  private static void populateDir(NodeType nodeType, File dir) throws Exception {
+    if (!dir.exists()) {
+      throw new IllegalArgumentException(
+        "Given argument is not an existing directory:" + dir);
+    }
+    LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
+    switch (nodeType) {
+      case NAME_NODE:
+        localFS.copyToLocalFile(
+          new Path(namenodeStorage.toString(), "current"),
+          new Path(dir.toString()),
+          false);
+        break;
+      case DATA_NODE:
+        localFS.copyToLocalFile(
+          new Path(datanodeStorage.toString(), "current"),
+          new Path(dir.toString()),
+          false);
+        break;
+    }
+  }
+  
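+  /**
+   * Create the given file from the given buffer contents, with
+   * replication 1, overwriting any existing file at that path.
+   */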
+  static void writeFile(FileSystem fs,
+    Path path,
+    byte[] buffer,
+    int bufferSize ) throws IOException {
+    OutputStream out;
+    out = fs.create(path, true, bufferSize, (short) 1, 1024);
+    out.write( buffer, 0, bufferSize );
+    out.close();
+  }
+  
+  /**
+   * Creates a singleton master populated storage
+   * directory for a Namenode (contains edits, fsimage,
+   * version, and time files) and a Datanode (contains version and
+   * block files).  This can be a lengthy operation.
+   *
+   * This method creates and uses its own Configuration, in which
+   * fs.default.name, dfs.name.dir, and dfs.data.dir are set to point
+   * at the master storage directories.
+   */
+  private static void initializeStorage() throws Exception {
+    Configuration config = new Configuration();
+    config.set("fs.default.name",NAMENODE_HOST);
+    config.set("dfs.name.dir", namenodeStorage.toString());
+    config.set("dfs.data.dir", datanodeStorage.toString());
+
+    try {
+      // format data-node
+      createEmptyDirs(new String[] {datanodeStorage.toString()});
+
+      // format name-node
+      NameNode.format(config);
+      
+      // start name-node
+      startCluster(NAME_NODE, null, config);
+      namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
+      namenodeStorageFsscTime = namenode.versionRequest().getCTime();
+      
+      // start data-node
+      startCluster(DATA_NODE, null, config);
+      
+      FileSystem fs = FileSystem.get(config);
+      Path baseDir = new Path("/TestUpgrade");
+      fs.mkdirs( baseDir );
+      
+      // write some files
+      int bufferSize = 4096;
+      byte[] buffer = new byte[bufferSize];
+      for( int i=0; i < bufferSize; i++ )
+        buffer[i] = (byte)('0' + i % 50);
+      writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
+      writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);
+      
+      // save image
+      namenode.getFSImage().saveFSImage();
+      namenode.getFSImage().getEditLog().open();
+      
+      // write more files
+      writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
+      writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
+    } finally {
+      // shutdown
+      stopCluster(null);
+      remove(new File(namenodeStorage,"in_use.lock"));
+      remove(new File(datanodeStorage,"in_use.lock"));
+    }
+    namenodeStorageChecksum = checksumContents(
+      NAME_NODE, new File(namenodeStorage,"current"));
+    datanodeStorageChecksum = checksumContents(
+      DATA_NODE, new File(datanodeStorage,"current"));
+  }
+
+}
+