Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2006/10/31 23:11:21 UTC

svn commit: r469673 - in /lucene/hadoop/trunk: ./ conf/ src/java/org/apache/hadoop/dfs/ src/test/org/apache/hadoop/dfs/

Author: cutting
Date: Tue Oct 31 14:11:20 2006
New Revision: 469673

URL: http://svn.apache.org/viewvc?view=rev&rev=469673
Log:
HADOOP-90.  Permit dfs.name.dir to list multiple directories where namenode data is to be replicated.  Contributed by Milind.

Modified:
    lucene/hadoop/trunk/CHANGES.txt
    lucene/hadoop/trunk/conf/hadoop-default.xml
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSEditLog.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSImage.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
    lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java

Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=469673&r1=469672&r2=469673
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Tue Oct 31 14:11:20 2006
@@ -94,6 +94,9 @@
 25. HADOOP-482.  Fix unit tests to work when a cluster is running on
     the same machine, removing port conflicts.  (Wendy Chien via cutting)
 
+26. HADOOP-90.  Permit dfs.name.dir to list multiple directories,
+    where namenode data is to be replicated. (Milind Bhandarkar via cutting)
+
 
 Release 0.7.2 - 2006-10-18
 

Modified: lucene/hadoop/trunk/conf/hadoop-default.xml
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/conf/hadoop-default.xml?view=diff&rev=469673&r1=469672&r2=469673
==============================================================================
--- lucene/hadoop/trunk/conf/hadoop-default.xml (original)
+++ lucene/hadoop/trunk/conf/hadoop-default.xml Tue Oct 31 14:11:20 2006
@@ -176,7 +176,9 @@
   <name>dfs.name.dir</name>
   <value>${hadoop.tmp.dir}/dfs/name</value>
   <description>Determines where on the local filesystem the DFS name node
-      should store the name table.</description>
+      should store the name table.  If this is a comma-delimited list
+      of directories, then the name table is replicated in all of the
+      directories, for redundancy.</description>
 </property>
 
 <property>

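For illustration, a site configuration taking advantage of this might list two directories on separate disks (the /d1 and /d2 paths below are hypothetical; any local directories would do):

    <property>
      <name>dfs.name.dir</name>
      <value>/d1/dfs/name,/d2/dfs/name</value>
    </property>
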
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java?view=diff&rev=469673&r1=469672&r2=469673
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSDirectory.java Tue Oct 31 14:11:20 2006
@@ -312,8 +312,8 @@
     private int numFilesDeleted = 0;
     
     /** Access an existing dfs name directory. */
-    public FSDirectory(File dir) throws IOException {
-      this.fsImage = new FSImage( dir );
+    public FSDirectory(File[] dirs) throws IOException {
+      this.fsImage = new FSImage( dirs );
     }
     
     void loadFSImage( Configuration conf ) throws IOException {
@@ -326,11 +326,13 @@
       metricsRecord = Metrics.createRecord("dfs", "namenode");
     }
 
-    /** Create a new dfs name directory.  Caution: this destroys all files
+    /** Create new dfs name directories.  Caution: this destroys all files
      * in this filesystem.
-     * @deprecated use @link FSImage#format(File, Configuration) instead */
-    public static void format(File dir, Configuration conf) throws IOException {
-      FSImage.format( dir, conf );
+     * @deprecated use {@link FSImage#format(File)} instead */
+    public static void format(File[] dirs, Configuration conf) throws IOException {
+      for (int idx = 0; idx < dirs.length; idx++) {
+        FSImage.format( dirs[idx] );
+      }
     }
     
     /**

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSEditLog.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSEditLog.java?view=diff&rev=469673&r1=469672&r2=469673
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSEditLog.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSEditLog.java Tue Oct 31 14:11:20 2006
@@ -21,9 +21,12 @@
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.File;
+import java.io.FileDescriptor;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.util.Iterator;
+import java.util.Vector;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.ArrayWritable;
@@ -44,15 +47,16 @@
   private static final byte OP_DATANODE_ADD = 5;
   private static final byte OP_DATANODE_REMOVE = 6;
   
-  private File editsFile;
-  DataOutputStream editsStream = null;
+  private File[] editFiles;
+  DataOutputStream[] editStreams = null;
+  FileDescriptor[] editDescriptors = null;
   
-  FSEditLog( File edits ) {
-    this.editsFile = edits;
+  FSEditLog( File[] edits ) {
+    this.editFiles = edits;
   }
   
-  File getEditsFile() {
-    return this.editsFile;
+  File[] getEditFiles() {
+    return this.editFiles;
   }
 
   /**
@@ -61,18 +65,62 @@
    * @throws IOException
    */
   void create() throws IOException {
-    editsStream = new DataOutputStream(new FileOutputStream(editsFile));
-    editsStream.writeInt( FSConstants.DFS_CURRENT_VERSION );
+    editStreams = new DataOutputStream[editFiles.length];
+    editDescriptors = new FileDescriptor[editFiles.length];
+    for (int idx = 0; idx < editStreams.length; idx++) {
+      FileOutputStream stream = new FileOutputStream(editFiles[idx]);
+      editStreams[idx] = new DataOutputStream(stream);
+      editDescriptors[idx] = stream.getFD();
+      editStreams[idx].writeInt( FSConstants.DFS_CURRENT_VERSION );
+    }
   }
   
   /**
    * Shutdown the filestore
    */
   void close() throws IOException {
-    editsStream.close();
+    for (int idx = 0; idx < editStreams.length; idx++) {
+      editStreams[idx].flush();
+      editDescriptors[idx].sync();
+      editStreams[idx].close();
+    }
   }
 
   /**
+   * Delete specified editLog
+   */
+  void delete(int idx) throws IOException {
+    if (editStreams != null) {
+      editStreams[idx].close();
+    }
+    editFiles[idx].delete();
+  }
+  
+  /**
+   * Delete all editLogs
+   */
+  void deleteAll() throws IOException {
+    for (int idx = 0; idx < editFiles.length; idx++ ) {
+      if (editStreams != null) {
+        editStreams[idx].close();
+      }
+      editFiles[idx].delete();
+    }
+  }
+  
+  /**
+   * check if ANY edits log exists
+   */
+  boolean exists() throws IOException {
+    for (int idx = 0; idx < editFiles.length; idx++) {
+      if (editFiles[idx].exists()) { 
+        return true;
+      }
+    }
+    return false;
+  }
+  
+  /**
    * Load an edit log, and apply the changes to the in-memory structure
    *
    * This is where we apply edits that we've been writing to disk all
@@ -84,10 +132,29 @@
     int numEdits = 0;
     int logVersion = 0;
     
-    if (editsFile.exists()) {
+    // first check how many editFiles exist
+    // and choose the largest editFile, because it is the most recent
+    Vector<File> files = new Vector<File>();
+    for (int idx = 0; idx < editFiles.length; idx++) {
+      if (editFiles[idx].exists()) {
+        files.add(editFiles[idx]);
+      }
+    }
+    long maxLength = Long.MIN_VALUE;
+    File edits = null;
+    for (Iterator<File> it = files.iterator(); it.hasNext();) {
+      File f = it.next();
+      long length = f.length();
+      if (length > maxLength) {
+        maxLength = length;
+        edits = f;
+      }
+    }
+    
+    if (edits != null) {
       DataInputStream in = new DataInputStream(
           new BufferedInputStream(
-              new FileInputStream(editsFile)));
+              new FileInputStream(edits)));
       // Read log file version. Could be missing. 
       in.mark( 4 );
       if( in.available() > 0 ) {
@@ -228,17 +295,21 @@
    * Write an operation to the edit log
    */
   void logEdit(byte op, Writable w1, Writable w2) {
-    synchronized (editsStream) {
-      try {
-        editsStream.write(op);
-        if (w1 != null) {
-          w1.write(editsStream);
-        }
-        if (w2 != null) {
-          w2.write(editsStream);
+    for (int idx = 0; idx < editStreams.length; idx++) {
+      synchronized (editStreams[idx]) {
+        try {
+          editStreams[idx].write(op);
+          if (w1 != null) {
+            w1.write(editStreams[idx]);
+          }
+          if (w2 != null) {
+            w2.write(editStreams[idx]);
+          }
+          editStreams[idx].flush();
+          editDescriptors[idx].sync();
+        } catch (IOException ie) {
+          // TODO: Must report an error here
         }
-      } catch (IOException ie) {
-        // TODO: Must report an error here
       }
     }
     // TODO: initialize checkpointing if the log is large enough

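The durability pattern introduced above is: keep a DataOutputStream and its underlying FileDescriptor for every replica of the log, write each record to all of them, then flush the stream and sync() the descriptor so the record is on disk before logEdit returns; on recovery, loadFSEdits treats the longest surviving file as the most complete. A minimal, self-contained sketch of the write path, with hypothetical file names and a single dummy record:

    import java.io.DataOutputStream;
    import java.io.File;
    import java.io.FileDescriptor;
    import java.io.FileOutputStream;
    import java.io.IOException;

    public class ReplicatedLogSketch {
      public static void main(String[] args) throws IOException {
        File[] logs = { new File("/tmp/edits1"), new File("/tmp/edits2") }; // hypothetical
        DataOutputStream[] streams = new DataOutputStream[logs.length];
        FileDescriptor[] fds = new FileDescriptor[logs.length];
        for (int idx = 0; idx < logs.length; idx++) {
          FileOutputStream fos = new FileOutputStream(logs[idx]);
          streams[idx] = new DataOutputStream(fos);
          fds[idx] = fos.getFD();      // keep the descriptor so we can sync() after writes
        }
        byte op = 1;                   // a dummy operation code
        for (int idx = 0; idx < streams.length; idx++) {
          streams[idx].write(op);      // replicate the record to every log
          streams[idx].flush();        // drain the stream's buffer...
          fds[idx].sync();             // ...and force the bytes to the device
        }
        for (int idx = 0; idx < streams.length; idx++) {
          streams[idx].close();
        }
      }
    }
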
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSImage.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSImage.java?view=diff&rev=469673&r1=469672&r2=469673
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSImage.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSImage.java Tue Oct 31 14:11:20 2006
@@ -43,20 +43,27 @@
   private static final String FS_IMAGE = "fsimage";
   private static final String NEW_FS_IMAGE = "fsimage.new";
   private static final String OLD_FS_IMAGE = "fsimage.old";
+  private static final String FS_TIME = "fstime";
 
-  private File imageDir;  /// directory that contains the image file 
+  private File[] imageDirs;  /// directories that contain the image files
   private FSEditLog editLog;
   // private int namespaceID = 0;    /// a persistent attribute of the namespace
 
   /**
    * 
    */
-  FSImage( File fsDir ) throws IOException {
-    this.imageDir = new File(fsDir, "image");
-    if (! imageDir.exists()) {
-      throw new IOException("NameNode not formatted: " + fsDir);
+  FSImage( File[] fsDirs ) throws IOException {
+    this.imageDirs = new File[fsDirs.length];
+    for (int idx = 0; idx < imageDirs.length; idx++) {
+      imageDirs[idx] = new File(fsDirs[idx], "image");
+      if (! imageDirs[idx].exists()) {
+        throw new IOException("NameNode not formatted: " + imageDirs[idx]);
+      }
+    }
+    File[] edits = new File[fsDirs.length];
+    for (int idx = 0; idx < edits.length; idx++) {
+      edits[idx] = new File(fsDirs[idx], "edits");
     }
-    File edits = new File(fsDir, "edits");
     this.editLog = new FSEditLog( edits );
   }
   
@@ -72,27 +79,52 @@
   void loadFSImage( Configuration conf ) throws IOException {
     FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
     FSDirectory fsDir = fsNamesys.dir;
-    File edits = editLog.getEditsFile();
-    //
-    // Atomic move sequence, to recover from interrupted save
-    //
-    File curFile = new File(imageDir, FS_IMAGE);
-    File newFile = new File(imageDir, NEW_FS_IMAGE);
-    File oldFile = new File(imageDir, OLD_FS_IMAGE);
-
-    // Maybe we were interrupted between 2 and 4
-    if (oldFile.exists() && curFile.exists()) {
-      oldFile.delete();
-      if (edits.exists()) {
-        edits.delete();
+    for (int idx = 0; idx < imageDirs.length; idx++) {
+      //
+      // Atomic move sequence, to recover from interrupted save
+      //
+      File curFile = new File(imageDirs[idx], FS_IMAGE);
+      File newFile = new File(imageDirs[idx], NEW_FS_IMAGE);
+      File oldFile = new File(imageDirs[idx], OLD_FS_IMAGE);
+
+      // Maybe we were interrupted between 2 and 4
+      if (oldFile.exists() && curFile.exists()) {
+        oldFile.delete();
+        if (editLog.exists()) {
+          editLog.deleteAll();
+        }
+      } else if (oldFile.exists() && newFile.exists()) {
+        // Or maybe between 1 and 2
+        newFile.renameTo(curFile);
+        oldFile.delete();
+      } else if (curFile.exists() && newFile.exists()) {
+        // Or else before stage 1, in which case we lose the edits
+        newFile.delete();
+      }
+    }
+    
+    // Now check all curFiles and see which is the newest
+    File curFile = null;
+    long maxTimeStamp = 0;
+    for (int idx = 0; idx < imageDirs.length; idx++) {
+      File file = new File(imageDirs[idx], FS_IMAGE);
+      if (file.exists()) {
+        long timeStamp = 0;
+        File timeFile = new File(imageDirs[idx], FS_TIME);
+        if (timeFile.exists() && timeFile.canRead()) {
+          DataInputStream in = new DataInputStream(
+              new FileInputStream(timeFile));
+          try {
+            timeStamp = in.readLong();
+          } finally {
+            in.close();
+          }
+        }
+        if (maxTimeStamp < timeStamp) {
+          maxTimeStamp = timeStamp;
+          curFile = file;
+        }
       }
-    } else if (oldFile.exists() && newFile.exists()) {
-      // Or maybe between 1 and 2
-      newFile.renameTo(curFile);
-      oldFile.delete();
-    } else if (curFile.exists() && newFile.exists()) {
-      // Or else before stage 1, in which case we lose the edits
-      newFile.delete();
     }
 
     //
@@ -100,7 +132,7 @@
     //
     boolean needToSave = true;
     int imgVersion = FSConstants.DFS_CURRENT_VERSION;
-    if (curFile.exists()) {
+    if (curFile != null) {
       DataInputStream in = new DataInputStream(
                               new BufferedInputStream(
                                   new FileInputStream(curFile)));
@@ -156,7 +188,7 @@
     if( fsDir.namespaceID == 0 )
       fsDir.namespaceID = newNamespaceID();
     
-    needToSave |= ( edits.exists() && editLog.loadFSEdits(conf) > 0 );
+    needToSave |= ( editLog.exists() && editLog.loadFSEdits(conf) > 0 );
     if( needToSave )
       saveFSImage();
   }
@@ -167,35 +199,51 @@
   void saveFSImage() throws IOException {
     FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
     FSDirectory fsDir = fsNamesys.dir;
-    File curFile = new File(imageDir, FS_IMAGE);
-    File newFile = new File(imageDir, NEW_FS_IMAGE);
-    File oldFile = new File(imageDir, OLD_FS_IMAGE);
-    
-    //
-    // Write out data
-    //
-    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(newFile)));
-    try {
-      out.writeInt(FSConstants.DFS_CURRENT_VERSION);
-      out.writeInt(fsDir.namespaceID);
-      out.writeInt(fsDir.rootDir.numItemsInTree() - 1);
-      saveImage( "", fsDir.rootDir, out );
-      saveDatanodes( out );
-    } finally {
-      out.close();
+    for (int idx = 0; idx < imageDirs.length; idx++) {
+      File newFile = new File(imageDirs[idx], NEW_FS_IMAGE);
+      
+      //
+      // Write out data
+      //
+      DataOutputStream out = new DataOutputStream(
+            new BufferedOutputStream(
+            new FileOutputStream(newFile)));
+      try {
+        out.writeInt(FSConstants.DFS_CURRENT_VERSION);
+        out.writeInt(fsDir.namespaceID);
+        out.writeInt(fsDir.rootDir.numItemsInTree() - 1);
+        saveImage( "", fsDir.rootDir, out );
+        saveDatanodes( out );
+      } finally {
+        out.close();
+      }
     }
     
     //
     // Atomic move sequence
     //
-    // 1.  Move cur to old
-    curFile.renameTo(oldFile);
-    // 2.  Move new to cur
-    newFile.renameTo(curFile);
-    // 3.  Remove pending-edits file (it's been integrated with newFile)
-    editLog.getEditsFile().delete();
-    // 4.  Delete old
-    oldFile.delete();
+    for (int idx = 0; idx < imageDirs.length; idx++) {
+      File curFile = new File(imageDirs[idx], FS_IMAGE);
+      File newFile = new File(imageDirs[idx], NEW_FS_IMAGE);
+      File oldFile = new File(imageDirs[idx], OLD_FS_IMAGE);
+      File timeFile = new File(imageDirs[idx], FS_TIME);
+      // 1.  Move cur to old and delete timeStamp
+      curFile.renameTo(oldFile);
+      if (timeFile.exists()) { timeFile.delete(); }
+      // 2.  Move new to cur and write timestamp
+      newFile.renameTo(curFile);
+      DataOutputStream out = new DataOutputStream(
+            new FileOutputStream(timeFile));
+      try {
+        out.writeLong(System.currentTimeMillis());
+      } finally {
+        out.close();
+      }
+      // 3.  Remove pending-edits file (it's been integrated with newFile)
+      editLog.delete(idx);
+      // 4.  Delete old
+      oldFile.delete();
+    }
   }
 
   /**
@@ -219,9 +267,9 @@
     return newID;
   }
   
-  /** Create a new dfs name directory.  Caution: this destroys all files
+  /** Create a new dfs name directory.  Caution: this destroys all files
    * in this filesystem. */
-  static void format(File dir, Configuration conf) throws IOException {
+  static void format(File dir) throws IOException {
     File image = new File(dir, "image");
     File edits = new File(dir, "edits");
     

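The fstime bookkeeping works as follows: each time an image is saved, saveFSImage writes a single long (System.currentTimeMillis()) into an fstime file next to it, and loadFSImage later trusts the image whose directory carries the largest stamp. A stripped-down sketch of both halves, assuming plain directory arguments in place of the FSImage state:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;

    public class NewestImageSketch {
      // Record the save time in dir/fstime, as saveFSImage now does.
      static void stamp(File dir) throws IOException {
        DataOutputStream out = new DataOutputStream(
            new FileOutputStream(new File(dir, "fstime")));
        try {
          out.writeLong(System.currentTimeMillis());
        } finally {
          out.close();
        }
      }

      // Return whichever directory carries the most recent stamp, or null if none.
      static File newest(File[] dirs) throws IOException {
        File best = null;
        long maxStamp = 0;
        for (int idx = 0; idx < dirs.length; idx++) {
          File timeFile = new File(dirs[idx], "fstime");
          if (!timeFile.exists() || !timeFile.canRead()) continue;
          DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
          try {
            long stamp = in.readLong();
            if (stamp > maxStamp) { maxStamp = stamp; best = dirs[idx]; }
          } finally {
            in.close();
          }
        }
        return best;
      }
    }
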
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?view=diff&rev=469673&r1=469672&r2=469673
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Tue Oct 31 14:11:20 2006
@@ -196,10 +196,10 @@
     private SafeModeInfo safeMode;  // safe mode information
 
     /**
-     * dir is where the filesystem directory state 
+     * dirs is a list of directories where the filesystem directory state
      * is stored
      */
-    public FSNamesystem(File dir, NameNode nn, Configuration conf) throws IOException {
+    public FSNamesystem(File[] dirs, NameNode nn, Configuration conf) throws IOException {
         fsNamesystemObject = this;
         InetSocketAddress addr = DataNode.createSocketAddr(conf.get("fs.default.name", "local"));
         this.maxReplication = conf.getInt("dfs.replication.max", 512);
@@ -224,7 +224,7 @@
 
         this.localMachine = addr.getHostName();
         this.port = addr.getPort();
-        this.dir = new FSDirectory(dir);
+        this.dir = new FSDirectory(dirs);
         this.dir.loadFSImage( conf );
         this.safeMode = new SafeModeInfo( conf );
         setBlockTotal();

Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?view=diff&rev=469673&r1=469672&r2=469673
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Tue Oct 31 14:11:20 2006
@@ -82,7 +82,16 @@
     /** Format a new filesystem.  Destroys any filesystem that may already
      * exist at this location.  **/
     public static void format(Configuration conf) throws IOException {
-      FSDirectory.format(getDir(conf), conf);
+      File[] dirs = getDirs(conf);
+      for (int idx = 0; idx < dirs.length; idx++) {
+        FSImage.format(dirs[idx]);
+      }
+    }
+
+    /** Format a new filesystem.  Destroys any filesystem that may already
+     * exist at this location.  **/
+    public static void format(File dir) throws IOException {
+      FSImage.format(dir);
     }
 
     private class NameNodeMetrics {
@@ -121,24 +130,30 @@
      * Create a NameNode at the default location
      */
     public NameNode(Configuration conf) throws IOException {
-       this(getDir(conf),DataNode.createSocketAddr(conf.get("fs.default.name", "local")).getHostName(),
+       this(getDirs(conf),DataNode.createSocketAddr(conf.get("fs.default.name", "local")).getHostName(),
                        DataNode.createSocketAddr(conf.get("fs.default.name", "local")).getPort(), conf);
     }
 
     /**
      * Create a NameNode at the specified location and start it.
      */
-    public NameNode(File dir, String bindAddress, int port, Configuration conf) throws IOException {
-        this.namesystem = new FSNamesystem(dir, this, conf);
+    public NameNode(File[] dirs, String bindAddress, int port, Configuration conf) throws IOException {
+        this.namesystem = new FSNamesystem(dirs, this, conf);
         this.handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
         this.server = RPC.getServer(this, bindAddress, port, handlerCount, false, conf);
         this.server.start();
         myMetrics = new NameNodeMetrics();
     }
 
-    /** Return the configured directory where name data is stored. */
-    private static File getDir(Configuration conf) {
-      return new File(conf.get("dfs.name.dir", "/tmp/hadoop/dfs/name"));
+    /** Return the configured directories where name data is stored. */
+    private static File[] getDirs(Configuration conf) {
+      String[] dirNames = conf.getStrings("dfs.name.dir");
+      if (dirNames == null) { dirNames = new String[] {"/tmp/hadoop/dfs/name"}; }
+      File[] dirs = new File[dirNames.length];
+      for (int idx = 0; idx < dirs.length; idx++) {
+        dirs[idx] = new File(dirNames[idx]);
+      }
+      return dirs;
     }
 
     /**
@@ -556,17 +571,22 @@
         Configuration conf = new Configuration();
 
         if (argv.length == 1 && argv[0].equals("-format")) {
-          File dir = getDir(conf);
-          if (dir.exists()) {
-            System.err.print("Re-format filesystem in " + dir +" ? (Y or N) ");
-            if (!(System.in.read() == 'Y')) {
-              System.err.println("Format aborted.");
-              System.exit(1);
+          boolean aborted = false;
+          File[] dirs = getDirs(conf);
+          for (int idx = 0; idx < dirs.length; idx++) {
+            if (dirs[idx].exists()) {
+              System.err.print("Re-format filesystem in " + dirs[idx] +" ? (Y or N) ");
+              if (!(System.in.read() == 'Y')) {
+                System.err.println("Format aborted in "+ dirs[idx]);
+                aborted = true;
+              } else {
+                format(dirs[idx]);
+                System.err.println("Formatted "+dirs[idx]);
+              }
+              System.in.read(); // discard the enter-key
             }
           }
-          format(conf);
-          System.err.println("Formatted "+dir);
-          System.exit(0);
+          System.exit(aborted ? 1 : 0);
         }
         
         NameNode namenode = new NameNode(conf);

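getDirs leans on Configuration.getStrings to split the comma-delimited dfs.name.dir value into one File per entry. Standalone, the parsing amounts to roughly this (the property value shown is illustrative):

    import java.io.File;

    public class NameDirsSketch {
      public static void main(String[] args) {
        String value = "/d1/dfs/name,/d2/dfs/name";  // hypothetical dfs.name.dir value
        String[] names = value.split(",");           // comma-delimited, per the config docs above
        File[] dirs = new File[names.length];
        for (int idx = 0; idx < dirs.length; idx++) {
          dirs[idx] = new File(names[idx].trim());   // one directory per entry
        }
        for (int idx = 0; idx < dirs.length; idx++) {
          System.out.println(dirs[idx]);
        }
      }
    }
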
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java?view=diff&rev=469673&r1=469672&r2=469673
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFS.java Tue Oct 31 14:11:20 2006
@@ -219,7 +219,8 @@
 
     int nameNodePort = 9000 + testCycleNumber++; // ToDo: settable base port
     String nameNodeSocketAddr = "localhost:" + nameNodePort;
-    NameNode nameNodeDaemon = new NameNode(new File(nameFSDir), "localhost", nameNodePort, conf);
+    NameNode nameNodeDaemon = new NameNode(new File[] { new File(nameFSDir) },
+        "localhost", nameNodePort, conf);
     DFSClient dfsClient = null;
     try {
       //

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java?view=diff&rev=469673&r1=469672&r2=469673
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java Tue Oct 31 14:11:20 2006
@@ -352,7 +352,8 @@
 	
     NameNode.format(conf);
     
-    nameNodeDaemon = new NameNode(new File(nameFSDir), "localhost", nameNodePort, conf);
+    nameNodeDaemon = new NameNode(new File[] { new File(nameFSDir) },
+        "localhost", nameNodePort, conf);
 
      //
       //        start DataNodes

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java?view=diff&rev=469673&r1=469672&r2=469673
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/MiniDFSCluster.java Tue Oct 31 14:11:20 2006
@@ -135,7 +135,8 @@
     this.nameNodeInfoPort = 50080;   // We just want this port to be different from the default. 
     File base_dir = new File(System.getProperty("test.build.data"),
                              "dfs/");
-    conf.set("dfs.name.dir", new File(base_dir, "name").getPath());
+    conf.set("dfs.name.dir", new File(base_dir, "name1").getPath()+","+
+        new File(base_dir, "name2").getPath());
     conf.set("dfs.data.dir", new File(base_dir, "data1").getPath()+","+
         new File(base_dir, "data2").getPath());
     conf.setInt("dfs.replication", 1);