Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2006/02/03 21:03:01 UTC

svn commit: r374735 - in /lucene/hadoop/trunk/src/test: ./ org/ org/apache/ org/apache/hadoop/ org/apache/hadoop/fs/ org/apache/hadoop/io/ org/apache/hadoop/ipc/ org/apache/hadoop/mapred/ org/apache/hadoop/ndfs/

Author: cutting
Date: Fri Feb  3 12:02:58 2006
New Revision: 374735

URL: http://svn.apache.org/viewcvs?rev=374735&view=rev
Log:
Initial commit of code copied from Nutch.

Added:
    lucene/hadoop/trunk/src/test/
    lucene/hadoop/trunk/src/test/org/
    lucene/hadoop/trunk/src/test/org/apache/
    lucene/hadoop/trunk/src/test/org/apache/hadoop/
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/
      - copied from r374710, lucene/nutch/trunk/src/test/org/apache/nutch/fs/
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/
      - copied from r374710, lucene/nutch/trunk/src/test/org/apache/nutch/io/
    lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/
      - copied from r374710, lucene/nutch/trunk/src/test/org/apache/nutch/ipc/
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/
      - copied from r374710, lucene/nutch/trunk/src/test/org/apache/nutch/mapred/
    lucene/hadoop/trunk/src/test/org/apache/hadoop/ndfs/
      - copied from r374710, lucene/nutch/trunk/src/test/org/apache/nutch/ndfs/
Modified:
    lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestNutchFileSystem.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/RandomDatum.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestArrayFile.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestMD5Hash.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSequenceFile.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSetFile.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestUTF8.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestVersionedWritable.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestWritable.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/TestIPC.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/TestRPC.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MapredLoadTest.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextInputFormat.java
    lucene/hadoop/trunk/src/test/org/apache/hadoop/ndfs/TestNDFS.java
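
The changes below are largely mechanical: each file moves from an
org.apache.nutch.* package to the matching org.apache.hadoop.* package,
and Nutch's NutchConf is replaced by the new
org.apache.hadoop.conf.Configuration class. A minimal sketch of the
post-rename idiom, using only calls that appear in the diffs below (the
class name ConfExample is a placeholder, not part of the commit):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.NutchFileSystem;

    public class ConfExample {
      public static void main(String[] args) throws IOException {
        // Configuration replaces NutchConf; the accessors keep their shape.
        Configuration conf = new Configuration();
        int bufferSize = conf.getInt("io.file.buffer.size", 4096);

        // Filesystem handles are still obtained through NutchFileSystem.
        NutchFileSystem fs = NutchFileSystem.get(conf);
        System.out.println("buffer size = " + bufferSize + ", fs = " + fs);
      }
    }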

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestNutchFileSystem.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestNutchFileSystem.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestNutchFileSystem.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestNutchFileSystem.java Fri Feb  3 12:02:58 2006
@@ -14,24 +14,24 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.fs;
+package org.apache.hadoop.fs;
 
 import java.io.*;
 import java.util.*;
 import junit.framework.TestCase;
 import java.util.logging.*;
 
-import org.apache.nutch.fs.*;
-import org.apache.nutch.mapred.*;
-import org.apache.nutch.mapred.lib.*;
-import org.apache.nutch.io.*;
-import org.apache.nutch.util.*;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.mapred.*;
+import org.apache.hadoop.mapred.lib.*;
+import org.apache.hadoop.io.*;
+import org.apache.hadoop.conf.*;
 
 public class TestNutchFileSystem extends TestCase {
   private static final Logger LOG = InputFormatBase.LOG;
 
-  private static NutchConf nutchConf = new NutchConf();
-  private static int BUFFER_SIZE = nutchConf.getInt("io.file.buffer.size", 4096);
+  private static Configuration conf = new Configuration();
+  private static int BUFFER_SIZE = conf.getInt("io.file.buffer.size", 4096);
 
   private static final long MEGA = 1024 * 1024;
   private static final int SEEKS_PER_FILE = 4;
@@ -49,7 +49,7 @@
   public static void testFs(long megaBytes, int numFiles, long seed)
     throws Exception {
 
-    NutchFileSystem fs = NutchFileSystem.get(nutchConf);
+    NutchFileSystem fs = NutchFileSystem.get(conf);
 
     if (seed == 0)
       seed = new Random().nextLong();
@@ -99,7 +99,7 @@
     LOG.info("created control file for: "+totalSize+" bytes");
   }
 
-  public static class WriteMapper extends NutchConfigured implements Mapper {
+  public static class WriteMapper extends Configured implements Mapper {
     private Random random = new Random();
     private byte[] buffer = new byte[BUFFER_SIZE];
     private NutchFileSystem fs;
@@ -107,7 +107,7 @@
     
     {
       try {
-        fs = NutchFileSystem.get(nutchConf);
+        fs = NutchFileSystem.get(conf);
       } catch (IOException e) {
         throw new RuntimeException(e);
       }
@@ -115,7 +115,7 @@
 
     public WriteMapper() { super(null); }
     
-    public WriteMapper(NutchConf conf) { super(conf); }
+    public WriteMapper(Configuration conf) { super(conf); }
 
     public void configure(JobConf job) {
       setConf(job);
@@ -164,7 +164,7 @@
     fs.delete(DATA_DIR);
     fs.delete(WRITE_DIR);
     
-    JobConf job = new JobConf(nutchConf);
+    JobConf job = new JobConf(conf);
     job.setBoolean("fs.test.fastCheck", fastCheck);
 
     job.setInputDir(CONTROL_DIR);
@@ -182,7 +182,7 @@
     JobClient.runJob(job);
   }
 
-  public static class ReadMapper extends NutchConfigured implements Mapper {
+  public static class ReadMapper extends Configured implements Mapper {
     private Random random = new Random();
     private byte[] buffer = new byte[BUFFER_SIZE];
     private byte[] check  = new byte[BUFFER_SIZE];
@@ -191,7 +191,7 @@
 
     {
       try {
-        fs = NutchFileSystem.get(nutchConf);
+        fs = NutchFileSystem.get(conf);
       } catch (IOException e) {
         throw new RuntimeException(e);
       }
@@ -199,7 +199,7 @@
 
     public ReadMapper() { super(null); }
     
-    public ReadMapper(NutchConf conf) { super(conf); }
+    public ReadMapper(Configuration conf) { super(conf); }
 
     public void configure(JobConf job) {
       setConf(job);
@@ -255,7 +255,7 @@
 
     fs.delete(READ_DIR);
 
-    JobConf job = new JobConf(nutchConf);
+    JobConf job = new JobConf(conf);
     job.setBoolean("fs.test.fastCheck", fastCheck);
 
 
@@ -275,7 +275,7 @@
   }
 
 
-  public static class SeekMapper extends NutchConfigured implements Mapper {
+  public static class SeekMapper extends Configured implements Mapper {
     private Random random = new Random();
     private byte[] check  = new byte[BUFFER_SIZE];
     private NutchFileSystem fs;
@@ -283,7 +283,7 @@
 
     {
       try {
-        fs = NutchFileSystem.get(nutchConf);
+        fs = NutchFileSystem.get(conf);
       } catch (IOException e) {
         throw new RuntimeException(e);
       }
@@ -291,7 +291,7 @@
 
     public SeekMapper() { super(null); }
     
-    public SeekMapper(NutchConf conf) { super(conf); }
+    public SeekMapper(Configuration conf) { super(conf); }
 
     public void configure(JobConf job) {
       setConf(job);
@@ -347,7 +347,7 @@
 
     fs.delete(READ_DIR);
 
-    JobConf job = new JobConf(nutchConf);
+    JobConf job = new JobConf(conf);
     job.setBoolean("fs.test.fastCheck", fastCheck);
 
     job.setInputDir(CONTROL_DIR);
@@ -401,7 +401,7 @@
     LOG.info("files = " + files);
     LOG.info("megaBytes = " + megaBytes);
   
-    NutchFileSystem fs = NutchFileSystem.get(nutchConf);
+    NutchFileSystem fs = NutchFileSystem.get(conf);
 
     if (!noWrite) {
       createControlFile(fs, megaBytes*MEGA, files, seed);

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/RandomDatum.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/RandomDatum.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/RandomDatum.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/RandomDatum.java Fri Feb  3 12:02:58 2006
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.io;
+package org.apache.hadoop.io;
 
 import java.util.*;
 import java.io.*;

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestArrayFile.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestArrayFile.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestArrayFile.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestArrayFile.java Fri Feb  3 12:02:58 2006
@@ -14,15 +14,15 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.io;
+package org.apache.hadoop.io;
 
 import java.io.*;
 import java.util.*;
 import junit.framework.TestCase;
 import java.util.logging.*;
 
-import org.apache.nutch.fs.*;
-import org.apache.nutch.util.*;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.conf.*;
 
 /** Support for flat files of binary key/value pairs. */
 public class TestArrayFile extends TestCase {
@@ -35,18 +35,18 @@
   }
 
   public void testArrayFile() throws Exception {
-      NutchConf nutchConf = new NutchConf();
-    NutchFileSystem nfs = new LocalFileSystem(nutchConf);
+      Configuration conf = new Configuration();
+    NutchFileSystem nfs = new LocalFileSystem(conf);
     RandomDatum[] data = generate(10000);
     writeTest(nfs, data, FILE);
-    readTest(nfs, data, FILE, nutchConf);
+    readTest(nfs, data, FILE, conf);
   }
 
   public void testEmptyFile() throws Exception {
-    NutchConf nutchConf = new NutchConf();
-    NutchFileSystem nfs = new LocalFileSystem(nutchConf);
+    Configuration conf = new Configuration();
+    NutchFileSystem nfs = new LocalFileSystem(conf);
     writeTest(nfs, new RandomDatum[0], FILE);
-    ArrayFile.Reader reader = new ArrayFile.Reader(nfs, FILE, nutchConf);
+    ArrayFile.Reader reader = new ArrayFile.Reader(nfs, FILE, conf);
     assertNull(reader.get(0, new RandomDatum()));
     reader.close();
   }
@@ -73,11 +73,11 @@
     writer.close();
   }
 
-  private static void readTest(NutchFileSystem nfs, RandomDatum[] data, String file, NutchConf nutchConf)
+  private static void readTest(NutchFileSystem nfs, RandomDatum[] data, String file, Configuration conf)
     throws IOException {
     RandomDatum v = new RandomDatum();
     LOG.fine("reading " + data.length + " records");
-    ArrayFile.Reader reader = new ArrayFile.Reader(nfs, file, nutchConf);
+    ArrayFile.Reader reader = new ArrayFile.Reader(nfs, file, conf);
     for (int i = 0; i < data.length; i++) {       // try forwards
       reader.get(i, v);
       if (!v.equals(data[i])) {
@@ -101,16 +101,16 @@
     boolean create = true;
     boolean check = true;
     String file = FILE;
-    String usage = "Usage: TestArrayFile (-local | -ndfs <namenode:port>) [-count N] [-nocreate] [-nocheck] file";
+    String usage = "Usage: TestArrayFile (-local | -dfs <namenode:port>) [-count N] [-nocreate] [-nocheck] file";
       
     if (args.length == 0) {
       System.err.println(usage);
       System.exit(-1);
     }
 
-    NutchConf nutchConf = new NutchConf();
+    Configuration conf = new Configuration();
     int i = 0;
-    NutchFileSystem nfs = NutchFileSystem.parseArgs(args, i, nutchConf);
+    NutchFileSystem nfs = NutchFileSystem.parseArgs(args, i, conf);
     try {
         for (; i < args.length; i++) {       // parse command line
             if (args[i] == null) {
@@ -141,7 +141,7 @@
         }
 
         if (check) {
-            readTest(nfs, data, file, nutchConf);
+            readTest(nfs, data, file, conf);
         }
     } finally {
         nfs.close();

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestMD5Hash.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestMD5Hash.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestMD5Hash.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestMD5Hash.java Fri Feb  3 12:02:58 2006
@@ -14,9 +14,9 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.io;
+package org.apache.hadoop.io;
 
-import org.apache.nutch.io.TestWritable;
+import org.apache.hadoop.io.TestWritable;
 import junit.framework.TestCase;
 import java.security.MessageDigest;
 import java.util.Random;

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSequenceFile.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSequenceFile.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSequenceFile.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSequenceFile.java Fri Feb  3 12:02:58 2006
@@ -14,21 +14,22 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.io;
+package org.apache.hadoop.io;
 
 import java.io.*;
 import java.util.*;
 import junit.framework.TestCase;
 import java.util.logging.*;
 
-import org.apache.nutch.fs.*;
-import org.apache.nutch.util.*;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.conf.*;
+
 
 /** Support for flat files of binary key/value pairs. */
 public class TestSequenceFile extends TestCase {
   private static Logger LOG = SequenceFile.LOG;
 
-  private static NutchConf nutchConf = new NutchConf();
+  private static Configuration conf = new Configuration();
   
   public TestSequenceFile(String name) { super(name); }
 
@@ -41,7 +42,7 @@
  
     int seed = new Random().nextInt();
 
-    NutchFileSystem nfs = new LocalFileSystem(new NutchConf());
+    NutchFileSystem nfs = new LocalFileSystem(new Configuration());
     try {
         //LOG.setLevel(Level.FINE);
         writeTest(nfs, count, seed, file, false);
@@ -87,7 +88,7 @@
     RandomDatum k = new RandomDatum();
     RandomDatum v = new RandomDatum();
     LOG.fine("reading " + count + " records");
-    SequenceFile.Reader reader = new SequenceFile.Reader(nfs, file, nutchConf);
+    SequenceFile.Reader reader = new SequenceFile.Reader(nfs, file, conf);
     RandomDatum.Generator generator = new RandomDatum.Generator(seed);
     for (int i = 0; i < count; i++) {
       generator.next();
@@ -131,7 +132,7 @@
     RandomDatum k = new RandomDatum();
     RandomDatum v = new RandomDatum();
     Iterator iterator = map.entrySet().iterator();
-    SequenceFile.Reader reader = new SequenceFile.Reader(nfs, file + ".sorted", nutchConf);
+    SequenceFile.Reader reader = new SequenceFile.Reader(nfs, file + ".sorted", conf);
     for (int i = 0; i < count; i++) {
       Map.Entry entry = (Map.Entry)iterator.next();
       RandomDatum key = (RandomDatum)entry.getKey();
@@ -197,8 +198,8 @@
                                                int megabytes, int factor) {
     SequenceFile.Sorter sorter = 
       fast
-      ? new SequenceFile.Sorter(nfs, new RandomDatum.Comparator(),RandomDatum.class, nutchConf)
-      : new SequenceFile.Sorter(nfs, RandomDatum.class, RandomDatum.class, nutchConf);
+      ? new SequenceFile.Sorter(nfs, new RandomDatum.Comparator(),RandomDatum.class, conf)
+      : new SequenceFile.Sorter(nfs, RandomDatum.class, RandomDatum.class, conf);
     sorter.setMemory(megabytes * 1024*1024);
     sorter.setFactor(factor);
     return sorter;
@@ -216,14 +217,14 @@
     boolean merge = false;
     boolean compress = false;
     String file = null;
-    String usage = "Usage: SequenceFile (-local | -ndfs <namenode:port>) [-count N] [-megabytes M] [-factor F] [-nocreate] [-check] [-fast] [-merge] [-compress] file";
+    String usage = "Usage: SequenceFile (-local | -dfs <namenode:port>) [-count N] [-megabytes M] [-factor F] [-nocreate] [-check] [-fast] [-merge] [-compress] file";
     
     if (args.length == 0) {
         System.err.println(usage);
         System.exit(-1);
     }
     int i = 0;
-    NutchFileSystem nfs = NutchFileSystem.parseArgs(args, i, nutchConf);      
+    NutchFileSystem nfs = NutchFileSystem.parseArgs(args, i, conf);      
     try {
       for (; i < args.length; i++) {       // parse command line
           if (args[i] == null) {
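
Condensed from the hunks above, the read and sort paths of the renamed
SequenceFile look roughly as follows. The Reader and Sorter constructors
and the setMemory/setFactor calls appear verbatim in this diff; the
next(key, value) loop and the close() call are assumptions about the
surrounding API that this excerpt does not show:

    Configuration conf = new Configuration();
    NutchFileSystem nfs = new LocalFileSystem(conf);

    // Sequential scan of key/value records.
    SequenceFile.Reader reader = new SequenceFile.Reader(nfs, "test.seq", conf);
    RandomDatum key = new RandomDatum();
    RandomDatum value = new RandomDatum();
    while (reader.next(key, value)) {
      // one record per iteration
    }
    reader.close();

    // External sort with a bounded memory budget and merge factor.
    SequenceFile.Sorter sorter =
      new SequenceFile.Sorter(nfs, new RandomDatum.Comparator(), RandomDatum.class, conf);
    sorter.setMemory(10 * 1024 * 1024);   // 10 megabytes of sort buffer
    sorter.setFactor(10);                 // merge up to 10 streams at once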

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSetFile.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSetFile.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSetFile.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSetFile.java Fri Feb  3 12:02:58 2006
@@ -14,15 +14,15 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.io;
+package org.apache.hadoop.io;
 
 import java.io.*;
 import java.util.*;
 import junit.framework.TestCase;
 import java.util.logging.*;
 
-import org.apache.nutch.fs.*;
-import org.apache.nutch.util.*;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.util.LogFormatter;
 
 /** Support for flat files of binary key/value pairs. */
 public class TestSetFile extends TestCase {
@@ -30,12 +30,12 @@
   private static String FILE =
     System.getProperty("test.build.data",".") + "/test.set";
 
-  private static NutchConf nutchConf = new NutchConf();
+  private static Configuration conf = new Configuration();
   
   public TestSetFile(String name) { super(name); }
 
   public void testSetFile() throws Exception {
-    NutchFileSystem nfs = new LocalFileSystem(nutchConf);
+    NutchFileSystem nfs = new LocalFileSystem(conf);
     try {
         RandomDatum[] data = generate(10000);
         writeTest(nfs, data, FILE);
@@ -72,7 +72,7 @@
     throws IOException {
     RandomDatum v = new RandomDatum();
     LOG.fine("reading " + data.length + " records");
-    SetFile.Reader reader = new SetFile.Reader(nfs, file, nutchConf);
+    SetFile.Reader reader = new SetFile.Reader(nfs, file, conf);
     for (int i = 0; i < data.length; i++) {
       if (!reader.seek(data[i]))
         throw new RuntimeException("wrong value at " + i);
@@ -88,7 +88,7 @@
     boolean create = true;
     boolean check = true;
     String file = FILE;
-    String usage = "Usage: TestSetFile (-local | -ndfs <namenode:port>) [-count N] [-nocreate] [-nocheck] file";
+    String usage = "Usage: TestSetFile (-local | -dfs <namenode:port>) [-count N] [-nocreate] [-nocheck] file";
       
     if (args.length == 0) {
       System.err.println(usage);
@@ -96,7 +96,7 @@
     }
       
     int i = 0;
-    NutchFileSystem nfs = NutchFileSystem.parseArgs(args, i, nutchConf);      
+    NutchFileSystem nfs = NutchFileSystem.parseArgs(args, i, conf);      
     try {
       for (; i < args.length; i++) {       // parse command line
         if (args[i] == null) {

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestUTF8.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestUTF8.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestUTF8.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestUTF8.java Fri Feb  3 12:02:58 2006
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.io;
+package org.apache.hadoop.io;
 
 import junit.framework.TestCase;
 import java.util.Random;

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestVersionedWritable.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestVersionedWritable.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestVersionedWritable.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestVersionedWritable.java Fri Feb  3 12:02:58 2006
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.io;
+package org.apache.hadoop.io;
 
 import java.io.*;
 import java.util.Random;

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestWritable.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestWritable.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestWritable.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestWritable.java Fri Feb  3 12:02:58 2006
@@ -14,14 +14,14 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.io;
+package org.apache.hadoop.io;
 
 import java.io.*;
 import java.util.Random;
 import junit.framework.TestCase;
-import org.apache.nutch.io.*;
+import org.apache.hadoop.io.*;
 import org.apache.nutch.parse.ParseData;
-import org.apache.nutch.util.NutchConf;
+import org.apache.hadoop.conf.Configuration;
 
 /** Unit tests for Writable. */
 public class TestWritable extends TestCase {
@@ -72,7 +72,7 @@
       Writable after = (Writable)before.getClass().newInstance();
       if(after instanceof ParseData) {
         ParseData parseData = (ParseData) after;
-        parseData.setConf(new NutchConf());
+        parseData.setConf(new Configuration());
       }
       after.readFields(dib);
 

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/TestIPC.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/TestIPC.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/TestIPC.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/TestIPC.java Fri Feb  3 12:02:58 2006
@@ -14,10 +14,10 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.ipc;
+package org.apache.hadoop.ipc;
 
-import org.apache.nutch.io.Writable;
-import org.apache.nutch.io.LongWritable;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.LongWritable;
 
 import java.util.Random;
 import java.io.IOException;
@@ -28,15 +28,15 @@
 import java.util.logging.Logger;
 import java.util.logging.Level;
 
-import org.apache.nutch.util.LogFormatter;
-import org.apache.nutch.util.NutchConf;
+import org.apache.hadoop.util.LogFormatter;
+import org.apache.hadoop.conf.Configuration;
 
 /** Unit tests for IPC. */
 public class TestIPC extends TestCase {
   public static final Logger LOG =
-    LogFormatter.getLogger("org.apache.nutch.ipc.TestIPC");
+    LogFormatter.getLogger("org.apache.hadoop.ipc.TestIPC");
 
-  private static NutchConf nutchConf = new NutchConf();
+  private static Configuration conf = new Configuration();
   
   // quiet during testing, since output ends up on console
   static {
@@ -55,7 +55,7 @@
     private boolean sleep;
 
     public TestServer(int port, int handlerCount, boolean sleep) {
-      super(port, LongWritable.class, handlerCount, nutchConf);
+      super(port, LongWritable.class, handlerCount, conf);
       this.setTimeout(1000);
       this.sleep = sleep;
     }
@@ -148,7 +148,7 @@
 
     Client[] clients = new Client[clientCount];
     for (int i = 0; i < clientCount; i++) {
-      clients[i] = new Client(LongWritable.class, nutchConf);
+      clients[i] = new Client(LongWritable.class, conf);
     }
     
     SerialCaller[] callers = new SerialCaller[callerCount];
@@ -187,7 +187,7 @@
 
     Client[] clients = new Client[clientCount];
     for (int i = 0; i < clientCount; i++) {
-      clients[i] = new Client(LongWritable.class, nutchConf);
+      clients[i] = new Client(LongWritable.class, conf);
     }
     
     ParallelCaller[] callers = new ParallelCaller[callerCount];

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/TestRPC.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/TestRPC.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/TestRPC.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/ipc/TestRPC.java Fri Feb  3 12:02:58 2006
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.ipc;
+package org.apache.hadoop.ipc;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -26,17 +26,17 @@
 import java.util.logging.Level;
 import java.util.Arrays;
 
-import org.apache.nutch.util.LogFormatter;
-import org.apache.nutch.util.NutchConf;
+import org.apache.hadoop.util.LogFormatter;
+import org.apache.hadoop.conf.Configuration;
 
 /** Unit tests for RPC. */
 public class TestRPC extends TestCase {
   private static final int PORT = 1234;
 
   public static final Logger LOG =
-    LogFormatter.getLogger("org.apache.nutch.ipc.TestRPC");
+    LogFormatter.getLogger("org.apache.hadoop.ipc.TestRPC");
   
-  private static NutchConf nutchConf = new NutchConf();
+  private static Configuration conf = new Configuration();
 
   // quiet during testing, since output ends up on console
   static {
@@ -83,12 +83,12 @@
   }
 
   public void testCalls() throws Exception {
-    Server server = RPC.getServer(new TestImpl(), PORT, nutchConf);
+    Server server = RPC.getServer(new TestImpl(), PORT, conf);
     server.start();
 
     InetSocketAddress addr = new InetSocketAddress(PORT);
     TestProtocol proxy =
-      (TestProtocol)RPC.getProxy(TestProtocol.class, addr, nutchConf);
+      (TestProtocol)RPC.getProxy(TestProtocol.class, addr, conf);
     
     proxy.ping();
 
@@ -117,12 +117,12 @@
     Method echo =
       TestProtocol.class.getMethod("echo", new Class[] { String.class });
     String[] strings = (String[])RPC.call(echo, new String[][]{{"a"},{"b"}},
-                                         new InetSocketAddress[] {addr, addr}, nutchConf);
+                                         new InetSocketAddress[] {addr, addr}, conf);
     assertTrue(Arrays.equals(strings, new String[]{"a","b"}));
 
     Method ping = TestProtocol.class.getMethod("ping", new Class[] {});
     Object[] voids = (Object[])RPC.call(ping, new Object[][]{{},{}},
-                                        new InetSocketAddress[] {addr, addr}, nutchConf);
+                                        new InetSocketAddress[] {addr, addr}, conf);
     assertEquals(voids, null);
 
     server.stop();
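
The testCalls changes above reduce to the following round trip in the
renamed API. TestProtocol and TestImpl stand in for the protocol
interface and implementation defined earlier in this file; everything
else is taken directly from the hunks above:

    // Server side: publish an implementation object on a port.
    Configuration conf = new Configuration();
    Server server = RPC.getServer(new TestImpl(), 1234, conf);
    server.start();

    // Client side: build a proxy for the protocol and invoke it.
    InetSocketAddress addr = new InetSocketAddress(1234);
    TestProtocol proxy =
      (TestProtocol)RPC.getProxy(TestProtocol.class, addr, conf);
    proxy.ping();

    server.stop();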

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MapredLoadTest.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MapredLoadTest.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MapredLoadTest.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MapredLoadTest.java Fri Feb  3 12:02:58 2006
@@ -13,12 +13,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.nutch.mapred;
+package org.apache.hadoop.mapred;
 
-import org.apache.nutch.fs.*;
-import org.apache.nutch.io.*;
-import org.apache.nutch.util.*;
-import org.apache.nutch.mapred.lib.*;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.io.*;
+import org.apache.hadoop.conf.*;
+import org.apache.hadoop.mapred.lib.*;
 
 import java.io.*;
 import java.util.*;
@@ -26,7 +26,7 @@
 
 /**********************************************************
  * MapredLoadTest generates a bunch of work that exercises
- * a Nutch Map-Reduce system (and NDFS, too).  It goes through
+ * a Nutch Map-Reduce system (and DFS, too).  It goes through
  * the following steps:
  *
  * 1) Take inputs 'range' and 'counts'.
@@ -113,15 +113,15 @@
     int range;
     int counts;
     Random r = new Random();
-    NutchConf nutchConf;
+    Configuration conf;
 
     /**
      * MapredLoadTest
      */
-    public MapredLoadTest(int range, int counts, NutchConf nutchConf) throws IOException {
+    public MapredLoadTest(int range, int counts, Configuration conf) throws IOException {
         this.range = range;
         this.counts = counts;
-        this.nutchConf = nutchConf;
+        this.conf = conf;
     }
 
     /**
@@ -145,7 +145,7 @@
         //
         // Write the answer key to a file.  
         //
-        NutchFileSystem fs = NutchFileSystem.get(nutchConf);
+        NutchFileSystem fs = NutchFileSystem.get(conf);
         File testdir = new File("mapred.loadtest");
         fs.mkdirs(testdir);
 
@@ -184,7 +184,7 @@
         fs.mkdirs(randomOuts);
 
 
-        JobConf genJob = new JobConf(nutchConf);
+        JobConf genJob = new JobConf(conf);
         genJob.setInputDir(randomIns);
         genJob.setInputKeyClass(IntWritable.class);
         genJob.setInputValueClass(IntWritable.class);
@@ -223,7 +223,7 @@
         //
         File finalOuts = new File(testdir, "finalouts");
         fs.mkdirs(finalOuts);
-        JobConf checkJob = new JobConf(nutchConf);
+        JobConf checkJob = new JobConf(conf);
         checkJob.setInputDir(randomOuts);
         checkJob.setInputKeyClass(LongWritable.class);
         checkJob.setInputValueClass(UTF8.class);
@@ -246,7 +246,7 @@
         //
         boolean success = true;
         File recomputedkey = new File(finalOuts, "part-00000");
-        SequenceFile.Reader in = new SequenceFile.Reader(fs, recomputedkey.getPath(), nutchConf);
+        SequenceFile.Reader in = new SequenceFile.Reader(fs, recomputedkey.getPath(), conf);
         int totalseen = 0;
         try {
             IntWritable key = new IntWritable();
@@ -311,7 +311,7 @@
         int range = Integer.parseInt(argv[i++]);
         int counts = Integer.parseInt(argv[i++]);
 
-        MapredLoadTest mlt = new MapredLoadTest(range, counts, new NutchConf());
+        MapredLoadTest mlt = new MapredLoadTest(range, counts, new Configuration());
         mlt.launch();
     }
 }
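
The job setup that MapredLoadTest performs twice above follows one
pattern; here it is sketched with only the calls visible in this commit.
The "genins" input directory and the fs.test.fastCheck property are
borrowed from other hunks in this message, so treat the specifics as
illustrative rather than the test's exact wiring:

    // Build a job from the shared Configuration, point it at an input
    // directory, declare the key/value types, and run it synchronously.
    JobConf job = new JobConf(conf);
    job.setInputDir(new File("mapred.loadtest", "genins"));
    job.setInputKeyClass(IntWritable.class);
    job.setInputValueClass(IntWritable.class);
    job.setBoolean("fs.test.fastCheck", true);
    JobClient.runJob(job);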

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestSequenceFileInputFormat.java Fri Feb  3 12:02:58 2006
@@ -14,26 +14,26 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.mapred;
+package org.apache.hadoop.mapred;
 
 import java.io.*;
 import java.util.*;
 import junit.framework.TestCase;
 import java.util.logging.*;
 
-import org.apache.nutch.fs.*;
-import org.apache.nutch.io.*;
-import org.apache.nutch.util.*;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.io.*;
+import org.apache.hadoop.conf.*;
 
 public class TestSequenceFileInputFormat extends TestCase {
   private static final Logger LOG = InputFormatBase.LOG;
 
   private static int MAX_LENGTH = 10000;
-  private static NutchConf nutchConf = new NutchConf();
+  private static Configuration conf = new Configuration();
 
   public void testFormat() throws Exception {
-    JobConf job = new JobConf(nutchConf);
-    NutchFileSystem fs = NutchFileSystem.getNamed("local", nutchConf);
+    JobConf job = new JobConf(conf);
+    NutchFileSystem fs = NutchFileSystem.getNamed("local", conf);
     File dir = new File(System.getProperty("test.build.data",".") + "/mapred");
     File file = new File(dir, "test.seq");
     

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextInputFormat.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextInputFormat.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextInputFormat.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestTextInputFormat.java Fri Feb  3 12:02:58 2006
@@ -14,26 +14,26 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.mapred;
+package org.apache.hadoop.mapred;
 
 import java.io.*;
 import java.util.*;
 import junit.framework.TestCase;
 import java.util.logging.*;
 
-import org.apache.nutch.fs.*;
-import org.apache.nutch.io.*;
-import org.apache.nutch.util.*;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.io.*;
+import org.apache.hadoop.conf.*;
 
 public class TestTextInputFormat extends TestCase {
   private static final Logger LOG = InputFormatBase.LOG;
 
   private static int MAX_LENGTH = 10000;
-  private static NutchConf nutchConf = new NutchConf();
+  private static Configuration conf = new Configuration();
   
   public void testFormat() throws Exception {
-    JobConf job = new JobConf(nutchConf);
-    NutchFileSystem fs = NutchFileSystem.getNamed("local", nutchConf);
+    JobConf job = new JobConf(conf);
+    NutchFileSystem fs = NutchFileSystem.getNamed("local", conf);
     File dir = new File(System.getProperty("test.build.data",".") + "/mapred");
     File file = new File(dir, "test.txt");
 

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/ndfs/TestNDFS.java
URL: http://svn.apache.org/viewcvs/lucene/hadoop/trunk/src/test/org/apache/hadoop/ndfs/TestNDFS.java?rev=374735&r1=374710&r2=374735&view=diff
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/ndfs/TestNDFS.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/ndfs/TestNDFS.java Fri Feb  3 12:02:58 2006
@@ -14,16 +14,16 @@
  * limitations under the License.
  */
 
-package org.apache.nutch.ndfs;
+package org.apache.hadoop.dfs;
 
 import junit.framework.TestCase;
 import junit.framework.AssertionFailedError;
-import org.apache.nutch.fs.NFSInputStream;
-import org.apache.nutch.fs.NFSOutputStream;
-import org.apache.nutch.fs.FileUtil;
-import org.apache.nutch.io.UTF8;
-import org.apache.nutch.util.LogFormatter;
-import org.apache.nutch.util.NutchConf;
+import org.apache.hadoop.fs.NFSInputStream;
+import org.apache.hadoop.fs.NFSOutputStream;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.io.UTF8;
+import org.apache.hadoop.util.LogFormatter;
+import org.apache.hadoop.conf.Configuration;
 
 import java.io.File;
 import java.io.FilenameFilter;
@@ -36,8 +36,8 @@
 import java.lang.reflect.InvocationTargetException;
 
 /**
- * Test NDFS.
- * TestNDFS is a JUnit test for NDFS using "pseudo multiprocessing" (or 
+ * Test DFS.
+ * TestDFS is a JUnit test for DFS using "pseudo multiprocessing" (or 
  * more strictly, pseudo distributed) meaning all daemons run in one process 
  * and sockets are used to communicate between daemons.  The test permutes
  * various block sizes, number of files, file sizes, and number of
@@ -51,7 +51,7 @@
  * less testing of permutations.  The ceiling of useful permutation is
  * TEST_PERMUTATION_MAX_CEILING.
  * <p>
- * NDFSClient emits many messages that can be ignored like:
+ * DFSClient emits many messages that can be ignored like:
  * "Failed to connect to *:7000:java.net.ConnectException: Connection refused: connect"
  * because a datanode is forced to close during testing.
  * <p>
@@ -65,21 +65,21 @@
  * <p>Bring the namenode down and restart it to verify that datanodes reconnect.
  * <p>
  * <p>For a another approach to filesystem testing, see the high level
- * (NutchFS level) test {@link org.apache.nutch.fs.TestNutchFileSystem}.
+ * (NutchFS level) test {@link org.apache.hadoop.fs.TestNutchFileSystem}.
  * @author Paul Baclace
  */
-public class TestNDFS extends TestCase implements FSConstants {
+public class TestDFS extends TestCase implements FSConstants {
   private static final Logger LOG =
-      LogFormatter.getLogger("org.apache.nutch.ndfs.TestNDFS");
+      LogFormatter.getLogger("org.apache.hadoop.dfs.TestDFS");
 
-  private static NutchConf nutchConf = new NutchConf();
+  private static Configuration conf = new Configuration();
   private static int BUFFER_SIZE =
-      nutchConf.getInt("io.file.buffer.size", 4096);
+      conf.getInt("io.file.buffer.size", 4096);
 
   private static int testCycleNumber = 0;
 
   /**
-   * all NDFS test files go under this base directory
+   * all DFS test files go under this base directory
    */
   private static String baseDirSpecified;
 
@@ -88,18 +88,18 @@
    */
   private static File baseDir;
 
-  /** NDFS block sizes to permute over in multiple test cycles
+  /** DFS block sizes to permute over in multiple test cycles
    * (array length should be prime).
    */
   private static final int[] BLOCK_SIZES = {100000, 4096};
 
-  /** NDFS file sizes to permute over in multiple test cycles
+  /** DFS file sizes to permute over in multiple test cycles
    * (array length should be prime).
    */
   private static final int[] FILE_SIZES =
       {100000, 100001, 4095, 4096, 4097, 1000000, 1000001};
 
-  /** NDFS file counts to permute over in multiple test cycles
+  /** DFS file counts to permute over in multiple test cycles
    * (array length should be prime).
    */
   private static final int[] FILE_COUNTS = {1, 10, 100};
@@ -111,7 +111,7 @@
   private static final int TEST_PERMUTATION_MAX_CEILING =
     BLOCK_SIZES.length * FILE_SIZES.length * FILE_COUNTS.length * 2;
 
-  /** Number of permutations of NDFS test parameters to perform.
+  /** Number of permutations of DFS test parameters to perform.
    * If this is greater than ceiling TEST_PERMUTATION_MAX_CEILING, then the
    * ceiling value is used.
    */
@@ -119,13 +119,13 @@
   private Constructor randomDataGeneratorCtor = null;
 
   static {
-    baseDirSpecified = System.getProperty("test.ndfs.data", "/tmp/ndfs_test");
+    baseDirSpecified = System.getProperty("test.dfs.data", "/tmp/dfs_test");
     baseDir = new File(baseDirSpecified);
   }
 
   protected void setUp() throws Exception {
     super.setUp();
-    nutchConf.setBoolean("test.ndfs.same.host.targets.allowed", true);
+    conf.setBoolean("test.dfs.same.host.targets.allowed", true);
   }
 
  /**
@@ -135,20 +135,20 @@
   protected void prepareTempFileSpace() {
     if (baseDir.exists()) {
       try { // start from a blank slate
-        FileUtil.fullyDelete(baseDir, nutchConf);
+        FileUtil.fullyDelete(baseDir, conf);
       } catch (Exception ignored) {
       }
     }
     baseDir.mkdirs();
     if (!baseDir.isDirectory()) {
-      throw new RuntimeException("Value of root directory property test.ndfs.data for ndfs test is not a directory: "
+      throw new RuntimeException("Value of root directory property test.dfs.data for dfs test is not a directory: "
           + baseDirSpecified);
     }
   }
 
   /**
    * Pseudo Distributed FS Test.
-   * Test NDFS by running all the necessary daemons in one process.
+   * Test DFS by running all the necessary daemons in one process.
    * Test various block sizes, number of files, disk space consumption,
    * and leakage.
    *
@@ -193,21 +193,21 @@
 
     //
     // set given config param to override other config settings
-    nutchConf.setInt("test.ndfs.block_size", blockSize);
+    conf.setInt("test.dfs.block_size", blockSize);
     // verify that config changed
-    assertTrue(blockSize == nutchConf.getInt("test.ndfs.block_size", 2)); // 2 is an intentional obviously-wrong block size
+    assertTrue(blockSize == conf.getInt("test.dfs.block_size", 2)); // 2 is an intentional obviously-wrong block size
     // downsize for testing (just to save resources)
-    nutchConf.setInt("ndfs.namenode.handler.count", 3);
+    conf.setInt("dfs.namenode.handler.count", 3);
     if (false) { //  use MersenneTwister, if present
-      nutchConf.set("nutch.random.class",
+      conf.set("nutch.random.class",
                           "org.apache.nutch.util.MersenneTwister");
     }
-    nutchConf.setLong("ndfs.blockreport.intervalMsec", 50*1000L);
-    nutchConf.setLong("ndfs.datanode.startupMsec", 15*1000L);
+    conf.setLong("dfs.blockreport.intervalMsec", 50*1000L);
+    conf.setLong("dfs.datanode.startupMsec", 15*1000L);
 
     String nameFSDir = baseDirSpecified + "/name";
     msg("----Start Test Cycle=" + currentTestCycleNumber +
-        " test.ndfs.block_size=" + blockSize +
+        " test.dfs.block_size=" + blockSize +
         " nBytes=" + nBytes +
         " numFiles=" + numFiles +
         " initialDNcount=" + initialDNcount);
@@ -217,19 +217,19 @@
 
     int nameNodePort = 9000 + testCycleNumber++; // ToDo: settable base port
     String nameNodeSocketAddr = "localhost:" + nameNodePort;
-    NameNode nameNodeDaemon = new NameNode(new File(nameFSDir), nameNodePort, nutchConf);
-    NDFSClient ndfsClient = null;
+    NameNode nameNodeDaemon = new NameNode(new File(nameFSDir), nameNodePort, conf);
+    DFSClient dfsClient = null;
     try {
       //
       //        start some DataNodes
       //
       ArrayList listOfDataNodeDaemons = new ArrayList();
-      nutchConf.set("fs.default.name", nameNodeSocketAddr);
+      conf.set("fs.default.name", nameNodeSocketAddr);
       for (int i = 0; i < initialDNcount; i++) {
         // uniquely config real fs path for data storage for this datanode
         String dataDir = baseDirSpecified + "/datanode" + i;
-        nutchConf.set("ndfs.data.dir", dataDir);
-        DataNode dn = DataNode.makeInstanceForDir(dataDir, nutchConf);
+        conf.set("dfs.data.dir", dataDir);
+        DataNode dn = DataNode.makeInstanceForDir(dataDir, conf);
         if (dn != null) {
           listOfDataNodeDaemons.add(dn);
           (new Thread(dn, "DataNode" + i + ": " + dataDir)).start();
@@ -244,7 +244,7 @@
         awaitQuiescence();
 
         //  act as if namenode is a remote process
-        ndfsClient = new NDFSClient(new InetSocketAddress("localhost", nameNodePort), nutchConf);
+        dfsClient = new DFSClient(new InetSocketAddress("localhost", nameNodePort), conf);
 
         //
         //           write nBytes of data using randomDataGenerator to numFiles
@@ -255,7 +255,7 @@
         for (int iFileNumber = 0; iFileNumber < numFiles; iFileNumber++) {
           testFileName = new UTF8("/f" + iFileNumber);
           testfilesList.add(testFileName);
-          NFSOutputStream nos = ndfsClient.create(testFileName, false);
+          NFSOutputStream nos = dfsClient.create(testFileName, false);
           try {
             for (long nBytesWritten = 0L;
                  nBytesWritten < nBytes;
@@ -304,7 +304,7 @@
         ListIterator li = testfilesList.listIterator();
         while (li.hasNext()) {
           testFileName = (UTF8) li.next();
-          NFSInputStream nis = ndfsClient.open(testFileName);
+          NFSInputStream nis = dfsClient.open(testFileName);
           byte[] bufferGolden = new byte[bufferSize];
           int m = 42;
           try {
@@ -330,11 +330,11 @@
 
         //
         //                    now delete all the created files
-        msg("Delete all random test files under NDFS via remaining datanodes");
+        msg("Delete all random test files under DFS via remaining datanodes");
         li = testfilesList.listIterator();
         while (li.hasNext()) {
           testFileName = (UTF8) li.next();
-          assertTrue(ndfsClient.delete(testFileName));
+          assertTrue(dfsClient.delete(testFileName));
         }
 
         //
@@ -378,12 +378,12 @@
       }
       msg("finished shutdown of all datanode daemons for test cycle " +
           currentTestCycleNumber);
-      if (ndfsClient != null) {
+      if (dfsClient != null) {
         try {
-          msg("close down subthreads of NDFSClient");
-          ndfsClient.close();
+          msg("close down subthreads of DFSClient");
+          dfsClient.close();
         } catch (Exception ignored) { }
-        msg("finished close down of NDFSClient");
+        msg("finished close down of DFSClient");
       }
     } catch (AssertionFailedError afe) {
       throw afe;
@@ -427,16 +427,16 @@
    * Allows optional use of high quality PRNG by setting property
    * nutch.random.class to the full class path of a subclass of
    * java.util.Random such as "...util.MersenneTwister".
-   * The property test.ndfs.random.seed can supply a seed for reproducible
+   * The property test.dfs.random.seed can supply a seed for reproducible
    * testing (a default is set here if property is not set.)
    */
   private Random makeRandomDataGenerator() {
-    long seed = nutchConf.getLong("test.ndfs.random.seed", 0xB437EF);
+    long seed = conf.getLong("test.dfs.random.seed", 0xB437EF);
     try {
       if (randomDataGeneratorCtor == null) {
         // lazy init
         String rndDataGenClassname =
-            nutchConf.get("nutch.random.class", "java.util.Random");
+            conf.get("nutch.random.class", "java.util.Random");
         Class clazz = Class.forName(rndDataGenClassname);
         randomDataGeneratorCtor = clazz.getConstructor(new Class[]{Long.TYPE});
       }
@@ -458,7 +458,7 @@
     return new java.util.Random(seed);
   }
 
-  /** Wait for the NDFS datanodes to become quiescent.
+  /** Wait for the DFS datanodes to become quiescent.
    * The initial implementation is to sleep for some fixed amount of time,
    * but a better implementation would be to really detect when distributed
    * operations are completed.
@@ -518,12 +518,12 @@
   }
 
   public static void main(String[] args) throws Exception {
-    String usage = "Usage: TestNDFS (no args)";
+    String usage = "Usage: TestDFS (no args)";
     if (args.length != 0) {
       System.err.println(usage);
       System.exit(-1);
     }
-    String[] testargs = {"org.apache.nutch.ndfs.TestNDFS"};
+    String[] testargs = {"org.apache.hadoop.dfs.TestDFS"};
     junit.textui.TestRunner.main(testargs);
   }