Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2007/04/19 23:34:53 UTC

svn commit: r530556 [11/12] - in /lucene/hadoop/trunk: ./ src/contrib/abacus/src/java/org/apache/hadoop/abacus/ src/contrib/hbase/src/java/org/apache/hadoop/hbase/ src/contrib/hbase/src/test/org/apache/hadoop/hbase/ src/contrib/streaming/src/java/org/a...

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSStorageStateRecovery.java Thu Apr 19 14:34:41 2007
@@ -186,35 +186,35 @@
         boolean curAfterRecover = testCase[5];
         boolean prevAfterRecover = testCase[6];
 
-        log("NAME_NODE recovery",numDirs,i,testCase);
+        log("NAME_NODE recovery", numDirs, i, testCase);
         baseDirs = createStorageState(NAME_NODE, testCase);
         if (shouldRecover) {
-          cluster = new MiniDFSCluster(conf,0,StartupOption.REGULAR);
+          cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
           checkResult(NAME_NODE, baseDirs, curAfterRecover, prevAfterRecover);
           cluster.shutdown();
         } else {
           try {
-            cluster = new MiniDFSCluster(conf,0,StartupOption.REGULAR);
+            cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
             throw new AssertionError("NameNode should have failed to start");
           } catch (Exception expected) {
             // expected
           }
         }
         
-        log("DATA_NODE recovery",numDirs,i,testCase);
-        createStorageState(NAME_NODE, new boolean[] {true,true,false,false});
-        cluster = new MiniDFSCluster(conf,0,StartupOption.REGULAR);
+        log("DATA_NODE recovery", numDirs, i, testCase);
+        createStorageState(NAME_NODE, new boolean[] {true, true, false, false});
+        cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
         baseDirs = createStorageState(DATA_NODE, testCase);
         if (!testCase[0] && !testCase[1] && !testCase[2] && !testCase[3]) {
           // DataNode will create and format current if no directories exist
-          cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
+          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
         } else {
           if (shouldRecover) {
-            cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
+            cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
             checkResult(DATA_NODE, baseDirs, curAfterRecover, prevAfterRecover);
           } else {
             try {
-              cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
+              cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
               throw new AssertionError("DataNode should have failed to start");
             } catch (Exception expected) {
               // expected

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestDFSUpgrade.java Thu Apr 19 14:34:41 2007
@@ -93,7 +93,7 @@
    */
   void startNameNodeShouldFail(StartupOption operation) {
     try {
-      cluster = new MiniDFSCluster(conf,0,operation); // should fail
+      cluster = new MiniDFSCluster(conf, 0, operation); // should fail
       throw new AssertionError("NameNode should have failed to start");
     } catch (Exception expected) {
       // expected
@@ -106,7 +106,7 @@
    */
   void startDataNodeShouldFail(StartupOption operation) {
     try {
-      cluster.startDataNodes(conf,1,false,operation,null); // should fail
+      cluster.startDataNodes(conf, 1, false, operation, null); // should fail
       throw new AssertionError("DataNode should have failed to start");
     } catch (Exception expected) {
       // expected
@@ -127,45 +127,45 @@
       String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
       String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
       
-      log("Normal NameNode upgrade",numDirs);
+      log("Normal NameNode upgrade", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
       checkResult(NAME_NODE, nameNodeDirs);
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
-      log("Normal DataNode upgrade",numDirs);
+      log("Normal DataNode upgrade", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
-      cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
+      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
       checkResult(DATA_NODE, dataNodeDirs);
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
       
-      log("NameNode upgrade with existing previous dir",numDirs);
+      log("NameNode upgrade with existing previous dir", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
-      log("DataNode upgrade with existing previous dir",numDirs);
+      log("DataNode upgrade with existing previous dir", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
-      cluster.startDataNodes(conf,1,false,StartupOption.REGULAR,null);
+      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
       checkResult(DATA_NODE, dataNodeDirs);
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
-      log("DataNode upgrade with future stored layout version in current",numDirs);
+      log("DataNode upgrade with future stored layout version in current", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
-      UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
+      UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
                                          new StorageInfo(Integer.MIN_VALUE,
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                          UpgradeUtilities.getCurrentFsscTime(cluster)));
@@ -174,11 +174,11 @@
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
       
-      log("DataNode upgrade with newer fsscTime in current",numDirs);
+      log("DataNode upgrade with newer fsscTime in current", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      cluster = new MiniDFSCluster(conf,0,StartupOption.UPGRADE);
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
-      UpgradeUtilities.createVersionFile(DATA_NODE,baseDirs,
+      UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
                                          new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
                                                          UpgradeUtilities.getCurrentNamespaceID(cluster),
                                                          Long.MAX_VALUE));
@@ -187,7 +187,7 @@
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       UpgradeUtilities.createEmptyDirs(dataNodeDirs);
 
-      log("NameNode upgrade with no edits file",numDirs);
+      log("NameNode upgrade with no edits file", numDirs);
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       for (File f : baseDirs) { 
         FileUtil.fullyDelete(new File(f,"edits"));
@@ -195,7 +195,7 @@
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
-      log("NameNode upgrade with no image file",numDirs);
+      log("NameNode upgrade with no image file", numDirs);
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       for (File f : baseDirs) { 
         FileUtil.fullyDelete(new File(f,"fsimage")); 
@@ -203,7 +203,7 @@
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
-      log("NameNode upgrade with corrupt version file",numDirs);
+      log("NameNode upgrade with corrupt version file", numDirs);
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       for (File f : baseDirs) { 
         UpgradeUtilities.corruptFile(new File(f,"VERSION")); 
@@ -211,9 +211,9 @@
       startNameNodeShouldFail(StartupOption.UPGRADE);
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
-      log("NameNode upgrade with future layout version in current",numDirs);
+      log("NameNode upgrade with future layout version in current", numDirs);
       baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      UpgradeUtilities.createVersionFile(NAME_NODE,baseDirs,
+      UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
                                          new StorageInfo(Integer.MIN_VALUE,
                                                          UpgradeUtilities.getCurrentNamespaceID(null),
                                                          UpgradeUtilities.getCurrentFsscTime(null)));

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestFileCorruption.java Thu Apr 19 14:34:41 2007
@@ -84,7 +84,7 @@
     dos.writeBytes("corruption");
     dos.close();
     // Now attempt to read the file
-    DataInputStream dis = fs.open(file,512);
+    DataInputStream dis = fs.open(file, 512);
     try {
       System.out.println("A ChecksumException is expected to be logged.");
       dis.readByte();

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestHost2NodesMap.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestHost2NodesMap.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestHost2NodesMap.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestHost2NodesMap.java Thu Apr 19 14:34:41 2007
@@ -16,17 +16,17 @@
 
   static {
     for(DatanodeDescriptor node:dataNodes) {
-      map.add( node );
+      map.add(node);
     }
     map.add(NULL_NODE);
   }
   
   public void testContains() throws Exception {
     for(int i=0; i<dataNodes.length; i++) {
-      assertTrue( map.contains(dataNodes[i]) );
+      assertTrue(map.contains(dataNodes[i]));
     }
-    assertFalse( map.contains( NULL_NODE ) );
-    assertFalse( map.contains( NODE ) );
+    assertFalse(map.contains(NULL_NODE));
+    assertFalse(map.contains(NODE));
   }
 
   public void testGetDatanodeByHost() throws Exception {

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestLocalDFS.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestLocalDFS.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestLocalDFS.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestLocalDFS.java Thu Apr 19 14:34:41 2007
@@ -39,7 +39,7 @@
   private void readFile(FileSystem fileSys, Path name) throws IOException {
     DataInputStream stm = fileSys.open(name);
     byte[] buffer = new byte[4];
-    int bytesRead = stm.read(buffer, 0 ,4);
+    int bytesRead = stm.read(buffer, 0 , 4);
     assertEquals("oom", new String(buffer, 0 , bytesRead));
     stm.close();
   }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java Thu Apr 19 14:34:41 2007
@@ -76,7 +76,7 @@
       isReplicationDone = true;
       for (int idx = 0; idx < locations.length; idx++) {
         DatanodeInfo[] datanodes = locations[idx].getLocations();
-        if(Math.min(numDatanodes, repl) != datanodes.length) {
+        if (Math.min(numDatanodes, repl) != datanodes.length) {
           isReplicationDone=false;
           LOG.warn("File has "+datanodes.length+" replicas, expecting "
                    +Math.min(numDatanodes, repl));
@@ -93,10 +93,10 @@
     boolean isOnSameRack = true, isNotOnSameRack = true;
     for (int idx = 0; idx < locations.length; idx++) {
       DatanodeInfo[] datanodes = locations[idx].getLocations();
-      if(datanodes.length <= 1) break;
-      if(datanodes.length == 2) {
-        isNotOnSameRack = !( datanodes[0].getNetworkLocation().equals(
-                                                                      datanodes[1].getNetworkLocation() ) );
+      if (datanodes.length <= 1) break;
+      if (datanodes.length == 2) {
+        isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(
+                                                                     datanodes[1].getNetworkLocation()));
         break;
       }
       isOnSameRack = false;
@@ -104,16 +104,16 @@
       for (int idy = 0; idy < datanodes.length-1; idy++) {
         LOG.info("datanode "+ idy + ": "+ datanodes[idy].getName());
         boolean onRack = datanodes[idy].getNetworkLocation().equals(
-                                                                    datanodes[idy+1].getNetworkLocation() );
-        if( onRack ) {
+                                                                    datanodes[idy+1].getNetworkLocation());
+        if (onRack) {
           isOnSameRack = true;
         }
-        if( !onRack ) {
+        if (!onRack) {
           isNotOnSameRack = true;                      
         }
-        if( isOnSameRack && isNotOnSameRack ) break;
+        if (isOnSameRack && isNotOnSameRack) break;
       }
-      if( !isOnSameRack || !isNotOnSameRack ) break;
+      if (!isOnSameRack || !isNotOnSameRack) break;
     }
     assertTrue(isOnSameRack);
     assertTrue(isNotOnSameRack);

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplicationPolicy.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplicationPolicy.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestReplicationPolicy.java Thu Apr 19 14:34:41 2007
@@ -42,10 +42,10 @@
     replicator = fsNamesystem.replicator;
     cluster = fsNamesystem.clusterMap;
     // construct network topology
-    for( int i=0; i<NUM_OF_DATANODES; i++) {
-      cluster.add( dataNodes[i] );
+    for(int i=0; i<NUM_OF_DATANODES; i++) {
+      cluster.add(dataNodes[i]);
     }
-    for( int i=0; i<NUM_OF_DATANODES; i++) {
+    for(int i=0; i<NUM_OF_DATANODES; i++) {
       dataNodes[i].updateHeartbeat(
                                    2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
                                    2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/UpgradeUtilities.java Thu Apr 19 14:34:41 2007
@@ -88,7 +88,7 @@
       
       // format and start NameNode and start DataNode
       NameNode.format(config); 
-      cluster = new MiniDFSCluster(config,1,StartupOption.REGULAR);
+      cluster = new MiniDFSCluster(config, 1, StartupOption.REGULAR);
         
       NameNode namenode = cluster.getNameNode();
       namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
@@ -96,12 +96,12 @@
       
       FileSystem fs = FileSystem.get(config);
       Path baseDir = new Path("/TestUpgrade");
-      fs.mkdirs( baseDir );
+      fs.mkdirs(baseDir);
       
       // write some files
       int bufferSize = 4096;
       byte[] buffer = new byte[bufferSize];
-      for( int i=0; i < bufferSize; i++ )
+      for(int i=0; i < bufferSize; i++)
         buffer[i] = (byte)('0' + i % 50);
       writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
       writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);
@@ -127,11 +127,11 @@
   
   // Private helper method that writes a file to the given file system.
   private static void writeFile(FileSystem fs, Path path, byte[] buffer,
-                                int bufferSize ) throws IOException 
+                                int bufferSize) throws IOException 
   {
     OutputStream out;
     out = fs.create(path, true, bufferSize, (short) 1, 1024);
-    out.write( buffer, 0, bufferSize );
+    out.write(buffer, 0, bufferSize);
     out.close();
   }
   
@@ -214,7 +214,7 @@
         byte[] buffer = new byte[1024];
         int bytesRead;
         while ((bytesRead = fis.read(buffer)) != -1) {
-          checksum.update(buffer,0,bytesRead);
+          checksum.update(buffer, 0, bytesRead);
         }
         fis.close();
       }
@@ -287,10 +287,10 @@
       FileUtil.fullyDelete(versionFile);
       switch (nodeType) {
       case NAME_NODE:
-        storage = new FSImage( version );
+        storage = new FSImage(version);
         break;
       case DATA_NODE:
-        storage = new DataStorage( version, "doNotCare" );
+        storage = new DataStorage(version, "doNotCare");
         break;
       }
       StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/AccumulatingReducer.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/AccumulatingReducer.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/AccumulatingReducer.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/AccumulatingReducer.java Thu Apr 19 14:34:41 2007
@@ -58,11 +58,11 @@
     TaskTracker.LOG.info("Starting AccumulatingReducer on " + hostName);
   }
   
-  public void reduce( WritableComparable key, 
-                      Iterator values,
-                      OutputCollector output, 
-                      Reporter reporter
-                      ) throws IOException {
+  public void reduce(WritableComparable key, 
+                     Iterator values,
+                     OutputCollector output, 
+                     Reporter reporter
+                     ) throws IOException {
     String field = ((UTF8) key).toString();
 
     reporter.setStatus("starting " + field + " ::host = " + hostName);

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DFSCIOTest.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DFSCIOTest.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DFSCIOTest.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DFSCIOTest.java Thu Apr 19 14:34:41 2007
@@ -81,7 +81,7 @@
   private static Path HDFS_TEST_DIR = new Path("/tmp/DFSCIOTest");
   private static String HDFS_LIB_VERSION = System.getProperty("libhdfs.version", "1");
   private static String CHMOD = new String("chmod");
-  private static Path HDFS_SHLIB = new Path(HDFS_TEST_DIR + "/libhdfs.so." + HDFS_LIB_VERSION );
+  private static Path HDFS_SHLIB = new Path(HDFS_TEST_DIR + "/libhdfs.so." + HDFS_LIB_VERSION);
   private static Path HDFS_READ = new Path(HDFS_TEST_DIR + "/hdfs_read");
   private static Path HDFS_WRITE = new Path(HDFS_TEST_DIR + "/hdfs_write");
 
@@ -120,7 +120,7 @@
 
     fs.delete(CONTROL_DIR);
 
-    for( int i=0; i < nrFiles; i++ ) {
+    for(int i=0; i < nrFiles; i++) {
       String name = getFileName(i);
       Path controlFile = new Path(CONTROL_DIR, "in_file_" + name);
       SequenceFile.Writer writer = null;
@@ -132,7 +132,7 @@
       } catch(Exception e) {
         throw new IOException(e.getLocalizedMessage());
       } finally {
-    	if( writer != null )
+    	if (writer != null)
           writer.close();
     	writer = null;
       }
@@ -140,7 +140,7 @@
     LOG.info("created control files for: "+nrFiles+" files");
   }
 
-  private static String getFileName( int fIdx ) {
+  private static String getFileName(int fIdx) {
     return BASE_FILE_NAME + Integer.toString(fIdx);
   }
   
@@ -164,12 +164,12 @@
     void collectStats(OutputCollector output, 
                       String name,
                       long execTime, 
-                      Object objSize ) throws IOException {
+                      Object objSize) throws IOException {
       long totalSize = ((Long)objSize).longValue();
       float ioRateMbSec = (float)totalSize * 1000 / (execTime * MEGA);
-      LOG.info("Number of bytes processed = " + totalSize );
-      LOG.info("Exec time = " + execTime );
-      LOG.info("IO rate = " + ioRateMbSec );
+      LOG.info("Number of bytes processed = " + totalSize);
+      LOG.info("Exec time = " + execTime);
+      LOG.info("IO rate = " + ioRateMbSec);
       
       output.collect(new UTF8("l:tasks"), new UTF8(String.valueOf(1)));
       output.collect(new UTF8("l:size"), new UTF8(String.valueOf(totalSize)));
@@ -186,14 +186,14 @@
 
     public WriteMapper() { 
       super(); 
-      for( int i=0; i < bufferSize; i++ )
+      for(int i=0; i < bufferSize; i++)
         buffer[i] = (byte)('0' + i % 50);
     }
 
-    public Object doIO( Reporter reporter, 
-                        String name, 
-                        long totalSize 
-                        ) throws IOException {
+    public Object doIO(Reporter reporter, 
+                       String name, 
+                       long totalSize 
+                       ) throws IOException {
       // create file
       totalSize *= MEGA;
       
@@ -261,13 +261,13 @@
     fs.delete(DATA_DIR);
     fs.delete(WRITE_DIR);
     
-    runIOTest( WriteMapper.class, WRITE_DIR );
+    runIOTest(WriteMapper.class, WRITE_DIR);
   }
   
-  private static void runIOTest(  Class mapperClass, 
-                                  Path outputDir
-                                  ) throws IOException {
-    JobConf job = new JobConf( fsConfig, DFSCIOTest.class );
+  private static void runIOTest( Class mapperClass, 
+                                 Path outputDir
+                                 ) throws IOException {
+    JobConf job = new JobConf(fsConfig, DFSCIOTest.class);
 
     job.setInputPath(CONTROL_DIR);
     job.setInputFormat(SequenceFileInputFormat.class);
@@ -293,10 +293,10 @@
       super(); 
     }
 
-    public Object doIO( Reporter reporter, 
-                        String name, 
-                        long totalSize 
-                        ) throws IOException {
+    public Object doIO(Reporter reporter, 
+                       String name, 
+                       long totalSize 
+                       ) throws IOException {
       totalSize *= MEGA;
       
       // create instance of local filesystem 
@@ -324,7 +324,7 @@
             Process process = runTime.exec(chmodCmd);
             int exitStatus = process.waitFor();
             if (exitStatus != 0) {
-              throw new IOException( chmodCmd + ": Failed with exitStatus: " + exitStatus );
+              throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
             }
           }
         }
@@ -366,7 +366,7 @@
 
   private static void readTest(FileSystem fs) throws IOException {
     fs.delete(READ_DIR);
-    runIOTest( ReadMapper.class, READ_DIR );
+    runIOTest(ReadMapper.class, READ_DIR);
   }
 
   private static void sequentialTest(
@@ -376,16 +376,16 @@
                                      int nrFiles
                                      ) throws Exception {
     IOStatMapper ioer = null;
-    if( testType == TEST_TYPE_READ )
+    if (testType == TEST_TYPE_READ)
       ioer = new ReadMapper();
-    else if( testType == TEST_TYPE_WRITE )
+    else if (testType == TEST_TYPE_WRITE)
       ioer = new WriteMapper();
     else
       return;
-    for( int i=0; i < nrFiles; i++)
+    for(int i=0; i < nrFiles; i++)
       ioer.doIO(Reporter.NULL,
                 BASE_FILE_NAME+Integer.toString(i), 
-                MEGA*fileSize );
+                MEGA*fileSize);
   }
 
   public static void main(String[] args) {
@@ -447,45 +447,45 @@
         fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_write"), HDFS_WRITE);
       }
 
-      if( isSequential ) {
+      if (isSequential) {
         long tStart = System.currentTimeMillis();
-        sequentialTest( fs, testType, fileSize, nrFiles );
+        sequentialTest(fs, testType, fileSize, nrFiles);
         long execTime = System.currentTimeMillis() - tStart;
         String resultLine = "Seq Test exec time sec: " + (float)execTime / 1000;
-        LOG.info( resultLine );
+        LOG.info(resultLine);
         return;
       }
-      if( testType == TEST_TYPE_CLEANUP ) {
-        cleanup( fs );
+      if (testType == TEST_TYPE_CLEANUP) {
+        cleanup(fs);
         return;
       }
       createControlFile(fs, fileSize, nrFiles);
       long tStart = System.currentTimeMillis();
-      if( testType == TEST_TYPE_WRITE )
+      if (testType == TEST_TYPE_WRITE)
         writeTest(fs);
-      if( testType == TEST_TYPE_READ )
+      if (testType == TEST_TYPE_READ)
         readTest(fs);
       long execTime = System.currentTimeMillis() - tStart;
     
-      analyzeResult( fs, testType, execTime, resFileName );
-    } catch( Exception e ) {
-      System.err.print( e.getLocalizedMessage());
+      analyzeResult(fs, testType, execTime, resFileName);
+    } catch(Exception e) {
+      System.err.print(e.getLocalizedMessage());
       System.exit(-1);
     }
   }
   
-  private static void analyzeResult(  FileSystem fs, 
-                                      int testType,
-                                      long execTime,
-                                      String resFileName
-                                      ) throws IOException {
+  private static void analyzeResult( FileSystem fs, 
+                                     int testType,
+                                     long execTime,
+                                     String resFileName
+                                     ) throws IOException {
     Path reduceFile;
-    if( testType == TEST_TYPE_WRITE )
-      reduceFile = new Path( WRITE_DIR, "part-00000" );
+    if (testType == TEST_TYPE_WRITE)
+      reduceFile = new Path(WRITE_DIR, "part-00000");
     else
-      reduceFile = new Path( READ_DIR, "part-00000" );
+      reduceFile = new Path(READ_DIR, "part-00000");
     DataInputStream in;
-    in = new DataInputStream(fs.open( reduceFile ));
+    in = new DataInputStream(fs.open(reduceFile));
   
     BufferedReader lines;
     lines = new BufferedReader(new InputStreamReader(in));
@@ -495,23 +495,23 @@
     float rate = 0;
     float sqrate = 0;
     String line;
-    while( (line = lines.readLine()) != null ) {
+    while((line = lines.readLine()) != null) {
       StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
       String attr = tokens.nextToken(); 
-      if( attr.endsWith(":tasks") )
-        tasks = Long.parseLong( tokens.nextToken() );
-      else if( attr.endsWith(":size") )
-        size = Long.parseLong( tokens.	nextToken() );
-      else if( attr.endsWith(":time") )
-        time = Long.parseLong( tokens.nextToken() );
-      else if( attr.endsWith(":rate") )
-        rate = Float.parseFloat( tokens.nextToken() );
-      else if( attr.endsWith(":sqrate") )
-        sqrate = Float.parseFloat( tokens.nextToken() );
+      if (attr.endsWith(":tasks"))
+        tasks = Long.parseLong(tokens.nextToken());
+      else if (attr.endsWith(":size"))
+        size = Long.parseLong(tokens.	nextToken());
+      else if (attr.endsWith(":time"))
+        time = Long.parseLong(tokens.nextToken());
+      else if (attr.endsWith(":rate"))
+        rate = Float.parseFloat(tokens.nextToken());
+      else if (attr.endsWith(":sqrate"))
+        sqrate = Float.parseFloat(tokens.nextToken());
     }
     
     double med = rate / 1000 / tasks;
-    double stdDev = Math.sqrt( Math.abs(sqrate / 1000 / tasks - med*med ));
+    double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - med*med));
     String resultLines[] = {
       "----- DFSCIOTest ----- : " + ((testType == TEST_TYPE_WRITE) ? "write" :
                                      (testType == TEST_TYPE_READ) ? "read" : 
@@ -525,17 +525,17 @@
       "    Test exec time sec: " + (float)execTime / 1000,
       "" };
 
-    PrintStream res = new PrintStream( 
-                                      new FileOutputStream( 
-                                                           new File(resFileName), true )); 
-    for( int i = 0; i < resultLines.length; i++ ) {
-      LOG.info( resultLines[i] );
-      res.println( resultLines[i] );
+    PrintStream res = new PrintStream(
+                                      new FileOutputStream(
+                                                           new File(resFileName), true)); 
+    for(int i = 0; i < resultLines.length; i++) {
+      LOG.info(resultLines[i]);
+      res.println(resultLines[i]);
     }
   }
 
-  private static void cleanup( FileSystem fs ) throws Exception {
-    LOG.info( "Cleaning up test files" );
+  private static void cleanup(FileSystem fs) throws Exception {
+    LOG.info("Cleaning up test files");
     fs.delete(new Path(TEST_ROOT_DIR));
     fs.delete(HDFS_TEST_DIR);
   }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DistributedFSCheck.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DistributedFSCheck.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DistributedFSCheck.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/DistributedFSCheck.java Thu Apr 19 14:34:41 2007
@@ -63,7 +63,7 @@
   
   DistributedFSCheck(Configuration conf) throws Exception {
     fsConfig = conf;
-    this.fs = FileSystem.get( conf );
+    this.fs = FileSystem.get(conf);
   }
 
   /**
@@ -81,13 +81,13 @@
    * @param rootName root directory name
    * @throws Exception
    */
-  public void testFSBlocks( String rootName ) throws Exception {
+  public void testFSBlocks(String rootName) throws Exception {
     createInputFile(rootName);
     runDistributedFSCheck();
     cleanup();  // clean up after all to restore the system state
   }
 
-  private void createInputFile( String rootName ) throws IOException {
+  private void createInputFile(String rootName) throws IOException {
     cleanup();  // clean up if previous run failed
 
     Path inputFile = new Path(MAP_INPUT_DIR, "in_file");
@@ -97,31 +97,31 @@
     
     try {
       nrFiles = 0;
-      listSubtree( new Path( rootName ), writer );
+      listSubtree(new Path(rootName), writer);
     } finally {
       writer.close();
     }
-    LOG.info( "Created map input files." );
+    LOG.info("Created map input files.");
   }
   
-  private void listSubtree( Path rootFile,
-                            SequenceFile.Writer writer
-                            ) throws IOException {
-    if( ! fs.isDirectory(rootFile) ) {
+  private void listSubtree(Path rootFile,
+                           SequenceFile.Writer writer
+                           ) throws IOException {
+    if (!fs.isDirectory(rootFile)) {
       nrFiles++;
       // For a regular file generate <fName,offset> pairs
       long blockSize = fs.getDefaultBlockSize();
-      long fileLength = fs.getLength( rootFile );
-      for( long offset = 0; offset < fileLength; offset += blockSize )
+      long fileLength = fs.getLength(rootFile);
+      for(long offset = 0; offset < fileLength; offset += blockSize)
         writer.append(new UTF8(rootFile.toString()), new LongWritable(offset));
       return;
     }
     
-    Path children[] = fs.listPaths( rootFile );
-    if( children == null )
+    Path children[] = fs.listPaths(rootFile);
+    if (children == null)
       throw new IOException("Could not get listing for " + rootFile);
     for (int i = 0; i < children.length; i++)
-      listSubtree( children[i], writer );
+      listSubtree(children[i], writer);
   }
 
   /**
@@ -133,52 +133,52 @@
       super(fsConfig); 
     }
 
-    public Object doIO( Reporter reporter, 
-                        String name, 
-                        long offset 
-                        ) throws IOException {
+    public Object doIO(Reporter reporter, 
+                       String name, 
+                       long offset 
+                       ) throws IOException {
       // open file
       FSDataInputStream in = null;
       try {
         in = fs.open(new Path(name));
-      } catch( IOException e ) {
+      } catch(IOException e) {
         return name + "@(missing)";
       }
-      in.seek( offset );
+      in.seek(offset);
       long actualSize = 0;
       try {
         long blockSize = fs.getDefaultBlockSize();
-        reporter.setStatus( "reading " + name + "@" + 
-                            offset + "/" + blockSize );
-        for(  int curSize = bufferSize; 
-              curSize == bufferSize && actualSize < blockSize;
-              actualSize += curSize) {
-          curSize = in.read( buffer, 0, bufferSize );
+        reporter.setStatus("reading " + name + "@" + 
+                           offset + "/" + blockSize);
+        for( int curSize = bufferSize; 
+             curSize == bufferSize && actualSize < blockSize;
+             actualSize += curSize) {
+          curSize = in.read(buffer, 0, bufferSize);
         }
-      } catch( IOException e ) {
-        LOG.info( "Corrupted block detected in \"" + name + "\" at " + offset );
+      } catch(IOException e) {
+        LOG.info("Corrupted block detected in \"" + name + "\" at " + offset);
         return name + "@" + offset;
       } finally {
         in.close();
       }
-      return new Long( actualSize );
+      return new Long(actualSize);
     }
     
     void collectStats(OutputCollector output, 
                       String name, 
                       long execTime, 
-                      Object corruptedBlock ) throws IOException {
+                      Object corruptedBlock) throws IOException {
       output.collect(new UTF8("l:blocks"), new UTF8(String.valueOf(1)));
 
-      if( corruptedBlock.getClass().getName().endsWith("String") ) {
+      if (corruptedBlock.getClass().getName().endsWith("String")) {
         output.collect(new UTF8("s:badBlocks"), new UTF8((String)corruptedBlock));
         return;
       }
       long totalSize = ((Long)corruptedBlock).longValue();
       float ioRateMbSec = (float)totalSize * 1000 / (execTime * 0x100000);
-      LOG.info( "Number of bytes processed = " + totalSize );
-      LOG.info( "Exec time = " + execTime );
-      LOG.info( "IO rate = " + ioRateMbSec );
+      LOG.info("Number of bytes processed = " + totalSize);
+      LOG.info("Exec time = " + execTime);
+      LOG.info("IO rate = " + ioRateMbSec);
       
       output.collect(new UTF8("l:size"), new UTF8(String.valueOf(totalSize)));
       output.collect(new UTF8("l:time"), new UTF8(String.valueOf(execTime)));
@@ -187,7 +187,7 @@
   }
   
   private void runDistributedFSCheck() throws Exception {
-    JobConf job = new JobConf( fs.getConf(), DistributedFSCheck.class );
+    JobConf job = new JobConf(fs.getConf(), DistributedFSCheck.class);
 
     job.setInputPath(MAP_INPUT_DIR);
     job.setInputFormat(SequenceFileInputFormat.class);
@@ -213,20 +213,20 @@
 
     String usage = "Usage: DistributedFSCheck [-root name] [-clean] [-resFile resultFileName] [-bufferSize Bytes] [-stats] ";
     
-    if(args.length == 1 && args[0].startsWith("-h")) {
+    if (args.length == 1 && args[0].startsWith("-h")) {
       System.err.println(usage);
       System.exit(-1);
     }
     for(int i = 0; i < args.length; i++) {       // parse command line
-      if(args[i].equals("-root")) {
+      if (args[i].equals("-root")) {
         rootName = args[++i];
       } else if (args[i].startsWith("-clean")) {
         testType = TEST_TYPE_CLEANUP;
-      } else if(args[i].equals("-bufferSize")) {
+      } else if (args[i].equals("-bufferSize")) {
         bufferSize = Integer.parseInt(args[++i]);
-      } else if(args[i].equals("-resFile")) {
+      } else if (args[i].equals("-resFile")) {
         resFileName = args[++i];
-      } else if(args[i].startsWith("-stat")) {
+      } else if (args[i].startsWith("-stat")) {
         viewStats = true;
       }
     }
@@ -236,28 +236,28 @@
   
     Configuration conf = new Configuration();  
     conf.setInt("test.io.file.buffer.size", bufferSize);
-    DistributedFSCheck test = new DistributedFSCheck( conf );
+    DistributedFSCheck test = new DistributedFSCheck(conf);
 
-    if( testType == TEST_TYPE_CLEANUP ) {
+    if (testType == TEST_TYPE_CLEANUP) {
       test.cleanup();
       return;
     }
-    test.createInputFile( rootName );
+    test.createInputFile(rootName);
     long tStart = System.currentTimeMillis();
     test.runDistributedFSCheck();
     long execTime = System.currentTimeMillis() - tStart;
     
-    test.analyzeResult( execTime, resFileName, viewStats );
+    test.analyzeResult(execTime, resFileName, viewStats);
     // test.cleanup();  // clean up after all to restore the system state
   }
   
-  private void analyzeResult( long execTime,
-                              String resFileName,
-                              boolean viewStats
-                              ) throws IOException {
-    Path reduceFile= new Path( READ_DIR, "part-00000" );
+  private void analyzeResult(long execTime,
+                             String resFileName,
+                             boolean viewStats
+                             ) throws IOException {
+    Path reduceFile= new Path(READ_DIR, "part-00000");
     DataInputStream in;
-    in = new DataInputStream(fs.open( reduceFile ));
+    in = new DataInputStream(fs.open(reduceFile));
   
     BufferedReader lines;
     lines = new BufferedReader(new InputStreamReader(in));
@@ -268,67 +268,67 @@
     StringTokenizer  badBlocks = null;
     long nrBadBlocks = 0;
     String line;
-    while( (line = lines.readLine()) != null ) {
+    while((line = lines.readLine()) != null) {
       StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
       String attr = tokens.nextToken(); 
-      if( attr.endsWith("blocks") )
-        blocks = Long.parseLong( tokens.nextToken() );
-      else if( attr.endsWith("size") )
-        size = Long.parseLong( tokens.nextToken() );
-      else if( attr.endsWith("time") )
-        time = Long.parseLong( tokens.nextToken() );
-      else if( attr.endsWith("rate") )
-        rate = Float.parseFloat( tokens.nextToken() );
-      else if( attr.endsWith("badBlocks") ) {
+      if (attr.endsWith("blocks"))
+        blocks = Long.parseLong(tokens.nextToken());
+      else if (attr.endsWith("size"))
+        size = Long.parseLong(tokens.nextToken());
+      else if (attr.endsWith("time"))
+        time = Long.parseLong(tokens.nextToken());
+      else if (attr.endsWith("rate"))
+        rate = Float.parseFloat(tokens.nextToken());
+      else if (attr.endsWith("badBlocks")) {
         badBlocks = new StringTokenizer(tokens.nextToken(), ";");
         nrBadBlocks = badBlocks.countTokens();
       }
     }
     
     Vector resultLines = new Vector();
-    resultLines.add(  "----- DistributedFSCheck ----- : " );
-    resultLines.add(  "               Date & time: " + new Date(System.currentTimeMillis()) );
-    resultLines.add(  "    Total number of blocks: " + blocks );
-    resultLines.add(  "    Total number of  files: " + nrFiles );
-    resultLines.add(  "Number of corrupted blocks: " + nrBadBlocks );
+    resultLines.add( "----- DistributedFSCheck ----- : ");
+    resultLines.add( "               Date & time: " + new Date(System.currentTimeMillis()));
+    resultLines.add( "    Total number of blocks: " + blocks);
+    resultLines.add( "    Total number of  files: " + nrFiles);
+    resultLines.add( "Number of corrupted blocks: " + nrBadBlocks);
     
     int nrBadFilesPos = resultLines.size();
     TreeSet badFiles = new TreeSet();
     long nrBadFiles = 0;
-    if( nrBadBlocks > 0 ) {
-      resultLines.add("" );
+    if (nrBadBlocks > 0) {
+      resultLines.add("");
       resultLines.add("----- Corrupted Blocks (file@offset) ----- : ");
-      while( badBlocks.hasMoreTokens() ) {
+      while(badBlocks.hasMoreTokens()) {
         String curBlock = badBlocks.nextToken();
-        resultLines.add( curBlock );
-        badFiles.add( curBlock.substring(0, curBlock.indexOf('@')) );
+        resultLines.add(curBlock);
+        badFiles.add(curBlock.substring(0, curBlock.indexOf('@')));
       }
       nrBadFiles = badFiles.size();
     }
     
-    resultLines.insertElementAt( " Number of corrupted files: " + nrBadFiles, nrBadFilesPos );
+    resultLines.insertElementAt(" Number of corrupted files: " + nrBadFiles, nrBadFilesPos);
     
-    if( viewStats ) {
-      resultLines.add("" );
-      resultLines.add("-----   Performance  ----- : " );
-      resultLines.add("         Total MBytes read: " + size/MEGA );
-      resultLines.add("         Throughput mb/sec: " + (float)size * 1000.0 / (time * MEGA) );
-      resultLines.add("    Average IO rate mb/sec: " + rate / 1000 / blocks );
-      resultLines.add("        Test exec time sec: " + (float)execTime / 1000 );
+    if (viewStats) {
+      resultLines.add("");
+      resultLines.add("-----   Performance  ----- : ");
+      resultLines.add("         Total MBytes read: " + size/MEGA);
+      resultLines.add("         Throughput mb/sec: " + (float)size * 1000.0 / (time * MEGA));
+      resultLines.add("    Average IO rate mb/sec: " + rate / 1000 / blocks);
+      resultLines.add("        Test exec time sec: " + (float)execTime / 1000);
     }
 
-    PrintStream res = new PrintStream( 
-                                      new FileOutputStream( 
-                                                           new File(resFileName), true )); 
-    for( int i = 0; i < resultLines.size(); i++ ) {
+    PrintStream res = new PrintStream(
+                                      new FileOutputStream(
+                                                           new File(resFileName), true)); 
+    for(int i = 0; i < resultLines.size(); i++) {
       String cur = (String)resultLines.get(i);
-      LOG.info( cur );
-      res.println( cur );
+      LOG.info(cur);
+      res.println(cur);
     }
   }
 
   private void cleanup() throws IOException {
-    LOG.info( "Cleaning up test files" );
+    LOG.info("Cleaning up test files");
     fs.delete(TEST_ROOT_DIR);
   }
 }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/IOMapperBase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/IOMapperBase.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/IOMapperBase.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/IOMapperBase.java Thu Apr 19 14:34:41 2007
@@ -33,7 +33,7 @@
 /**
  * Base mapper class for IO operations.
  * <p>
- * Two abstract method {@link #doIO(Reporter,String,long)} and 
+ * Two abstract method {@link #doIO(Reporter, String, long)} and 
  * {@link #collectStats(OutputCollector,String,long,Object)} should be
  * overloaded in derived classes to define the IO operation and the
  * statistics data to be collected by subsequent reducers.
@@ -51,7 +51,7 @@
     try {
       fs = FileSystem.get(conf);
     } catch (Exception e) {
-      throw new RuntimeException( "Cannot create file system.", e );
+      throw new RuntimeException("Cannot create file system.", e);
     }
     bufferSize = conf.getInt("test.io.file.buffer.size", 4096);
     buffer = new byte[bufferSize];
@@ -79,9 +79,9 @@
    *          {@link #collectStats(OutputCollector,String,long,Object)}
    * @throws IOException
    */
-  abstract Object doIO( Reporter reporter, 
-                        String name, 
-                        long value ) throws IOException;
+  abstract Object doIO(Reporter reporter, 
+                       String name, 
+                       long value) throws IOException;
 
   /**
    * Collect stat data to be combined by a subsequent reducer.
@@ -92,10 +92,10 @@
    * @param doIOReturnValue value returned by {@link #doIO(Reporter,String,long)}
    * @throws IOException
    */
-  abstract void collectStats( OutputCollector output, 
-                              String name, 
-                              long execTime, 
-                              Object doIOReturnValue ) throws IOException;
+  abstract void collectStats(OutputCollector output, 
+                             String name, 
+                             long execTime, 
+                             Object doIOReturnValue) throws IOException;
   
   /**
    * Map file name and offset into statistical data.
@@ -120,10 +120,10 @@
     reporter.setStatus("starting " + name + " ::host = " + hostName);
     
     long tStart = System.currentTimeMillis();
-    Object statValue = doIO( reporter, name, longValue );
+    Object statValue = doIO(reporter, name, longValue);
     long tEnd = System.currentTimeMillis();
     long execTime = tEnd - tStart;
-    collectStats( output, name, execTime, statValue );
+    collectStats(output, name, execTime, statValue);
     
     reporter.setStatus("finished " + name + " ::host = " + hostName);
   }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestCopyFiles.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestCopyFiles.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestCopyFiles.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestCopyFiles.java Thu Apr 19 14:34:41 2007
@@ -54,7 +54,7 @@
     
     MyFile() {
       int nLevels = gen.nextInt(MAX_LEVELS);
-      if(nLevels != 0) {
+      if (nLevels != 0) {
         int[] levels = new int[nLevels];
         for (int idx = 0; idx < nLevels; idx++) {
           levels[idx] = gen.nextInt(10);

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestDFSIO.java Thu Apr 19 14:34:41 2007
@@ -114,7 +114,7 @@
 
     fs.delete(CONTROL_DIR);
 
-    for( int i=0; i < nrFiles; i++ ) {
+    for(int i=0; i < nrFiles; i++) {
       String name = getFileName(i);
       Path controlFile = new Path(CONTROL_DIR, "in_file_" + name);
       SequenceFile.Writer writer = null;
@@ -126,7 +126,7 @@
       } catch(Exception e) {
         throw new IOException(e.getLocalizedMessage());
       } finally {
-    	if( writer != null )
+    	if (writer != null)
           writer.close();
     	writer = null;
       }
@@ -134,7 +134,7 @@
     LOG.info("created control files for: "+nrFiles+" files");
   }
 
-  private static String getFileName( int fIdx ) {
+  private static String getFileName(int fIdx) {
     return BASE_FILE_NAME + Integer.toString(fIdx);
   }
   
@@ -158,12 +158,12 @@
     void collectStats(OutputCollector output, 
                       String name,
                       long execTime, 
-                      Object objSize ) throws IOException {
+                      Object objSize) throws IOException {
       long totalSize = ((Long)objSize).longValue();
       float ioRateMbSec = (float)totalSize * 1000 / (execTime * MEGA);
-      LOG.info("Number of bytes processed = " + totalSize );
-      LOG.info("Exec time = " + execTime );
-      LOG.info("IO rate = " + ioRateMbSec );
+      LOG.info("Number of bytes processed = " + totalSize);
+      LOG.info("Exec time = " + execTime);
+      LOG.info("IO rate = " + ioRateMbSec);
       
       output.collect(new UTF8("l:tasks"), new UTF8(String.valueOf(1)));
       output.collect(new UTF8("l:size"), new UTF8(String.valueOf(totalSize)));
@@ -180,14 +180,14 @@
 
     public WriteMapper() { 
       super(); 
-      for( int i=0; i < bufferSize; i++ )
+      for(int i=0; i < bufferSize; i++)
         buffer[i] = (byte)('0' + i % 50);
     }
 
-    public Object doIO( Reporter reporter, 
-                        String name, 
-                        long totalSize 
-                        ) throws IOException {
+    public Object doIO(Reporter reporter, 
+                       String name, 
+                       long totalSize 
+                       ) throws IOException {
       // create file
       totalSize *= MEGA;
       OutputStream out;
@@ -196,12 +196,12 @@
       try {
         // write to the file
         long nrRemaining;
-        for (nrRemaining = totalSize; nrRemaining > 0; nrRemaining -= bufferSize ) {
-          int curSize = ( bufferSize < nrRemaining ) ? bufferSize : (int)nrRemaining; 
-          out.write( buffer, 0, curSize );
-          reporter.setStatus( "writing " + name + "@" + 
-                              (totalSize - nrRemaining) + "/" + totalSize 
-                              + " ::host = " + hostName);
+        for (nrRemaining = totalSize; nrRemaining > 0; nrRemaining -= bufferSize) {
+          int curSize = (bufferSize < nrRemaining) ? bufferSize : (int)nrRemaining; 
+          out.write(buffer, 0, curSize);
+          reporter.setStatus("writing " + name + "@" + 
+                             (totalSize - nrRemaining) + "/" + totalSize 
+                             + " ::host = " + hostName);
         }
       } finally {
         out.close();
@@ -216,13 +216,13 @@
     fs.delete(DATA_DIR);
     fs.delete(WRITE_DIR);
     
-    runIOTest( WriteMapper.class, WRITE_DIR );
+    runIOTest(WriteMapper.class, WRITE_DIR);
   }
   
-  private static void runIOTest(  Class mapperClass, 
-                                  Path outputDir
-                                  ) throws IOException {
-    JobConf job = new JobConf( fsConfig, TestDFSIO.class );
+  private static void runIOTest( Class mapperClass, 
+                                 Path outputDir
+                                 ) throws IOException {
+    JobConf job = new JobConf(fsConfig, TestDFSIO.class);
 
     job.setInputPath(CONTROL_DIR);
     job.setInputFormat(SequenceFileInputFormat.class);
@@ -248,21 +248,21 @@
       super(); 
     }
 
-    public Object doIO( Reporter reporter, 
-                        String name, 
-                        long totalSize 
-                        ) throws IOException {
+    public Object doIO(Reporter reporter, 
+                       String name, 
+                       long totalSize 
+                       ) throws IOException {
       totalSize *= MEGA;
       // open file
       DataInputStream in = fs.open(new Path(DATA_DIR, name));
       try {
         long actualSize = 0;
-        for( int curSize = bufferSize; curSize == bufferSize; ) {
-          curSize = in.read( buffer, 0, bufferSize );
+        for(int curSize = bufferSize; curSize == bufferSize;) {
+          curSize = in.read(buffer, 0, bufferSize);
           actualSize += curSize;
-          reporter.setStatus( "reading " + name + "@" + 
-                              actualSize + "/" + totalSize 
-                              + " ::host = " + hostName);
+          reporter.setStatus("reading " + name + "@" + 
+                             actualSize + "/" + totalSize 
+                             + " ::host = " + hostName);
         }
       } finally {
         in.close();
@@ -273,7 +273,7 @@
 
   private static void readTest(FileSystem fs) throws IOException {
     fs.delete(READ_DIR);
-    runIOTest( ReadMapper.class, READ_DIR );
+    runIOTest(ReadMapper.class, READ_DIR);
   }
 
   private static void sequentialTest(
@@ -283,16 +283,16 @@
                                      int nrFiles
                                      ) throws Exception {
     IOStatMapper ioer = null;
-    if( testType == TEST_TYPE_READ )
+    if (testType == TEST_TYPE_READ)
       ioer = new ReadMapper();
-    else if( testType == TEST_TYPE_WRITE )
+    else if (testType == TEST_TYPE_WRITE)
       ioer = new WriteMapper();
     else
       return;
-    for( int i=0; i < nrFiles; i++)
+    for(int i=0; i < nrFiles; i++)
       ioer.doIO(Reporter.NULL,
                 BASE_FILE_NAME+Integer.toString(i), 
-                MEGA*fileSize );
+                MEGA*fileSize);
   }
 
   public static void main(String[] args) {
@@ -339,45 +339,45 @@
       fsConfig.setInt("test.io.file.buffer.size", bufferSize);
       FileSystem fs = FileSystem.get(fsConfig);
 
-      if( isSequential ) {
+      if (isSequential) {
         long tStart = System.currentTimeMillis();
-        sequentialTest( fs, testType, fileSize, nrFiles );
+        sequentialTest(fs, testType, fileSize, nrFiles);
         long execTime = System.currentTimeMillis() - tStart;
         String resultLine = "Seq Test exec time sec: " + (float)execTime / 1000;
-        LOG.info( resultLine );
+        LOG.info(resultLine);
         return;
       }
-      if( testType == TEST_TYPE_CLEANUP ) {
-        cleanup( fs );
+      if (testType == TEST_TYPE_CLEANUP) {
+        cleanup(fs);
         return;
       }
       createControlFile(fs, fileSize, nrFiles);
       long tStart = System.currentTimeMillis();
-      if( testType == TEST_TYPE_WRITE )
+      if (testType == TEST_TYPE_WRITE)
         writeTest(fs);
-      if( testType == TEST_TYPE_READ )
+      if (testType == TEST_TYPE_READ)
         readTest(fs);
       long execTime = System.currentTimeMillis() - tStart;
     
-      analyzeResult( fs, testType, execTime, resFileName );
-    } catch( Exception e ) {
-      System.err.print( e.getLocalizedMessage());
+      analyzeResult(fs, testType, execTime, resFileName);
+    } catch(Exception e) {
+      System.err.print(e.getLocalizedMessage());
       System.exit(-1);
     }
   }
   
-  private static void analyzeResult(  FileSystem fs, 
-                                      int testType,
-                                      long execTime,
-                                      String resFileName
-                                      ) throws IOException {
+  private static void analyzeResult( FileSystem fs, 
+                                     int testType,
+                                     long execTime,
+                                     String resFileName
+                                     ) throws IOException {
     Path reduceFile;
-    if( testType == TEST_TYPE_WRITE )
-      reduceFile = new Path( WRITE_DIR, "part-00000" );
+    if (testType == TEST_TYPE_WRITE)
+      reduceFile = new Path(WRITE_DIR, "part-00000");
     else
-      reduceFile = new Path( READ_DIR, "part-00000" );
+      reduceFile = new Path(READ_DIR, "part-00000");
     DataInputStream in;
-    in = new DataInputStream(fs.open( reduceFile ));
+    in = new DataInputStream(fs.open(reduceFile));
   
     BufferedReader lines;
     lines = new BufferedReader(new InputStreamReader(in));
@@ -387,23 +387,23 @@
     float rate = 0;
     float sqrate = 0;
     String line;
-    while( (line = lines.readLine()) != null ) {
+    while((line = lines.readLine()) != null) {
       StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
       String attr = tokens.nextToken(); 
-      if( attr.endsWith(":tasks") )
-        tasks = Long.parseLong( tokens.nextToken() );
-      else if( attr.endsWith(":size") )
-        size = Long.parseLong( tokens.nextToken() );
-      else if( attr.endsWith(":time") )
-        time = Long.parseLong( tokens.nextToken() );
-      else if( attr.endsWith(":rate") )
-        rate = Float.parseFloat( tokens.nextToken() );
-      else if( attr.endsWith(":sqrate") )
-        sqrate = Float.parseFloat( tokens.nextToken() );
+      if (attr.endsWith(":tasks"))
+        tasks = Long.parseLong(tokens.nextToken());
+      else if (attr.endsWith(":size"))
+        size = Long.parseLong(tokens.nextToken());
+      else if (attr.endsWith(":time"))
+        time = Long.parseLong(tokens.nextToken());
+      else if (attr.endsWith(":rate"))
+        rate = Float.parseFloat(tokens.nextToken());
+      else if (attr.endsWith(":sqrate"))
+        sqrate = Float.parseFloat(tokens.nextToken());
     }
     
     double med = rate / 1000 / tasks;
-    double stdDev = Math.sqrt( Math.abs(sqrate / 1000 / tasks - med*med ));
+    double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - med*med));
     String resultLines[] = {
       "----- TestDFSIO ----- : " + ((testType == TEST_TYPE_WRITE) ? "write" :
                                     (testType == TEST_TYPE_READ) ? "read" : 
@@ -417,17 +417,17 @@
       "    Test exec time sec: " + (float)execTime / 1000,
       "" };
 
-    PrintStream res = new PrintStream( 
-                                      new FileOutputStream( 
-                                                           new File(resFileName), true )); 
-    for( int i = 0; i < resultLines.length; i++ ) {
-      LOG.info( resultLines[i] );
-      res.println( resultLines[i] );
+    PrintStream res = new PrintStream(
+                                      new FileOutputStream(
+                                                           new File(resFileName), true)); 
+    for(int i = 0; i < resultLines.length; i++) {
+      LOG.info(resultLines[i]);
+      res.println(resultLines[i]);
     }
   }
 
-  private static void cleanup( FileSystem fs ) throws IOException {
-    LOG.info( "Cleaning up test files" );
+  private static void cleanup(FileSystem fs) throws IOException {
+    LOG.info("Cleaning up test files");
     fs.delete(new Path(TEST_ROOT_DIR));
   }
 }
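
For reference, a minimal standalone sketch of the aggregation done by analyzeResult above: the reduce output carries the task count, a sum of per-task rates and a sum of squared rates, and the "Average IO rate" and "std deviation" lines are derived from them. The numbers below are invented; the expressions simply mirror the formula in the hunk.

    public class RateStatsSketch {
      public static void main(String[] args) {
        long tasks = 4;                   // hypothetical task count read from the reduce output
        float rate = 48000.0f;            // hypothetical sum of per-task rates
        float sqrate = 576000000.0f;      // hypothetical sum of squared per-task rates
        double med = rate / 1000 / tasks;                                       // average rate
        double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - med * med)); // as computed above
        System.out.println("Average IO rate mb/sec: " + med);
        System.out.println(" IO rate std deviation: " + stdDev);
      }
    }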

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestGlobPaths.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestGlobPaths.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestGlobPaths.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/TestGlobPaths.java Thu Apr 19 14:34:41 2007
@@ -56,7 +56,7 @@
       pTestSetExcl();
       pTestCombination();
       pTestRelativePath();
-    } catch( IOException e) {
+    } catch(IOException e) {
       e.printStackTrace();
     } 
   }
@@ -66,9 +66,9 @@
       String [] files = new String[2];
       files[0] = USER_DIR+"/a2c";
       files[1] = USER_DIR+"/ab\\[c.d";
-      Path[] matchedPath = prepareTesting( USER_DIR+"/ab\\[c.d", files );
-      assertEquals( matchedPath.length, 1 );
-      assertEquals( matchedPath[0], path[1] );
+      Path[] matchedPath = prepareTesting(USER_DIR+"/ab\\[c.d", files);
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[1]);
     } finally {
       cleanupDFS();
     }
@@ -82,10 +82,10 @@
       files[2] = USER_DIR+"/a.c";
       files[3] = USER_DIR+"/abcd";
       Path[] matchedPath = prepareTesting(USER_DIR+"/a?c", files);
-      assertEquals( matchedPath.length, 3 );
-      assertEquals( matchedPath[0], path[2] );
-      assertEquals( matchedPath[1], path[1] );
-      assertEquals( matchedPath[2], path[0] );
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[2]);
+      assertEquals(matchedPath[1], path[1]);
+      assertEquals(matchedPath[2], path[0]);
     } finally {
       cleanupDFS();
     }
@@ -105,10 +105,10 @@
       files[2] = USER_DIR+"/abc.p";
       files[3] = USER_DIR+"/bacd";
       Path[] matchedPath = prepareTesting(USER_DIR+"/a*", files);
-      assertEquals( matchedPath.length, 3 );
-      assertEquals( matchedPath[0], path[0] );
-      assertEquals( matchedPath[1], path[1] );
-      assertEquals( matchedPath[2], path[2] );
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[1]);
+      assertEquals(matchedPath[2], path[2]);
     } finally {
       cleanupDFS();
     }
@@ -122,10 +122,10 @@
       files[2] = USER_DIR+"/a.old.java";
       files[3] = USER_DIR+"/.java";
       Path[] matchedPath = prepareTesting(USER_DIR+"/a.*", files);
-      assertEquals( matchedPath.length, 3 );
-      assertEquals( matchedPath[0], path[0] );
-      assertEquals( matchedPath[1], path[2] );
-      assertEquals( matchedPath[2], path[1] );
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[2]);
+      assertEquals(matchedPath[2], path[1]);
     } finally {
       cleanupDFS();
     }
@@ -139,10 +139,10 @@
       files[2] = USER_DIR+"/ab37x";
       files[3] = USER_DIR+"/bacd";
       Path[] matchedPath = prepareTesting(USER_DIR+"/a*x", files);
-      assertEquals( matchedPath.length, 3 );
-      assertEquals( matchedPath[0], path[0] );
-      assertEquals( matchedPath[1], path[2] );
-      assertEquals( matchedPath[2], path[1] );
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[2]);
+      assertEquals(matchedPath[2], path[1]);
     } finally {
       cleanupDFS();
     } 
@@ -156,10 +156,10 @@
       files[2] = USER_DIR+"/a.hlp";
       files[3] = USER_DIR+"/a.hxy";
       Path[] matchedPath = prepareTesting(USER_DIR+"/a.[ch]??", files);
-      assertEquals( matchedPath.length, 3 );
-      assertEquals( matchedPath[0], path[1] );
-      assertEquals( matchedPath[1], path[2] );
-      assertEquals( matchedPath[2], path[3] );
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[1]);
+      assertEquals(matchedPath[1], path[2]);
+      assertEquals(matchedPath[2], path[3]);
     } finally {
       cleanupDFS();
     }
@@ -173,10 +173,10 @@
       files[2] = USER_DIR+"/a.f";
       files[3] = USER_DIR+"/a.h";
       Path[] matchedPath = prepareTesting(USER_DIR+"/a.[d-fm]", files);
-      assertEquals( matchedPath.length, 3 );
-      assertEquals( matchedPath[0], path[0] );
-      assertEquals( matchedPath[1], path[1] );
-      assertEquals( matchedPath[2], path[2] );
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[1]);
+      assertEquals(matchedPath[2], path[2]);
     } finally {
       cleanupDFS();
     }
@@ -190,9 +190,9 @@
       files[2] = USER_DIR+"/a.0";
       files[3] = USER_DIR+"/a.h";
       Path[] matchedPath = prepareTesting(USER_DIR+"/a.[^a-cg-z0-9]", files);
-      assertEquals( matchedPath.length, 2 );
-      assertEquals( matchedPath[0], path[0] );
-      assertEquals( matchedPath[1], path[1] );
+      assertEquals(matchedPath.length, 2);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[1]);
     } finally {
       cleanupDFS();
     }
@@ -206,9 +206,9 @@
       files[2] = "/user1/cc/b.hlp";
       files[3] = "/user/dd/a.hxy";
       Path[] matchedPath = prepareTesting("/use?/*/a.[ch]??", files);
-      assertEquals( matchedPath.length, 2 );
-      assertEquals( matchedPath[0], path[1] );
-      assertEquals( matchedPath[1], path[3] );
+      assertEquals(matchedPath.length, 2);
+      assertEquals(matchedPath[0], path[1]);
+      assertEquals(matchedPath[1], path[3]);
     } finally {
       cleanupDFS();
     }
@@ -222,28 +222,28 @@
       files[2] = "abc.p";
       files[3] = "bacd";
       Path[] matchedPath = prepareTesting("a*", files);
-      assertEquals( matchedPath.length, 3 );
-      assertEquals( matchedPath[0], new Path(USER_DIR, path[0]) );
-      assertEquals( matchedPath[1], new Path(USER_DIR, path[1]) );
-      assertEquals( matchedPath[2], new Path(USER_DIR, path[2]) );
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], new Path(USER_DIR, path[0]));
+      assertEquals(matchedPath[1], new Path(USER_DIR, path[1]));
+      assertEquals(matchedPath[2], new Path(USER_DIR, path[2]));
     } finally {
       cleanupDFS();
     }
   }
   
-  private Path[] prepareTesting( String pattern, String[] files)
+  private Path[] prepareTesting(String pattern, String[] files)
     throws IOException {
     for(int i=0; i<Math.min(NUM_OF_PATHS, files.length); i++) {
-      path[i] = new Path( files[i] );
-      if (!fs.mkdirs( path[i] )) {
+      path[i] = new Path(files[i]);
+      if (!fs.mkdirs(path[i])) {
         throw new IOException("Mkdirs failed to create " + path[i].toString());
       }
     }
-    return fs.globPaths( new Path(pattern) );
+    return fs.globPaths(new Path(pattern));
   }
   
-  private void cleanupDFS( ) throws IOException {
-    fs.delete( new Path("/user"));
+  private void cleanupDFS() throws IOException {
+    fs.delete(new Path("/user"));
   }
   
 }
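
For reference, the pattern exercised above reduces to: create each candidate path with mkdirs, then expand a glob with globPaths and inspect the matches. A minimal sketch against the local file system, with invented directory names; globPaths and the single-argument delete are the same calls this test itself uses.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class GlobSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        String base = "/tmp/globsketch";                       // hypothetical scratch directory
        String[] files = {base + "/abc", base + "/a.txt", base + "/bcd"};
        for (int i = 0; i < files.length; i++) {               // create the candidate paths
          if (!fs.mkdirs(new Path(files[i]))) {
            throw new IOException("Mkdirs failed to create " + files[i]);
          }
        }
        Path[] matched = fs.globPaths(new Path(base + "/a*")); // expect abc and a.txt
        for (int i = 0; i < matched.length; i++) {
          System.out.println(matched[i]);
        }
        fs.delete(new Path(base));                             // clean up, as cleanupDFS() does
      }
    }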

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemTest.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemTest.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemTest.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemTest.java Thu Apr 19 14:34:41 2007
@@ -5,7 +5,7 @@
 public class Jets3tS3FileSystemTest extends S3FileSystemBaseTest {
 
   @Override
-    public FileSystemStore getFileSystemStore() throws IOException {
+  public FileSystemStore getFileSystemStore() throws IOException {
     return null; // use default store
   }
 

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java Thu Apr 19 14:34:41 2007
@@ -21,7 +21,7 @@
   abstract FileSystemStore getFileSystemStore() throws IOException;
 
   @Override
-    protected void setUp() throws IOException {
+  protected void setUp() throws IOException {
     Configuration conf = new Configuration();
     
     s3FileSystem = new S3FileSystem(getFileSystemStore());
@@ -34,7 +34,7 @@
   }
 
   @Override
-    protected void tearDown() throws Exception {
+  protected void tearDown() throws Exception {
     s3FileSystem.purge();
     s3FileSystem.close();
   }
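
For reference, the structure of these S3 tests is a template method: the base test declares an abstract factory for the store, and each concrete test supplies its own implementation in the overridden method whose indentation this hunk fixes. A generic JUnit 3 sketch of that shape, with invented names and a plain Object standing in for FileSystemStore:

    import junit.framework.TestCase;

    public abstract class StoreBaseTestSketch extends TestCase {
      protected Object store;                 // stands in for the real FileSystemStore

      abstract Object getStore();             // each subclass decides which store to use

      protected void setUp() throws Exception {
        store = getStore();                   // resolved per subclass, as in setUp() above
      }
    }

    class InMemoryStoreTestSketch extends StoreBaseTestSketch {
      Object getStore() {
        return new Object();                  // e.g. an in-memory implementation
      }

      public void testStoreIsCreated() {
        assertNotNull(store);
      }
    }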

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystem.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystem.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystem.java Thu Apr 19 14:34:41 2007
@@ -8,7 +8,7 @@
 public class TestInMemoryS3FileSystem extends S3FileSystemBaseTest {
 
   @Override
-    public FileSystemStore getFileSystemStore() throws IOException {
+  public FileSystemStore getFileSystemStore() throws IOException {
     return new InMemoryFileSystemStore();
   }
   

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestBytesWritable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestBytesWritable.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestBytesWritable.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestBytesWritable.java Thu Apr 19 14:34:41 2007
@@ -32,7 +32,7 @@
     int orig_capacity = buf.getCapacity();
     buf.setSize(size*2);
     int new_capacity = buf.getCapacity();
-    System.arraycopy(buf.get(),0, buf.get(), size, size);
+    System.arraycopy(buf.get(), 0, buf.get(), size, size);
     assertTrue(new_capacity >= size * 2);
     assertEquals(size * 2, buf.getSize());
     assertTrue(new_capacity != orig_capacity);
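
For reference, a small standalone sketch of the resize behaviour this test exercises: setSize grows the backing buffer while preserving the existing bytes, so the first half can then be copied into the newly exposed second half. The input bytes are invented.

    import org.apache.hadoop.io.BytesWritable;

    public class BytesWritableResizeSketch {
      public static void main(String[] args) {
        byte[] data = {1, 2, 3, 4};
        BytesWritable buf = new BytesWritable(data);
        int size = buf.getSize();                               // 4
        int origCapacity = buf.getCapacity();
        buf.setSize(size * 2);                                  // grow; capacity is at least 8 now
        System.arraycopy(buf.get(), 0, buf.get(), size, size);  // duplicate the first half
        System.out.println("capacity: " + origCapacity + " -> " + buf.getCapacity());
        System.out.println("size: " + buf.getSize());           // 8
      }
    }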

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestMD5Hash.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestMD5Hash.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestMD5Hash.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestMD5Hash.java Thu Apr 19 14:34:41 2007
@@ -41,11 +41,11 @@
     MD5Hash md5Hash = getTestHash();
 
     MD5Hash md5Hash00
-      = new MD5Hash(new byte[] {0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0});
+      = new MD5Hash(new byte[] {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
 
     MD5Hash md5HashFF
-      = new MD5Hash(new byte[] {-1,-1,-1,-1,-1,-1,-1,-1,
-                                -1,-1,-1,-1,-1,-1,-1,-1});
+      = new MD5Hash(new byte[] {-1, -1, -1, -1, -1, -1, -1, -1,
+                                -1, -1, -1, -1, -1, -1, -1, -1});
 
     // test i/o
     TestWritable.testWritable(md5Hash);

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSequenceFile.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSequenceFile.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSequenceFile.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestSequenceFile.java Thu Apr 19 14:34:41 2007
@@ -310,7 +310,7 @@
                                                int megabytes, int factor) {
     SequenceFile.Sorter sorter = 
       fast
-      ? new SequenceFile.Sorter(fs, new RandomDatum.Comparator(),RandomDatum.class, conf)
+      ? new SequenceFile.Sorter(fs, new RandomDatum.Comparator(), RandomDatum.class, conf)
       : new SequenceFile.Sorter(fs, RandomDatum.class, RandomDatum.class, conf);
     sorter.setMemory(megabytes * 1024*1024);
     sorter.setFactor(factor);

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestText.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestText.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestText.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestText.java Thu Apr 19 14:34:41 2007
@@ -44,11 +44,11 @@
     while (buffer.length()<length) {
       int codePoint = RANDOM.nextInt(Character.MAX_CODE_POINT);
       char tmpStr[] = new char[2];
-      if(Character.isDefined(codePoint)) {
+      if (Character.isDefined(codePoint)) {
         //unpaired surrogate
-        if(codePoint < Character.MIN_SUPPLEMENTARY_CODE_POINT &&
-           !Character.isHighSurrogate((char)codePoint) &&
-           !Character.isLowSurrogate((char)codePoint) ) {
+        if (codePoint < Character.MIN_SUPPLEMENTARY_CODE_POINT &&
+            !Character.isHighSurrogate((char)codePoint) &&
+            !Character.isLowSurrogate((char)codePoint)) {
           Character.toChars(codePoint, tmpStr, 0);
           buffer.append(tmpStr);
         }
@@ -74,7 +74,7 @@
   public void testWritable() throws Exception {
     for (int i = 0; i < NUM_ITERATIONS; i++) {
       String str;
-      if(i == 0 )
+      if (i == 0)
         str = getLongString();
       else
         str = getTestString();
@@ -91,7 +91,7 @@
 
     for (int i = 0; i < NUM_ITERATIONS; i++) {
       // generate a random string
-      if(i == 0 )
+      if (i == 0)
         before = getLongString();
       else
         before = getTestString();
@@ -119,7 +119,7 @@
     for (int i = 0; i < NUM_ITERATIONS; i++) {
       // generate a random string
       String before;          
-      if(i == 0 )
+      if (i == 0)
         before = getLongString();
       else
         before = getTestString();
@@ -146,7 +146,7 @@
     DataOutputBuffer out2 = new DataOutputBuffer();
     DataOutputBuffer out3 = new DataOutputBuffer();
     Text.Comparator comparator = new Text.Comparator();
-    for (int i=0; i<NUM_ITERATIONS; i++ ) {
+    for (int i=0; i<NUM_ITERATIONS; i++) {
       // reset output buffer
       out1.reset();
       out2.reset();
@@ -155,7 +155,7 @@
       // generate two random strings
       String str1 = getTestString();
       String str2 = getTestString();
-      if(i == 0 ) {
+      if (i == 0) {
         str1 = getLongString();
         str2 = getLongString();
       } else {
@@ -207,7 +207,7 @@
     Text a=new Text("abc");
     Text b=new Text("a");
     b.set(a);
-    assertEquals("abc",b.toString());
+    assertEquals("abc", b.toString());
   }
 
   public static void main(String[] args)  throws Exception
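
For reference, a standalone sketch (plain Java, no Hadoop types) of the filter used by the random-string generator above: undefined code points and unpaired BMP surrogates are skipped, and everything else, including supplementary characters, is appended.

    import java.util.Random;

    public class RandomUnicodeSketch {
      static String randomString(int length, Random random) {
        StringBuffer buffer = new StringBuffer();
        while (buffer.length() < length) {
          int codePoint = random.nextInt(Character.MAX_CODE_POINT);
          if (!Character.isDefined(codePoint)) {
            continue;                                   // undefined code point: skip
          }
          if (codePoint < Character.MIN_SUPPLEMENTARY_CODE_POINT &&
              (Character.isHighSurrogate((char)codePoint) ||
               Character.isLowSurrogate((char)codePoint))) {
            continue;                                   // unpaired surrogate: skip
          }
          buffer.appendCodePoint(codePoint);            // valid BMP or supplementary code point
        }
        return buffer.toString();
      }

      public static void main(String[] args) {
        System.out.println(randomString(20, new Random()));
      }
    }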

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestVersionedWritable.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestVersionedWritable.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestVersionedWritable.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/TestVersionedWritable.java Thu Apr 19 14:34:41 2007
@@ -80,7 +80,7 @@
     String compressableTestString = 
       "Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. " +
       "Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. " +
-      "Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. " ;
+      "Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. Blah. ";
 
     SimpleVersionedWritable containedObject = new SimpleVersionedWritable();
     String[] testStringArray = {"The", "Quick", "Brown", "Fox", "Jumped", "Over", "The", "Lazy", "Dog"};
@@ -88,11 +88,11 @@
     public void write(DataOutput out) throws IOException {
       super.write(out);
       out.writeUTF(shortTestString); 
-      WritableUtils.writeString(out,longTestString); 
-      int comp = WritableUtils.writeCompressedString(out,compressableTestString); 
+      WritableUtils.writeString(out, longTestString); 
+      int comp = WritableUtils.writeCompressedString(out, compressableTestString); 
       System.out.println("Compression is " + comp + "%");
       containedObject.write(out); // Warning if this is a recursive call, you need a null value.
-      WritableUtils.writeStringArray(out,testStringArray); 
+      WritableUtils.writeStringArray(out, testStringArray); 
 
     }
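
For reference, a minimal round-trip sketch of the WritableUtils helpers exercised above, using in-memory buffers. The read-side calls (readString, readCompressedString, readStringArray) are assumed from the usual WritableUtils API and are not part of this commit; the strings are invented.

    import java.io.IOException;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.WritableUtils;

    public class WritableUtilsSketch {
      public static void main(String[] args) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        WritableUtils.writeString(out, "hello");
        int comp = WritableUtils.writeCompressedString(out, "Blah. Blah. Blah. Blah.");
        WritableUtils.writeStringArray(out, new String[] {"The", "Quick", "Brown", "Fox"});
        System.out.println("writeCompressedString returned " + comp);

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        System.out.println(WritableUtils.readString(in));             // "hello"
        System.out.println(WritableUtils.readCompressedString(in));   // the compressed string back
        System.out.println(WritableUtils.readStringArray(in).length); // 4
      }
    }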
 		

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/io/retry/TestRetryProxy.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/io/retry/TestRetryProxy.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/io/retry/TestRetryProxy.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/io/retry/TestRetryProxy.java Thu Apr 19 14:34:41 2007
@@ -22,7 +22,7 @@
   private UnreliableImplementation unreliableImpl;
   
   @Override
-    protected void setUp() throws Exception {
+  protected void setUp() throws Exception {
     unreliableImpl = new UnreliableImplementation();
   }
 

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/HadoopTestCase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/HadoopTestCase.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/HadoopTestCase.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/HadoopTestCase.java Thu Apr 19 14:34:41 2007
@@ -55,40 +55,40 @@
     throws IOException {
     if (mrMode != LOCAL_MR && mrMode != CLUSTER_MR) {
       throw new IllegalArgumentException(
-        "Invalid MapRed mode, must be LOCAL_MR or CLUSTER_MR");
+                                         "Invalid MapRed mode, must be LOCAL_MR or CLUSTER_MR");
     }
     if (fsMode != LOCAL_FS && fsMode != DFS_FS) {
       throw new IllegalArgumentException(
-        "Invalid FileSystem mode, must be LOCAL_FS or DFS_FS");
+                                         "Invalid FileSystem mode, must be LOCAL_FS or DFS_FS");
     }
     if (taskTrackers < 1) {
       throw new IllegalArgumentException(
-        "Invalid taskTrackers value, must be greater than 0");
+                                         "Invalid taskTrackers value, must be greater than 0");
     }
     if (dataNodes < 1) {
       throw new IllegalArgumentException(
-        "Invalid dataNodes value, must be greater than 0");
+                                         "Invalid dataNodes value, must be greater than 0");
     }
     localMR = (mrMode == LOCAL_MR);
     localFS = (fsMode == LOCAL_FS);
-/*
-    JobConf conf = new JobConf();
-    fsRoot = conf.get("hadoop.tmp.dir");
+    /*
+      JobConf conf = new JobConf();
+      fsRoot = conf.get("hadoop.tmp.dir");
 
-    if (fsRoot == null) {
+      if (fsRoot == null) {
       throw new IllegalArgumentException(
-        "hadoop.tmp.dir is not defined");
-    }
+      "hadoop.tmp.dir is not defined");
+      }
 
-    fsRoot = fsRoot.replace(' ', '+') + "/fs";
+      fsRoot = fsRoot.replace(' ', '+') + "/fs";
 
-    File file = new File(fsRoot);
-    if (!file.exists()) {
+      File file = new File(fsRoot);
+      if (!file.exists()) {
       if (!file.mkdirs()) {
-        throw new RuntimeException("Could not create FS base path: " + file);
+      throw new RuntimeException("Could not create FS base path: " + file);
       }
-    }
-*/
+      }
+    */
     this.taskTrackers = taskTrackers;
     this.dataNodes = dataNodes;
   }

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRBench.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRBench.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRBench.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRBench.java Thu Apr 19 14:34:41 2007
@@ -150,7 +150,7 @@
     jobConf.setMapOutputKeyClass(UTF8.class);
     jobConf.setMapOutputValueClass(UTF8.class);
     
-    if ( null != jarFile ) {
+    if (null != jarFile) {
       jobConf.setJar(jarFile);
     }
     jobConf.setMapperClass(Map.class);

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRCaching.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRCaching.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRCaching.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MRCaching.java Thu Apr 19 14:34:41 2007
@@ -63,7 +63,7 @@
         // read the cached files (unzipped, unjarred and text)
         // and put it into a single file TEST_ROOT_DIR/test.txt
         String TEST_ROOT_DIR = jconf.get("test.build.data","/tmp");
-        Path file = new Path("file:///",TEST_ROOT_DIR);
+        Path file = new Path("file:///", TEST_ROOT_DIR);
         FileSystem fs = FileSystem.getLocal(conf);
         if (!fs.mkdirs(file)) {
           throw new IOException("Mkdirs failed to create " + file.toString());
@@ -130,7 +130,7 @@
     String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data","/tmp"))
       .toString().replace(' ', '+');
     //if (TEST_ROOT_DIR.startsWith("C:")) TEST_ROOT_DIR = "/tmp";
-    conf.set("test.build.data",TEST_ROOT_DIR);
+    conf.set("test.build.data", TEST_ROOT_DIR);
     final Path inDir = new Path(indir);
     final Path outDir = new Path(outdir);
     FileSystem fs = FileSystem.get(conf);

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/MiniMRCluster.java Thu Apr 19 14:34:41 2007
@@ -195,7 +195,7 @@
    * Wait until the system is idle.
    */
   public void waitUntilIdle() {
-    for(Iterator itr= taskTrackerList.iterator(); itr.hasNext(); ) {
+    for(Iterator itr= taskTrackerList.iterator(); itr.hasNext();) {
       TaskTrackerRunner runner = (TaskTrackerRunner) itr.next();
       while (!runner.isDead && (!runner.isInitialized || !runner.tt.isIdle())) {
         if (!runner.isInitialized) {

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/NotificationTestCase.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/NotificationTestCase.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/NotificationTestCase.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/NotificationTestCase.java Thu Apr 19 14:34:41 2007
@@ -75,7 +75,7 @@
     // create servlet handler
     ServletHandler handler = new ServletHandler();
     handler.addServlet(servletClass.getName(), servletPath,
-      servletClass.getName());
+                       servletClass.getName());
 
     // bind servlet handler to context
     context.addHandler(handler);
@@ -104,14 +104,14 @@
       throws ServletException, IOException {
       if (counter == 0) {
         stdPrintln((new Date()).toString() +
-          "Receiving First notification for [" + req.getQueryString() +
-          "], returning error");
+                   "Receiving First notification for [" + req.getQueryString() +
+                   "], returning error");
         res.sendError(HttpServletResponse.SC_BAD_REQUEST, "forcing error");
       }
       else {
         stdPrintln((new Date()).toString() +
-          "Receiving Second notification for [" + req.getQueryString() +
-          "], returning OK");
+                   "Receiving Second notification for [" + req.getQueryString() +
+                   "], returning OK");
         res.setStatus(HttpServletResponse.SC_OK);
       }
       counter++;
@@ -144,7 +144,7 @@
 
   public void testMR() throws Exception {
     System.out.println(launchWordCount(this.createJobConf(),
-      "a b c d e f g h", 1, 1));
+                                       "a b c d e f g h", 1, 1));
     synchronized(Thread.currentThread()) {
       stdPrintln("Sleeping for 2 seconds to give time for retry");
       Thread.currentThread().sleep(2000);
@@ -153,9 +153,9 @@
   }
 
   private String launchWordCount(JobConf conf,
-    String input,
-    int numMaps,
-    int numReduces) throws IOException {
+                                 String input,
+                                 int numMaps,
+                                 int numReduces) throws IOException {
     Path inDir = new Path("testing/wc/input");
     Path outDir = new Path("testing/wc/output");
 

Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMapRed.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMapRed.java?view=diff&rev=530556&r1=530555&r2=530556
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMapRed.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/mapred/TestMapRed.java Thu Apr 19 14:34:41 2007
@@ -402,7 +402,7 @@
     fs.delete(randomOuts);
 
 
-    JobConf genJob = new JobConf(conf,TestMapRed.class);
+    JobConf genJob = new JobConf(conf, TestMapRed.class);
     genJob.setInputPath(randomIns);
     genJob.setInputFormat(SequenceFileInputFormat.class);
     genJob.setMapperClass(RandomGenMapper.class);
@@ -447,7 +447,7 @@
     int intermediateReduces = 10;
     Path intermediateOuts = new Path(testdir, "intermediateouts");
     fs.delete(intermediateOuts);
-    JobConf checkJob = new JobConf(conf,TestMapRed.class);
+    JobConf checkJob = new JobConf(conf, TestMapRed.class);
     checkJob.setInputPath(randomOuts);
     checkJob.setInputFormat(TextInputFormat.class);
     checkJob.setMapperClass(RandomCheckMapper.class);
@@ -470,7 +470,7 @@
     //
     Path finalOuts = new Path(testdir, "finalouts");        
     fs.delete(finalOuts);
-    JobConf mergeJob = new JobConf(conf,TestMapRed.class);
+    JobConf mergeJob = new JobConf(conf, TestMapRed.class);
     mergeJob.setInputPath(intermediateOuts);
     mergeJob.setInputFormat(SequenceFileInputFormat.class);
     mergeJob.setMapperClass(MergeMapper.class);
@@ -501,12 +501,12 @@
         if (dist[i] == 0) {
           continue;
         }
-        if (! in.next(key, val)) {
+        if (!in.next(key, val)) {
           System.err.println("Cannot read entry " + i);
           success = false;
           break;
         } else {
-          if ( !((key.get() == i ) && (val.get() == dist[i]))) {
+          if (!((key.get() == i) && (val.get() == dist[i]))) {
             System.err.println("Mismatch!  Pos=" + key.get() + ", i=" + i + ", val=" + val.get() + ", dist[i]=" + dist[i]);
             success = false;
           }