Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/10/19 04:28:07 UTC

svn commit: r1399950 [19/27] - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apac...

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
@@ -29,13 +33,10 @@ import java.net.Socket;
 import java.nio.ByteBuffer;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -67,13 +68,13 @@ import org.mockito.Mockito;
  * This tests data transfer protocol handling in the Datanode. It sends
  * various forms of wrong data and verifies that Datanode handles it well.
  */
-public class TestDataTransferProtocol extends TestCase {
+public class TestDataTransferProtocol {
   
   private static final Log LOG = LogFactory.getLog(
                     "org.apache.hadoop.hdfs.TestDataTransferProtocol");
 
   private static final DataChecksum DEFAULT_CHECKSUM =
-    DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32C, 512);
+    DataChecksum.newDataChecksum(DataChecksum.Type.CRC32C, 512);
   
   DatanodeID datanode;
   InetSocketAddress dnAddr;
@@ -139,13 +140,6 @@ public class TestDataTransferProtocol ex
     }
   }
   
-  void createFile(FileSystem fs, Path path, int fileLen) throws IOException {
-    byte [] arr = new byte[fileLen];
-    FSDataOutputStream out = fs.create(path);
-    out.write(arr);
-    out.close();
-  }
-  
   void readFile(FileSystem fs, Path path, int fileLen) throws IOException {
     byte [] arr = new byte[fileLen];
     FSDataInputStream in = fs.open(path);
@@ -205,7 +199,8 @@ public class TestDataTransferProtocol ex
     }
   }
   
-  @Test public void testOpWrite() throws IOException {
+  @Test 
+  public void testOpWrite() throws IOException {
     int numDataNodes = 1;
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
@@ -333,7 +328,8 @@ public class TestDataTransferProtocol ex
     }
   }
   
-@Test  public void testDataTransferProtocol() throws IOException {
+  @Test  
+  public void testDataTransferProtocol() throws IOException {
     Random random = new Random();
     int oneMil = 1024*1024;
     Path file = new Path("dataprotocol.dat");
@@ -353,7 +349,9 @@ public class TestDataTransferProtocol ex
     
     int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);
     
-    createFile(fileSys, file, fileLen);
+      DFSTestUtil.createFile(fileSys, file, fileLen, fileLen,
+          fileSys.getDefaultBlockSize(file),
+          fileSys.getDefaultReplication(file), 0L);
 
     // get the first blockid for the file
     final ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
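
The hunks above follow the JUnit 3 to JUnit 4 migration pattern applied throughout this commit: drop "extends TestCase", statically import the needed assertions from org.junit.Assert, and mark each test method with @Test so it no longer depends on the "test" name prefix. A minimal before/after sketch of that shape (class and method names here are illustrative, not taken from the patch; the patch itself uses static imports rather than the fully qualified names shown):

    // JUnit 3 style (being removed): tests discovered by method-name prefix,
    // assertions inherited from junit.framework.TestCase.
    class OldStyleTest extends junit.framework.TestCase {
      public void testSomething() {
        assertEquals(4, 2 + 2);
      }
    }

    // JUnit 4 style (being introduced): @Test annotation, no base class required.
    class NewStyleTest {
      @org.junit.Test
      public void testSomething() {
        org.junit.Assert.assertEquals(4, 2 + 2);
      }
    }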

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Fri Oct 19 02:25:55 2012
@@ -18,6 +18,10 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
@@ -28,22 +32,27 @@ import java.util.concurrent.TimeoutExcep
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
+import org.junit.Test;
 
 /**
  * This test verifies that block verification occurs on the datanode
  */
-public class TestDatanodeBlockScanner extends TestCase {
+public class TestDatanodeBlockScanner {
   
   private static final Log LOG = 
                  LogFactory.getLog(TestDatanodeBlockScanner.class);
@@ -55,6 +64,10 @@ public class TestDatanodeBlockScanner ex
   
   private static Pattern pattern_blockVerify = 
              Pattern.compile(".*?(SCAN_PERIOD)\\s*:\\s*(\\d+.*?)");
+  
+  static {
+    ((Log4JLogger)FSNamesystem.auditLog).getLogger().setLevel(Level.WARN);
+  }
   /**
    * This connects to datanode and fetches block verification data.
    * It repeats this until the given block has a verification time > newTime.
@@ -72,15 +85,15 @@ public class TestDatanodeBlockScanner ex
   throws IOException, TimeoutException {
     URL url = new URL("http://localhost:" + infoPort +
                       "/blockScannerReport?listblocks");
-    long lastWarnTime = System.currentTimeMillis();
+    long lastWarnTime = Time.now();
     if (newTime <= 0) newTime = 1L;
     long verificationTime = 0;
     
     String block = DFSTestUtil.getFirstBlock(fs, file).getBlockName();
     long failtime = (timeout <= 0) ? Long.MAX_VALUE 
-        : System.currentTimeMillis() + timeout;
+        : Time.now() + timeout;
     while (verificationTime < newTime) {
-      if (failtime < System.currentTimeMillis()) {
+      if (failtime < Time.now()) {
         throw new TimeoutException("failed to achieve block verification after "
             + timeout + " msec.  Current verification timestamp = "
             + verificationTime + ", requested verification time > " 
@@ -103,7 +116,7 @@ public class TestDatanodeBlockScanner ex
       }
       
       if (verificationTime < newTime) {
-        long now = System.currentTimeMillis();
+        long now = Time.now();
         if ((now - lastWarnTime) >= 5*1000) {
           LOG.info("Waiting for verification of " + block);
           lastWarnTime = now; 
@@ -117,8 +130,9 @@ public class TestDatanodeBlockScanner ex
     return verificationTime;
   }
 
+  @Test
   public void testDatanodeBlockScanner() throws IOException, TimeoutException {
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.now();
     
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -167,7 +181,8 @@ public class TestDatanodeBlockScanner ex
     return MiniDFSCluster.corruptReplica(replica, blk);
   }
 
-  public void testBlockCorruptionPolicy() throws IOException {
+  @Test
+  public void testBlockCorruptionPolicy() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     Random random = new Random();
@@ -200,12 +215,12 @@ public class TestDatanodeBlockScanner ex
     assertTrue(MiniDFSCluster.corruptReplica(1, block));
     assertTrue(MiniDFSCluster.corruptReplica(2, block));
 
-    // Read the file to trigger reportBadBlocks by client
-    try {
-      IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), 
-                        conf, true);
-    } catch (IOException e) {
-      // Ignore exception
+    // Trigger each of the DNs to scan this block immediately.
+    // The block pool scanner doesn't run frequently enough on its own
+    // to notice these, and due to HDFS-1371, the client won't report
+    // bad blocks to the NN when all replicas are bad.
+    for (DataNode dn : cluster.getDataNodes()) {
+      DataNodeTestUtils.runBlockScannerForBlock(dn, block);
     }
 
     // We now have the blocks to be marked as corrupt and we get back all
@@ -231,12 +246,14 @@ public class TestDatanodeBlockScanner ex
    * 4. Test again waits until the block is reported with expected number
    *    of good replicas.
    */
+  @Test
   public void testBlockCorruptionRecoveryPolicy1() throws Exception {
     // Test recovery of 1 corrupt replica
     LOG.info("Testing corrupt replica recovery for one corrupt replica");
     blockCorruptionRecoveryPolicy(4, (short)3, 1);
   }
 
+  @Test
   public void testBlockCorruptionRecoveryPolicy2() throws Exception {
     // Test recovery of 2 corrupt replicas
     LOG.info("Testing corrupt replica recovery for two corrupt replicas");
@@ -252,6 +269,7 @@ public class TestDatanodeBlockScanner ex
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 3);
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3L);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
+    conf.setLong(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 5L);
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
     cluster.waitActive();
@@ -259,35 +277,47 @@ public class TestDatanodeBlockScanner ex
     Path file1 = new Path("/tmp/testBlockCorruptRecovery/file");
     DFSTestUtil.createFile(fs, file1, 1024, numReplicas, 0);
     ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
+    final int ITERATIONS = 10;
 
     // Wait until block is replicated to numReplicas
     DFSTestUtil.waitReplication(fs, file1, numReplicas);
 
-    // Corrupt numCorruptReplicas replicas of block 
-    int[] corruptReplicasDNIDs = new int[numCorruptReplicas];
-    for (int i=0, j=0; (j != numCorruptReplicas) && (i < numDataNodes); i++) {
-      if (corruptReplica(block, i)) {
-        corruptReplicasDNIDs[j++] = i;
-        LOG.info("successfully corrupted block " + block + " on node " 
-                 + i + " " + cluster.getDataNodes().get(i).getDisplayName());
+    for (int k = 0; ; k++) {
+      // Corrupt numCorruptReplicas replicas of block 
+      int[] corruptReplicasDNIDs = new int[numCorruptReplicas];
+      for (int i=0, j=0; (j != numCorruptReplicas) && (i < numDataNodes); i++) {
+        if (corruptReplica(block, i)) {
+          corruptReplicasDNIDs[j++] = i;
+          LOG.info("successfully corrupted block " + block + " on node " 
+                   + i + " " + cluster.getDataNodes().get(i).getDisplayName());
+        }
+      }
+      
+      // Restart the datanodes containing corrupt replicas 
+      // so they would be reported to namenode and re-replicated
+      // They MUST be restarted in reverse order from highest to lowest index,
+      // because the act of restarting them removes them from the ArrayList
+      // and causes the indexes of all nodes above them in the list to change.
+      for (int i = numCorruptReplicas - 1; i >= 0 ; i--) {
+        LOG.info("restarting node with corrupt replica: position " 
+            + i + " node " + corruptReplicasDNIDs[i] + " " 
+            + cluster.getDataNodes().get(corruptReplicasDNIDs[i]).getDisplayName());
+        cluster.restartDataNode(corruptReplicasDNIDs[i]);
       }
-    }
-    
-    // Restart the datanodes containing corrupt replicas 
-    // so they would be reported to namenode and re-replicated
-    // They MUST be restarted in reverse order from highest to lowest index,
-    // because the act of restarting them removes them from the ArrayList
-    // and causes the indexes of all nodes above them in the list to change.
-    for (int i = numCorruptReplicas - 1; i >= 0 ; i--) {
-      LOG.info("restarting node with corrupt replica: position " 
-          + i + " node " + corruptReplicasDNIDs[i] + " " 
-          + cluster.getDataNodes().get(corruptReplicasDNIDs[i]).getDisplayName());
-      cluster.restartDataNode(corruptReplicasDNIDs[i]);
-    }
 
-    // Loop until all corrupt replicas are reported
-    DFSTestUtil.waitCorruptReplicas(fs, cluster.getNamesystem(), file1, 
-        block, numCorruptReplicas);
+      // Loop until all corrupt replicas are reported
+      try {
+        DFSTestUtil.waitCorruptReplicas(fs, cluster.getNamesystem(), file1, 
+                                        block, numCorruptReplicas);
+      } catch(TimeoutException e) {
+        if (k > ITERATIONS) {
+          throw e;
+        }
+        LOG.info("Timed out waiting for corrupt replicas, trying again, iteration " + k);
+        continue;
+      }
+      break;
+    }
     
     // Loop until the block recovers after replication
     DFSTestUtil.waitReplication(fs, file1, numReplicas);
@@ -301,6 +331,7 @@ public class TestDatanodeBlockScanner ex
   }
   
   /** Test if NameNode handles truncated blocks in block report */
+  @Test
   public void testTruncatedBlockReport() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     final short REPLICATION_FACTOR = (short)2;
@@ -311,7 +342,7 @@ public class TestDatanodeBlockScanner ex
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3L);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
 
-    long startTime = System.currentTimeMillis();
+    long startTime = Time.now();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(REPLICATION_FACTOR)
                                                .build();
@@ -395,10 +426,10 @@ public class TestDatanodeBlockScanner ex
   private static void waitForBlockDeleted(ExtendedBlock blk, int dnIndex,
       long timeout) throws TimeoutException, InterruptedException {
     File blockFile = MiniDFSCluster.getBlockFile(dnIndex, blk);
-    long failtime = System.currentTimeMillis() 
+    long failtime = Time.now() 
                     + ((timeout > 0) ? timeout : Long.MAX_VALUE);
     while (blockFile != null && blockFile.exists()) {
-      if (failtime < System.currentTimeMillis()) {
+      if (failtime < Time.now()) {
         throw new TimeoutException("waited too long for blocks to be deleted: "
             + blockFile.getPath() + (blockFile.exists() ? " still exists; " : " is absent; "));
       }
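
Throughout this file the commit also swaps System.currentTimeMillis() for org.apache.hadoop.util.Time.now(), Hadoop's clock wrapper, while keeping the same bounded-polling shape used by waitForVerification and waitForBlockDeleted. A condensed sketch of that wait-with-timeout idiom; conditionMet() is a hypothetical placeholder for the real check (block verified, block file deleted, and so on):

    import java.util.concurrent.TimeoutException;
    import org.apache.hadoop.util.Time;

    public class WaitSketch {
      // Poll until the condition holds or the timeout elapses.
      // A timeout <= 0 means wait indefinitely, matching the test helpers above.
      static void waitFor(long timeoutMs) throws TimeoutException, InterruptedException {
        long failTime = (timeoutMs <= 0) ? Long.MAX_VALUE : Time.now() + timeoutMs;
        while (!conditionMet()) {
          if (Time.now() > failTime) {
            throw new TimeoutException("condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(100);  // back off briefly between checks
        }
      }

      static boolean conditionMet() {
        return false;  // placeholder for the real predicate
      }
    }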

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java Fri Oct 19 02:25:55 2012
@@ -17,9 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
-import java.io.IOException;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
-import junit.framework.TestCase;
+import java.io.IOException;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -31,17 +32,18 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.log4j.Level;
+import org.junit.Test;
 
 /**
  * This class tests that pipelines survive data node death and recovery.
  */
-public class TestDatanodeDeath extends TestCase {
+public class TestDatanodeDeath {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
@@ -83,6 +85,7 @@ public class TestDatanodeDeath extends T
     }
 
     // create a bunch of files. Write to them and then verify.
+    @Override
     public void run() {
       System.out.println("Workload starting ");
       for (int i = 0; i < numberOfFiles; i++) {
@@ -225,6 +228,7 @@ public class TestDatanodeDeath extends T
       this.conf = conf;
     }
 
+    @Override
     public void run() {
 
       while (running) {
@@ -408,11 +412,15 @@ public class TestDatanodeDeath extends T
     }
   }
 
+  @Test
   public void testSimple0() throws IOException {simpleTest(0);}
 
+  @Test
   public void testSimple1() throws IOException {simpleTest(1);}
 
+  @Test
   public void testSimple2() throws IOException {simpleTest(2);}
 
+  @Test
   public void testComplex() throws IOException {complexTest();}
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeRegistration.java Fri Oct 19 02:25:55 2012
@@ -19,13 +19,15 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
 
 import java.net.InetSocketAddress;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
@@ -91,6 +93,58 @@ public class TestDatanodeRegistration {
   }
   
   @Test
+  public void testChangeStorageID() throws Exception {
+    final String DN_IP_ADDR = "127.0.0.1";
+    final String DN_HOSTNAME = "localhost";
+    final int DN_XFER_PORT = 12345;
+    final int DN_INFO_PORT = 12346;
+    final int DN_IPC_PORT = 12347;
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(0)
+          .build();
+      InetSocketAddress addr = new InetSocketAddress(
+        "localhost",
+        cluster.getNameNodePort());
+      DFSClient client = new DFSClient(addr, conf);
+      NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
+
+      // register a datanode
+      DatanodeID dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
+          "fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
+      long nnCTime = cluster.getNamesystem().getFSImage().getStorage()
+          .getCTime();
+      StorageInfo mockStorageInfo = mock(StorageInfo.class);
+      doReturn(nnCTime).when(mockStorageInfo).getCTime();
+      doReturn(HdfsConstants.LAYOUT_VERSION).when(mockStorageInfo)
+          .getLayoutVersion();
+      DatanodeRegistration dnReg = new DatanodeRegistration(dnId,
+          mockStorageInfo, null, VersionInfo.getVersion());
+      rpcServer.registerDatanode(dnReg);
+
+      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
+      assertEquals("Expected a registered datanode", 1, report.length);
+
+      // register the same datanode again with a different storage ID
+      dnId = new DatanodeID(DN_IP_ADDR, DN_HOSTNAME,
+          "changed-fake-storage-id", DN_XFER_PORT, DN_INFO_PORT, DN_IPC_PORT);
+      dnReg = new DatanodeRegistration(dnId,
+          mockStorageInfo, null, VersionInfo.getVersion());
+      rpcServer.registerDatanode(dnReg);
+
+      report = client.datanodeReport(DatanodeReportType.ALL);
+      assertEquals("Datanode with changed storage ID not recognized",
+          1, report.length);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  @Test
   public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0");
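
The new testChangeStorageID above stubs the datanode's StorageInfo with Mockito instead of building a real one, so the fake registration carries exactly the cTime and layout version the NameNode expects. A trimmed sketch of that stubbing idiom (the helper method and its argument are illustrative only):

    import static org.mockito.Mockito.doReturn;
    import static org.mockito.Mockito.mock;

    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
    import org.apache.hadoop.hdfs.server.common.StorageInfo;

    public class StorageInfoStubSketch {
      // Returns a StorageInfo whose getters report the supplied NameNode cTime
      // and the current layout version, without touching real storage.
      static StorageInfo stub(long nnCTime) {
        StorageInfo mockStorageInfo = mock(StorageInfo.class);
        doReturn(nnCTime).when(mockStorageInfo).getCTime();
        doReturn(HdfsConstants.LAYOUT_VERSION).when(mockStorageInfo).getLayoutVersion();
        return mockStorageInfo;
      }
    }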

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java Fri Oct 19 02:25:55 2012
@@ -17,27 +17,30 @@
 */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 
-import junit.framework.TestCase;
 import org.apache.hadoop.conf.Configuration;
-
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import static org.apache.hadoop.test.MetricsAsserts.*;
+import org.junit.Test;
 
 /**
  * This test ensures the all types of data node report work correctly.
  */
-public class TestDatanodeReport extends TestCase {
+public class TestDatanodeReport {
   final static private Configuration conf = new HdfsConfiguration();
   final static private int NUM_OF_DATANODES = 4;
     
   /**
    * This test attempts to different types of datanode report.
    */
+  @Test
   public void testDatanodeReport() throws Exception {
     conf.setInt(
         DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java Fri Oct 19 02:25:55 2012
@@ -17,18 +17,20 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+
 import java.net.InetSocketAddress;
 import java.net.URI;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.junit.Test;
 
 /** Test NameNode port defaulting code. */
-public class TestDefaultNameNodePort extends TestCase {
+public class TestDefaultNameNodePort {
 
+  @Test
   public void testGetAddressFromString() throws Exception {
     assertEquals(NameNode.getAddress("foo").getPort(),
                  NameNode.DEFAULT_PORT);
@@ -40,6 +42,7 @@ public class TestDefaultNameNodePort ext
                  555);
   }
 
+  @Test
   public void testGetAddressFromConf() throws Exception {
     Configuration conf = new HdfsConfiguration();
     FileSystem.setDefaultUri(conf, "hdfs://foo/");
@@ -50,6 +53,7 @@ public class TestDefaultNameNodePort ext
     assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
   }
 
+  @Test
   public void testGetUri() {
     assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
                  URI.create("hdfs://foo:555"));

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDeprecatedKeys.java Fri Oct 19 02:25:55 2012
@@ -18,13 +18,15 @@
 
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import static org.junit.Assert.assertTrue;
+
 import org.apache.hadoop.conf.Configuration;
-import junit.framework.TestCase;
+import org.junit.Test;
 
-public class TestDeprecatedKeys extends TestCase {
+public class TestDeprecatedKeys {
  
   //Tests a deprecated key
+  @Test
   public void testDeprecatedKeys() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.set("topology.script.file.name", "xyz");
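
TestDeprecatedKeys exercises Hadoop's Configuration deprecation table: a value set under a retired key should be readable under its replacement. A minimal sketch of that behaviour, assuming the old topology.script.file.name key maps to net.topology.script.file.name as in the 2.x deprecation list (the mapping and the printed check are assumptions, not shown in this diff):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class DeprecatedKeySketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        conf.set("topology.script.file.name", "xyz");          // deprecated name
        String v = conf.get("net.topology.script.file.name");  // assumed replacement key
        System.out.println("resolved via new key: " + v);      // expected: xyz
      }
    }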

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Fri Oct 19 02:25:55 2012
@@ -22,27 +22,41 @@ import static org.junit.Assert.assertEqu
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.Random;
 
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.BlockStorageLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.Test;
+import org.mockito.InOrder;
 
 public class TestDistributedFileSystem {
   private static final Random RAN = new Random();
@@ -106,17 +120,38 @@ public class TestDistributedFileSystem {
       DFSTestUtil.readFile(fileSys, p);
       
       DFSClient client = ((DistributedFileSystem)fileSys).dfs;
-      SocketCache cache = client.socketCache;
-      assertEquals(1, cache.size());
 
       fileSys.close();
       
-      assertEquals(0, cache.size());
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
   }
+
+  @Test
+  public void testDFSCloseOrdering() throws Exception {
+    DistributedFileSystem fs = new MyDistributedFileSystem();
+    Path path = new Path("/a");
+    fs.deleteOnExit(path);
+    fs.close();
+
+    InOrder inOrder = inOrder(fs.dfs);
+    inOrder.verify(fs.dfs).closeOutputStreams(eq(false));
+    inOrder.verify(fs.dfs).delete(eq(path.toString()), eq(true));
+    inOrder.verify(fs.dfs).close();
+  }
   
+  private static class MyDistributedFileSystem extends DistributedFileSystem {
+    MyDistributedFileSystem() {
+      statistics = new FileSystem.Statistics("myhdfs"); // can't mock finals
+      dfs = mock(DFSClient.class);
+    }
+    @Override
+    public boolean exists(Path p) {
+      return true; // trick out deleteOnExit
+    }
+  }
+
   @Test
   public void testDFSSeekExceptions() throws IOException {
     Configuration conf = getTestConfiguration();
@@ -169,82 +204,82 @@ public class TestDistributedFileSystem {
       for(int i = 0; i < filepaths.length; i++) {
         filepaths[i] = new Path(filepathstring + i);
       }
-      final long millis = System.currentTimeMillis();
+      final long millis = Time.now();
 
       {
         DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
-        dfs.dfs.leaserenewer.setGraceSleepPeriod(grace);
-        assertFalse(dfs.dfs.leaserenewer.isRunning());
+        dfs.dfs.getLeaseRenewer().setGraceSleepPeriod(grace);
+        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
   
         {
           //create a file
           final FSDataOutputStream out = dfs.create(filepaths[0]);
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
           //write something
           out.writeLong(millis);
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
           //close
           out.close();
           Thread.sleep(grace/4*3);
           //within grace period
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
           for(int i = 0; i < 3; i++) {
-            if (dfs.dfs.leaserenewer.isRunning()) {
+            if (dfs.dfs.getLeaseRenewer().isRunning()) {
               Thread.sleep(grace/2);
             }
           }
           //passed grace period
-          assertFalse(dfs.dfs.leaserenewer.isRunning());
+          assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
         }
 
         {
           //create file1
           final FSDataOutputStream out1 = dfs.create(filepaths[1]);
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
           //create file2
           final FSDataOutputStream out2 = dfs.create(filepaths[2]);
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
 
           //write something to file1
           out1.writeLong(millis);
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
           //close file1
           out1.close();
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
 
           //write something to file2
           out2.writeLong(millis);
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
           //close file2
           out2.close();
           Thread.sleep(grace/4*3);
           //within grace period
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
         }
 
         {
           //create file3
           final FSDataOutputStream out3 = dfs.create(filepaths[3]);
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
           Thread.sleep(grace/4*3);
           //passed previous grace period, should still running
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
           //write something to file3
           out3.writeLong(millis);
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
           //close file3
           out3.close();
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
           Thread.sleep(grace/4*3);
           //within grace period
-          assertTrue(dfs.dfs.leaserenewer.isRunning());
+          assertTrue(dfs.dfs.getLeaseRenewer().isRunning());
           for(int i = 0; i < 3; i++) {
-            if (dfs.dfs.leaserenewer.isRunning()) {
+            if (dfs.dfs.getLeaseRenewer().isRunning()) {
               Thread.sleep(grace/2);
             }
           }
           //passed grace period
-          assertFalse(dfs.dfs.leaserenewer.isRunning());
+          assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
         }
 
         dfs.close();
@@ -273,15 +308,15 @@ public class TestDistributedFileSystem {
 
       {
         DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
-        assertFalse(dfs.dfs.leaserenewer.isRunning());
+        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
 
         //open and check the file
         FSDataInputStream in = dfs.open(filepaths[0]);
-        assertFalse(dfs.dfs.leaserenewer.isRunning());
+        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
         assertEquals(millis, in.readLong());
-        assertFalse(dfs.dfs.leaserenewer.isRunning());
+        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
         in.close();
-        assertFalse(dfs.dfs.leaserenewer.isRunning());
+        assertFalse(dfs.dfs.getLeaseRenewer().isRunning());
         dfs.close();
       }
       
@@ -416,7 +451,6 @@ public class TestDistributedFileSystem {
 
     final Configuration conf = getTestConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
-    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
 
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem hdfs = cluster.getFileSystem();
@@ -570,4 +604,150 @@ public class TestDistributedFileSystem {
     testDFSClient();
     testFileChecksum();
   }
+
+  /**
+   * Tests the normal path of batching up BlockLocation[]s to be passed to a
+   * single
+   * {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
+   * call
+   */
+  @Test
+  public void testGetFileBlockStorageLocationsBatching() throws Exception {
+    final Configuration conf = getTestConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
+        true);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(2).build();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    // Create two files
+    Path tmpFile1 = new Path("/tmpfile1.dat");
+    Path tmpFile2 = new Path("/tmpfile2.dat");
+    DFSTestUtil.createFile(fs, tmpFile1, 1024, (short) 2, 0xDEADDEADl);
+    DFSTestUtil.createFile(fs, tmpFile2, 1024, (short) 2, 0xDEADDEADl);
+    // Get locations of blocks of both files and concat together
+    BlockLocation[] blockLocs1 = fs.getFileBlockLocations(tmpFile1, 0, 1024);
+    BlockLocation[] blockLocs2 = fs.getFileBlockLocations(tmpFile2, 0, 1024);
+    BlockLocation[] blockLocs = (BlockLocation[]) ArrayUtils.addAll(blockLocs1,
+        blockLocs2);
+    // Fetch VolumeBlockLocations in batch
+    BlockStorageLocation[] locs = fs.getFileBlockStorageLocations(Arrays
+        .asList(blockLocs));
+    int counter = 0;
+    // Print out the list of ids received for each block
+    for (BlockStorageLocation l : locs) {
+      for (int i = 0; i < l.getVolumeIds().length; i++) {
+        VolumeId id = l.getVolumeIds()[i];
+        String name = l.getNames()[i];
+        if (id != null) {
+          System.out.println("Datanode " + name + " has block " + counter
+              + " on volume id " + id.toString());
+        }
+      }
+      counter++;
+    }
+    assertEquals("Expected two HdfsBlockLocations for two 1-block files", 2,
+        locs.length);
+    for (BlockStorageLocation l : locs) {
+      assertEquals("Expected two replicas for each block", 2,
+          l.getVolumeIds().length);
+      for (int i = 0; i < l.getVolumeIds().length; i++) {
+        VolumeId id = l.getVolumeIds()[i];
+        String name = l.getNames()[i];
+        assertTrue("Expected block to be valid on datanode " + name,
+            id.isValid());
+      }
+    }
+  }
+
+  /**
+   * Tests error paths for
+   * {@link DistributedFileSystem#getFileBlockStorageLocations(java.util.List)}
+   */
+  @Test
+  public void testGetFileBlockStorageLocationsError() throws Exception {
+    final Configuration conf = getTestConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED,
+        true);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(2).build();
+    cluster.getDataNodes();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    // Create a file
+    Path tmpFile = new Path("/tmpfile1.dat");
+    DFSTestUtil.createFile(fs, tmpFile, 1024, (short) 2, 0xDEADDEADl);
+    // Get locations of blocks of the file
+    BlockLocation[] blockLocs = fs.getFileBlockLocations(tmpFile, 0, 1024);
+    // Stop a datanode to simulate a failure
+    cluster.stopDataNode(0);
+    // Fetch VolumeBlockLocations
+    BlockStorageLocation[] locs = fs.getFileBlockStorageLocations(Arrays
+        .asList(blockLocs));
+
+    assertEquals("Expected one HdfsBlockLocation for one 1-block file", 1,
+        locs.length);
+
+    for (BlockStorageLocation l : locs) {
+      assertEquals("Expected two replicas for each block", 2,
+          l.getVolumeIds().length);
+      assertTrue("Expected one valid and one invalid replica",
+          (l.getVolumeIds()[0].isValid()) ^ (l.getVolumeIds()[1].isValid()));
+    }
+  }
+
+  @Test
+  public void testCreateWithCustomChecksum() throws Exception {
+    Configuration conf = getTestConfiguration();
+    final long grace = 1000L;
+    MiniDFSCluster cluster = null;
+    Path testBasePath = new Path("/test/csum");
+    // create args 
+    Path path1 = new Path(testBasePath, "file_wtih_crc1");
+    Path path2 = new Path(testBasePath, "file_with_crc2");
+    ChecksumOpt opt1 = new ChecksumOpt(DataChecksum.Type.CRC32C, 512);
+    ChecksumOpt opt2 = new ChecksumOpt(DataChecksum.Type.CRC32, 512);
+
+    // common args
+    FsPermission perm = FsPermission.getDefault().applyUMask(
+        FsPermission.getUMask(conf));
+    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.OVERWRITE,
+        CreateFlag.CREATE);
+    short repl = 1;
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      FileSystem dfs = cluster.getFileSystem();
+
+      dfs.mkdirs(testBasePath);
+
+      // create two files with different checksum types
+      FSDataOutputStream out1 = dfs.create(path1, perm, flags, 4096, repl,
+          131072L, null, opt1);
+      FSDataOutputStream out2 = dfs.create(path2, perm, flags, 4096, repl,
+          131072L, null, opt2);
+
+      for (int i = 0; i < 1024; i++) {
+        out1.write(i);
+        out2.write(i);
+      }
+      out1.close();
+      out2.close();
+
+      // the two checksums must be different.
+      MD5MD5CRC32FileChecksum sum1 =
+          (MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path1);
+      MD5MD5CRC32FileChecksum sum2 =
+          (MD5MD5CRC32FileChecksum)dfs.getFileChecksum(path2);
+      assertFalse(sum1.equals(sum2));
+
+      // check the individual params
+      assertEquals(DataChecksum.Type.CRC32C, sum1.getCrcType());
+      assertEquals(DataChecksum.Type.CRC32,  sum2.getCrcType());
+
+    } finally {
+      if (cluster != null) {
+        cluster.getFileSystem().delete(testBasePath, true);
+        cluster.shutdown();
+      }
+    }
+  }
 }
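
The new testDFSCloseOrdering above leans on Mockito's InOrder verification to assert that close() first flushes output streams, then runs the deleteOnExit path, and only then closes the underlying client. A standalone sketch of that verification style; the Worker interface and its methods are hypothetical stand-ins for the DFSClient calls verified in the test:

    import static org.mockito.Mockito.inOrder;
    import static org.mockito.Mockito.mock;

    import org.mockito.InOrder;

    public class InOrderSketch {
      interface Worker {       // hypothetical collaborator
        void flush();
        void cleanup();
        void close();
      }

      public static void main(String[] args) {
        Worker w = mock(Worker.class);
        w.flush();
        w.cleanup();
        w.close();

        // Verifies the calls happened in exactly this relative order;
        // a different order would fail the verification.
        InOrder inOrder = inOrder(w);
        inOrder.verify(w).flush();
        inOrder.verify(w).cleanup();
        inOrder.verify(w).close();
      }
    }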

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java Fri Oct 19 02:25:55 2012
@@ -17,14 +17,18 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.EOFException;
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.RandomAccessFile;
 import java.util.Random;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -35,11 +39,12 @@ import org.apache.hadoop.fs.LocalFileSys
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
+import org.junit.Test;
 
 /**
  * This class tests if FSInputChecker works correctly.
  */
-public class TestFSInputChecker extends TestCase {
+public class TestFSInputChecker {
   static final long seed = 0xDEADBEEFL;
   static final int BYTES_PER_SUM = 10;
   static final int BLOCK_SIZE = 2*BYTES_PER_SUM;
@@ -156,8 +161,8 @@ public class TestFSInputChecker extends 
   private void testSkip1(int skippedBytes) 
   throws Exception {
     long oldPos = stm.getPos();
-    long nSkipped = stm.skip(skippedBytes);
-    long newPos = oldPos+nSkipped;
+    IOUtils.skipFully(stm, skippedBytes);
+    long newPos = oldPos + skippedBytes;
     assertEquals(stm.getPos(), newPos);
     stm.readFully(actual);
     checkAndEraseData(actual, (int)newPos, expected, "Read Sanity Test");
@@ -190,13 +195,31 @@ public class TestFSInputChecker extends 
     testSkip1(FILE_SIZE-1);
     
     stm.seek(0);
-    assertEquals(stm.skip(FILE_SIZE), FILE_SIZE);
-    assertEquals(stm.skip(10), 0);
+    IOUtils.skipFully(stm, FILE_SIZE);
+    try {
+      IOUtils.skipFully(stm, 10);
+      fail("expected to get a PrematureEOFException");
+    } catch (EOFException e) {
+      assertEquals(e.getMessage(), "Premature EOF from inputStream " +
+          "after skipping 0 byte(s).");
+    }
     
     stm.seek(0);
-    assertEquals(stm.skip(FILE_SIZE+10), FILE_SIZE);
+    try {
+      IOUtils.skipFully(stm, FILE_SIZE + 10);
+      fail("expected to get a PrematureEOFException");
+    } catch (EOFException e) {
+      assertEquals(e.getMessage(), "Premature EOF from inputStream " +
+          "after skipping " + FILE_SIZE + " byte(s).");
+    }
     stm.seek(10);
-    assertEquals(stm.skip(FILE_SIZE), FILE_SIZE-10);
+    try {
+      IOUtils.skipFully(stm, FILE_SIZE);
+      fail("expected to get a PrematureEOFException");
+    } catch (EOFException e) {
+      assertEquals(e.getMessage(), "Premature EOF from inputStream " +
+          "after skipping " + (FILE_SIZE - 10) + " byte(s).");
+    }
   }
 
   private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
@@ -291,6 +314,7 @@ public class TestFSInputChecker extends 
     in.close();    
   }
   
+  @Test
   public void testFSInputChecker() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
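
The TestFSInputChecker changes above replace bare InputStream.skip() calls with org.apache.hadoop.io.IOUtils.skipFully(): skip() may legitimately skip fewer bytes than requested and only reports the shortfall through its return value, whereas skipFully() either skips the full count or throws EOFException, which is what the updated assertions expect. A small sketch of the difference using an in-memory stream (the 16-byte buffer and 100-byte request are arbitrary):

    import java.io.ByteArrayInputStream;
    import java.io.EOFException;
    import java.io.IOException;

    import org.apache.hadoop.io.IOUtils;

    public class SkipFullySketch {
      public static void main(String[] args) throws IOException {
        byte[] data = new byte[16];

        // skip() is best-effort: asking for more than remains is not an error.
        ByteArrayInputStream a = new ByteArrayInputStream(data);
        long skipped = a.skip(100);
        System.out.println("skip(100) skipped " + skipped + " byte(s)");  // prints 16

        // skipFully() insists on the full count and fails loudly otherwise.
        ByteArrayInputStream b = new ByteArrayInputStream(data);
        try {
          IOUtils.skipFully(b, 100);
        } catch (EOFException e) {
          System.out.println("skipFully(100) failed: " + e.getMessage());
        }
      }
    }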

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSOutputSummer.java Fri Oct 19 02:25:55 2012
@@ -17,21 +17,24 @@
  */
 package org.apache.hadoop.hdfs;
 
-import junit.framework.TestCase;
-import java.io.*;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
 import java.util.Random;
-import org.apache.hadoop.conf.Configuration;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.junit.Test;
 
 /**
  * This class tests if FSOutputSummer works correctly.
  */
-public class TestFSOutputSummer extends TestCase {
+public class TestFSOutputSummer {
   private static final long seed = 0xDEADBEEFL;
   private static final int BYTES_PER_CHECKSUM = 10;
   private static final int BLOCK_SIZE = 2*BYTES_PER_CHECKSUM;
@@ -111,6 +114,7 @@ public class TestFSOutputSummer extends 
   /**
    * Test write opeation for output stream in DFS.
    */
+  @Test
   public void testFSOutputSummer() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend2.java Fri Oct 19 02:25:55 2012
@@ -17,12 +17,16 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 
-import junit.framework.TestCase;
-
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -36,16 +40,14 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.log4j.Level;
+import org.junit.Test;
 
 /**
  * This class tests the building blocks that are needed to
  * support HDFS appends.
  */
-public class TestFileAppend2 extends TestCase {
+public class TestFileAppend2 {
 
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
@@ -79,6 +81,7 @@ public class TestFileAppend2 extends Tes
    * Verify that all data exists in file.
    * @throws IOException an exception might be thrown
    */ 
+  @Test
   public void testSimpleAppend() throws IOException {
     final Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
@@ -238,6 +241,7 @@ public class TestFileAppend2 extends Tes
     }
 
     // create a bunch of files. Write to them and then verify.
+    @Override
     public void run() {
       System.out.println("Workload " + id + " starting... ");
       for (int i = 0; i < numAppendsPerThread; i++) {
@@ -328,6 +332,7 @@ public class TestFileAppend2 extends Tes
    * Test that appends to files at random offsets.
    * @throws IOException an exception might be thrown
    */
+  @Test
   public void testComplexAppend() throws IOException {
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
     Configuration conf = new HdfsConfiguration();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
@@ -42,7 +46,6 @@ import org.apache.log4j.Level;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 /** This class implements some of tests posted in HADOOP-2658. */
 public class TestFileAppend3  {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend4.java Fri Oct 19 02:25:55 2012
@@ -166,6 +166,7 @@ public class TestFileAppend4 {
       AppendTestUtil.write(stm, 0, 4096);
       final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
       Thread t = new Thread() { 
+          @Override
           public void run() {
             try {
               stm.close();
@@ -181,7 +182,7 @@ public class TestFileAppend4 {
       // has not been completed in the NN.
       // Lose the leases
       LOG.info("Killing lease checker");
-      client.leaserenewer.interruptAndJoin();
+      client.getLeaseRenewer().interruptAndJoin();
  
       FileSystem fs1 = cluster.getFileSystem();
       FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(
@@ -238,6 +239,7 @@ public class TestFileAppend4 {
       AppendTestUtil.write(stm, 0, 4096);
       final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
       Thread t = new Thread() { 
+          @Override
           public void run() {
             try {
               stm.close();
@@ -253,7 +255,7 @@ public class TestFileAppend4 {
       // has not been completed in the NN.
       // Lose the leases
       LOG.info("Killing lease checker");
-      client.leaserenewer.interruptAndJoin();
+      client.getLeaseRenewer().interruptAndJoin();
  
       FileSystem fs1 = cluster.getFileSystem();
       FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java Fri Oct 19 02:25:55 2012
@@ -23,7 +23,6 @@ import static org.junit.Assert.assertEqu
 import java.io.File;
 import java.io.IOException;
 import java.util.EnumMap;
-import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -32,8 +31,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
-import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileConcurrentReader.java Fri Oct 19 02:25:55 2012
@@ -16,6 +16,15 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -32,17 +41,17 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
-
-import java.io.IOException;
-import java.util.*;
-import java.util.concurrent.atomic.*;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
 
 
 /**
  * This class tests the cases of concurrent reads/writes to a file;
  * i.e., one writer and one or more readers can see unfinished blocks
  */
-public class TestFileConcurrentReader extends junit.framework.TestCase {
+public class TestFileConcurrentReader {
 
   private enum SyncType {
     SYNC,
@@ -68,18 +77,16 @@ public class TestFileConcurrentReader ex
   private FileSystem fileSystem;
 
 
-  @Override
-  protected void setUp() throws IOException {
+  @Before
+  public void setUp() throws IOException {
     conf = new Configuration();
     init(conf);
   }
 
-  @Override
-  protected void tearDown() throws Exception {
+  @After
+  public void tearDown() throws Exception {
     cluster.shutdown();
     cluster = null;
-    
-    super.tearDown();
   }
 
   private void init(Configuration conf) throws IOException {
@@ -144,6 +151,7 @@ public class TestFileConcurrentReader ex
   /**
    * Test that writes to an incomplete block are available to a reader
    */
+  @Test
   public void testUnfinishedBlockRead()
     throws IOException {
     // create a new file in the root, write data, do no close
@@ -166,6 +174,7 @@ public class TestFileConcurrentReader ex
    * would result in too small a buffer to do the buffer-copy needed
    * for partial chunks.
    */
+  @Test
   public void testUnfinishedBlockPacketBufferOverrun() throws IOException {
     // check that / exists
     Path path = new Path("/");
@@ -191,6 +200,7 @@ public class TestFileConcurrentReader ex
   // use a small block size and a large write so that DN is busy creating
   // new blocks.  This makes it almost 100% sure we can reproduce
   // case of client getting a DN that hasn't yet created the blocks
+  @Test
   public void testImmediateReadOfNewFile()
     throws IOException {
     final int blockSize = 64 * 1024;
@@ -267,31 +277,39 @@ public class TestFileConcurrentReader ex
 
   // for some reason, using tranferTo evokes the race condition more often
   // so test separately
+  @Test
   public void testUnfinishedBlockCRCErrorTransferTo() throws IOException {
     runTestUnfinishedBlockCRCError(true, SyncType.SYNC, DEFAULT_WRITE_SIZE);
   }
 
+  @Test
   public void testUnfinishedBlockCRCErrorTransferToVerySmallWrite()
     throws IOException {
     runTestUnfinishedBlockCRCError(true, SyncType.SYNC, SMALL_WRITE_SIZE);
   }
 
   // fails due to issue w/append, disable 
+  @Ignore
+  @Test
   public void _testUnfinishedBlockCRCErrorTransferToAppend()
     throws IOException {
     runTestUnfinishedBlockCRCError(true, SyncType.APPEND, DEFAULT_WRITE_SIZE);
   }
 
+  @Test
   public void testUnfinishedBlockCRCErrorNormalTransfer() throws IOException {
     runTestUnfinishedBlockCRCError(false, SyncType.SYNC, DEFAULT_WRITE_SIZE);
   }
 
+  @Test
   public void testUnfinishedBlockCRCErrorNormalTransferVerySmallWrite()
     throws IOException {
     runTestUnfinishedBlockCRCError(false, SyncType.SYNC, SMALL_WRITE_SIZE);
   }
 
   // fails due to issue w/append, disable 
+  @Ignore
+  @Test
   public void _testUnfinishedBlockCRCErrorNormalTransferAppend()
     throws IOException {
     runTestUnfinishedBlockCRCError(false, SyncType.APPEND, DEFAULT_WRITE_SIZE);
@@ -440,4 +458,4 @@ public class TestFileConcurrentReader ex
     inputStream.close();
     return numRead + startPos - 1;
   }
-}
\ No newline at end of file
+}

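The TestFileConcurrentReader hunks above follow the JUnit 3 to JUnit 4 migration applied throughout this commit: the class stops extending junit.framework.TestCase, setUp/tearDown lose their @Override and become @Before/@After methods, every test method gains @Test, tests that used to be disabled by renaming gain @Ignore, and assertions come from static imports of org.junit.Assert. A minimal sketch of the migrated shape; the class and method names here are illustrative, not taken from the patch:

    import static org.junit.Assert.assertTrue;

    import java.io.IOException;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Ignore;
    import org.junit.Test;

    public class ExampleMigratedTest {        // no longer extends TestCase

      @Before
      public void setUp() throws IOException {
        // bring up the MiniDFSCluster or other fixtures
      }

      @After
      public void tearDown() throws Exception {
        // shut the fixtures down; no super.tearDown() call is needed
      }

      @Test
      public void testSomething() throws IOException {
        assertTrue(true);                     // asserts resolved via org.junit.Assert
      }

      @Ignore  // previously disabled by renaming the method so JUnit 3 would skip it
      @Test
      public void testKnownAppendIssue() throws IOException {
      }
    }

One consequence of the switch is visible in the diff: @Ignore keeps the disabled append tests compiling and reported as skipped, instead of relying on the _test prefix to hide them from the runner.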
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java Fri Oct 19 02:25:55 2012
@@ -18,14 +18,16 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.util.ArrayList;
 
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -42,11 +44,12 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.log4j.Level;
+import org.junit.Test;
 
 /**
  * A JUnit test for corrupted file handling.
  */
-public class TestFileCorruption extends TestCase {
+public class TestFileCorruption {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
@@ -56,6 +59,7 @@ public class TestFileCorruption extends 
   static Log LOG = ((Log4JLogger)NameNode.stateChangeLog);
 
   /** check if DFS can handle corrupted blocks properly */
+  @Test
   public void testFileCorruption() throws Exception {
     MiniDFSCluster cluster = null;
     DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFileCorruption").
@@ -88,6 +92,7 @@ public class TestFileCorruption extends 
   }
 
   /** check if local FS can handle corrupted blocks properly */
+  @Test
   public void testLocalFileCorruption() throws Exception {
     Configuration conf = new HdfsConfiguration();
     Path file = new Path(System.getProperty("test.build.data"), "corruptFile");
@@ -114,6 +119,7 @@ public class TestFileCorruption extends 
    * in blocksMap. Make sure that ArrayIndexOutOfBounds is not thrown.
    * See Hadoop-4351.
    */
+  @Test
   public void testArrayOutOfBoundsException() throws Exception {
     MiniDFSCluster cluster = null;
     try {

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Fri Oct 19 02:25:55 2012
@@ -16,7 +16,6 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
-
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
@@ -31,6 +30,9 @@ import static org.apache.hadoop.hdfs.DFS
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import static org.junit.Assume.assumeTrue;
 
 import java.io.BufferedReader;
@@ -39,7 +41,9 @@ import java.io.FileNotFoundException;
 import java.io.FileReader;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.net.URI;
 import java.net.UnknownHostException;
+import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;
 
 import org.apache.commons.logging.LogFactory;
@@ -51,6 +55,7 @@ import org.apache.hadoop.fs.FSDataInputS
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -67,13 +72,19 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.junit.Test;
 
 /**
  * This class tests various cases during file creation.
  */
-public class TestFileCreation extends junit.framework.TestCase {
+public class TestFileCreation {
   static final String DIR = "/" + TestFileCreation.class.getSimpleName() + "/";
 
   {
@@ -88,6 +99,15 @@ public class TestFileCreation extends ju
   static final int numBlocks = 2;
   static final int fileSize = numBlocks * blockSize + 1;
   boolean simulatedStorage = false;
+  
+  private static final String[] NON_CANONICAL_PATHS = new String[] {
+    "//foo",
+    "///foo2",
+    "//dir//file",
+    "////test2/file",
+    "/dir/./file2",
+    "/dir/../file3"
+  };
 
   // creates a file but does not close it
   public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
@@ -122,6 +142,7 @@ public class TestFileCreation extends ju
   /**
    * Test that server default values can be retrieved on the client side
    */
+  @Test
   public void testServerDefaults() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT);
@@ -147,19 +168,28 @@ public class TestFileCreation extends ju
     }
   }
 
+  @Test
   public void testFileCreation() throws IOException {
-    checkFileCreation(null);
+    checkFileCreation(null, false);
+  }
+
+  /** Same test but the client should use DN hostnames */
+  @Test
+  public void testFileCreationUsingHostname() throws IOException {
+    assumeTrue(System.getProperty("os.name").startsWith("Linux"));
+    checkFileCreation(null, true);
   }
 
   /** Same test but the client should bind to a local interface */
+  @Test
   public void testFileCreationSetLocalInterface() throws IOException {
     assumeTrue(System.getProperty("os.name").startsWith("Linux"));
 
     // The mini cluster listens on the loopback so we can use it here
-    checkFileCreation("lo");
+    checkFileCreation("lo", false);
 
     try {
-      checkFileCreation("bogus-interface");
+      checkFileCreation("bogus-interface", false);
       fail("Able to specify a bogus interface");
     } catch (UnknownHostException e) {
       assertEquals("No such interface bogus-interface", e.getMessage());
@@ -169,16 +199,28 @@ public class TestFileCreation extends ju
   /**
    * Test if file creation and disk space consumption works right
    * @param netIf the local interface, if any, clients should use to access DNs
+   * @param useDnHostname whether the client should contact DNs by hostname
    */
-  public void checkFileCreation(String netIf) throws IOException {
+  public void checkFileCreation(String netIf, boolean useDnHostname)
+      throws IOException {
     Configuration conf = new HdfsConfiguration();
     if (netIf != null) {
       conf.set(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
     }
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
+    if (useDnHostname) {
+      // Since the mini cluster only listens on the loopback we have to
+      // ensure the hostname used to access DNs maps to the loopback. We
+      // do this by telling the DN to advertise localhost as its hostname
+      // instead of the default hostname.
+      conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
+    }
     if (simulatedStorage) {
       SimulatedFSDataset.setFactory(conf);
     }
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+      .checkDataNodeHostConfig(true)
+      .build();
     FileSystem fs = cluster.getFileSystem();
     try {
 
@@ -254,6 +296,7 @@ public class TestFileCreation extends ju
   /**
    * Test deleteOnExit
    */
+  @Test
   public void testDeleteOnExit() throws IOException {
     Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
@@ -314,8 +357,63 @@ public class TestFileCreation extends ju
   }
 
   /**
+   * Test that a file which is open for write is overwritten by another
+   * client. Regression test for HDFS-3755.
+   */
+  @Test
+  public void testOverwriteOpenForWrite() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    SimulatedFSDataset.setFactory(conf);
+    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    FileSystem fs = cluster.getFileSystem();
+    
+    UserGroupInformation otherUgi = UserGroupInformation.createUserForTesting(
+        "testuser", new String[]{"testgroup"});
+    FileSystem fs2 = otherUgi.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(cluster.getConfiguration(0));
+      }
+    });
+    
+    try {
+      Path p = new Path("/testfile");
+      FSDataOutputStream stm1 = fs.create(p);
+      stm1.write(1);
+      stm1.hflush();
+
+      // Create file again without overwrite
+      try {
+        fs2.create(p, false);
+        fail("Did not throw!");
+      } catch (IOException abce) {
+        GenericTestUtils.assertExceptionContains("already being created by",
+            abce);
+      }
+      
+      FSDataOutputStream stm2 = fs2.create(p, true);
+      stm2.write(2);
+      stm2.close();
+      
+      try {
+        stm1.close();
+        fail("Should have exception closing stm1 since it was deleted");
+      } catch (IOException ioe) {
+        GenericTestUtils.assertExceptionContains("File is not open for writing", ioe);
+      }
+      
+    } finally {
+      IOUtils.closeStream(fs);
+      IOUtils.closeStream(fs2);
+      cluster.shutdown();
+    }
+  }
+  
+  /**
    * Test that file data does not become corrupted even in the face of errors.
    */
+  @Test
   public void testFileCreationError1() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
@@ -388,6 +486,7 @@ public class TestFileCreation extends ju
    * Test that the filesystem removes the last block from a file if its
    * lease expires.
    */
+  @Test
   public void testFileCreationError2() throws IOException {
     long leasePeriod = 1000;
     System.out.println("testFileCreationError2 start");
@@ -453,6 +552,7 @@ public class TestFileCreation extends ju
   }
 
   /** test addBlock(..) when replication<min and excludeNodes==null. */
+  @Test
   public void testFileCreationError3() throws IOException {
     System.out.println("testFileCreationError3 start");
     Configuration conf = new HdfsConfiguration();
@@ -484,10 +584,9 @@ public class TestFileCreation extends ju
 
   /**
    * Test that file leases are persisted across namenode restarts.
-   * This test is currently not triggered because more HDFS work is 
-   * is needed to handle persistent leases.
    */
-  public void xxxtestFileCreationNamenodeRestart() throws IOException {
+  @Test
+  public void testFileCreationNamenodeRestart() throws IOException {
     Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s
     conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
@@ -625,6 +724,7 @@ public class TestFileCreation extends ju
   /**
    * Test that all open files are closed when client dies abnormally.
    */
+  @Test
   public void testDFSClientDeath() throws IOException, InterruptedException {
     Configuration conf = new HdfsConfiguration();
     System.out.println("Testing abnormal client death.");
@@ -661,6 +761,7 @@ public class TestFileCreation extends ju
   /**
    * Test file creation using createNonRecursive().
    */
+  @Test
   public void testFileCreationNonRecursive() throws IOException {
     Configuration conf = new HdfsConfiguration();
     if (simulatedStorage) {
@@ -668,13 +769,13 @@ public class TestFileCreation extends ju
     }
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
-    final Path path = new Path("/" + System.currentTimeMillis()
+    final Path path = new Path("/" + Time.now()
         + "-testFileCreationNonRecursive");
     FSDataOutputStream out = null;
 
     try {
       IOException expectedException = null;
-      final String nonExistDir = "/non-exist-" + System.currentTimeMillis();
+      final String nonExistDir = "/non-exist-" + Time.now();
 
       fs.delete(new Path(nonExistDir), true);
       EnumSet<CreateFlag> createFlag = EnumSet.of(CreateFlag.CREATE);
@@ -755,6 +856,7 @@ public class TestFileCreation extends ju
 /**
  * Test that file data becomes available before file is closed.
  */
+  @Test
   public void testFileCreationSimulated() throws IOException {
     simulatedStorage = true;
     testFileCreation();
@@ -764,6 +866,7 @@ public class TestFileCreation extends ju
   /**
    * Test creating two files at the same time. 
    */
+  @Test
   public void testConcurrentFileCreation() throws IOException {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
@@ -796,6 +899,7 @@ public class TestFileCreation extends ju
   /**
    * Test creating a file whose data gets sync when closed
    */
+  @Test
   public void testFileCreationSyncOnClose() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFS_DATANODE_SYNCONCLOSE_KEY, true);
@@ -831,6 +935,7 @@ public class TestFileCreation extends ju
    * Then change lease period and wait for lease recovery.
    * Finally, read the block directly from each Datanode and verify the content.
    */
+  @Test
   public void testLeaseExpireHardLimit() throws Exception {
     System.out.println("testLeaseExpireHardLimit start");
     final long leasePeriod = 1000;
@@ -894,6 +999,7 @@ public class TestFileCreation extends ju
   }
 
   // test closing file system before all file handles are closed.
+  @Test
   public void testFsClose() throws Exception {
     System.out.println("test file system close start");
     final int DATANODE_NUM = 3;
@@ -922,6 +1028,7 @@ public class TestFileCreation extends ju
   }
 
   // test closing file after cluster is shutdown
+  @Test
   public void testFsCloseAfterClusterShutdown() throws IOException {
     System.out.println("test testFsCloseAfterClusterShutdown start");
     final int DATANODE_NUM = 3;
@@ -966,4 +1073,93 @@ public class TestFileCreation extends ju
       }
     }
   }
+
+  /**
+   * Regression test for HDFS-3626. Creates a file using a non-canonical path
+   * (i.e. with extra slashes between components) and makes sure that the NN
+   * can properly restart.
+   * 
+   * This test RPCs directly to the NN, to ensure that even an old client
+   * which passes an invalid path won't cause corrupt edits.
+   */
+  @Test
+  public void testCreateNonCanonicalPathAndRestartRpc() throws Exception {
+    doCreateTest(CreationMethod.DIRECT_NN_RPC);
+  }
+  
+  /**
+   * Another regression test for HDFS-3626. This one creates files using
+   * a Path instantiated from a string object.
+   */
+  @Test
+  public void testCreateNonCanonicalPathAndRestartFromString()
+      throws Exception {
+    doCreateTest(CreationMethod.PATH_FROM_STRING);
+  }
+
+  /**
+   * Another regression test for HDFS-3626. This one creates files using
+   * a Path instantiated from a URI object.
+   */
+  @Test
+  public void testCreateNonCanonicalPathAndRestartFromUri()
+      throws Exception {
+    doCreateTest(CreationMethod.PATH_FROM_URI);
+  }
+  
+  private static enum CreationMethod {
+    DIRECT_NN_RPC,
+    PATH_FROM_URI,
+    PATH_FROM_STRING
+  };
+  private void doCreateTest(CreationMethod method) throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1).build();
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
+
+      for (String pathStr : NON_CANONICAL_PATHS) {
+        System.out.println("Creating " + pathStr + " by " + method);
+        switch (method) {
+        case DIRECT_NN_RPC:
+          try {
+            nnrpc.create(pathStr, new FsPermission((short)0755), "client",
+                new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
+                true, (short)1, 128*1024*1024L);
+            fail("Should have thrown exception when creating '"
+                + pathStr + "'" + " by " + method);
+          } catch (InvalidPathException ipe) {
+            // When we create by direct NN RPC, the NN just rejects the
+            // non-canonical paths, rather than trying to normalize them.
+            // So, we expect all of them to fail. 
+          }
+          break;
+          
+        case PATH_FROM_URI:
+        case PATH_FROM_STRING:
+          // Unlike the above direct-to-NN case, we expect these to succeed,
+          // since the Path constructor should normalize the path.
+          Path p;
+          if (method == CreationMethod.PATH_FROM_URI) {
+            p = new Path(new URI(fs.getUri() + pathStr));
+          } else {
+            p = new Path(fs.getUri() + pathStr);  
+          }
+          FSDataOutputStream stm = fs.create(p);
+          IOUtils.closeStream(stm);
+          break;
+        default:
+          throw new AssertionError("bad method: " + method);
+        }
+      }
+      
+      cluster.restartNameNode();
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
 }

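Among the TestFileCreation changes above, checkFileCreation gains a useDnHostname flag so the same test also exercises clients that reach datanodes by hostname rather than IP. A sketch of just the configuration that path sets up, using only the key and builder names visible in the diff; the fragment assumes the usual test imports (Configuration, HdfsConfiguration, DFSConfigKeys, MiniDFSCluster) and is not a complete test:

    Configuration conf = new HdfsConfiguration();
    // Have the client resolve datanodes by their advertised hostname.
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, true);
    // The mini cluster only listens on the loopback, so the DN must advertise
    // a hostname that maps back to it.
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .checkDataNodeHostConfig(true)
        .build();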
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationClient.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
@@ -30,9 +32,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.log4j.Level;
-
 import org.junit.Test;
-import static org.junit.Assert.assertEquals;
 
 /**
  * This class tests client lease recovery.
@@ -125,6 +125,7 @@ public class TestFileCreationClient {
       this.filepath = filepath;
     }
 
+    @Override
     public void run() {
       FSDataOutputStream out = null;
       int i = 0;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationDelete.java Fri Oct 19 02:25:55 2012
@@ -16,6 +16,7 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 
@@ -29,14 +30,16 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.log4j.Level;
+import org.junit.Test;
 
-public class TestFileCreationDelete extends junit.framework.TestCase {
+public class TestFileCreationDelete {
   {
     ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger)LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
   }
 
+  @Test
   public void testFileCreationDeleteParent() throws IOException {
     Configuration conf = new HdfsConfiguration();
     final int MAX_IDLE_TIME = 2000; // 2s

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreationEmpty.java Fri Oct 19 02:25:55 2012
@@ -16,17 +16,19 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertFalse;
 
 import java.util.ConcurrentModificationException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.junit.Test;
 
 /**
  * Test empty file creation.
  */
-public class TestFileCreationEmpty extends junit.framework.TestCase {
+public class TestFileCreationEmpty {
   private boolean isConcurrentModificationException = false;
 
   /**
@@ -35,9 +37,11 @@ public class TestFileCreationEmpty exten
    * The empty files are supposed to be closed by that 
    * without causing ConcurrentModificationException.
    */
+  @Test
   public void testLeaseExpireEmptyFiles() throws Exception {
     final Thread.UncaughtExceptionHandler oldUEH = Thread.getDefaultUncaughtExceptionHandler();
     Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
+      @Override
       public void uncaughtException(Thread t, Throwable e) {
         if (e instanceof ConcurrentModificationException) {
           LeaseManager.LOG.error("t=" + t, e);

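The testLeaseExpireEmptyFiles hunk above relies on a default uncaught-exception handler because the ConcurrentModificationException it guards against would surface on the lease-expiry background thread, where a plain assert cannot fail the test. A minimal sketch of that install-and-restore pattern, independent of the HDFS specifics; it assumes the imports already added in the diff (java.util.ConcurrentModificationException, static org.junit.Assert.assertFalse) plus java.util.concurrent.atomic.AtomicBoolean:

    final AtomicBoolean sawCme = new AtomicBoolean(false);
    Thread.UncaughtExceptionHandler previous =
        Thread.getDefaultUncaughtExceptionHandler();
    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
      @Override
      public void uncaughtException(Thread t, Throwable e) {
        if (e instanceof ConcurrentModificationException) {
          sawCme.set(true);   // record it; the background thread cannot fail the test itself
        }
      }
    });
    try {
      // ... start the cluster, write empty files, let the leases expire ...
    } finally {
      Thread.setDefaultUncaughtExceptionHandler(previous);  // restore for later tests
    }
    assertFalse(sawCme.get());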
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java Fri Oct 19 02:25:55 2012
@@ -25,6 +25,7 @@ import static org.junit.Assert.fail;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -78,7 +79,8 @@ public class TestFileStatus {
     hftpfs = cluster.getHftpFileSystem(0);
     dfsClient = new DFSClient(NameNode.getAddress(conf), conf);
     file1 = new Path("filestatus.dat");
-    writeFile(fs, file1, 1, fileSize, blockSize);
+    DFSTestUtil.createFile(fs, file1, fileSize, fileSize, blockSize, (short) 1,
+        seed);
   }
   
   @AfterClass
@@ -86,21 +88,9 @@ public class TestFileStatus {
     fs.close();
     cluster.shutdown();
   }
-
-  private static void writeFile(FileSystem fileSys, Path name, int repl,
-      int fileSize, int blockSize) throws IOException {
-    // Create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true,
-        HdfsConstants.IO_FILE_BUFFER_SIZE, (short)repl, (long)blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
   
   private void checkFile(FileSystem fileSys, Path name, int repl)
-      throws IOException {
+      throws IOException, InterruptedException, TimeoutException {
     DFSTestUtil.waitReplication(fileSys, name, (short) repl);
   }
   
@@ -129,7 +119,7 @@ public class TestFileStatus {
 
   /** Test the FileStatus obtained calling getFileStatus on a file */  
   @Test
-  public void testGetFileStatusOnFile() throws IOException {
+  public void testGetFileStatusOnFile() throws Exception {
     checkFile(fs, file1, 1);
     // test getFileStatus on a file
     FileStatus status = fs.getFileStatus(file1);
@@ -217,7 +207,8 @@ public class TestFileStatus {
 
     // create another file that is smaller than a block.
     Path file2 = new Path(dir, "filestatus2.dat");
-    writeFile(fs, file2, 1, blockSize/4, blockSize);
+    DFSTestUtil.createFile(fs, file2, blockSize/4, blockSize/4, blockSize,
+        (short) 1, seed);
     checkFile(fs, file2, 1);
     
     // verify file attributes
@@ -229,7 +220,8 @@ public class TestFileStatus {
 
     // Create another file in the same directory
     Path file3 = new Path(dir, "filestatus3.dat");
-    writeFile(fs, file3, 1, blockSize/4, blockSize);
+    DFSTestUtil.createFile(fs, file3, blockSize/4, blockSize/4, blockSize,
+        (short) 1, seed);
     checkFile(fs, file3, 1);
     file3 = fs.makeQualified(file3);