Posted to hdfs-commits@hadoop.apache.org by ac...@apache.org on 2012/09/26 20:32:25 UTC

svn commit: r1390642 - in /hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs: ./ src/test/java/org/apache/hadoop/hdfs/ src/test/java/org/apache/hadoop/hdfs/server/balancer/ src/test/java/org/apache/hadoop/hdfs/server/blockmanage...

Author: acmurthy
Date: Wed Sep 26 18:32:24 2012
New Revision: 1390642

URL: http://svn.apache.org/viewvc?rev=1390642&view=rev
Log:
HDFS-3910. Revert from branch-2.0.2-alpha.

Modified:
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
    hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Sep 26 18:32:24 2012
@@ -252,8 +252,6 @@ Release 2.0.2-alpha - 2012-09-07 
 
     HDFS-3907. Allow multiple users for local block readers. (eli)
 
-    HDFS-3910. DFSTestUtil#waitReplication should timeout. (eli)
-
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Wed Sep 26 18:32:24 2012
@@ -273,7 +273,7 @@ public class DFSTestUtil {
    * specified target.
    */
   public void waitReplication(FileSystem fs, String topdir, short value) 
-      throws IOException, InterruptedException, TimeoutException {
+                                              throws IOException {
     Path root = new Path(topdir);
 
     /** wait for the replication factor to settle down */
@@ -498,44 +498,36 @@ public class DFSTestUtil {
       return fileNames;
     }
   }
-
-  /**
-   * Wait for the given file to reach the given replication factor.
-   * @throws TimeoutException if we fail to sufficiently replicate the file
-   */
-  public static void waitReplication(FileSystem fs, Path fileName, short replFactor)
-      throws IOException, InterruptedException, TimeoutException {
-    boolean correctReplFactor;
-    final int ATTEMPTS = 20;
-    int count = 0;
-
+  
+  /** wait for the file's replication to be done */
+  public static void waitReplication(FileSystem fs, Path fileName, 
+      short replFactor)  throws IOException {
+    boolean good;
     do {
-      correctReplFactor = true;
+      good = true;
       BlockLocation locs[] = fs.getFileBlockLocations(
         fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
-      count++;
       for (int j = 0; j < locs.length; j++) {
         String[] hostnames = locs[j].getNames();
         if (hostnames.length != replFactor) {
-          correctReplFactor = false;
-          System.out.println("Block " + j + " of file " + fileName
-              + " has replication factor " + hostnames.length
-              + " (desired " + replFactor + "); locations "
-              + Joiner.on(' ').join(hostnames));
-          Thread.sleep(1000);
+          String hostNameList = "";
+          for (String h : hostnames) hostNameList += h + " ";
+          System.out.println("Block " + j + " of file " + fileName 
+              + " has replication factor " + hostnames.length + "; locations "
+              + hostNameList);
+          good = false;
+          try {
+            System.out.println("Waiting for replication factor to drain");
+            Thread.sleep(100);
+          } catch (InterruptedException e) {} 
           break;
         }
       }
-      if (correctReplFactor) {
+      if (good) {
         System.out.println("All blocks of file " + fileName
             + " verified to have replication factor " + replFactor);
       }
-    } while (!correctReplFactor && count < ATTEMPTS);
-
-    if (count == ATTEMPTS) {
-      throw new TimeoutException("Timed out waiting for " + fileName +
-          " to reach " + replFactor + " replicas");
-    }
+    } while(!good);
   }
   
   /** delete directory and everything underneath it.*/

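Note on the DFSTestUtil.java hunk above: the revert replaces the bounded loop that HDFS-3910 introduced (20 attempts, 1-second sleeps, TimeoutException on failure) with the original unbounded loop that sleeps 100 ms and retries forever. Below is a minimal, self-contained sketch of the bounded-wait pattern being removed; the helper name, the BooleanSupplier predicate, and the constants are illustrative only, not the actual HDFS test code.

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

public final class BoundedWait {

  /**
   * Polls the condition once per second, up to maxAttempts times.
   * Throws TimeoutException if the condition never becomes true, so a
   * stuck test fails with a clear message instead of hanging forever,
   * as the unbounded loop restored above can.
   */
  public static void waitFor(BooleanSupplier condition, int maxAttempts,
      String description) throws InterruptedException, TimeoutException {
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      if (condition.getAsBoolean()) {
        return;
      }
      Thread.sleep(1000);
    }
    throw new TimeoutException("Timed out waiting for " + description);
  }
}

A caller would use it along the lines of waitFor(() -> replicaCount(fs, path) == expected, 20, path + " to reach " + expected + " replicas"), mirroring the ATTEMPTS/TimeoutException structure visible in the removed lines.
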
Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java Wed Sep 26 18:32:24 2012
@@ -61,7 +61,7 @@ public class TestBlockReaderLocal {
    * of this class might immediately issue a retry on failure, so it's polite.
    */
   @Test
-  public void testStablePositionAfterCorruptRead() throws Exception {
+  public void testStablePositionAfterCorruptRead() throws IOException {
     final short REPL_FACTOR = 1;
     final long FILE_LENGTH = 512L;
     cluster.waitActive();

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java Wed Sep 26 18:32:24 2012
@@ -24,7 +24,6 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.io.RandomAccessFile;
 import java.util.Random;
-import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -200,11 +199,11 @@ public class TestClientReportBadBlock {
   }
 
   /**
-   * Create a file with one block and corrupt some/all of the block replicas.
+   * create a file with one block and corrupt some/all of the block replicas.
    */
   private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
       int corruptBlockCount) throws IOException, AccessControlException,
-      FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
+      FileNotFoundException, UnresolvedLinkException {
     DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
     DFSTestUtil.waitReplication(dfs, filePath, repl);
     // Locate the file blocks by asking name node

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Wed Sep 26 18:32:24 2012
@@ -1037,7 +1037,8 @@ public class TestDFSClientRetries {
    * way. See HDFS-3067.
    */
   @Test
-  public void testRetryOnChecksumFailure() throws Exception {
+  public void testRetryOnChecksumFailure()
+      throws UnresolvedLinkException, IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     MiniDFSCluster cluster =
       new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Wed Sep 26 18:32:24 2012
@@ -182,7 +182,7 @@ public class TestDatanodeBlockScanner {
   }
 
   @Test
-  public void testBlockCorruptionPolicy() throws Exception {
+  public void testBlockCorruptionPolicy() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     Random random = new Random();

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java Wed Sep 26 18:32:24 2012
@@ -25,7 +25,6 @@ import static org.junit.Assert.fail;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.Random;
-import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -101,7 +100,7 @@ public class TestFileStatus {
   }
   
   private void checkFile(FileSystem fileSys, Path name, int repl)
-      throws IOException, InterruptedException, TimeoutException {
+      throws IOException {
     DFSTestUtil.waitReplication(fileSys, name, (short) repl);
   }
   
@@ -130,7 +129,7 @@ public class TestFileStatus {
 
   /** Test the FileStatus obtained calling getFileStatus on a file */  
   @Test
-  public void testGetFileStatusOnFile() throws Exception {
+  public void testGetFileStatusOnFile() throws IOException {
     checkFile(fs, file1, 1);
     // test getFileStatus on a file
     FileStatus status = fs.getFileStatus(file1);

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java Wed Sep 26 18:32:24 2012
@@ -27,7 +27,6 @@ import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
 import java.util.Iterator;
 import java.util.Random;
-import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -421,8 +420,8 @@ public class TestReplication {
     }
   }
   
-  private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
-      throws IOException, InterruptedException, TimeoutException {
+  private void changeBlockLen(MiniDFSCluster cluster, 
+      int lenDelta) throws IOException, InterruptedException {
     final Path fileName = new Path("/file1");
     final short REPLICATION_FACTOR = (short)1;
     final FileSystem fs = cluster.getFileSystem();

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Wed Sep 26 18:32:24 2012
@@ -88,7 +88,7 @@ public class TestBalancer {
   /* create a file with a length of <code>fileLen</code> */
   static void createFile(MiniDFSCluster cluster, Path filePath, long fileLen,
       short replicationFactor, int nnIndex)
-  throws IOException, InterruptedException, TimeoutException {
+  throws IOException {
     FileSystem fs = cluster.getFileSystem(nnIndex);
     DFSTestUtil.createFile(fs, filePath, fileLen, 
         replicationFactor, r.nextLong());
@@ -100,7 +100,7 @@ public class TestBalancer {
    * whose used space to be <code>size</code>
    */
   private ExtendedBlock[] generateBlocks(Configuration conf, long size,
-      short numNodes) throws IOException, InterruptedException, TimeoutException {
+      short numNodes) throws IOException {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
     try {
       cluster.waitActive();

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java Wed Sep 26 18:32:24 2012
@@ -23,7 +23,6 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Random;
-import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -97,7 +96,7 @@ public class TestBalancerWithMultipleNam
 
   /* create a file with a length of <code>fileLen</code> */
   private static void createFile(Suite s, int index, long len
-      ) throws IOException, InterruptedException, TimeoutException {
+      ) throws IOException {
     final FileSystem fs = s.cluster.getFileSystem(index);
     DFSTestUtil.createFile(fs, FILE_PATH, len, s.replication, RANDOM.nextLong());
     DFSTestUtil.waitReplication(fs, FILE_PATH, s.replication);
@@ -107,7 +106,7 @@ public class TestBalancerWithMultipleNam
    * whose used space to be <code>size</code>
    */
   private static ExtendedBlock[][] generateBlocks(Suite s, long size
-      ) throws IOException, InterruptedException, TimeoutException {
+      ) throws IOException {
     final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
     for(int n = 0; n < s.clients.length; n++) {
       final long fileLen = size/s.replication;

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Wed Sep 26 18:32:24 2012
@@ -53,7 +53,7 @@ public class TestOverReplicatedBlocks {
    * corrupt ones.
    */
   @Test
-  public void testProcesOverReplicateBlock() throws Exception {
+  public void testProcesOverReplicateBlock() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(
@@ -141,7 +141,7 @@ public class TestOverReplicatedBlocks {
    * send heartbeats. 
    */
   @Test
-  public void testChooseReplicaToDelete() throws Exception {
+  public void testChooseReplicaToDelete() throws IOException {
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
     try {

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Wed Sep 26 18:32:24 2012
@@ -89,7 +89,7 @@ public class TestBlockReplacement {
   }
   
   @Test
-  public void testBlockReplacement() throws Exception {
+  public void testBlockReplacement() throws IOException, TimeoutException {
     final Configuration CONF = new HdfsConfiguration();
     final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
     final String[] NEW_RACKS = {"/RACK2"};

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Wed Sep 26 18:32:24 2012
@@ -27,9 +27,6 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeoutException;
-
-import junit.framework.Assert;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -68,7 +65,7 @@ import org.mockito.invocation.Invocation
 
 /**
  * This test simulates a variety of situations when blocks are being
- * intentionally corrupted, unexpectedly modified, and so on before a block
+ * intentionally orrupted, unexpectedly modified, and so on before a block
  * report is happening
  */
 public class TestBlockReport {
@@ -319,7 +316,7 @@ public class TestBlockReport {
    * @throws IOException in case of an error
    */
   @Test
-  public void blockReport_06() throws Exception {
+  public void blockReport_06() throws IOException {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     final int DN_N1 = DN_N0 + 1;
@@ -356,7 +353,7 @@ public class TestBlockReport {
   @Test
   // Currently this test is failing as expected 'cause the correct behavior is
   // not yet implemented (9/15/09)
-  public void blockReport_07() throws Exception {
+  public void blockReport_07() throws IOException {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     final int DN_N1 = DN_N0 + 1;
@@ -673,24 +670,21 @@ public class TestBlockReport {
   }
 
   private void startDNandWait(Path filePath, boolean waitReplicas) 
-      throws IOException, InterruptedException, TimeoutException {
-    if (LOG.isDebugEnabled()) {
+    throws IOException {
+    if(LOG.isDebugEnabled()) {
       LOG.debug("Before next DN start: " + cluster.getDataNodes().size());
     }
     cluster.startDataNodes(conf, 1, true, null, null);
-    cluster.waitClusterUp();
     ArrayList<DataNode> datanodes = cluster.getDataNodes();
     assertEquals(datanodes.size(), 2);
 
-    if (LOG.isDebugEnabled()) {
+    if(LOG.isDebugEnabled()) {
       int lastDn = datanodes.size() - 1;
       LOG.debug("New datanode "
           + cluster.getDataNodes().get(lastDn).getDisplayName() 
           + " has been started");
     }
-    if (waitReplicas) {
-      DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
-    }
+    if (waitReplicas) DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
   }
 
   private ArrayList<Block> prepareForRide(final Path filePath,
@@ -842,9 +836,8 @@ public class TestBlockReport {
     public void run() {
       try {
         startDNandWait(filePath, true);
-      } catch (Exception e) {
-        e.printStackTrace();
-        Assert.fail("Failed to start BlockChecker: " + e);
+      } catch (IOException e) {
+        LOG.warn("Shouldn't happen", e);
       }
     }
   }

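Note on the TestBlockReport hunk above: besides dropping the cluster.waitClusterUp() call, the revert changes the helper thread's catch block from failing the test via Assert.fail back to merely logging the IOException. Below is a general sketch of one way a worker thread can hand its failure back to the thread that started it; the class name and the simulated exception are illustrative, not the Hadoop code.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicReference;

public class WorkerFailureExample {
  public static void main(String[] args) throws Exception {
    // Records any throwable raised on the worker thread.
    final AtomicReference<Throwable> failure = new AtomicReference<Throwable>();
    Thread worker = new Thread(new Runnable() {
      @Override
      public void run() {
        try {
          // Stand-in for the real work (e.g. starting a DataNode and
          // waiting for replication); here it simply throws.
          throw new IOException("simulated failure");
        } catch (Throwable t) {
          failure.set(t);
        }
      }
    });
    worker.start();
    worker.join();
    // Back on the starting thread: surface the worker's failure so the
    // test (or program) actually fails instead of silently passing.
    if (failure.get() != null) {
      throw new AssertionError("Worker failed: " + failure.get());
    }
  }
}
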
Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Wed Sep 26 18:32:24 2012
@@ -105,7 +105,7 @@ public class TestDataNodeVolumeFailure {
    * failure if the configuration parameter allows this.
    */
   @Test
-  public void testVolumeFailure() throws Exception {
+  public void testVolumeFailure() throws IOException {
     FileSystem fs = cluster.getFileSystem();
     dataDir = new File(cluster.getDataDirectory());
     System.out.println("Data dir: is " +  dataDir.getPath());

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java Wed Sep 26 18:32:24 2012
@@ -137,7 +137,7 @@ public class TestDatanodeRestart {
   }
 
   // test recovering unlinked tmp replicas
-  @Test public void testRecoverReplicas() throws Exception {
+  @Test public void testRecoverReplicas() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Wed Sep 26 18:32:24 2012
@@ -114,7 +114,7 @@ public class TestFSEditLogLoader {
    * automatically bumped up to the new minimum upon restart.
    */
   @Test
-  public void testReplicationAdjusted() throws Exception {
+  public void testReplicationAdjusted() throws IOException {
     // start a cluster 
     Configuration conf = new HdfsConfiguration();
     // Replicate and heartbeat fast to shave a few seconds off test

Modified: hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java?rev=1390642&r1=1390641&r2=1390642&view=diff
==============================================================================
--- hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java (original)
+++ hadoop/common/branches/branch-2.0.2-alpha/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java Wed Sep 26 18:32:24 2012
@@ -53,7 +53,7 @@ public class TestProcessCorruptBlocks {
    *      replicas (2) is equal to replication factor (2))
    */
   @Test
-  public void testWhenDecreasingReplication() throws Exception {
+  public void testWhenDecreasingReplication() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -108,7 +108,7 @@ public class TestProcessCorruptBlocks {
    * 
    */
   @Test
-  public void testByAddingAnExtraDataNode() throws Exception {
+  public void testByAddingAnExtraDataNode() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -159,7 +159,7 @@ public class TestProcessCorruptBlocks {
    *      replicas (1) is equal to replication factor (1))
    */
   @Test
-  public void testWithReplicationFactorAsOne() throws Exception {
+  public void testWithReplicationFactorAsOne() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -208,7 +208,7 @@ public class TestProcessCorruptBlocks {
    *    Verify that all replicas are corrupt and 3 replicas are present.
    */
   @Test
-  public void testWithAllCorruptReplicas() throws Exception {
+  public void testWithAllCorruptReplicas() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));