Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/10/19 04:28:07 UTC

svn commit: r1399950 [17/27] - in /hadoop/common/branches/HDFS-2802/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apac...

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java Fri Oct 19 02:25:55 2012
@@ -17,52 +17,409 @@
  */
 package org.apache.hadoop.fs;
 
+import static org.junit.Assert.*;
+
 import java.io.IOException;
 import java.util.regex.Pattern;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.*;
 
-import junit.framework.TestCase;
+public class TestGlobPaths {
 
-public class TestGlobPaths extends TestCase {
-  
   static class RegexPathFilter implements PathFilter {
-    
+
     private final String regex;
     public RegexPathFilter(String regex) {
       this.regex = regex;
     }
 
+    @Override
     public boolean accept(Path path) {
       return path.toString().matches(regex);
     }
 
   }
-  
+
   static private MiniDFSCluster dfsCluster;
   static private FileSystem fs;
   static final private int NUM_OF_PATHS = 4;
-  static final String USER_DIR = "/user/"+System.getProperty("user.name");
+  static private String USER_DIR;
   private Path[] path = new Path[NUM_OF_PATHS];
-  
-  protected void setUp() throws Exception {
-    try {
-      Configuration conf = new HdfsConfiguration();
-      dfsCluster = new MiniDFSCluster.Builder(conf).build();
-      fs = FileSystem.get(conf);
-    } catch (IOException e) {
-      e.printStackTrace();
-    }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    dfsCluster = new MiniDFSCluster.Builder(conf).build();
+    fs = FileSystem.get(conf);
+    USER_DIR = fs.getHomeDirectory().toUri().getPath().toString();
   }
   
-  protected void tearDown() throws Exception {
+  @AfterClass
+  public static void tearDown() throws Exception {
     if(dfsCluster!=null) {
       dfsCluster.shutdown();
     }
   }
+
+  @Test
+  public void testMultiGlob() throws IOException {
+    FileStatus[] status;
+    /*
+     *  /dir1/subdir1
+     *  /dir1/subdir1/f1
+     *  /dir1/subdir1/f2
+     *  /dir1/subdir2/f1
+     *  /dir2/subdir1
+     *  /dir2/subdir2
+     *  /dir2/subdir2/f1
+     *  /dir3/f1
+     *  /dir3/f2(dir)
+     *  /dir3/subdir2(file)
+     *  /dir3/subdir3
+     *  /dir3/subdir3/f1
+     *  /dir3/subdir3/f1/f1
+     *  /dir3/subdir3/f3
+     *  /dir4
+     */
+
+    Path d1 = new Path(USER_DIR, "dir1");
+    Path d11 = new Path(d1, "subdir1");
+    Path d12 = new Path(d1, "subdir2");
+    
+    Path f111 = new Path(d11, "f1");
+    fs.createNewFile(f111);
+    Path f112 = new Path(d11, "f2");
+    fs.createNewFile(f112);
+    Path f121 = new Path(d12, "f1");
+    fs.createNewFile(f121);
+    
+    Path d2 = new Path(USER_DIR, "dir2");
+    Path d21 = new Path(d2, "subdir1");
+    fs.mkdirs(d21);
+    Path d22 = new Path(d2, "subdir2");
+    Path f221 = new Path(d22, "f1");
+    fs.createNewFile(f221);
+
+    Path d3 = new Path(USER_DIR, "dir3");
+    Path f31 = new Path(d3, "f1");
+    fs.createNewFile(f31);
+    Path d32 = new Path(d3, "f2");
+    fs.mkdirs(d32);
+    Path f32 = new Path(d3, "subdir2"); // fake as a subdir!
+    fs.createNewFile(f32);
+    Path d33 = new Path(d3, "subdir3");
+    Path f333 = new Path(d33, "f3");
+    fs.createNewFile(f333);
+    Path d331 = new Path(d33, "f1");
+    Path f3311 = new Path(d331, "f1");
+    fs.createNewFile(f3311);
+    Path d4 = new Path(USER_DIR, "dir4");
+    fs.mkdirs(d4);
+
+    /*
+     * basic 
+     */
+    Path root = new Path(USER_DIR);
+    status = fs.globStatus(root);
+    checkStatus(status, root);
+    
+    status = fs.globStatus(new Path(USER_DIR, "x"));
+    assertNull(status);
+
+    status = fs.globStatus(new Path("x"));
+    assertNull(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "x/x"));
+    assertNull(status);
+
+    status = fs.globStatus(new Path("x/x"));
+    assertNull(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "*"));
+    checkStatus(status, d1, d2, d3, d4);
+
+    status = fs.globStatus(new Path("*"));
+    checkStatus(status, d1, d2, d3, d4);
+
+    status = fs.globStatus(new Path(USER_DIR, "*/x"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("*/x"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "x/*"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("x/*"));
+    checkStatus(status);
+
+    // make sure full pattern is scanned instead of bailing early with undef
+    status = fs.globStatus(new Path(USER_DIR, "x/x/x/*"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("x/x/x/*"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "*/*"));
+    checkStatus(status, d11, d12, d21, d22, f31, d32, f32, d33);
+
+    status = fs.globStatus(new Path("*/*"));
+    checkStatus(status, d11, d12, d21, d22, f31, d32, f32, d33);
+
+    /*
+     * one level deep
+     */
+    status = fs.globStatus(new Path(USER_DIR, "dir*/*"));
+    checkStatus(status, d11, d12, d21, d22, f31, d32, f32, d33);
+
+    status = fs.globStatus(new Path("dir*/*"));
+    checkStatus(status, d11, d12, d21, d22, f31, d32, f32, d33);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*"));
+    checkStatus(status, d11, d12, d21, d22, f32, d33);
+
+    status = fs.globStatus(new Path("dir*/subdir*"));
+    checkStatus(status, d11, d12, d21, d22, f32, d33);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/f*"));
+    checkStatus(status, f31, d32);
+
+    status = fs.globStatus(new Path("dir*/f*"));
+    checkStatus(status, f31, d32);
+
+    /*
+     * subdir1 globs
+     */
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1"));
+    checkStatus(status, d11, d21);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/*"));
+    checkStatus(status, f111, f112);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/*/*"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/x"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/x*"));
+    checkStatus(status);
+
+    /*
+     * subdir2 globs
+     */
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir2"));
+    checkStatus(status, d12, d22, f32);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir2/*"));
+    checkStatus(status, f121, f221);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir2/*/*"));
+    checkStatus(status);
+
+    /*
+     * subdir3 globs
+     */
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir3"));
+    checkStatus(status, d33);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir3/*"));
+    checkStatus(status, d331, f333); 
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir3/*/*"));
+    checkStatus(status, f3311);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir3/*/*/*"));
+    checkStatus(status);
+
+    /*
+     * file1 single dir globs
+     */    
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f1"));
+    checkStatus(status, f111);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f1*"));
+    checkStatus(status, f111);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f1/*"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f1*/*"));
+    checkStatus(status);
+
+    /*
+     * file1 multi-dir globs
+     */
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1"));
+    checkStatus(status, f111, f121, f221, d331);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*"));
+    checkStatus(status, f111, f121, f221, d331);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1/*"));
+    checkStatus(status, f3311);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*/*"));
+    checkStatus(status, f3311);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*/*"));
+    checkStatus(status, f3311);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*/x"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f1*/*/*"));
+    checkStatus(status);
+
+    /*
+     *  file glob multiple files
+     */
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*"));
+    checkStatus(status, d11, d12, d21, d22, f32, d33);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/*"));
+    checkStatus(status, f111, f112, f121, f221, d331, f333); 
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f*"));
+    checkStatus(status, f111, f112, f121, f221, d331, f333);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/f*/*"));
+    checkStatus(status, f3311);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/*/f1"));
+    checkStatus(status, f3311); 
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir*/*/*"));
+    checkStatus(status, f3311); 
+
+
+    // doesn't exist
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f3"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path(USER_DIR, "dir*/subdir1/f3*"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("{x}"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("{x,y}"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("dir*/{x,y}"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("dir*/{f1,y}"));
+    checkStatus(status, f31);
+
+    status = fs.globStatus(new Path("{x,y}"));
+    checkStatus(status);
+    
+    status = fs.globStatus(new Path("/{x/x,y/y}"));
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("{x/x,y/y}"));
+    checkStatus(status);
+    
+    status = fs.globStatus(new Path(Path.CUR_DIR));
+    checkStatus(status, new Path(USER_DIR));
+
+    status = fs.globStatus(new Path(USER_DIR+"{/dir1}"));
+    checkStatus(status, d1);
+
+    status = fs.globStatus(new Path(USER_DIR+"{/dir*}"));
+    checkStatus(status, d1, d2, d3, d4);
+
+    /* 
+     * true filter
+     */
+
+    PathFilter trueFilter = new PathFilter() {
+      @Override
+      public boolean accept(Path path) {
+        return true;
+      }
+    };
+
+    status = fs.globStatus(new Path(Path.SEPARATOR), trueFilter);
+    checkStatus(status, new Path(Path.SEPARATOR));
+    
+    status = fs.globStatus(new Path(Path.CUR_DIR), trueFilter);
+    checkStatus(status, new Path(USER_DIR));    
+
+    status = fs.globStatus(d1, trueFilter);
+    checkStatus(status, d1);
+
+    status = fs.globStatus(new Path(USER_DIR), trueFilter);
+    checkStatus(status, new Path(USER_DIR));
+
+    status = fs.globStatus(new Path(USER_DIR, "*"), trueFilter);
+    checkStatus(status, d1, d2, d3, d4);
+
+    status = fs.globStatus(new Path("/x/*"), trueFilter);
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("/x"), trueFilter);
+    assertNull(status);
+
+    status = fs.globStatus(new Path("/x/x"), trueFilter);
+    assertNull(status);
+    
+    /*
+     * false filter
+     */
+    PathFilter falseFilter = new PathFilter() {
+      @Override
+      public boolean accept(Path path) {
+        return false;
+      }
+    };
+
+    status = fs.globStatus(new Path(Path.SEPARATOR), falseFilter);
+    assertNull(status);
+    
+    status = fs.globStatus(new Path(Path.CUR_DIR), falseFilter);
+    assertNull(status);    
+    
+    status = fs.globStatus(new Path(USER_DIR), falseFilter);
+    assertNull(status);
+    
+    status = fs.globStatus(new Path(USER_DIR, "*"), falseFilter);
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("/x/*"), falseFilter);
+    checkStatus(status);
+
+    status = fs.globStatus(new Path("/x"), falseFilter);
+    assertNull(status);
+
+    status = fs.globStatus(new Path("/x/x"), falseFilter);
+    assertNull(status);
+  }
+  
+  private void checkStatus(FileStatus[] status, Path ... expectedMatches) {
+    assertNotNull(status);
+    String[] paths = new String[status.length];
+    for (int i=0; i < status.length; i++) {
+      paths[i] = getPathFromStatus(status[i]);
+    }
+    String got = StringUtils.join(paths, "\n");
+    String expected = StringUtils.join(expectedMatches, "\n");
+    assertEquals(expected, got);
+  }
+
+  private String getPathFromStatus(FileStatus status) {
+    return status.getPath().toUri().getPath();
+  }
   
+  
+  @Test
   public void testPathFilter() throws IOException {
     try {
       String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b" };
@@ -75,6 +432,7 @@ public class TestGlobPaths extends TestC
     }
   }
   
+  @Test
   public void testPathFilterWithFixedLastComponent() throws IOException {
     try {
       String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b",
@@ -88,21 +446,8 @@ public class TestGlobPaths extends TestC
     }
   }
   
-  public void testGlob() throws Exception {
-    //pTestEscape(); // need to wait until HADOOP-1995 is fixed
-    pTestJavaRegexSpecialChars();
-    pTestCurlyBracket();
-    pTestLiteral();
-    pTestAny();
-    pTestClosure();
-    pTestSet();
-    pTestRange();
-    pTestSetExcl();
-    pTestCombination();
-    pTestRelativePath();
-  }
-  
-  private void pTestLiteral() throws IOException {
+  @Test
+  public void pTestLiteral() throws IOException {
     try {
       String [] files = new String[] {USER_DIR+"/a2c", USER_DIR+"/abc.d"};
       Path[] matchedPath = prepareTesting(USER_DIR+"/abc.d", files);
@@ -113,7 +458,8 @@ public class TestGlobPaths extends TestC
     }
   }
   
-  private void pTestEscape() throws IOException {
+  @Test
+  public void pTestEscape() throws IOException {
     try {
       String [] files = new String[] {USER_DIR+"/ab\\[c.d"};
       Path[] matchedPath = prepareTesting(USER_DIR+"/ab\\[c.d", files);
@@ -124,7 +470,8 @@ public class TestGlobPaths extends TestC
     }
   }
   
-  private void pTestAny() throws IOException {
+  @Test
+  public void pTestAny() throws IOException {
     try {
       String [] files = new String[] { USER_DIR+"/abc", USER_DIR+"/a2c",
                                        USER_DIR+"/a.c", USER_DIR+"/abcd"};
@@ -138,15 +485,8 @@ public class TestGlobPaths extends TestC
     }
   }
   
-  private void pTestClosure() throws IOException {
-    pTestClosure1();
-    pTestClosure2();
-    pTestClosure3();
-    pTestClosure4();
-    pTestClosure5();
-  }
-  
-  private void pTestClosure1() throws IOException {
+  @Test
+  public void pTestClosure1() throws IOException {
     try {
       String [] files = new String[] {USER_DIR+"/a", USER_DIR+"/abc",
                                       USER_DIR+"/abc.p", USER_DIR+"/bacd"};
@@ -160,7 +500,8 @@ public class TestGlobPaths extends TestC
     }
   }
   
-  private void pTestClosure2() throws IOException {
+  @Test
+  public void pTestClosure2() throws IOException {
     try {
       String [] files = new String[] {USER_DIR+"/a.", USER_DIR+"/a.txt",
                                      USER_DIR+"/a.old.java", USER_DIR+"/.java"};
@@ -174,7 +515,8 @@ public class TestGlobPaths extends TestC
     }
   }
   
-  private void pTestClosure3() throws IOException {
+  @Test
+  public void pTestClosure3() throws IOException {
     try {    
       String [] files = new String[] {USER_DIR+"/a.txt.x", USER_DIR+"/ax",
                                       USER_DIR+"/ab37x", USER_DIR+"/bacd"};
@@ -188,7 +530,8 @@ public class TestGlobPaths extends TestC
     } 
   }
 
-  private void pTestClosure4() throws IOException {
+  @Test
+  public void pTestClosure4() throws IOException {
     try {
       String [] files = new String[] {USER_DIR+"/dir1/file1", 
                                       USER_DIR+"/dir2/file2", 
@@ -202,7 +545,8 @@ public class TestGlobPaths extends TestC
     }
   }
   
-  private void pTestClosure5() throws IOException {
+  @Test
+  public void pTestClosure5() throws IOException {
     try {
       String [] files = new String[] {USER_DIR+"/dir1/file1", 
                                       USER_DIR+"/file1"};
@@ -214,7 +558,8 @@ public class TestGlobPaths extends TestC
     }
   }
 
-  private void pTestSet() throws IOException {
+  @Test
+  public void pTestSet() throws IOException {
     try {    
       String [] files = new String[] {USER_DIR+"/a.c", USER_DIR+"/a.cpp",
                                       USER_DIR+"/a.hlp", USER_DIR+"/a.hxy"};
@@ -228,7 +573,8 @@ public class TestGlobPaths extends TestC
     }
   }
   
-  private void pTestRange() throws IOException {
+  @Test
+  public void pTestRange() throws IOException {
     try {    
       String [] files = new String[] {USER_DIR+"/a.d", USER_DIR+"/a.e",
                                       USER_DIR+"/a.f", USER_DIR+"/a.h"};
@@ -242,7 +588,8 @@ public class TestGlobPaths extends TestC
     }
   }
   
-  private void pTestSetExcl() throws IOException {
+  @Test
+  public void pTestSetExcl() throws IOException {
     try {    
       String [] files = new String[] {USER_DIR+"/a.d", USER_DIR+"/a.e",
                                       USER_DIR+"/a.0", USER_DIR+"/a.h"};
@@ -255,7 +602,8 @@ public class TestGlobPaths extends TestC
     }
   }
 
-  private void pTestCombination() throws IOException {
+  @Test
+  public void pTestCombination() throws IOException {
     try {    
       String [] files = new String[] {"/user/aa/a.c", "/user/bb/a.cpp",
                                       "/user1/cc/b.hlp", "/user/dd/a.hxy"};
@@ -267,7 +615,8 @@ public class TestGlobPaths extends TestC
     }
   }
   
-  private void pTestRelativePath() throws IOException {
+  @Test
+  public void pTestRelativePath() throws IOException {
     try {
       String [] files = new String[] {"a", "abc", "abc.p", "bacd"};
       Path[] matchedPath = prepareTesting("a*", files);
@@ -281,7 +630,8 @@ public class TestGlobPaths extends TestC
   }
   
   /* Test {xx,yy} */
-  private void pTestCurlyBracket() throws IOException {
+  @Test
+  public void pTestCurlyBracket() throws IOException {
     Path[] matchedPath;
     String [] files;
     try {
@@ -380,7 +730,8 @@ public class TestGlobPaths extends TestC
   }
   
   /* test that a path name can contain Java regex special characters */
-  private void pTestJavaRegexSpecialChars() throws IOException {
+  @Test
+  public void pTestJavaRegexSpecialChars() throws IOException {
     try {
       String[] files = new String[] {USER_DIR+"/($.|+)bc", USER_DIR+"/abc"};
       Path[] matchedPath = prepareTesting(USER_DIR+"/($.|+)*", files);
@@ -391,6 +742,7 @@ public class TestGlobPaths extends TestC
     }
 
   }
+  
   private Path[] prepareTesting(String pattern, String[] files)
     throws IOException {
     for(int i=0; i<Math.min(NUM_OF_PATHS, files.length); i++) {
@@ -427,8 +779,9 @@ public class TestGlobPaths extends TestC
     return globResults;
   }
   
-  private void cleanupDFS() throws IOException {
-    fs.delete(new Path("/user"), true);
+  @After
+  public void cleanupDFS() throws IOException {
+    fs.delete(new Path(USER_DIR), true);
   }
   
 }
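
For readers following the migration: the rewritten test drives FileSystem.globStatus() directly, relying on its contract that a literal path which does not exist yields null, while a pattern that matches nothing yields an empty array (exactly what the assertNull/checkStatus pairs above assert). A minimal standalone sketch of that contract; the pattern below is illustrative, not taken from the test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class GlobStatusSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Wildcards (*) and alternation ({a,b}) are expanded by globStatus:
        // null means a literal path that does not exist; an empty array
        // means the pattern matched nothing.
        FileStatus[] matches =
            fs.globStatus(new Path("/user/*/subdir{1,2}/f*"));
        if (matches != null) {
          for (FileStatus st : matches) {
            System.out.println(st.getPath().toUri().getPath());
          }
        }
        fs.close();
      }
    }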

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java Fri Oct 19 02:25:55 2012
@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.fs;
 
+import static org.apache.hadoop.fs.FileContextTestHelper.exists;
+import static org.apache.hadoop.fs.FileContextTestHelper.getTestRootPath;
+
 import java.io.IOException;
 import java.net.URISyntaxException;
 
@@ -27,8 +30,8 @@ import org.apache.hadoop.fs.Options.Rena
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import org.junit.AfterClass;
@@ -37,8 +40,6 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import static org.apache.hadoop.fs.FileContextTestHelper.*;
-
 public class TestHDFSFileContextMainOperations extends
     FileContextMainOperationsBaseTest {
   private static MiniDFSCluster cluster;
@@ -75,6 +76,7 @@ public class TestHDFSFileContextMainOper
     cluster.shutdown();   
   }
   
+  @Override
   @Before
   public void setUp() throws Exception {
     super.setUp();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java Fri Oct 19 02:25:55 2012
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.fs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+
 import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
@@ -25,19 +28,15 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URL;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
-import org.apache.hadoop.fs.Path;
+import org.junit.Test;
 
 /**
  * Test of the URL stream handler factory.
  */
-public class TestUrlStreamHandler extends TestCase {
+public class TestUrlStreamHandler {
 
   /**
    * Test opening and reading from an InputStream through a hdfs:// URL.
@@ -47,6 +46,7 @@ public class TestUrlStreamHandler extend
    * 
    * @throws IOException
    */
+  @Test
   public void testDfsUrls() throws IOException {
 
     Configuration conf = new HdfsConfiguration();
@@ -105,6 +105,7 @@ public class TestUrlStreamHandler extend
    * @throws IOException
    * @throws URISyntaxException
    */
+  @Test
   public void testFileUrls() throws IOException, URISyntaxException {
     // URLStreamHandler is already set in JVM by testDfsUrls() 
     Configuration conf = new HdfsConfiguration();
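
The class under test registers FsUrlStreamHandlerFactory so that plain java.net.URL can open hdfs:// streams. A minimal sketch, assuming a reachable NameNode at namenode:8020 (host, port, and path are placeholders); the JVM accepts only one stream handler factory, so it must be registered exactly once:

    import java.io.InputStream;
    import java.net.URL;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;

    public class UrlHandlerSketch {
      public static void main(String[] args) throws Exception {
        // java.net.URL allows exactly one factory per JVM, so register it
        // once, up front.
        URL.setURLStreamHandlerFactory(
            new FsUrlStreamHandlerFactory(new Configuration()));
        // Placeholder host/port/path below.
        InputStream in =
            new URL("hdfs://namenode:8020/some/file").openStream();
        try {
          System.out.println("first byte: " + in.read());
        } finally {
          in.close();
        }
      }
    }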

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.fs.loadGenerator;
 
+import static org.junit.Assert.assertEquals;
+
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileReader;
@@ -27,9 +29,7 @@ import org.apache.hadoop.conf.Configured
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-
-import static org.junit.Assert.*;
-
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
@@ -171,7 +171,7 @@ public class TestLoadGenerator extends C
       args = new String[] {"-readProbability", "0.3", "-writeProbability", "0.3",
           "-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0",
           "-numOfThreads", "1", "-startTime", 
-          Long.toString(System.currentTimeMillis()), "-elapsedTime", "10"};
+          Long.toString(Time.now()), "-elapsedTime", "10"};
       
       assertEquals(0, lg.run(args));
 
@@ -227,7 +227,7 @@ public class TestLoadGenerator extends C
       String[] scriptArgs = new String[] {
           "-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0",
           "-numOfThreads", "10", "-startTime", 
-          Long.toString(System.currentTimeMillis()), "-scriptFile", script};
+          Long.toString(Time.now()), "-scriptFile", script};
       
       assertEquals(0, lg.run(scriptArgs));
       

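The System.currentTimeMillis() to Time.now() swap recurs throughout this commit (BenchmarkThroughput and DataNodeCluster below get the same treatment). A small sketch of the pattern:

    import org.apache.hadoop.util.Time;

    public class TimeNowSketch {
      public static void main(String[] args) throws InterruptedException {
        // Time.now() wraps System.currentTimeMillis(); funneling wall-clock
        // reads through one utility keeps them easy to find and to change.
        long start = Time.now();
        Thread.sleep(50);
        System.out.println("elapsed ms: " + (Time.now() - start));
      }
    }
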
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/permission/TestStickyBit.java Fri Oct 19 02:25:55 2012
@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.fs.permission;
 
-import java.io.IOException;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
-import junit.framework.TestCase;
+import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -32,8 +35,9 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Test;
 
-public class TestStickyBit extends TestCase {
+public class TestStickyBit {
 
   static UserGroupInformation user1 = 
     UserGroupInformation.createUserForTesting("theDoctor", new String[] {"tardis"});
@@ -158,6 +162,7 @@ public class TestStickyBit extends TestC
     assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());
   }
 
+  @Test
   public void testGeneralSBBehavior() throws IOException, InterruptedException {
     MiniDFSCluster cluster = null;
     try {
@@ -195,6 +200,7 @@ public class TestStickyBit extends TestC
    * Test that one user can't rename/move another user's file when the sticky
    * bit is set.
    */
+  @Test
   public void testMovingFiles() throws IOException, InterruptedException {
     MiniDFSCluster cluster = null;
 
@@ -243,6 +249,7 @@ public class TestStickyBit extends TestC
    * the sticky bit back on re-start, and that no extra sticky bits appear after
    * re-start.
    */
+  @Test
   public void testStickyBitPersistence() throws IOException {
     MiniDFSCluster cluster = null;
     try {

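TestStickyBit (now JUnit 4 as well) exercises the sticky bit through the public permission API. A hedged sketch of the core calls, with an illustrative path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class StickyBitSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path dir = new Path("/tmp/shared"); // illustrative path
        fs.mkdirs(dir);
        // 01777 = world-writable with the sticky bit set: only a file's
        // owner (or the superuser) may delete or rename entries inside.
        fs.setPermission(dir, new FsPermission((short) 01777));
        System.out.println("sticky: "
            + fs.getFileStatus(dir).getPermission().getStickyBit());
        fs.close();
      }
    }
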
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java Fri Oct 19 02:25:55 2012
@@ -59,9 +59,12 @@ public class TestViewFileSystemAtHdfsRoo
       
   @AfterClass
   public static void clusterShutdownAtEnd() throws Exception {
-    cluster.shutdown();   
+    if (cluster != null) {
+      cluster.shutdown();
+    }
   }
 
+  @Override
   @Before
   public void setUp() throws Exception {
     fsTarget = fHdfs;
@@ -83,7 +86,7 @@ public class TestViewFileSystemAtHdfsRoo
 
   @Override
   int getExpectedDelegationTokenCount() {
-    return 8;
+    return 1; // all point to the same fs so 1 unique token
   }
 
   @Override

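The guard added to clusterShutdownAtEnd() matters because JUnit still runs @AfterClass when @BeforeClass throws; without the null check, a startup failure gets masked by a NullPointerException in teardown. The same pattern in isolation, as a sketch:

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.AfterClass;

    public abstract class ClusterTestBase {
      protected static MiniDFSCluster cluster;

      @AfterClass
      public static void shutdownCluster() {
        // If startup failed, cluster is still null here; skip shutdown so
        // the original failure surfaces instead of an NPE.
        if (cluster != null) {
          cluster.shutdown();
        }
      }
    }
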
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java Fri Oct 19 02:25:55 2012
@@ -80,6 +80,7 @@ public class TestViewFileSystemHdfs exte
     cluster.shutdown();   
   }
 
+  @Override
   @Before
   public void setUp() throws Exception {
     // create the test root on local_fs
@@ -89,6 +90,7 @@ public class TestViewFileSystemHdfs exte
     super.setUp();
   }
 
+  @Override
   @After
   public void tearDown() throws Exception {
     super.tearDown();
@@ -115,7 +117,7 @@ public class TestViewFileSystemHdfs exte
 
   @Override
   int getExpectedDelegationTokenCount() {
-    return 9;
+    return 2; // Mount points to 2 unique hdfs 
   }
 
   @Override

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java Fri Oct 19 02:25:55 2012
@@ -61,6 +61,7 @@ public class TestViewFsAtHdfsRoot extend
     cluster.shutdown();   
   }
 
+  @Override
   @Before
   public void setUp() throws Exception {
     // create the test root on local_fs

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsDefaultValue.java Fri Oct 19 02:25:55 2012
@@ -18,6 +18,19 @@
 package org.apache.hadoop.fs.viewfs;
 
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -25,38 +38,17 @@ import java.net.URISyntaxException;
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsConstants;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.io.DataInputBuffer;
-import org.apache.hadoop.io.DataOutputBuffer;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 /**
  * Tests for viewfs implementation of default fs level values.

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsFileStatusHdfs.java Fri Oct 19 02:25:55 2012
@@ -23,6 +23,9 @@ package org.apache.hadoop.fs.viewfs;
 * Since viewfs has overlaid ViewFsFileStatus, we ran into
 * serialization problems. This test verifies the fix.
  */
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -40,11 +43,9 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.io.DataInputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.security.UserGroupInformation;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import static org.junit.Assert.*;
 
 public class TestViewFsFileStatusHdfs {
   

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java Fri Oct 19 02:25:55 2012
@@ -60,6 +60,7 @@ public class TestViewFsHdfs extends View
     cluster.shutdown();   
   }
 
+  @Override
   @Before
   public void setUp() throws Exception {
     // create the test root on local_fs

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java Fri Oct 19 02:25:55 2012
@@ -17,13 +17,12 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+
 import java.io.IOException;
 import java.io.OutputStream;
 import java.util.Random;
 
-import junit.framework.Assert;
-import junit.framework.TestCase;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -53,6 +52,7 @@ public class AppendTestUtil {
   }
 
   private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
+    @Override
     protected Random initialValue() {
       final Random r =  new Random();
       synchronized(SEED) { 
@@ -120,16 +120,16 @@ public class AppendTestUtil {
       FSDataInputStream in = fs.open(p);
       if (in.getWrappedStream() instanceof DFSInputStream) {
         long len = ((DFSInputStream)in.getWrappedStream()).getFileLength();
-        TestCase.assertEquals(length, len);
+        assertEquals(length, len);
       } else {
-        TestCase.assertEquals(length, status.getLen());
+        assertEquals(length, status.getLen());
       }
       
       for(i++; i < length; i++) {
-        TestCase.assertEquals((byte)i, (byte)in.read());  
+        assertEquals((byte)i, (byte)in.read());  
       }
       i = -(int)length;
-      TestCase.assertEquals(-1, in.read()); //EOF  
+      assertEquals(-1, in.read()); //EOF  
       in.close();
     } catch(IOException ioe) {
       throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
@@ -174,7 +174,7 @@ public class AppendTestUtil {
   private static void checkData(final byte[] actual, int from,
                                 final byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
-      Assert.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
                    expected[from+idx]+" actual "+actual[idx],
                    expected[from+idx], actual[idx]);
       actual[idx] = 0;
@@ -188,7 +188,7 @@ public class AppendTestUtil {
       final FSDataOutputStream out = fs.create(p, (short)1);
       out.write(bytes);
       out.close();
-      Assert.assertEquals(bytes.length, fs.getFileStatus(p).getLen());
+      assertEquals(bytes.length, fs.getFileStatus(p).getLen());
     }
 
     for(int i = 2; i < 500; i++) {
@@ -196,7 +196,7 @@ public class AppendTestUtil {
       final FSDataOutputStream out = fs.append(p);
       out.write(bytes);
       out.close();
-      Assert.assertEquals(i*bytes.length, fs.getFileStatus(p).getLen());
+      assertEquals(i*bytes.length, fs.getFileStatus(p).getLen());
     }
   }
 }
\ No newline at end of file

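AppendTestUtil's move from junit.framework.Assert to statically imported org.junit.Assert methods is the same JUnit 3-to-4 cleanup applied across this commit. A tiny sketch of the resulting style, modeled on the message-bearing asserts in checkData() above:

    import static org.junit.Assert.assertEquals;

    public class AssertStyleSketch {
      // A message-bearing assertEquals replaces junit.framework.Assert,
      // so utility classes no longer drag in JUnit 3 types.
      static void checkByte(byte expected, byte actual, int idx) {
        assertEquals("byte " + idx + " differs", expected, actual);
      }

      public static void main(String[] args) {
        checkByte((byte) 7, (byte) 7, 0);
      }
    }
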
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BenchmarkThroughput.java Fri Oct 19 02:25:55 2012
@@ -33,9 +33,9 @@ import org.apache.hadoop.fs.ChecksumFile
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-
 import org.apache.log4j.Level;
 
 /**
@@ -52,12 +52,12 @@ public class BenchmarkThroughput extends
   private int BUFFER_SIZE;
 
   private void resetMeasurements() {
-    startTime = System.currentTimeMillis();
+    startTime = Time.now();
   }
 
   private void printMeasurements() {
     System.out.println(" time: " +
-                       ((System.currentTimeMillis() - startTime)/1000));
+                       ((Time.now() - startTime)/1000));
   }
 
   private Path writeLocalFile(String name, Configuration conf,
@@ -168,6 +168,7 @@ public class BenchmarkThroughput extends
       "  dfsthroughput.buffer.size:\tbuffer size for write/read (4k)\n");
   }
 
+  @Override
   public int run(String[] args) throws IOException {
     // silence the minidfs cluster
     Log hadoopLog = LogFactory.getLog("org");

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java Fri Oct 19 02:25:55 2012
@@ -18,25 +18,26 @@
 
 package org.apache.hadoop.hdfs;
 
-import java.net.Socket;
-import java.net.InetSocketAddress;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 import java.io.DataOutputStream;
-import java.util.Random;
-import java.util.List;
 import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.util.List;
+import java.util.Random;
 
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.net.NetUtils;
 
-import static org.junit.Assert.*;
-
 /**
 * A helper class to set up the cluster and obtain a BlockReader and DataNode for a block.
  */
@@ -154,7 +155,7 @@ public class BlockReaderTestUtil {
       testBlock.getBlockToken(), 
       offset, lenToRead,
       conf.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-      true, "");
+      true, "", null, null);
   }
 
   /**

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSClientAdapter.java Fri Oct 19 02:25:55 2012
@@ -33,7 +33,7 @@ public class DFSClientAdapter {
   
   public static void stopLeaseRenewer(DistributedFileSystem dfs) throws IOException {
     try {
-      dfs.dfs.leaserenewer.interruptAndJoin();
+      dfs.dfs.getLeaseRenewer().interruptAndJoin();
     } catch (InterruptedException e) {
       throw new IOException(e);
     }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Fri Oct 19 02:25:55 2012
@@ -55,7 +55,6 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSTestUtil.Builder;
 import org.apache.hadoop.hdfs.MiniDFSCluster.NameNodeInfo;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -86,6 +85,7 @@ import org.apache.hadoop.security.UserGr
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.VersionInfo;
 
+import com.google.common.base.Charsets;
 import com.google.common.base.Joiner;
 
 /** Utilities for HDFS tests */
@@ -211,27 +211,40 @@ public class DFSTestUtil {
   
   public static void createFile(FileSystem fs, Path fileName, long fileLen, 
       short replFactor, long seed) throws IOException {
+    createFile(fs, fileName, 1024, fileLen, fs.getDefaultBlockSize(fileName),
+        replFactor, seed);
+  }
+  
+  public static void createFile(FileSystem fs, Path fileName, int bufferLen,
+      long fileLen, long blockSize, short replFactor, long seed)
+      throws IOException {
+    assert bufferLen > 0;
     if (!fs.mkdirs(fileName.getParent())) {
       throw new IOException("Mkdirs failed to create " + 
                             fileName.getParent().toString());
     }
     FSDataOutputStream out = null;
     try {
-      out = fs.create(fileName, replFactor);
-      byte[] toWrite = new byte[1024];
-      Random rb = new Random(seed);
-      long bytesToWrite = fileLen;
-      while (bytesToWrite>0) {
-        rb.nextBytes(toWrite);
-        int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
-
-        out.write(toWrite, 0, bytesToWriteNext);
-        bytesToWrite -= bytesToWriteNext;
+      out = fs.create(fileName, true, fs.getConf()
+        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+        replFactor, blockSize);
+      if (fileLen > 0) {
+        byte[] toWrite = new byte[bufferLen];
+        Random rb = new Random(seed);
+        long bytesToWrite = fileLen;
+        while (bytesToWrite>0) {
+          rb.nextBytes(toWrite);
+          int bytesToWriteNext = (bufferLen < bytesToWrite) ? bufferLen
+              : (int) bytesToWrite;
+  
+          out.write(toWrite, 0, bytesToWriteNext);
+          bytesToWrite -= bytesToWriteNext;
+        }
       }
-      out.close();
-      out = null;
     } finally {
-      IOUtils.closeStream(out);
+      if (out != null) {
+        out.close();
+      }
     }
   }
   
@@ -274,7 +287,7 @@ public class DFSTestUtil {
    * specified target.
    */
   public void waitReplication(FileSystem fs, String topdir, short value) 
-                                              throws IOException {
+      throws IOException, InterruptedException, TimeoutException {
     Path root = new Path(topdir);
 
     /** wait for the replication factor to settle down */
@@ -499,36 +512,44 @@ public class DFSTestUtil {
       return fileNames;
     }
   }
-  
-  /** wait for the file's replication to be done */
-  public static void waitReplication(FileSystem fs, Path fileName, 
-      short replFactor)  throws IOException {
-    boolean good;
+
+  /**
+   * Wait for the given file to reach the given replication factor.
+   * @throws TimeoutException if we fail to sufficiently replicate the file
+   */
+  public static void waitReplication(FileSystem fs, Path fileName, short replFactor)
+      throws IOException, InterruptedException, TimeoutException {
+    boolean correctReplFactor;
+    final int ATTEMPTS = 40;
+    int count = 0;
+
     do {
-      good = true;
+      correctReplFactor = true;
       BlockLocation locs[] = fs.getFileBlockLocations(
         fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
+      count++;
       for (int j = 0; j < locs.length; j++) {
         String[] hostnames = locs[j].getNames();
         if (hostnames.length != replFactor) {
-          String hostNameList = "";
-          for (String h : hostnames) hostNameList += h + " ";
-          System.out.println("Block " + j + " of file " + fileName 
-              + " has replication factor " + hostnames.length + "; locations "
-              + hostNameList);
-          good = false;
-          try {
-            System.out.println("Waiting for replication factor to drain");
-            Thread.sleep(100);
-          } catch (InterruptedException e) {} 
+          correctReplFactor = false;
+          System.out.println("Block " + j + " of file " + fileName
+              + " has replication factor " + hostnames.length
+              + " (desired " + replFactor + "); locations "
+              + Joiner.on(' ').join(hostnames));
+          Thread.sleep(1000);
           break;
         }
       }
-      if (good) {
+      if (correctReplFactor) {
         System.out.println("All blocks of file " + fileName
             + " verified to have replication factor " + replFactor);
       }
-    } while(!good);
+    } while (!correctReplFactor && count < ATTEMPTS);
+
+    if (count == ATTEMPTS) {
+      throw new TimeoutException("Timed out waiting for " + fileName +
+          " to reach " + replFactor + " replicas");
+    }
   }
   
   /** delete directory and everything underneath it.*/
@@ -587,12 +608,21 @@ public class DFSTestUtil {
     IOUtils.copyBytes(is, os, s.length(), true);
   }
   
-  // Returns url content as string.
+  /**
+   * @return url content as string (UTF-8 encoding assumed)
+   */
   public static String urlGet(URL url) throws IOException {
+    return new String(urlGetBytes(url), Charsets.UTF_8);
+  }
+  
+  /**
+   * @return URL contents as a byte array
+   */
+  public static byte[] urlGetBytes(URL url) throws IOException {
     URLConnection conn = url.openConnection();
     ByteArrayOutputStream out = new ByteArrayOutputStream();
     IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
-    return out.toString();
+    return out.toByteArray();
   }
   
   /**

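The reworked waitReplication() above replaces an unbounded polling loop with a capped retry that throws TimeoutException after 40 one-second sleeps. The generic shape of that pattern, as a sketch (all names here are illustrative):

    import java.util.concurrent.TimeoutException;

    public class BoundedWaitSketch {
      interface Check {
        boolean ok() throws Exception;
      }

      // Poll until the condition holds or the attempt budget is spent,
      // then fail loudly; mirrors the new waitReplication() loop.
      static void waitFor(Check check, int attempts, long sleepMs)
          throws Exception {
        for (int i = 0; i < attempts; i++) {
          if (check.ok()) {
            return;
          }
          Thread.sleep(sleepMs);
        }
        throw new TimeoutException(
            "condition not met after " + attempts + " attempts");
      }

      public static void main(String[] args) throws Exception {
        final long deadline = System.currentTimeMillis() + 200;
        waitFor(new Check() {
          @Override
          public boolean ok() {
            return System.currentTimeMillis() >= deadline;
          }
        }, 40, 100);
        System.out.println("done");
      }
    }
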
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DataNodeCluster.java Fri Oct 19 02:25:55 2012
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.server.dat
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.namenode.CreateEditsLog;
 import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.util.Time;
 
 
 /**
@@ -231,7 +232,6 @@ public class DataNodeCluster {
     }
     
     int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
-    return "/Rack-" + rand + "-"+ ip  + "-" + 
-                      System.currentTimeMillis();
+    return "/Rack-" + rand + "-"+ ip  + "-" + Time.now(); 
   }
 }

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/FileAppendTest4.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs;
 
+import java.io.IOException;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -27,8 +29,6 @@ import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.io.IOException;
-
 /** This is a comprehensive append test that tries
  * all combinations of file length and number of appended bytes.
  * In each iteration, it creates a file of len1. Then reopen

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java Fri Oct 19 02:25:55 2012
@@ -118,6 +118,8 @@ public class MiniDFSCluster {
   public static final String PROP_TEST_BUILD_DATA = "test.build.data";
   /** Configuration option to set the data dir: {@value} */
   public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";
+  public static final String DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY
+      = DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + ".testing";
 
   static { DefaultMetricsSystem.setMiniClusterMode(true); }
 
@@ -143,6 +145,7 @@ public class MiniDFSCluster {
     private boolean setupHostsFile = false;
     private MiniDFSNNTopology nnTopology = null;
     private boolean checkExitOnShutdown = true;
+    private boolean checkDataNodeHostConfig = false;
     
     public Builder(Configuration conf) {
       this.conf = conf;
@@ -261,6 +264,14 @@ public class MiniDFSCluster {
     }
 
     /**
+     * Default: false
+     */
+    public Builder checkDataNodeHostConfig(boolean val) {
+      this.checkDataNodeHostConfig = val;
+      return this;
+    }
+    
+    /**
      * Default: null
      */
     public Builder clusterId(String cid) {
@@ -324,7 +335,8 @@ public class MiniDFSCluster {
                        builder.waitSafeMode,
                        builder.setupHostsFile,
                        builder.nnTopology,
-                       builder.checkExitOnShutdown);
+                       builder.checkExitOnShutdown,
+                       builder.checkDataNodeHostConfig);
   }
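
The new checkDataNodeHostConfig option lets a test keep a DataNode hostname it configured itself; when the flag is false the cluster still forces 127.0.0.1 as before. A hedged usage sketch (the hostname value is an assumption):

    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "dn1.example.com");
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .checkDataNodeHostConfig(true)   // setIfUnset, so the value survives
        .build();
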
   
   public class DataNodeProperties {
@@ -561,7 +573,7 @@ public class MiniDFSCluster {
         manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
         operation, racks, hosts,
         simulatedCapacities, null, true, false,
-        MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true);
+        MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false);
   }
 
   private void initMiniDFSCluster(
@@ -571,7 +583,8 @@ public class MiniDFSCluster {
       boolean manageDataDfsDirs, StartupOption operation, String[] racks,
       String[] hosts, long[] simulatedCapacities, String clusterId,
       boolean waitSafeMode, boolean setupHostsFile,
-      MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown)
+      MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
+      boolean checkDataNodeHostConfig)
   throws IOException {
     ExitUtil.disableSystemExit();
 
@@ -587,7 +600,9 @@ public class MiniDFSCluster {
     
     int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
     conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
-    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
+    int safemodeExtension = conf.getInt(
+        DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
+    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
     conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 seconds
     conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                    StaticMapping.class, DNSToSwitchMapping.class);
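
With this change the safemode extension is no longer hard-wired to 0: a test that needs a real extension window can set the new testing key before the cluster copies it into the live key. A minimal sketch (the 5000 ms value is illustrative):

    Configuration conf = new HdfsConfiguration();
    conf.setInt(
        MiniDFSCluster.DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 5000);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    // The NameNode starts with dfs.namenode.safemode.extension = 5000
    // rather than the 0 that tests otherwise get.
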
@@ -609,14 +624,20 @@ public class MiniDFSCluster {
     }
     
     federation = nnTopology.isFederated();
-    createNameNodesAndSetConf(
-        nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
-        enableManagedDfsDirsRedundancy,
-        format, operation, clusterId, conf);
-    
+    try {
+      createNameNodesAndSetConf(
+          nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
+          enableManagedDfsDirsRedundancy,
+          format, operation, clusterId, conf);
+    } catch (IOException ioe) {
+      LOG.error("IOE creating namenodes. Permissions dump:\n" +
+          createPermissionsDiagnosisString(data_dir));
+      throw ioe;
+    }
     if (format) {
       if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
-        throw new IOException("Cannot remove data directory: " + data_dir);
+        throw new IOException("Cannot remove data directory: " + data_dir +
+            createPermissionsDiagnosisString(data_dir));
       }
     }
     
@@ -626,12 +647,33 @@ public class MiniDFSCluster {
 
     // Start the DataNodes
     startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks,
-        hosts, simulatedCapacities, setupHostsFile);
+        hosts, simulatedCapacities, setupHostsFile, false, checkDataNodeHostConfig);
     waitClusterUp();
     //make sure ProxyUsers uses the latest conf
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
   }
   
+  /**
+   * @return a debug string which can help diagnose why a given
+   * directory might have a permissions error in the context of a
+   * test case
+   */
+  private String createPermissionsDiagnosisString(File path) {
+    StringBuilder sb = new StringBuilder();
+    while (path != null) { 
+      sb.append("path '" + path + "': ").append("\n");
+      sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
+      sb.append("\tpermissions: ");
+      sb.append(path.isDirectory() ? "d": "-");
+      sb.append(path.canRead() ? "r" : "-");
+      sb.append(path.canWrite() ? "w" : "-");
+      sb.append(path.canExecute() ? "x" : "-");
+      sb.append("\n");
+      path = path.getParentFile();
+    }
+    return sb.toString();
+  }
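
The helper walks from the failing path up to the root, emitting one entry per ancestor. Illustrative output for a hypothetical directory (real paths and permission bits will vary):

    path '/tmp/test/data':
    	absolute:/tmp/test/data
    	permissions: drwx
    path '/tmp/test':
    	absolute:/tmp/test
    	permissions: drwx
    path '/':
    	absolute:/
    	permissions: dr-x
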
+
   private void createNameNodesAndSetConf(MiniDFSNNTopology nnTopology,
       boolean manageNameDfsDirs, boolean manageNameDfsSharedDirs,
       boolean enableManagedDfsDirsRedundancy, boolean format,
@@ -842,8 +884,8 @@ public class MiniDFSCluster {
     // After the NN has started, set back the bound ports into
     // the conf
     conf.set(DFSUtil.addKeySuffixes(
-        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId), NetUtils
-        .getHostPortString(nn.getNameNodeAddress()));
+        DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId),
+        nn.getNameNodeAddressHostPortString());
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId), NetUtils
         .getHostPortString(nn.getHttpAddress()));
@@ -865,8 +907,8 @@ public class MiniDFSCluster {
    * @return URI of the given namenode in MiniDFSCluster
    */
   public URI getURI(int nnIndex) {
-    InetSocketAddress addr = nameNodes[nnIndex].nameNode.getNameNodeAddress();
-    String hostPort = NetUtils.getHostPortString(addr);
+    String hostPort =
+        nameNodes[nnIndex].nameNode.getNameNodeAddressHostPortString();
     URI uri = null;
     try {
       uri = new URI("hdfs://" + hostPort);
@@ -903,7 +945,8 @@ public class MiniDFSCluster {
   /**
    * wait for the cluster to get out of safemode.
    */
-  public void waitClusterUp() {
+  public void waitClusterUp() throws IOException {
+    int i = 0;
     if (numDataNodes > 0) {
       while (!isClusterUp()) {
         try {
@@ -911,6 +954,9 @@ public class MiniDFSCluster {
           Thread.sleep(1000);
         } catch (InterruptedException e) {
         }
+        if (++i > 10) {
+          throw new IOException("Timed out waiting for Mini HDFS Cluster to start");
+        }
       }
     }
   }
@@ -978,7 +1024,21 @@ public class MiniDFSCluster {
                              long[] simulatedCapacities,
                              boolean setupHostsFile) throws IOException {
     startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts,
-                   simulatedCapacities, setupHostsFile, false);
+        simulatedCapacities, setupHostsFile, false, false);
+  }
+
+  /**
+   * @see MiniDFSCluster#startDataNodes(Configuration, int, boolean, StartupOption,
+   * String[], String[], long[], boolean, boolean, boolean)
+   */
+  public synchronized void startDataNodes(Configuration conf, int numDataNodes,
+      boolean manageDfsDirs, StartupOption operation, 
+      String[] racks, String[] hosts,
+      long[] simulatedCapacities,
+      boolean setupHostsFile,
+      boolean checkDataNodeAddrConfig) throws IOException {
+    startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts,
+        simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false);
   }
 
   /**
@@ -1004,19 +1064,25 @@ public class MiniDFSCluster {
    * @param simulatedCapacities array of capacities of the simulated data nodes
    * @param setupHostsFile add new nodes to dfs hosts files
    * @param checkDataNodeAddrConfig if true, only set DataNode port addresses if not already set in config
+   * @param checkDataNodeHostConfig if true, only set DataNode hostname key if not already set in config
    *
   * @throws IllegalStateException if the NameNode has been shut down
    */
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
-                             boolean manageDfsDirs, StartupOption operation, 
-                             String[] racks, String[] hosts,
-                             long[] simulatedCapacities,
-                             boolean setupHostsFile,
-                             boolean checkDataNodeAddrConfig) throws IOException {
+      boolean manageDfsDirs, StartupOption operation, 
+      String[] racks, String[] hosts,
+      long[] simulatedCapacities,
+      boolean setupHostsFile,
+      boolean checkDataNodeAddrConfig,
+      boolean checkDataNodeHostConfig) throws IOException {
     if (operation == StartupOption.RECOVER) {
       return;
     }
-    conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
+    if (checkDataNodeHostConfig) {
+      conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
+    } else {
+      conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
+    }
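
The only difference between the two branches is whether an existing value wins. A hedged sketch of Configuration.set vs. setIfUnset semantics:

    Configuration conf = new Configuration();
    conf.set("dfs.datanode.hostname", "dn1.example.com");

    conf.setIfUnset("dfs.datanode.hostname", "127.0.0.1");
    // still "dn1.example.com": setIfUnset never overwrites an existing value

    conf.set("dfs.datanode.hostname", "127.0.0.1");
    // now "127.0.0.1": plain set always overwrites (the old behaviour)
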
 
     int curDatanodesNum = dataNodes.size();
     // for mincluster's the default initialDelay for BRs is 0
@@ -1316,9 +1382,12 @@ public class MiniDFSCluster {
   public void shutdown() {
     LOG.info("Shutting down the Mini HDFS Cluster");
     if (checkExitOnShutdown)  {
-     if (ExitUtil.terminateCalled()) {
-       throw new AssertionError("Test resulted in an unexpected exit");
-     }
+      if (ExitUtil.terminateCalled()) {
+        LOG.fatal("Test resulted in an unexpected exit",
+            ExitUtil.getFirstExitException());
+        ExitUtil.resetFirstExitException();
+        throw new AssertionError("Test resulted in an unexpected exit");
+      }
     }
     shutdownDataNodes();
     for (NameNodeInfo nnInfo : nameNodes) {

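shutdown() now logs the original ExitException before asserting, and resets ExitUtil so a single leaked System.exit cannot fail every later test in the JVM. A hedged sketch of the teardown this serves (the cluster field and @After method are assumptions about a typical test class):

    @After
    public void tearDown() {
      if (cluster != null) {
        // Throws AssertionError if anything called ExitUtil.terminate()
        // during the test; the offending exception is logged first and
        // the recorded state reset so subsequent tests start clean.
        cluster.shutdown();
      }
    }
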
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAbandonBlock.java Fri Oct 19 02:25:55 2012
@@ -17,18 +17,20 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-
-import static org.junit.Assert.*;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendDifferentChecksum.java Fri Oct 19 02:25:55 2012
@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.Time;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Ignore;
@@ -105,16 +106,16 @@ public class TestAppendDifferentChecksum
     FileSystem fsWithCrc32C = createFsWithChecksum("CRC32C", 512);
 
     Path p = new Path("/testAlgoSwitchRandomized");
-    long seed = System.currentTimeMillis();
+    long seed = Time.now();
     System.out.println("seed: " + seed);
     Random r = new Random(seed);
     
     // Create empty to start
     IOUtils.closeStream(fsWithCrc32.create(p));
     
-    long st = System.currentTimeMillis();
+    long st = Time.now();
     int len = 0;
-    while (System.currentTimeMillis() - st < RANDOM_TEST_RUNTIME) {
+    while (Time.now() - st < RANDOM_TEST_RUNTIME) {
       int thisLen = r.nextInt(500);
       FileSystem fs = (r.nextBoolean() ? fsWithCrc32 : fsWithCrc32C);
       FSDataOutputStream stm = fs.append(p);

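Printing the seed before the time-bounded random loop keeps the test reproducible: a failure report contains everything needed to replay the exact sequence. A sketch of the pattern, assuming an illustrative RUNTIME_MS constant:

    long seed = Time.now();
    System.out.println("seed: " + seed);   // feed back into new Random(seed)
    Random r = new Random(seed);

    long start = Time.now();
    while (Time.now() - start < RUNTIME_MS) {
      int thisLen = r.nextInt(500);        // bounded random work item
      // ... exercise the code under test with thisLen bytes ...
    }
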
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java Fri Oct 19 02:25:55 2012
@@ -17,27 +17,27 @@
 */
 package org.apache.hadoop.hdfs;
 
-import java.util.ArrayList;
+import static org.junit.Assert.assertEquals;
 
-import junit.framework.TestCase;
-import org.apache.hadoop.conf.Configuration;
+import java.util.ArrayList;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.junit.Test;
 
 /**
  * This test ensures that the balancer bandwidth is dynamically adjusted
  * correctly.
  */
-public class TestBalancerBandwidth extends TestCase {
+public class TestBalancerBandwidth {
   final static private Configuration conf = new Configuration();
   final static private int NUM_OF_DATANODES = 2;
   final static private int DEFAULT_BANDWIDTH = 1024*1024;
   public static final Log LOG = LogFactory.getLog(TestBalancerBandwidth.class);
 
+  @Test
   public void testBalancerBandwidth() throws Exception {
     /* Set bandwidthPerSec to a low value of 1M bps. */
     conf.setLong(

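The conversion above recurs throughout this commit: drop the junit.framework.TestCase base class, mark each test method with @Test, and static-import the asserts from org.junit.Assert. A minimal before/after sketch:

    // JUnit 3 style (discovery via TestCase inheritance + "test" prefix):
    //   public class TestFoo extends TestCase {
    //     public void testBar() { assertEquals(1, 1); }
    //   }

    // JUnit 4 style (annotations + static-imported asserts):
    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class TestFoo {
      @Test
      public void testBar() {
        assertEquals(1, 1);
      }
    }
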
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockMissingException.java Fri Oct 19 02:25:55 2012
@@ -17,26 +17,24 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertTrue;
+
 import java.io.File;
 import java.io.IOException;
 
-import junit.framework.TestCase;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.BlockMissingException;
+import org.junit.Test;
 
-public class TestBlockMissingException extends TestCase {
+public class TestBlockMissingException {
   final static Log LOG = LogFactory.getLog("org.apache.hadoop.hdfs.TestBlockMissing");
   final static int NUM_DATANODES = 3;
 
@@ -47,6 +45,7 @@ public class TestBlockMissingException e
   /**
   * Test that a missing block raises BlockMissingException
    */
+  @Test
   public void testBlockMissingException() throws Exception {
     LOG.info("Test testBlockMissingException started.");
     long blockSize = 1024L;

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderLocal.java Fri Oct 19 02:25:55 2012
@@ -61,7 +61,7 @@ public class TestBlockReaderLocal {
    * of this class might immediately issue a retry on failure, so it's polite.
    */
   @Test
-  public void testStablePositionAfterCorruptRead() throws IOException {
+  public void testStablePositionAfterCorruptRead() throws Exception {
     final short REPL_FACTOR = 1;
     final long FILE_LENGTH = 512L;
     cluster.waitActive();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java Fri Oct 19 02:25:55 2012
@@ -17,24 +17,26 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+
 import java.io.IOException;
 import java.util.ArrayList;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.junit.Test;
 
 /**
  * This class tests DatanodeDescriptor.getBlocksScheduled() at the
  * NameNode. This counter is supposed to keep track of blocks currently
  * scheduled to a datanode.
  */
-public class TestBlocksScheduledCounter extends TestCase {
+public class TestBlocksScheduledCounter {
 
+  @Test
   public void testBlocksScheduledCounter() throws IOException {
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
                                                .build();

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientBlockVerification.java Fri Oct 19 02:25:55 2012
@@ -18,21 +18,20 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+
 import java.util.List;
 
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
-import org.apache.hadoop.fs.Path;
 import org.apache.log4j.Level;
-
-import org.junit.Test;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.never;
+import org.junit.Test;
 
 public class TestClientBlockVerification {
 
@@ -61,7 +60,7 @@ public class TestClientBlockVerification
     RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
         util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024));
     util.readAndCheckEOS(reader, FILE_SIZE_K * 1024, true);
-    verify(reader).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
+    verify(reader).sendReadResult(Status.CHECKSUM_OK);
     reader.close();
   }
 
@@ -76,7 +75,7 @@ public class TestClientBlockVerification
 
     // We asked the blockreader for the whole file, and only read
     // half of it, so no CHECKSUM_OK
-    verify(reader, never()).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
+    verify(reader, never()).sendReadResult(Status.CHECKSUM_OK);
     reader.close();
   }
 
@@ -92,7 +91,7 @@ public class TestClientBlockVerification
         util.getBlockReader(testBlock, 0, FILE_SIZE_K * 1024 / 2));
     // And read half the file
     util.readAndCheckEOS(reader, FILE_SIZE_K * 1024 / 2, true);
-    verify(reader).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
+    verify(reader).sendReadResult(Status.CHECKSUM_OK);
     reader.close();
   }
 
@@ -111,7 +110,7 @@ public class TestClientBlockVerification
         RemoteBlockReader2 reader = (RemoteBlockReader2)spy(
             util.getBlockReader(testBlock, startOffset, length));
         util.readAndCheckEOS(reader, length, true);
-        verify(reader).sendReadResult(reader.dnSock, Status.CHECKSUM_OK);
+        verify(reader).sendReadResult(Status.CHECKSUM_OK);
         reader.close();
       }
     }

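These tests rely on Mockito's spy/verify: a spy delegates every un-stubbed call to the real object while still recording interactions, so the reader genuinely reads data and the test can then assert which read result, if any, was sent back. A self-contained sketch of the mechanism:

    import static org.mockito.Mockito.*;

    import java.util.ArrayList;
    import java.util.List;

    List<String> spied = spy(new ArrayList<String>());
    spied.add("x");                    // the real add() executes
    verify(spied).add("x");            // and the interaction was recorded
    verify(spied, never()).clear();    // clear() was never invoked
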
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java Fri Oct 19 02:25:55 2012
@@ -26,11 +26,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.io.IOUtils;
-
-import org.junit.Test;
 import org.junit.Assert;
+import org.junit.Test;
 
 /**
 * This tests whether the pipeline-recovery-related client protocol works correctly.

Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java Fri Oct 19 02:25:55 2012
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.io.RandomAccessFile;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -199,11 +200,11 @@ public class TestClientReportBadBlock {
   }
 
   /**
-   * create a file with one block and corrupt some/all of the block replicas.
+   * Create a file with one block and corrupt some/all of the block replicas.
    */
   private void createAFileWithCorruptedBlockReplicas(Path filePath, short repl,
       int corruptBlockCount) throws IOException, AccessControlException,
-      FileNotFoundException, UnresolvedLinkException {
+      FileNotFoundException, UnresolvedLinkException, InterruptedException, TimeoutException {
     DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
     DFSTestUtil.waitReplication(dfs, filePath, repl);
     // Locate the file blocks by asking name node
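
Because DFSTestUtil.waitReplication can now throw TimeoutException (see the DFSTestUtil hunk at the top of this excerpt), this helper adds it to its throws clause and lets it propagate. A hedged sketch of the alternative, catching it at the call site (the Assert.fail usage is illustrative):

    DFSTestUtil.createFile(dfs, filePath, BLOCK_SIZE, repl, 0);
    try {
      DFSTestUtil.waitReplication(dfs, filePath, repl);
    } catch (TimeoutException e) {
      Assert.fail("replication factor " + repl + " was never reached: " + e);
    }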