Posted to hdfs-commits@hadoop.apache.org by ss...@apache.org on 2012/10/16 02:03:59 UTC

svn commit: r1398581 [8/9] - in /hadoop/common/branches/MR-3902/hadoop-hdfs-project: hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/ hadoop-hdfs-h...

Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestModTime.java Tue Oct 16 00:02:55 2012
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEqu
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
+import java.io.OutputStream;
 import java.net.InetSocketAddress;
 import java.util.Random;
 
@@ -32,12 +33,14 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.util.ThreadUtil;
 import org.junit.Test;
 
 /**
  * This class tests the modification time of files in HDFS.
  */
 public class TestModTime {
+  
   static final long seed = 0xDEADBEEFL;
   static final int blockSize = 8192;
   static final int fileSize = 16384;
@@ -47,19 +50,6 @@ public class TestModTime {
   Random myrand = new Random();
   Path hostsFile;
   Path excludeFile;
-
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-    throws IOException {
-    // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) repl, blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
   
   private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
     assertTrue(fileSys.exists(name));
@@ -102,7 +92,8 @@ public class TestModTime {
      System.out.println("Creating testdir1 and testdir1/test1.dat.");
      Path dir1 = new Path("testdir1");
      Path file1 = new Path(dir1, "test1.dat");
-     writeFile(fileSys, file1, replicas);
+     DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+         (short) replicas, seed);
      FileStatus stat = fileSys.getFileStatus(file1);
      long mtime1 = stat.getModificationTime();
      assertTrue(mtime1 != 0);
@@ -117,7 +108,8 @@ public class TestModTime {
      //
      System.out.println("Creating testdir1/test2.dat.");
      Path file2 = new Path(dir1, "test2.dat");
-     writeFile(fileSys, file2, replicas);
+     DFSTestUtil.createFile(fileSys, file2, fileSize, fileSize, blockSize,
+         (short) replicas, seed);
      stat = fileSys.getFileStatus(file2);
 
      //
@@ -186,6 +178,46 @@ public class TestModTime {
       cluster.shutdown();
     }
   }
+  
+  /**
+   * Regression test for HDFS-3864 - NN does not update internal file mtime for
+   * OP_CLOSE when reading from the edit log.
+   */
+  @Test
+  public void testModTimePersistsAfterRestart() throws IOException {
+    final long sleepTime = 10; // 10 milliseconds
+    MiniDFSCluster cluster = null;
+    FileSystem fs = null;
+    Configuration conf = new HdfsConfiguration();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      fs = cluster.getFileSystem();
+      Path testPath = new Path("/test");
+      
+      // Open a file, and get its initial modification time.
+      OutputStream out = fs.create(testPath);
+      long initialModTime = fs.getFileStatus(testPath).getModificationTime();
+      assertTrue(initialModTime > 0);
+      
+      // Wait and then close the file. Ensure that the mod time goes up.
+      ThreadUtil.sleepAtLeastIgnoreInterrupts(sleepTime);
+      out.close();
+      long modTimeAfterClose = fs.getFileStatus(testPath).getModificationTime();
+      assertTrue(modTimeAfterClose >= initialModTime + sleepTime);
+      
+      // Restart the NN, and make sure that the later mod time is still used.
+      cluster.restartNameNode();
+      long modTimeAfterRestart = fs.getFileStatus(testPath).getModificationTime();
+      assertEquals(modTimeAfterClose, modTimeAfterRestart);
+    } finally {
+      if (fs != null) {
+        fs.close();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 
   public static void main(String[] args) throws Exception {
     new TestModTime().testModTime();

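The recurring change in this commit replaces each test's private writeFile() helper with the shared DFSTestUtil.createFile() utility. A minimal sketch of the call, with the parameter order inferred from the call sites above (bufferLen, fileLen, blockSize, replication, seed); the comments are annotations, not part of the original source:

    // Deterministic test-file creation: the same seed regenerates the same
    // contents, so read-side checks can recompute the expected bytes.
    DFSTestUtil.createFile(fileSys, file1,
        fileSize,          // bufferLen: size of the random write buffer
        fileSize,          // fileLen: total length of the file
        blockSize,         // block size for the new file
        (short) replicas,  // replication factor
        seed);             // PRNG seed for the file contents

Centralizing this removes half a dozen nearly identical helpers across the test tree.
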
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java Tue Oct 16 00:02:55 2012
@@ -83,7 +83,7 @@ public class TestParallelReadUtil {
   static class DirectReadWorkerHelper implements ReadWorkerHelper {
     @Override
     public int read(DFSInputStream dis, byte[] target, int startOff, int len) throws IOException {
-      ByteBuffer bb = ByteBuffer.wrap(target);
+      ByteBuffer bb = ByteBuffer.allocateDirect(target.length);
       int cnt = 0;
       synchronized(dis) {
         dis.seek(startOff);
@@ -95,6 +95,8 @@ public class TestParallelReadUtil {
           cnt += read;
         }
       }
+      bb.clear();
+      bb.get(target);
       return cnt;
     }
 

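ByteBuffer.wrap(target) produces a heap-backed buffer, so the old test never exercised DFSInputStream's direct-buffer read path; allocateDirect() does, at the cost of having no backing array, which is why the data is copied back out with clear()/get() afterwards. In the hunk above, clear() is safe only because the loop fills the buffer to capacity; a hedged, HDFS-independent sketch of the general pattern uses flip() so partial fills are handled too:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.ReadableByteChannel;

    static int readViaDirectBuffer(ReadableByteChannel ch, byte[] target)
        throws IOException {
      ByteBuffer bb = ByteBuffer.allocateDirect(target.length);
      while (bb.hasRemaining() && ch.read(bb) >= 0) {
        // keep filling until the buffer is full or the channel hits EOF
      }
      bb.flip();                 // limit = bytes read, position = 0
      int n = bb.remaining();
      bb.get(target, 0, n);      // copy out to the heap array
      return n;
    }
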
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java Tue Oct 16 00:02:55 2012
@@ -41,11 +41,9 @@ public class TestPread {
   boolean simulatedStorage = false;
 
   private void writeFile(FileSystem fileSys, Path name) throws IOException {
-    // create and write a file that contains three blocks of data
-    DataOutputStream stm = fileSys.create(name, true, 4096, (short)1,
-                                          blockSize);
     // test empty file open and read
-    stm.close();
+    DFSTestUtil.createFile(fileSys, name, 12 * blockSize, 0,
+        blockSize, (short) 1, seed);
     FSDataInputStream in = fileSys.open(name);
     byte[] buffer = new byte[12 * blockSize];
     in.readFully(0, buffer, 0, 0);
@@ -62,11 +60,8 @@ public class TestPread {
       assertTrue("Cannot delete file", false);
     
     // now create the real file
-    stm = fileSys.create(name, true, 4096, (short)1, blockSize);
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
+    DFSTestUtil.createFile(fileSys, name, 12 * blockSize, 12 * blockSize,
+        blockSize, (short) 1, seed);
   }
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {

Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java Tue Oct 16 00:02:55 2012
@@ -26,14 +26,12 @@ import java.io.OutputStream;
 import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
 import java.util.Iterator;
-import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -60,19 +58,6 @@ public class TestReplication {
   private static final int numDatanodes = racks.length;
   private static final Log LOG = LogFactory.getLog(
                                        "org.apache.hadoop.hdfs.TestReplication");
-
-  private void writeFile(FileSystem fileSys, Path name, int repl)
-    throws IOException {
-    // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) repl, blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
   
 /* check that at least two nodes are on the same rack */
   private void checkFile(FileSystem fileSys, Path name, int repl)
@@ -221,19 +206,25 @@ public class TestReplication {
     FileSystem fileSys = cluster.getFileSystem();
     try {
       Path file1 = new Path("/smallblocktest.dat");
-      writeFile(fileSys, file1, 3);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          (short) 3, seed);
       checkFile(fileSys, file1, 3);
       cleanupFile(fileSys, file1);
-      writeFile(fileSys, file1, 10);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          (short) 10, seed);
       checkFile(fileSys, file1, 10);
       cleanupFile(fileSys, file1);
-      writeFile(fileSys, file1, 4);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          (short) 4, seed);
       checkFile(fileSys, file1, 4);
       cleanupFile(fileSys, file1);
-      writeFile(fileSys, file1, 1);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          (short) 1, seed);
       checkFile(fileSys, file1, 1);
       cleanupFile(fileSys, file1);
-      writeFile(fileSys, file1, 2);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          (short) 2, seed);
       checkFile(fileSys, file1, 2);
       cleanupFile(fileSys, file1);
     } finally {
@@ -420,8 +411,8 @@ public class TestReplication {
     }
   }
   
-  private void changeBlockLen(MiniDFSCluster cluster, 
-      int lenDelta) throws IOException, InterruptedException {
+  private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
+      throws IOException, InterruptedException, TimeoutException {
     final Path fileName = new Path("/file1");
     final short REPLICATION_FACTOR = (short)1;
     final FileSystem fs = cluster.getFileSystem();

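changeBlockLen now declares TimeoutException, presumably because DFSTestUtil.waitReplication was changed in this commit series to throw one when replication never reaches the target instead of spinning forever (the same widening shows up in TestBalancer below). A hedged sketch of the caller's shape; the elided body is only described in the comment:

    // Callers simply widen their throws clause and let JUnit report the
    // TimeoutException; waitReplication is assumed to be its source.
    private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
        throws IOException, InterruptedException, TimeoutException {
      final Path fileName = new Path("/file1");
      final short REPLICATION_FACTOR = (short) 1;
      final FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, fileName, 2, REPLICATION_FACTOR, 0L);
      DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);
      // (the rest of the method then corrupts the block length by lenDelta)
    }
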
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java Tue Oct 16 00:02:55 2012
@@ -40,16 +40,6 @@ public class TestSeekBug {
   static final long seed = 0xDEADBEEFL;
   static final int ONEMB = 1 << 20;
   
-  private void writeFile(FileSystem fileSys, Path name) throws IOException {
-    // create and write a file that contains 1MB
-    DataOutputStream stm = fileSys.create(name);
-    byte[] buffer = new byte[ONEMB];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
-  
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
       assertEquals(message+" byte "+(from+idx)+" differs. expected "+
@@ -132,7 +122,9 @@ public class TestSeekBug {
     FileSystem fileSys = cluster.getFileSystem();
     try {
       Path file1 = new Path("seektest.dat");
-      writeFile(fileSys, file1);
+      DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB,
+          fileSys.getDefaultBlockSize(file1),
+          fileSys.getDefaultReplication(file1), seed);
       seekReadFile(fileSys, file1);
       smallReadSeek(fileSys, file1);
       cleanupFile(fileSys, file1);
@@ -151,7 +143,9 @@ public class TestSeekBug {
     FileSystem fileSys = FileSystem.getLocal(conf);
     try {
       Path file1 = new Path("build/test/data", "seektest.dat");
-      writeFile(fileSys, file1);
+      DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB,
+          fileSys.getDefaultBlockSize(file1),
+          fileSys.getDefaultReplication(file1), seed);
       seekReadFile(fileSys, file1);
       cleanupFile(fileSys, file1);
     } finally {

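Because TestSeekBug runs the same code against both HDFS and the local filesystem, the replacement asks the target FileSystem for its own per-path defaults rather than hardcoding a block size and replication factor, as the deleted helper implicitly did through fileSys.create(name). A sketch of the idiom, annotated:

    // One call that works for DFS and LocalFileSystem alike.
    DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB,
        fileSys.getDefaultBlockSize(file1),     // per-path default block size
        fileSys.getDefaultReplication(file1),   // per-path default replication
        seed);
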
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Tue Oct 16 00:02:55 2012
@@ -115,6 +115,14 @@ public class TestShortCircuitLocalRead {
     stm.close();
   }
 
+  private static byte [] arrayFromByteBuffer(ByteBuffer buf) {
+    ByteBuffer alt = buf.duplicate();
+    alt.clear();
+    byte[] arr = new byte[alt.remaining()];
+    alt.get(arr);
+    return arr;
+  }
+  
   /**
   * Verifies that reading a file with the direct read(ByteBuffer) API gives the expected set of bytes.
    */
@@ -122,7 +130,7 @@ public class TestShortCircuitLocalRead {
       int readOffset) throws IOException {
     HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);
 
-    ByteBuffer actual = ByteBuffer.allocate(expected.length - readOffset);
+    ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
 
     IOUtils.skipFully(stm, readOffset);
 
@@ -136,7 +144,8 @@ public class TestShortCircuitLocalRead {
     // Read across chunk boundary
     actual.limit(Math.min(actual.capacity(), nread + 517));
     nread += stm.read(actual);
-    checkData(actual.array(), readOffset, expected, nread, "A few bytes");
+    checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
+        "A few bytes");
     //Now read rest of it
     actual.limit(actual.capacity());
     while (actual.hasRemaining()) {
@@ -147,7 +156,7 @@ public class TestShortCircuitLocalRead {
       }
       nread += nbytes;
     }
-    checkData(actual.array(), readOffset, expected, "Read 3");
+    checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
     stm.close();
   }
 
@@ -224,7 +233,8 @@ public class TestShortCircuitLocalRead {
   @Test
   public void testGetBlockLocalPathInfo() throws IOException, InterruptedException {
     final Configuration conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, "alloweduser");
+    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
+        "alloweduser1,alloweduser2");
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
         .format(true).build();
     cluster.waitActive();
@@ -232,8 +242,10 @@ public class TestShortCircuitLocalRead {
     FileSystem fs = cluster.getFileSystem();
     try {
       DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
-      UserGroupInformation aUgi = UserGroupInformation
-          .createRemoteUser("alloweduser");
+      UserGroupInformation aUgi1 =
+          UserGroupInformation.createRemoteUser("alloweduser1");
+      UserGroupInformation aUgi2 =
+          UserGroupInformation.createRemoteUser("alloweduser2");
       LocatedBlocks lb = cluster.getNameNode().getRpcServer()
           .getBlockLocations("/tmp/x", 0, 16);
       // Create a new block object, because the block inside LocatedBlock at
@@ -241,7 +253,7 @@ public class TestShortCircuitLocalRead {
       ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
       Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
       final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
-      ClientDatanodeProtocol proxy = aUgi
+      ClientDatanodeProtocol proxy = aUgi1
           .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
             @Override
             public ClientDatanodeProtocol run() throws Exception {
@@ -250,13 +262,29 @@ public class TestShortCircuitLocalRead {
             }
           });
       
-      //This should succeed
+      // This should succeed
       BlockLocalPathInfo blpi = proxy.getBlockLocalPathInfo(blk, token);
       Assert.assertEquals(
           DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
           blpi.getBlockPath());
 
-      // Now try with a not allowed user.
+      // Try with the other allowed user
+      proxy = aUgi2
+          .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
+            @Override
+            public ClientDatanodeProtocol run() throws Exception {
+              return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf,
+                  60000, false);
+            }
+          });
+
+      // This should succeed as well
+      blpi = proxy.getBlockLocalPathInfo(blk, token);
+      Assert.assertEquals(
+          DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
+          blpi.getBlockPath());
+
+      // Now try with a disallowed user
       UserGroupInformation bUgi = UserGroupInformation
           .createRemoteUser("notalloweduser");
       proxy = bUgi

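A direct ByteBuffer has no accessible backing array, so the old checkData(actual.array(), ...) calls would throw UnsupportedOperationException once allocateDirect() is used; arrayFromByteBuffer() is the workaround. It duplicates the buffer so the original's position and limit survive, clears the duplicate to cover [0, capacity), and bulk-copies. A standalone restatement of the helper with annotations:

    // Copy a buffer's full contents into a new array without disturbing
    // the source buffer's cursors.
    private static byte[] arrayFromByteBuffer(ByteBuffer buf) {
      ByteBuffer alt = buf.duplicate(); // shared content, independent cursors
      alt.clear();                      // position = 0, limit = capacity
      byte[] arr = new byte[alt.remaining()];
      alt.get(arr);                     // bulk copy into the heap array
      return arr;
    }
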
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java Tue Oct 16 00:02:55 2012
@@ -42,18 +42,6 @@ public class TestSmallBlock {
   static final int blockSize = 1;
   static final int fileSize = 20;
   boolean simulatedStorage = false;
-
-  private void writeFile(FileSystem fileSys, Path name) throws IOException {
-    // create and write a file that contains three blocks of data
-    FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
-        .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
-        (short) 1, blockSize);
-    byte[] buffer = new byte[fileSize];
-    Random rand = new Random(seed);
-    rand.nextBytes(buffer);
-    stm.write(buffer);
-    stm.close();
-  }
   
   private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
     for (int idx = 0; idx < actual.length; idx++) {
@@ -105,7 +93,8 @@ public class TestSmallBlock {
     FileSystem fileSys = cluster.getFileSystem();
     try {
       Path file1 = new Path("smallblocktest.dat");
-      writeFile(fileSys, file1);
+      DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+          (short) 1, seed);
       checkFile(fileSys, file1);
       cleanupFile(fileSys, file1);
     } finally {

Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java Tue Oct 16 00:02:55 2012
@@ -80,9 +80,11 @@ public class TestClientProtocolWithDeleg
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
         3600000, mockNameSys);
     sm.startThreads();
-    final Server server = RPC.getServer(ClientProtocol.class, mockNN, ADDRESS,
-        0, 5, true, conf, sm);
-
+    final Server server = new RPC.Builder(conf)
+        .setProtocol(ClientProtocol.class).setInstance(mockNN)
+        .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
+        .setSecretManager(sm).build();
+    
     server.start();
 
     final UserGroupInformation current = UserGroupInformation.getCurrentUser();

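RPC.getServer's long positional overloads are replaced by the RPC.Builder fluent API, which names each parameter and makes optional ones (such as the secret manager) explicit. A sketch of the shape used above; port 0 asks the OS for any free port, which callers then discover via NetUtils.getConnectAddress(server):

    final Server server = new RPC.Builder(conf)
        .setProtocol(ClientProtocol.class)   // the RPC interface served
        .setInstance(mockNN)                 // its implementation
        .setBindAddress(ADDRESS)
        .setPort(0)                          // 0 = any free port
        .setNumHandlers(5)                   // RPC handler threads
        .setVerbose(true)
        .setSecretManager(sm)                // enables token authentication
        .build();
    server.start();
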
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Tue Oct 16 00:02:55 2012
@@ -74,6 +74,7 @@ import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Assume;
+import org.junit.Before;
 import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -87,14 +88,6 @@ public class TestBlockToken {
   public static final Log LOG = LogFactory.getLog(TestBlockToken.class);
   private static final String ADDRESS = "0.0.0.0";
 
-  static final String SERVER_PRINCIPAL_KEY = "test.ipc.server.principal";
-  private static Configuration conf;
-  static {
-    conf = new Configuration();
-    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
-    UserGroupInformation.setConfiguration(conf);
-  }
-
   static {
     ((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
     ((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
@@ -111,6 +104,13 @@ public class TestBlockToken {
   ExtendedBlock block1 = new ExtendedBlock("0", 0L);
   ExtendedBlock block2 = new ExtendedBlock("10", 10L);
   ExtendedBlock block3 = new ExtendedBlock("-10", -108L);
+  
+  @Before
+  public void disableKerberos() {
+    Configuration conf = new Configuration();
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
+    UserGroupInformation.setConfiguration(conf);
+  }
 
   private static class GetLengthAnswer implements
       Answer<GetReplicaVisibleLengthResponseProto> {
@@ -215,8 +215,9 @@ public class TestBlockToken {
     tokenGenerationAndVerification(masterHandler, slaveHandler);
   }
 
-  private Server createMockDatanode(BlockTokenSecretManager sm,
-      Token<BlockTokenIdentifier> token) throws IOException, ServiceException {
+  private static Server createMockDatanode(BlockTokenSecretManager sm,
+      Token<BlockTokenIdentifier> token, Configuration conf)
+      throws IOException, ServiceException {
     ClientDatanodeProtocolPB mockDN = mock(ClientDatanodeProtocolPB.class);
 
     BlockTokenIdentifier id = sm.createIdentifier();
@@ -231,18 +232,23 @@ public class TestBlockToken {
         ProtobufRpcEngine.class);
     BlockingService service = ClientDatanodeProtocolService
         .newReflectiveBlockingService(mockDN);
-    return RPC.getServer(ClientDatanodeProtocolPB.class, service, ADDRESS, 0, 5,
-        true, conf, sm);
+    return new RPC.Builder(conf).setProtocol(ClientDatanodeProtocolPB.class)
+        .setInstance(service).setBindAddress(ADDRESS).setPort(0)
+        .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
   }
 
   @Test
   public void testBlockTokenRpc() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+    
     BlockTokenSecretManager sm = new BlockTokenSecretManager(
         blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
     Token<BlockTokenIdentifier> token = sm.generateToken(block3,
         EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
 
-    final Server server = createMockDatanode(sm, token);
+    final Server server = createMockDatanode(sm, token, conf);
 
     server.start();
 
@@ -271,13 +277,17 @@ public class TestBlockToken {
    */
   @Test
   public void testBlockTokenRpcLeak() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    UserGroupInformation.setConfiguration(conf);
+    
     Assume.assumeTrue(FD_DIR.exists());
     BlockTokenSecretManager sm = new BlockTokenSecretManager(
         blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
     Token<BlockTokenIdentifier> token = sm.generateToken(block3,
         EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
 
-    final Server server = createMockDatanode(sm, token);
+    final Server server = createMockDatanode(sm, token, conf);
     server.start();
 
     final InetSocketAddress addr = NetUtils.getConnectAddress(server);

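UserGroupInformation.setConfiguration() mutates process-global state, so the deleted static initializer that forced Kerberos leaked into every test in the class (and into any other tests sharing the JVM). The new @Before hook resets authentication before each test, and the two tests that need Kerberos now opt in locally; the pattern, annotated:

    @Before
    public void disableKerberos() {
      Configuration conf = new Configuration();
      conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
      UserGroupInformation.setConfiguration(conf); // reset global auth state
    }
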
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Tue Oct 16 00:02:55 2012
@@ -88,7 +88,7 @@ public class TestBalancer {
   /* create a file with a length of <code>fileLen</code> */
   static void createFile(MiniDFSCluster cluster, Path filePath, long fileLen,
       short replicationFactor, int nnIndex)
-  throws IOException {
+  throws IOException, InterruptedException, TimeoutException {
     FileSystem fs = cluster.getFileSystem(nnIndex);
     DFSTestUtil.createFile(fs, filePath, fileLen, 
         replicationFactor, r.nextLong());
@@ -100,7 +100,7 @@ public class TestBalancer {
    * whose used space to be <code>size</code>
    */
   private ExtendedBlock[] generateBlocks(Configuration conf, long size,
-      short numNodes) throws IOException {
+      short numNodes) throws IOException, InterruptedException, TimeoutException {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
     try {
       cluster.waitActive();

Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithEncryptedTransfer.java Tue Oct 16 00:02:55 2012
@@ -33,17 +33,17 @@ public class TestBalancerWithEncryptedTr
     conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
   }
   
-  @Test
+  @Test(timeout=60000)
   public void testEncryptedBalancer0() throws Exception {
     new TestBalancer().testBalancer0Internal(conf);
   }
   
-  @Test
+  @Test(timeout=60000)
   public void testEncryptedBalancer1() throws Exception {
     new TestBalancer().testBalancer1Internal(conf);
   }
   
-  @Test
+  @Test(timeout=60000)
   public void testEncryptedBalancer2() throws Exception {
     new TestBalancer().testBalancer2Internal(conf);
   }

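The timeout attribute makes JUnit 4 run the test body on a separate thread and fail it if it has not finished within the given number of milliseconds, so a wedged balancer can no longer hang the whole suite:

    @Test(timeout = 60000)  // fail, rather than hang, after 60 seconds
    public void testEncryptedBalancer0() throws Exception {
      new TestBalancer().testBalancer0Internal(conf);
    }
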
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java Tue Oct 16 00:02:55 2012
@@ -23,6 +23,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Random;
+import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -96,7 +97,7 @@ public class TestBalancerWithMultipleNam
 
   /* create a file with a length of <code>fileLen</code> */
   private static void createFile(Suite s, int index, long len
-      ) throws IOException {
+      ) throws IOException, InterruptedException, TimeoutException {
     final FileSystem fs = s.cluster.getFileSystem(index);
     DFSTestUtil.createFile(fs, FILE_PATH, len, s.replication, RANDOM.nextLong());
     DFSTestUtil.waitReplication(fs, FILE_PATH, s.replication);
@@ -106,7 +107,7 @@ public class TestBalancerWithMultipleNam
    * whose used space to be <code>size</code>
    */
   private static ExtendedBlock[][] generateBlocks(Suite s, long size
-      ) throws IOException {
+      ) throws IOException, InterruptedException, TimeoutException {
     final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
     for(int n = 0; n < s.clients.length; n++) {
       final long fileLen = size/s.replication;

Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Tue Oct 16 00:02:55 2012
@@ -379,7 +379,7 @@ public class TestBlockManager {
   
   private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
     BlockCollection bc = Mockito.mock(BlockCollection.class);
-    Mockito.doReturn((short)3).when(bc).getReplication();
+    Mockito.doReturn((short)3).when(bc).getBlockReplication();
     BlockInfo blockInfo = blockOnNodes(blockId, nodes);
 
     bm.blocksMap.addBlockCollection(blockInfo, bc);

Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Tue Oct 16 00:02:55 2012
@@ -53,7 +53,7 @@ public class TestOverReplicatedBlocks {
    * corrupt ones.
    */
   @Test
-  public void testProcesOverReplicateBlock() throws IOException {
+  public void testProcesOverReplicateBlock() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
     conf.set(
@@ -141,7 +141,7 @@ public class TestOverReplicatedBlocks {
    * send heartbeats. 
    */
   @Test
-  public void testChooseReplicaToDelete() throws IOException {
+  public void testChooseReplicaToDelete() throws Exception {
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
     try {

Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestRBWBlockInvalidation.java Tue Oct 16 00:02:55 2012
@@ -53,7 +53,7 @@ public class TestRBWBlockInvalidation {
    * datanode, namenode should ask to invalidate that corrupted block and
    * schedule replication for one more replica for that under replicated block.
    */
-  @Test
+  @Test(timeout=60000)
   public void testBlockInvalidationWhenRBWReplicaMissedInDN()
       throws IOException, InterruptedException {
     Configuration conf = new HdfsConfiguration();

Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Tue Oct 16 00:02:55 2012
@@ -38,9 +38,12 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
+import org.apache.hadoop.util.Time;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -55,6 +58,9 @@ public class TestReplicationPolicy {
   private static BlockPlacementPolicy replicator;
   private static final String filename = "/dummyfile.txt";
   private static DatanodeDescriptor dataNodes[];
+  // The interval for marking a datanode as stale.
+  private static long staleInterval = 
+      DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT;
 
   @Rule
   public ExpectedException exception = ExpectedException.none();
@@ -77,6 +83,8 @@ public class TestReplicationPolicy {
         "test.build.data", "build/test/data"), "dfs/");
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         new File(baseDir, "name").getPath());
+    // Enable stale-datanode checking from the start
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
 
     DFSTestUtil.formatNameNode(conf);
     namenode = new NameNode(conf);
@@ -111,30 +119,30 @@ public class TestReplicationPolicy {
         HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded
 
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
-    targets = replicator.chooseTarget(filename,
-                                      1, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[0]);
     
     targets = replicator.chooseTarget(filename,
-                                      2, dataNodes[0], BLOCK_SIZE);
+                                      2, dataNodes[0], new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
-    targets = replicator.chooseTarget(filename,
-                                      3, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
 
-    targets = replicator.chooseTarget(filename,
-                                     4, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[0]);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]) ||
@@ -229,7 +237,7 @@ public class TestReplicationPolicy {
     assertEquals(2, targets.length);
     //make sure that the chosen node is in the target.
     int i = 0;
-    for(; i < targets.length && !dataNodes[2].equals(targets[i]); i++);
+    for (; i < targets.length && !dataNodes[2].equals(targets[i]); i++);
     assertTrue(i < targets.length);
   }
 
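Every chooseTarget() call in this file gains a fourth argument: the list of datanodes already chosen to hold replicas of the block. Passing an empty ArrayList preserves the old single-shot behavior; re-replication scenarios (like the testcase further below where dataNodes[0] is already chosen) can pass the existing replicas so the policy spreads new targets relative to them. A hedged sketch of both call shapes, assuming the usual java.util imports; the second is an assumed usage, not copied from this commit:

    // Fresh block: nothing chosen yet.
    DatanodeDescriptor[] targets = replicator.chooseTarget(filename, 3,
        dataNodes[0], new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);

    // Re-replication (assumed usage): dataNodes[0] already holds a replica.
    List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
    chosenNodes.add(dataNodes[0]);
    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
        chosenNodes, BLOCK_SIZE);
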
@@ -249,30 +257,30 @@ public class TestReplicationPolicy {
         (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space
         
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
-    targets = replicator.chooseTarget(filename,
-                                      1, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[1]);
     
-    targets = replicator.chooseTarget(filename,
-                                      2, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[1]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
-    targets = replicator.chooseTarget(filename,
-                                      3, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[1]);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
-    targets = replicator.chooseTarget(filename,
-                                      4, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[1]);
     for(int i=1; i<4; i++) {
@@ -305,23 +313,23 @@ public class TestReplicationPolicy {
     }
       
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
-    targets = replicator.chooseTarget(filename,
-                                      1, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
     
-    targets = replicator.chooseTarget(filename,
-                                      2, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
-    targets = replicator.chooseTarget(filename,
-                                      3, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     for(int i=0; i<3; i++) {
       assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0]));
@@ -350,25 +358,221 @@ public class TestReplicationPolicy {
       DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");
 
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, writerDesc, BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, writerDesc,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
-    
-    targets = replicator.chooseTarget(filename,
-                                      1, writerDesc, BLOCK_SIZE);
+
+    targets = replicator.chooseTarget(filename, 1, writerDesc,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
-    
-    targets = replicator.chooseTarget(filename,
-                                      2, writerDesc, BLOCK_SIZE);
+
+    targets = replicator.chooseTarget(filename, 2, writerDesc,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-    
-    targets = replicator.chooseTarget(filename,
-                                      3, writerDesc, BLOCK_SIZE);
+
+    targets = replicator.chooseTarget(filename, 3, writerDesc,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));    
   }
+
+  private boolean containsWithinRange(DatanodeDescriptor target,
+      DatanodeDescriptor[] nodes, int startIndex, int endIndex) {
+    assert startIndex >= 0 && startIndex < nodes.length;
+    assert endIndex >= startIndex && endIndex < nodes.length;
+    for (int i = startIndex; i <= endIndex; i++) {
+      if (nodes[i].equals(target)) {
+        return true;
+      }
+    }
+    return false;
+  }
+  
+  @Test
+  public void testChooseTargetWithStaleNodes() throws Exception {
+    // Enable avoiding writes to stale datanodes
+    namenode.getNamesystem().getBlockManager().getDatanodeManager()
+        .setAvoidStaleDataNodesForWrite(true);
+    // Set dataNodes[0] as stale
+    dataNodes[0].setLastUpdate(Time.now() - staleInterval - 1);
+
+    DatanodeDescriptor[] targets;
+    // dataNodes[0] is stale, so chooseTarget should pick dataNodes[1],
+    // which is on the same rack as dataNodes[0] (the writer)
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+    assertEquals(targets.length, 1);
+    assertEquals(targets[0], dataNodes[1]);
+
+    HashMap<Node, Node> excludedNodes = new HashMap<Node, Node>();
+    excludedNodes.put(dataNodes[1], dataNodes[1]);
+    List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
+    BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault)replicator;
+    targets = chooseTarget(repl, 1, dataNodes[0], chosenNodes, excludedNodes,
+        BLOCK_SIZE);
+    assertEquals(targets.length, 1);
+    assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
+    
+    // reset
+    namenode.getNamesystem().getBlockManager().getDatanodeManager()
+        .setAvoidStaleDataNodesForWrite(false);
+    dataNodes[0].setLastUpdate(Time.now());
+  }
+
+  /**
+   * In this testcase, we mark 3 nodes (dataNodes[0] ~ dataNodes[2]) as stale.
+   * When the number of replicas is less than or equal to 3, chooseTarget
+   * should return only healthy datanodes; when the number of replicas is 4,
+   * a stale node has to be included.
+   * 
+   * @throws Exception
+   */
+  @Test
+  public void testChooseTargetWithHalfStaleNodes() throws Exception {
+    // Enable avoiding writes to stale datanodes
+    namenode.getNamesystem().getBlockManager().getDatanodeManager()
+        .setAvoidStaleDataNodesForWrite(true);
+    // Set dataNodes[0], dataNodes[1], and dataNodes[2] as stale
+    for (int i = 0; i < 3; i++) {
+      dataNodes[i].setLastUpdate(Time.now() - staleInterval - 1);
+    }
+
+    DatanodeDescriptor[] targets;
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+    assertEquals(targets.length, 0);
+
+    // dataNodes[0..2] are stale, so the chosen target must be a healthy node
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+    assertEquals(targets.length, 1);
+    assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
+
+    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+    assertEquals(targets.length, 2);
+    assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
+    assertFalse(containsWithinRange(targets[1], dataNodes, 0, 2));
+
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+    assertEquals(targets.length, 3);
+    assertTrue(containsWithinRange(targets[0], dataNodes, 3, 5));
+    assertTrue(containsWithinRange(targets[1], dataNodes, 3, 5));
+    assertTrue(containsWithinRange(targets[2], dataNodes, 3, 5));
+
+    targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+    assertEquals(targets.length, 4);
+    assertTrue(containsWithinRange(dataNodes[3], targets, 0, 3));
+    assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
+    assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));
+
+    // reset
+    namenode.getNamesystem().getBlockManager().getDatanodeManager()
+        .setAvoidStaleDataNodesForWrite(false);
+    for (int i = 0; i < dataNodes.length; i++) {
+      dataNodes[i].setLastUpdate(Time.now());
+    }
+  }
+
+  @Test
+  public void testChooseTargetWithMoreThanHalfStaleNodes() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
+    conf.setBoolean(
+        DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
+    String[] hosts = new String[]{"host1", "host2", "host3", 
+                                  "host4", "host5", "host6"};
+    String[] racks = new String[]{"/d1/r1", "/d1/r1", "/d1/r2", 
+                                  "/d1/r2", "/d2/r3", "/d2/r3"};
+    MiniDFSCluster miniCluster = new MiniDFSCluster.Builder(conf).racks(racks)
+        .hosts(hosts).numDataNodes(hosts.length).build();
+    miniCluster.waitActive();
+    
+    try {
+      // Step 1. Mark two datanodes as stale and check that the
+      // avoidStaleDataNodesForWrite calculation is correct.
+      // First stop the heartbeats of host1 and host2.
+      for (int i = 0; i < 2; i++) {
+        DataNode dn = miniCluster.getDataNodes().get(i);
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+        miniCluster.getNameNode().getNamesystem().getBlockManager()
+            .getDatanodeManager().getDatanode(dn.getDatanodeId())
+            .setLastUpdate(Time.now() - staleInterval - 1);
+      }
+      // Instead of waiting, explicitly call heartbeatCheck so the
+      // heartbeat manager detects the stale nodes immediately
+      miniCluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
+      int numStaleNodes = miniCluster.getNameNode().getNamesystem()
+          .getBlockManager().getDatanodeManager().getNumStaleNodes();
+      assertEquals(numStaleNodes, 2);
+      assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().isAvoidingStaleDataNodesForWrite());
+      // Call chooseTarget
+      DatanodeDescriptor staleNodeInfo = miniCluster.getNameNode()
+          .getNamesystem().getBlockManager().getDatanodeManager()
+          .getDatanode(miniCluster.getDataNodes().get(0).getDatanodeId());
+      BlockPlacementPolicy replicator = miniCluster.getNameNode()
+          .getNamesystem().getBlockManager().getBlockPlacementPolicy();
+      DatanodeDescriptor[] targets = replicator.chooseTarget(filename, 3,
+          staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+      assertEquals(targets.length, 3);
+      assertFalse(cluster.isOnSameRack(targets[0], staleNodeInfo));
+      
+      // Step 2. Set more than half of the datanodes as stale
+      for (int i = 0; i < 4; i++) {
+        DataNode dn = miniCluster.getDataNodes().get(i);
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+        miniCluster.getNameNode().getNamesystem().getBlockManager()
+            .getDatanodeManager().getDatanode(dn.getDatanodeId())
+            .setLastUpdate(Time.now() - staleInterval - 1);
+      }
+      // Explicitly call heartbeatCheck
+      miniCluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
+      numStaleNodes = miniCluster.getNameNode().getNamesystem()
+          .getBlockManager().getDatanodeManager().getNumStaleNodes();
+      assertEquals(numStaleNodes, 4);
+      // According to our strategy, stale datanodes will be included for writing
+      // to avoid hotspots
+      assertFalse(miniCluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().isAvoidingStaleDataNodesForWrite());     
+      // Call chooseTarget
+      targets = replicator.chooseTarget(filename, 3,
+          staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+      assertEquals(targets.length, 3);
+      assertTrue(cluster.isOnSameRack(targets[0], staleNodeInfo));
+      
+      // Step 3. Restore 2 of the stale datanodes to healthy,
+      // leaving 2 stale nodes in the cluster
+      for (int i = 2; i < 4; i++) {
+        DataNode dn = miniCluster.getDataNodes().get(i);
+        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
+        miniCluster.getNameNode().getNamesystem().getBlockManager()
+            .getDatanodeManager().getDatanode(dn.getDatanodeId())
+            .setLastUpdate(Time.now());
+      }
+      // Explicitly call heartbeatCheck
+      miniCluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
+      numStaleNodes = miniCluster.getNameNode().getNamesystem()
+          .getBlockManager().getDatanodeManager().getNumStaleNodes();
+      assertEquals(numStaleNodes, 2);
+      assertTrue(miniCluster.getNameNode().getNamesystem().getBlockManager()
+          .getDatanodeManager().isAvoidingStaleDataNodesForWrite());
+      // Call chooseTarget
+      targets = replicator.chooseTarget(filename, 3,
+          staleNodeInfo, new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
+      assertEquals(targets.length, 3);
+      assertFalse(cluster.isOnSameRack(targets[0], staleNodeInfo));
+    } finally {
+      miniCluster.shutdown();
+    }
+  }
   
   /**
   * This testcase tests re-replication when dataNodes[0] is already chosen.
@@ -490,8 +694,8 @@ public class TestReplicationPolicy {
         .format(true).build();
     try {
       cluster.waitActive();
-      final UnderReplicatedBlocks neededReplications = (UnderReplicatedBlocks) cluster
-          .getNameNode().getNamesystem().getBlockManager().neededReplications;
+      final UnderReplicatedBlocks neededReplications = cluster.getNameNode()
+          .getNamesystem().getBlockManager().neededReplications;
       for (int i = 0; i < 100; i++) {
         // Adding the blocks directly to normal priority
         neededReplications.add(new Block(random.nextLong()), 2, 0, 3);
@@ -529,10 +733,10 @@ public class TestReplicationPolicy {
       // Adding QUEUE_VERY_UNDER_REPLICATED block
       underReplicatedBlocks.add(new Block(random.nextLong()), 2, 0, 7);
 
-      // Adding QUEUE_UNDER_REPLICATED block
+      // Adding QUEUE_REPLICAS_BADLY_DISTRIBUTED block
       underReplicatedBlocks.add(new Block(random.nextLong()), 6, 0, 6);
 
-      // Adding QUEUE_REPLICAS_BADLY_DISTRIBUTED block
+      // Adding QUEUE_UNDER_REPLICATED block
       underReplicatedBlocks.add(new Block(random.nextLong()), 5, 0, 6);
 
       // Adding QUEUE_WITH_CORRUPT_BLOCKS block
@@ -618,6 +822,11 @@ public class TestReplicationPolicy {
     dataNodes[5].setRemaining(1*1024*1024);
     replicaNodeList.add(dataNodes[5]);
     
+    // Refresh the last update time for all the datanodes
+    for (int i = 0; i < dataNodes.length; i++) {
+      dataNodes[i].setLastUpdate(Time.now());
+    }
+    
     List<DatanodeDescriptor> first = new ArrayList<DatanodeDescriptor>();
     List<DatanodeDescriptor> second = new ArrayList<DatanodeDescriptor>();
     replicator.splitNodesWithRack(

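The stale-node steps above repeat one pattern: freeze a datanode's
heartbeats, backdate its last-update timestamp past the stale interval,
then force a heartbeat check so the namenode reclassifies the node at
once. A minimal sketch of that pattern as a helper, assuming the imports
the test already uses (the helper name markDataNodeStale is hypothetical;
the individual calls are the ones the test makes):

    private static void markDataNodeStale(MiniDFSCluster miniCluster,
        int dnIndex, long staleInterval) throws IOException {
      DataNode dn = miniCluster.getDataNodes().get(dnIndex);
      // Freeze real heartbeats so the backdated timestamp is not overwritten
      DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
      DatanodeManager dm = miniCluster.getNameNode().getNamesystem()
          .getBlockManager().getDatanodeManager();
      // Backdate the last heartbeat to just past the stale threshold
      dm.getDatanode(dn.getDatanodeId())
          .setLastUpdate(Time.now() - staleInterval - 1);
      // Reclassify immediately instead of waiting for the heartbeat monitor
      dm.getHeartbeatManager().heartbeatCheck();
    }
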
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java Tue Oct 16 00:02:55 2012
@@ -114,31 +114,31 @@ public class TestReplicationPolicyWithNo
         HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded
 
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
 
-    targets = replicator.chooseTarget(filename,
-                                      1, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[0]);
 
-    targets = replicator.chooseTarget(filename,
-                                      2, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
 
-    targets = replicator.chooseTarget(filename,
-                                      3, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameNodeGroup(targets[1], targets[2]));
 
-    targets = replicator.chooseTarget(filename,
-                                      4, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[0]);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]) ||
@@ -220,30 +220,30 @@ public class TestReplicationPolicyWithNo
         (HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space
 
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
 
-    targets = replicator.chooseTarget(filename,
-                                      1, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[1]);
 
-    targets = replicator.chooseTarget(filename,
-                                      2, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[1]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
 
-    targets = replicator.chooseTarget(filename,
-                                      3, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[1]);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
 
-    targets = replicator.chooseTarget(filename,
-                                      4, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[1]);
     assertTrue(cluster.isNodeGroupAware());
@@ -275,23 +275,23 @@ public class TestReplicationPolicyWithNo
     }
 
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
 
-    targets = replicator.chooseTarget(filename,
-                                      1, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
 
-    targets = replicator.chooseTarget(filename,
-                                      2, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
 
-    targets = replicator.chooseTarget(filename,
-                                      3, dataNodes[0], BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     for(int i=0; i<3; i++) {
       assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0]));
@@ -313,21 +313,21 @@ public class TestReplicationPolicyWithNo
   public void testChooseTarget5() throws Exception {
     setupDataNodeCapacity();
     DatanodeDescriptor[] targets;
-    targets = replicator.chooseTarget(filename,
-                                      0, NODE, BLOCK_SIZE);
+    targets = replicator.chooseTarget(filename, 0, NODE,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 0);
-    
-    targets = replicator.chooseTarget(filename,
-                                      1, NODE, BLOCK_SIZE);
+
+    targets = replicator.chooseTarget(filename, 1, NODE,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 1);
-    
-    targets = replicator.chooseTarget(filename,
-                                      2, NODE, BLOCK_SIZE);
+
+    targets = replicator.chooseTarget(filename, 2, NODE,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-    
-    targets = replicator.chooseTarget(filename,
-                                      3, NODE, BLOCK_SIZE);
+
+    targets = replicator.chooseTarget(filename, 3, NODE,
+        new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));

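Most of the churn in this file is mechanical: every chooseTarget call
gains an explicit (empty) chosen-nodes list. A hypothetical wrapper would
keep the call sites as short as they were before the signature change:

    private DatanodeDescriptor[] chooseTarget(int numOfReplicas,
        DatanodeDescriptor writer) {
      // Fixes the empty chosen-nodes list and BLOCK_SIZE once, so a call
      // site shrinks back to chooseTarget(3, dataNodes[0])
      return replicator.chooseTarget(filename, numOfReplicas, writer,
          new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
    }
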
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestJspHelper.java Tue Oct 16 00:02:55 2012
@@ -22,6 +22,7 @@ import static org.mockito.Mockito.when;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
 
 import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
@@ -29,7 +30,9 @@ import javax.servlet.http.HttpServletReq
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
 import org.apache.hadoop.hdfs.web.resources.UserParam;
@@ -399,4 +402,43 @@ public class TestJspHelper {
                           ugi.getAuthenticationMethod());
     }
   }
+
+  @Test
+  public void testSortNodeByFields() throws Exception {
+    DatanodeID dnId1 = new DatanodeID("127.0.0.1", "localhost1", "storage1",
+        1234, 2345, 3456);
+    DatanodeID dnId2 = new DatanodeID("127.0.0.2", "localhost2", "storage2",
+        1235, 2346, 3457);
+    DatanodeDescriptor dnDesc1 = new DatanodeDescriptor(dnId1, "rack1", 1024,
+        100, 924, 100, 10, 2);
+    DatanodeDescriptor dnDesc2 = new DatanodeDescriptor(dnId2, "rack2", 2500,
+        200, 1848, 200, 20, 1);
+    ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+    live.add(dnDesc1);
+    live.add(dnDesc2);
+
+    // Test sorting by failed volumes
+    JspHelper.sortNodeList(live, "volfails", "ASC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));
+    JspHelper.sortNodeList(live, "volfails", "DSC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+
+    // Test sorting by Blockpool used
+    JspHelper.sortNodeList(live, "bpused", "ASC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+    JspHelper.sortNodeList(live, "bpused", "DSC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));
+
+    // Test sorting by Percentage Blockpool used
+    JspHelper.sortNodeList(live, "pcbpused", "ASC");
+    Assert.assertEquals(dnDesc2, live.get(0));
+    Assert.assertEquals(dnDesc1, live.get(1));
+    JspHelper.sortNodeList(live, "pcbpused", "DSC");
+    Assert.assertEquals(dnDesc1, live.get(0));
+    Assert.assertEquals(dnDesc2, live.get(1));
+  }
 }

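testSortNodeByFields drives JspHelper.sortNodeList(live, field, order),
where order is "ASC" or "DSC", and checks each field in both orders
against reversed expectations. A sketch of a helper capturing that
symmetry (assertSortOrder is a hypothetical name):

    private static void assertSortOrder(ArrayList<DatanodeDescriptor> live,
        String field, DatanodeDescriptor smaller, DatanodeDescriptor larger) {
      // Ascending puts the smaller value first ...
      JspHelper.sortNodeList(live, field, "ASC");
      Assert.assertEquals(smaller, live.get(0));
      Assert.assertEquals(larger, live.get(1));
      // ... and "DSC" must yield exactly the reverse
      JspHelper.sortNodeList(live, field, "DSC");
      Assert.assertEquals(larger, live.get(0));
      Assert.assertEquals(smaller, live.get(1));
    }

With that, the failed-volumes case reduces to
assertSortOrder(live, "volfails", dnDesc2, dnDesc1).
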
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java Tue Oct 16 00:02:55 2012
@@ -114,6 +114,12 @@ public class DataNodeTestUtils {  
         dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
   }
   
+  public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
+    DataBlockScanner scanner = dn.getBlockScanner();
+    BlockPoolSliceScanner bpScanner = scanner.getBPScanner(b.getBlockPoolId());
+    bpScanner.verifyBlock(b);
+  }
+  
   public static void shutdownBlockScanner(DataNode dn) {
     if (dn.blockScanner != null) {
       dn.blockScanner.shutdown();

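A usage sketch for the new runBlockScannerForBlock helper, assuming
DFSTestUtil.getFirstBlock(...) is available on this branch to resolve a
file's first block:

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
    DataNode dn = cluster.getDataNodes().get(0);
    // Verify just this block synchronously instead of waiting for the
    // periodic scanner pass
    DataNodeTestUtils.runBlockScannerForBlock(dn, block);
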
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Tue Oct 16 00:02:55 2012
@@ -89,7 +89,7 @@ public class TestBlockReplacement {
   }
   
   @Test
-  public void testBlockReplacement() throws IOException, TimeoutException {
+  public void testBlockReplacement() throws Exception {
     final Configuration CONF = new HdfsConfiguration();
     final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
     final String[] NEW_RACKS = {"/RACK2"};

Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Tue Oct 16 00:02:55 2012
@@ -27,6 +27,9 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeoutException;
+
+import junit.framework.Assert;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -65,7 +68,7 @@ import org.mockito.invocation.Invocation
 
 /**
  * This test simulates a variety of situations when blocks are being
- * intentionally orrupted, unexpectedly modified, and so on before a block
+ * intentionally corrupted, unexpectedly modified, and so on before a block
  * report is happening
  */
 public class TestBlockReport {
@@ -316,7 +319,7 @@ public class TestBlockReport {
    * @throws IOException in case of an error
    */
   @Test
-  public void blockReport_06() throws IOException {
+  public void blockReport_06() throws Exception {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     final int DN_N1 = DN_N0 + 1;
@@ -353,7 +356,7 @@ public class TestBlockReport {
   @Test
   // Currently this test is failing as expected 'cause the correct behavior is
   // not yet implemented (9/15/09)
-  public void blockReport_07() throws IOException {
+  public void blockReport_07() throws Exception {
     final String METHOD_NAME = GenericTestUtils.getMethodName();
     Path filePath = new Path("/" + METHOD_NAME + ".dat");
     final int DN_N1 = DN_N0 + 1;
@@ -670,21 +673,24 @@ public class TestBlockReport {
   }
 
   private void startDNandWait(Path filePath, boolean waitReplicas) 
-    throws IOException {
-    if(LOG.isDebugEnabled()) {
+      throws IOException, InterruptedException, TimeoutException {
+    if (LOG.isDebugEnabled()) {
       LOG.debug("Before next DN start: " + cluster.getDataNodes().size());
     }
     cluster.startDataNodes(conf, 1, true, null, null);
+    cluster.waitClusterUp();
     ArrayList<DataNode> datanodes = cluster.getDataNodes();
     assertEquals(datanodes.size(), 2);
 
-    if(LOG.isDebugEnabled()) {
+    if (LOG.isDebugEnabled()) {
       int lastDn = datanodes.size() - 1;
       LOG.debug("New datanode "
           + cluster.getDataNodes().get(lastDn).getDisplayName() 
           + " has been started");
     }
-    if (waitReplicas) DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
+    if (waitReplicas) {
+      DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
+    }
   }
 
   private ArrayList<Block> prepareForRide(final Path filePath,
@@ -836,8 +842,9 @@ public class TestBlockReport {
     public void run() {
       try {
         startDNandWait(filePath, true);
-      } catch (IOException e) {
-        LOG.warn("Shouldn't happen", e);
+      } catch (Exception e) {
+        e.printStackTrace();
+        Assert.fail("Failed to start BlockChecker: " + e);
       }
     }
   }

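One caveat with the new catch block: an Assert.fail(...) raised in a
spawned thread only terminates that thread; JUnit does not see the
AssertionError unless the main test thread observes it. A common
alternative (a sketch, not what this patch does) records the failure and
rethrows it where JUnit can see it; it assumes
java.util.concurrent.atomic.AtomicReference is imported and runs in a test
method that declares throws Exception:

    final AtomicReference<Exception> failure =
        new AtomicReference<Exception>();
    Thread checker = new Thread() {
      @Override
      public void run() {
        try {
          startDNandWait(filePath, true);
        } catch (Exception e) {
          failure.set(e);  // remember the failure instead of failing here
        }
      }
    };
    checker.start();
    checker.join();
    if (failure.get() != null) {
      Assert.fail("BlockChecker failed: " + failure.get());
    }
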
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java Tue Oct 16 00:02:55 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNT
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -216,48 +217,62 @@ public class TestDataNodeMultipleRegistr
       LOG.info("dn bpos len (still should be 3):" + bposs.length);
       Assert.assertEquals("should've registered with three namenodes", 3, bposs.length);
     } finally {
-      if(cluster != null) 
-        cluster.shutdown();
+      cluster.shutdown();
     }
   }
 
   @Test
   public void testMiniDFSClusterWithMultipleNN() throws IOException {
-
     Configuration conf = new HdfsConfiguration();
     // start Federated cluster and add a node.
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(2))
       .build();
-    Assert.assertNotNull(cluster);
-    Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes());
     
     // add a node
-    cluster.addNameNode(conf, 0);
-    Assert.assertEquals("(1)Should be 3 namenodes", 3, cluster.getNumNameNodes());
-    cluster.shutdown();
+    try {
+      Assert.assertNotNull(cluster);
+      cluster.waitActive();
+      Assert.assertEquals("(1)Should be 2 namenodes", 2, cluster.getNumNameNodes());
+
+      cluster.addNameNode(conf, 0);
+      Assert.assertEquals("(1)Should be 3 namenodes", 3, cluster.getNumNameNodes());
+    } catch (IOException ioe) {
+      Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
+    } finally {
+      cluster.shutdown();
+    }
         
     // 2. start with Federation flag set
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(1))
       .build();
-    Assert.assertNotNull(cluster);
-    Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
     
-    // add a node
-    cluster.addNameNode(conf, 0);
-    Assert.assertEquals("(2)Should be 2 namenodes", 2, cluster.getNumNameNodes());
-    cluster.shutdown();
+    try {
+      Assert.assertNotNull(cluster);
+      cluster.waitActive();
+      Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
+    
+      // add a node
+      cluster.addNameNode(conf, 0);
+      Assert.assertEquals("(2)Should be 2 namenodes", 2, cluster.getNumNameNodes());
+    } catch (IOException ioe) {
+      Assert.fail("Failed to add NN to cluster:" + StringUtils.stringifyException(ioe));
+    } finally {
+      cluster.shutdown();
+    }
 
     // 3. start non-federated
     conf = new HdfsConfiguration();
     cluster = new MiniDFSCluster.Builder(conf).build();
-    Assert.assertNotNull(cluster);
-    Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
     
     // add a node
     try {
+      cluster.waitActive();
+      Assert.assertNotNull(cluster);
+      Assert.assertEquals("(2)Should be 1 namenodes", 1, cluster.getNumNameNodes());
+
       cluster.addNameNode(conf, 9929);
       Assert.fail("shouldn't be able to add another NN to non federated cluster");
     } catch (IOException e) {
@@ -268,6 +283,4 @@ public class TestDataNodeMultipleRegistr
       cluster.shutdown();
     }
   }
-      
-
 }

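Every sub-case in this test now follows the same lifecycle the patch
enforces elsewhere: build the cluster, wait for it inside try, shut it
down in finally so ports and storage directories are released even when an
assertion throws. The skeleton, reduced to its essentials:

    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      cluster.waitActive();   // block until NNs and DNs have registered
      // ... assertions against the running cluster ...
    } finally {
      cluster.shutdown();     // always runs, pass or fail
    }
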
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Tue Oct 16 00:02:55 2012
@@ -105,7 +105,7 @@ public class TestDataNodeVolumeFailure {
    * failure if the configuration parameter allows this.
    */
   @Test
-  public void testVolumeFailure() throws IOException {
+  public void testVolumeFailure() throws Exception {
     FileSystem fs = cluster.getFileSystem();
     dataDir = new File(cluster.getDataDirectory());
     System.out.println("Data dir: is " +  dataDir.getPath());

Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java Tue Oct 16 00:02:55 2012
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs.server.da
 
 import java.io.IOException;
 
+import junit.framework.Assert;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -31,7 +34,13 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner;
+import static org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.SLEEP_PERIOD_MS;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
 import org.junit.Test;
+import org.junit.Ignore;
+import static org.junit.Assert.fail;
 
 
 public class TestMultipleNNDataBlockScanner {
@@ -166,4 +175,75 @@ public class TestMultipleNNDataBlockScan
       cluster.shutdown();
     }
   }
+  
+  @Test
+  public void test2NNBlockRescanInterval() throws IOException {
+    ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
+    Configuration conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
+        .build();
+
+    try {
+      FileSystem fs = cluster.getFileSystem(1);
+      Path file2 = new Path("/test/testBlockScanInterval");
+      DFSTestUtil.createFile(fs, file2, 30, (short) 1, 0);
+
+      fs = cluster.getFileSystem(0);
+      Path file1 = new Path("/test/testBlockScanInterval");
+      DFSTestUtil.createFile(fs, file1, 30, (short) 1, 0);
+      for (int i = 0; i < 8; i++) {
+        LOG.info("Verifying that the blockscanner scans exactly once");
+        waitAndScanBlocks(1, 1);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * HDFS-3828: DN rescans blocks too frequently
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testBlockRescanInterval() throws IOException {
+    ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
+    Configuration conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf).build();
+
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      Path file1 = new Path("/test/testBlockScanInterval");
+      DFSTestUtil.createFile(fs, file1, 30, (short) 1, 0);
+      for (int i = 0; i < 4; i++) {
+        LOG.info("Verifying that the blockscanner scans exactly once");
+        waitAndScanBlocks(1, 1);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  void waitAndScanBlocks(long scansLastRun, long scansTotal)
+      throws IOException {
+    // DataBlockScanner runs every 5 seconds, so poll at that interval
+    // (up to 5 attempts)
+    int n = 5;
+    String bpid = cluster.getNamesystem(0).getBlockPoolId();
+    DataNode dn = cluster.getDataNodes().get(0);
+    long blocksScanned, total;
+    do {
+      try {
+        Thread.sleep(SLEEP_PERIOD_MS);
+      } catch (InterruptedException e) {
+        fail("Interrupted: " + e);
+      }
+      blocksScanned = dn.blockScanner.getBlocksScannedInLastRun(bpid);
+      total = dn.blockScanner.getTotalScans(bpid);
+      LOG.info("bpid = " + bpid + " blocksScanned = " + blocksScanned + " total=" + total);
+    } while (n-- > 0 && (blocksScanned != scansLastRun || scansTotal != total));
+    Assert.assertEquals(scansTotal, total);
+    Assert.assertEquals(scansLastRun, blocksScanned);
+  }
 }

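The polling loop in waitAndScanBlocks could also be expressed with
GenericTestUtils.waitFor, a hedged alternative assuming
org.apache.hadoop.test.GenericTestUtils and Guava's Supplier are available
on this branch (dn, bpid, and the expected counts would need to be final):

    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        // done once both counters match the expectation
        return dn.blockScanner.getBlocksScannedInLastRun(bpid) == scansLastRun
            && dn.blockScanner.getTotalScans(bpid) == scansTotal;
      }
    }, (int) SLEEP_PERIOD_MS, 5 * (int) SLEEP_PERIOD_MS);
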
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java Tue Oct 16 00:02:55 2012
@@ -137,7 +137,7 @@ public class TestDatanodeRestart {
   }
 
   // test recovering unlinked tmp replicas
-  @Test public void testRecoverReplicas() throws IOException {
+  @Test public void testRecoverReplicas() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);

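The tiny sizes configured at the top of this test are what keep it cheap:
with DFS_BLOCK_SIZE_KEY at 1024 bytes and 512-byte client write packets, a
file of a few kilobytes already spans several blocks, each written as
multiple packets. A sketch of writing such a multi-block file under those
settings (the path name is illustrative):

    FileSystem fs = cluster.getFileSystem();
    FSDataOutputStream out = fs.create(new Path("/testRecover.dat"));
    out.write(new byte[3 * 1024]);  // 3 KB = 3 blocks of 1024 bytes
    out.close();
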
Modified: hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1398581&r1=1398580&r2=1398581&view=diff
==============================================================================
--- hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/MR-3902/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Tue Oct 16 00:02:55 2012
@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.spy;
+
 import java.io.File;
 import java.io.IOException;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -61,7 +66,7 @@ public class NameNodeAdapter {
   
   public static HdfsFileStatus getFileInfo(NameNode namenode, String src,
       boolean resolveLink) throws AccessControlException, UnresolvedLinkException,
-        StandbyException {
+        StandbyException, IOException {
     return namenode.getNamesystem().getFileInfo(src, resolveLink);
   }
   
@@ -182,6 +187,15 @@ public class NameNodeAdapter {
     }
   }
   
+  public static FSEditLogOp createMkdirOp(String path) {
+    MkdirOp op = MkdirOp.getInstance(new FSEditLogOp.OpInstanceCache())
+      .setPath(path)
+      .setTimestamp(0)
+      .setPermissionStatus(new PermissionStatus(
+              "testuser", "testgroup", FsPermission.getDefault()));
+    return op;
+  }
+  
   /**
    * @return the number of blocks marked safe by safemode, or -1
    * if safemode is not running.