Posted to hdfs-commits@hadoop.apache.org by vi...@apache.org on 2013/10/30 23:22:22 UTC

svn commit: r1537330 [10/11] - in /hadoop/common/branches/YARN-321/hadoop-hdfs-project: ./ hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/server/ hadoop-hdfs-nfs/ hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/ hadoop-hdfs-nfs/s...

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java Wed Oct 30 22:21:59 2013
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
 import org.junit.Before;
 import org.junit.Test;
@@ -70,8 +71,7 @@ public class TestBPOfferService {
       TestBPOfferService.class);
   private static final ExtendedBlock FAKE_BLOCK =
     new ExtendedBlock(FAKE_BPID, 12345L);
-  private static final String TEST_BUILD_DATA = System.getProperty(
-    "test.build.data", "build/test/data");
+  private static final File TEST_BUILD_DATA = PathUtils.getTestDir(TestBPOfferService.class);
 
   static {
     ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
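
A pattern worth calling out, since it repeats across most files in this commit: ad-hoc lookups of the "test.build.data" system property are replaced by org.apache.hadoop.test.PathUtils, which hands each test class its own directory. A minimal sketch of the idea, assuming the helper keys the directory off the calling class (the real implementation may differ in detail):

    import java.io.File;

    // Sketch only: approximates PathUtils as used in this commit. The point
    // is that every test class gets a distinct directory under the shared
    // test data root, so concurrent or leftover test state cannot collide.
    public final class PathUtilsSketch {
      private PathUtilsSketch() {}

      public static File getTestDir(Class<?> caller) {
        String base = System.getProperty("test.build.data", "build/test/data");
        return new File(base, caller.getSimpleName());
      }

      public static String getTestDirName(Class<?> caller) {
        return getTestDir(caller).getAbsolutePath();
      }
    }

Both forms appear below: getTestDir (TestBPOfferService, TestCheckpoint, TestEditLog) and getTestDirName (TestAuditLogs, TestClusterId).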

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDatanodeJsp.java Wed Oct 30 22:21:59 2013
@@ -61,9 +61,10 @@ public class TestDatanodeJsp {
     
     InetSocketAddress nnIpcAddress = cluster.getNameNode().getNameNodeAddress();
     InetSocketAddress nnHttpAddress = cluster.getNameNode().getHttpAddress();
-    int dnInfoPort = cluster.getDataNodes().get(0).getInfoPort();
-    
-    URL url = new URL("http://localhost:" + dnInfoPort + "/"
+    String base = JspHelper.Url.url("http", cluster.getDataNodes().get(0)
+        .getDatanodeId());
+
+    URL url = new URL(base + "/"
         + "browseDirectory.jsp" + JspHelper.getUrlParam("dir", 
             URLEncoder.encode(testPath.toString(), "UTF-8"), true)
         + JspHelper.getUrlParam("namenodeInfoPort", Integer
@@ -86,18 +87,22 @@ public class TestDatanodeJsp {
     // check whether able to 'Go Back to File View' after tailing the file
     regex = "<a.+href=\"(.+?)\">Go\\s*Back\\s*to\\s*File\\s*View\\<\\/a\\>";
     assertFileContents(regex, "Go Back to File View");
+
+    regex = "<a href=\"///" + nnHttpAddress.getHostName() + ":" +
+      nnHttpAddress.getPort() + "/dfshealth.jsp\">Go back to DFS home</a>";
+    assertTrue("page should generate DFS home scheme without explicit scheme", viewFilePage.contains(regex));
   }
   
   private static void assertFileContents(String regex, String text)
       throws IOException {
     Pattern compile = Pattern.compile(regex);
     Matcher matcher = compile.matcher(viewFilePage);
-    URL hyperlink = null;
     if (matcher.find()) {
       // got hyperlink for Tail this file
-      hyperlink = new URL(matcher.group(1));
+      String u = matcher.group(1);
+      String urlString = u.startsWith("///") ? ("http://" + u.substring(3)) : u;
       viewFilePage = StringEscapeUtils.unescapeHtml(DFSTestUtil
-          .urlGet(hyperlink));
+          .urlGet(new URL(urlString)));
       assertTrue("page should show preview of file contents", viewFilePage
           .contains(FILE_DATA));
     } else {
@@ -166,6 +171,7 @@ public class TestDatanodeJsp {
     Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF)))
         .when(reqMock).getParameter("nnaddr");
     Mockito.doReturn(testFile.toString()).when(reqMock).getPathInfo();
+    Mockito.doReturn("http").when(reqMock).getScheme();
   }
 
   static Path writeFile(FileSystem fs, Path f) throws IOException {
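
Two related changes in this file: the test no longer hard-codes "http://localhost:" plus the info port, deriving the base URL instead from the request scheme and the DatanodeID via JspHelper.Url.url; and because the JSPs may now emit scheme-relative links of the form "///host:port/path", assertFileContents restores a scheme before fetching. The normalization step, isolated as a hedged sketch:

    import java.net.MalformedURLException;
    import java.net.URL;

    final class UrlSketch {
      // Restore a scheme on a scheme-relative href ("///host:port/path");
      // absolute hrefs pass through untouched. Assumes the caller knows the
      // scheme the page was served over (http in this test).
      static URL resolveSchemeRelative(String href, String scheme)
          throws MalformedURLException {
        return new URL(href.startsWith("///")
            ? scheme + "://" + href.substring(3)
            : href);
      }
    }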

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Wed Oct 30 22:21:59 2013
@@ -17,14 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.EnumSet;
-import java.util.List;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -33,40 +25,28 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.*;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
-import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
-import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
-import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
-import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.server.protocol.*;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.security.Groups;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.util.*;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.List;
+
 /**
  * Main class for a series of name-node benchmarks.
  * 
@@ -839,6 +819,7 @@ public class NNThroughputBenchmark imple
               DNS.getDefaultHost("default", "default"),
               "", getNodePort(dnIdx),
               DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+              DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
               DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
           new DataStorage(nsInfo, ""),
           new ExportedBlockKeys(), VersionInfo.getVersion());
@@ -1305,7 +1286,7 @@ public class NNThroughputBenchmark imple
 
   /**
    * Main method of the benchmark.
-   * @param args command line parameters
+   * @param aArgs command line parameters
    */
   @Override // Tool
   public int run(String[] aArgs) throws Exception {
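
Across this commit, DatanodeID grows one constructor argument: an HTTPS info port next to the existing HTTP info port (the same change shows up below in TestCommitBlockSynchronization, where a literal 0 is appended). A hedged sketch of the new call shape as this benchmark uses it; the parameter roles are inferred from the surrounding diff, not from the authoritative signature:

    // Inferred parameter order: host, hostname, xfer port, http info port,
    // https info port (new), ipc port.
    DatanodeID dnId = new DatanodeID(
        DNS.getDefaultHost("default", "default"),
        "", getNodePort(dnIdx),
        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,   // the added argument
        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);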

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java Wed Oct 30 22:21:59 2013
@@ -20,12 +20,13 @@ package org.apache.hadoop.hdfs.server.na
 
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.doAnswer;
 import static org.mockito.Mockito.spy;
 
 import java.lang.reflect.Field;
 import java.util.EnumSet;
-import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -119,7 +120,7 @@ public class TestAddBlockRetry {
         return ret;
       }
     }).when(spyBM).chooseTarget(Mockito.anyString(), Mockito.anyInt(),
-        Mockito.<DatanodeDescriptor>any(), Mockito.<HashMap<Node, Node>>any(),
+        Mockito.<DatanodeDescriptor>any(), Mockito.<HashSet<Node>>any(),
         Mockito.anyLong(), Mockito.<List<String>>any());
 
     // create file
@@ -139,4 +140,33 @@ public class TestAddBlockRetry {
     assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
     assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
   }
+
+  /*
+   * Since NameNode will not persist any locations of the block, addBlock()
+   * retry call after restart NN should re-select the locations and return to
+   * client. refer HDFS-5257
+   */
+  @Test
+  public void testAddBlockRetryShouldReturnBlockWithLocations()
+      throws Exception {
+    final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
+    NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
+    // create file
+    nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
+        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
+        (short) 3, 1024);
+    // start first addBlock()
+    LOG.info("Starting first addBlock for " + src);
+    LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
+        INodeId.GRANDFATHER_INODE_ID, null);
+    assertTrue("Block locations should be present",
+        lb1.getLocations().length > 0);
+
+    cluster.restartNameNode();
+    nameNodeRpc = cluster.getNameNodeRpc();
+    LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
+        INodeId.GRANDFATHER_INODE_ID, null);
+    assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
+    assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
+  }
 }
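
The matcher change at the top of this diff tracks an API change: the excluded-nodes argument of chooseTarget moved from Map<Node, Node>, keyed and valued by the same node, to Set<Node>. The Mockito stub has to follow, because a matcher for the old type no longer matches the call and the spy would silently fall through to the real method. The representational point, kept self-contained with String standing in for Node:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;

    public class ExcludedNodesSketch {
      public static void main(String[] args) {
        Map<String, String> excludedOld = new HashMap<String, String>();
        excludedOld.put("dn1", "dn1");        // key == value, by convention
        Set<String> excludedNew = new HashSet<String>();
        excludedNew.add("dn1");               // states the intent directly
        // Both answer the only question ever asked of the structure:
        System.out.println(excludedOld.containsKey("dn1"));  // true
        System.out.println(excludedNew.contains("dn1"));     // true
      }
    }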

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java Wed Oct 30 22:21:59 2013
@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf.DummyJournalManager;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -54,37 +55,34 @@ public class TestAllowFormat {
   public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
   private static final Log LOG =
     LogFactory.getLog(TestAllowFormat.class.getName());
+  private static final File DFS_BASE_DIR = new File(PathUtils.getTestDir(TestAllowFormat.class), "dfs");
   private static Configuration config;
   private static MiniDFSCluster cluster = null;
-  private static File hdfsDir=null;
 
   @BeforeClass
   public static void setUp() throws Exception {
     config = new Configuration();
-    String baseDir = System.getProperty("test.build.data", "build/test/data");
-
-    hdfsDir = new File(baseDir, "dfs");
-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
-      throw new IOException("Could not delete hdfs directory '" + hdfsDir +
+    if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) {
+      throw new IOException("Could not delete hdfs directory '" + DFS_BASE_DIR +
                             "'");
     }
-
+    
     // Test has multiple name directories.
     // Format should not really prompt us if one of the directories exist,
     // but is empty. So in case the test hangs on an input, it means something
     // could be wrong in the format prompting code. (HDFS-1636)
-    LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
-    File nameDir1 = new File(hdfsDir, "name1");
-    File nameDir2 = new File(hdfsDir, "name2");
+    LOG.info("hdfsdir is " + DFS_BASE_DIR.getAbsolutePath());
+    File nameDir1 = new File(DFS_BASE_DIR, "name1");
+    File nameDir2 = new File(DFS_BASE_DIR, "name2");
 
     // To test multiple directory handling, we pre-create one of the name directories.
     nameDir1.mkdirs();
 
     // Set multiple name directories.
     config.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir1.getPath() + "," + nameDir2.getPath());
-    config.set(DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
+    config.set(DFS_DATANODE_DATA_DIR_KEY, new File(DFS_BASE_DIR, "data").getPath());
 
-    config.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(hdfsDir, "secondary").getPath());
+    config.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(DFS_BASE_DIR, "secondary").getPath());
 
     FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
   }
@@ -99,9 +97,9 @@ public class TestAllowFormat {
       LOG.info("Stopping mini cluster");
     }
     
-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+    if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) {
       throw new IOException("Could not delete hdfs directory in tearDown '"
-                            + hdfsDir + "'");
+                            + DFS_BASE_DIR + "'");
     }	
   }
 
@@ -170,7 +168,7 @@ public class TestAllowFormat {
     HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
 
     conf.set(DFS_NAMENODE_NAME_DIR_KEY,
-        new File(hdfsDir, "name").getAbsolutePath());
+        new File(DFS_BASE_DIR, "name").getAbsolutePath());
     conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
     conf.set(DFSUtil.addKeySuffixes(
         DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
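
For context on the key this HA test sets: DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY gates whether the NameNode may be formatted at all, a last line of defense against reformatting a live cluster. A hedged sketch of the expected behavior; formatNameNode here is a hypothetical stand-in for the real, internal format entry point:

    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, false);
    try {
      formatNameNode(conf);   // hypothetical wrapper; expected to fail fast
      fail("format must be refused while allow-format is false");
    } catch (IOException expected) {
      // the guard held; flip the key to true to permit formatting
    }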

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java Wed Oct 30 22:21:59 2013
@@ -39,12 +39,13 @@ import org.apache.hadoop.fs.permission.F
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.web.HftpFileSystem;
 import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -58,8 +59,7 @@ import org.junit.Test;
  * A JUnit test that audit logs are generated
  */
 public class TestAuditLogs {
-  static final String auditLogFile = System.getProperty("test.build.dir",
-      "build/test") + "/audit.log";
+  static final String auditLogFile = PathUtils.getTestDirName(TestAuditLogs.class) + "/TestAuditLogs-audit.log";
   
   // Pattern for: 
   // allowed=(true|false) ugi=name ip=/address cmd={cmd} src={path} dst=null perm=null

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java Wed Oct 30 22:21:59 2013
@@ -96,6 +96,8 @@ public class TestBackupNode {
         "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
     c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
         "127.0.0.1:0");
+    c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+            "127.0.0.1:0");
 
     BackupNode bn = (BackupNode)NameNode.createNameNode(
         new String[]{startupOpt.getName()}, c);
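
The added DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY line uses the standard test idiom of binding to port 0: the OS assigns a free ephemeral port at bind time, so the backup node's HTTP server cannot collide with a port another test already holds. The idiom in isolation:

    import java.net.ServerSocket;

    public class EphemeralPortSketch {
      public static void main(String[] args) throws Exception {
        // Port 0 means "any free port"; read back what the kernel chose.
        ServerSocket probe = new ServerSocket(0);
        System.out.println("bound to port " + probe.getLocalPort());
        probe.close();
      }
    }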

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java Wed Oct 30 22:21:59 2013
@@ -40,7 +40,6 @@ import java.util.List;
 import java.util.Random;
 
 import org.apache.commons.cli.ParseException;
-import org.apache.commons.io.filefilter.FileFilterUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -78,6 +77,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.StringUtils;
@@ -207,7 +207,7 @@ public class TestCheckpoint {
     ArrayList<URI> fsImageDirs = new ArrayList<URI>();
     ArrayList<URI> editsDirs = new ArrayList<URI>();
     File filePath =
-      new File(System.getProperty("test.build.data","/tmp"), "storageDirToCheck");
+      new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
     assertTrue("Couldn't create directory storageDirToCheck",
                filePath.exists() || filePath.mkdirs());
     fsImageDirs.add(filePath.toURI());
@@ -1226,7 +1226,6 @@ public class TestCheckpoint {
   }
   
   /* Test case to test CheckpointSignature */
-  @SuppressWarnings("deprecation")
   @Test
   public void testCheckpointSignature() throws IOException {
 
@@ -1565,12 +1564,65 @@ public class TestCheckpoint {
       Mockito.reset(faultInjector);
     }
   }
+  
+  /**
+   * Test that a fault while downloading edits the first time after the 2NN
+   * starts up does not prevent future checkpointing.
+   */
+  @Test(timeout = 30000)
+  public void testEditFailureOnFirstCheckpoint() throws IOException {
+    Configuration conf = new HdfsConfiguration();
+    SecondaryNameNode secondary = null;
+    MiniDFSCluster cluster = null;
+    FileSystem fs = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+          .build();
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      fs.mkdirs(new Path("test-file-1"));
+      
+      // Make sure the on-disk fsimage on the NN has txid > 0.
+      FSNamesystem fsns = cluster.getNamesystem();
+      fsns.enterSafeMode(false);
+      fsns.saveNamespace();
+      fsns.leaveSafeMode();
+      
+      secondary = startSecondaryNameNode(conf);
+
+      // Cause edit rename to fail during next checkpoint
+      Mockito.doThrow(new IOException("Injecting failure before edit rename"))
+          .when(faultInjector).beforeEditsRename();
+      
+      try {
+        secondary.doCheckpoint();
+        fail("Fault injection failed.");
+      } catch (IOException ioe) {
+        GenericTestUtils.assertExceptionContains(
+            "Injecting failure before edit rename", ioe);
+      }
+      Mockito.reset(faultInjector);
+      
+      // Next checkpoint should succeed
+      secondary.doCheckpoint();
+    } finally {
+      if (secondary != null) {
+        secondary.shutdown();
+      }
+      if (fs != null) {
+        fs.close();
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      Mockito.reset(faultInjector);
+    }
+  }
 
   /**
    * Test that the secondary namenode correctly deletes temporary edits
    * on startup.
    */
-
   @Test(timeout = 30000)
   public void testDeleteTemporaryEditsOnStartup() throws IOException {
     Configuration conf = new HdfsConfiguration();
@@ -1863,9 +1915,11 @@ public class TestCheckpoint {
       }
       
       // Start a new NN with the same host/port.
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-          .nameNodePort(origPort).nameNodeHttpPort(origHttpPort).format(true)
-          .build();
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(0)
+          .nameNodePort(origPort)
+          .nameNodeHttpPort(origHttpPort)
+          .format(true).build();
 
       try {
         secondary.doCheckpoint();
@@ -1946,7 +2000,6 @@ public class TestCheckpoint {
    * Test that, if a storage directory is failed when a checkpoint occurs,
    * the non-failed storage directory receives the checkpoint.
    */
-  @SuppressWarnings("deprecation")
   @Test
   public void testCheckpointWithFailedStorageDir() throws Exception {
     MiniDFSCluster cluster = null;
@@ -2009,7 +2062,6 @@ public class TestCheckpoint {
    * should function correctly.
    * @throws Exception
    */
-  @SuppressWarnings("deprecation")
   @Test
   public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
     MiniDFSCluster cluster = null;
@@ -2089,7 +2141,8 @@ public class TestCheckpoint {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
     
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(0)
           .format(true).build();
       FileSystem fs = cluster.getFileSystem();
       secondary = startSecondaryNameNode(conf);
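
The new testEditFailureOnFirstCheckpoint leans on the fault-injector seam: production code calls a no-op hook, the test replaces the hook with a Mockito mock that throws once, asserts the checkpoint fails, then resets the mock so the next checkpoint succeeds. A hedged sketch of such a seam; the real CheckpointFaultInjector may differ in detail:

    import java.io.IOException;

    // Production keeps the no-op instance; a test installs
    // Mockito.mock(CheckpointFaultInjectorSketch.class) and stubs
    // doThrow(new IOException(...)).when(injector).beforeEditsRename().
    public class CheckpointFaultInjectorSketch {
      private static CheckpointFaultInjectorSketch instance =
          new CheckpointFaultInjectorSketch();

      public static CheckpointFaultInjectorSketch getInstance() {
        return instance;
      }

      static void setInstance(CheckpointFaultInjectorSketch injector) {
        instance = injector;   // test hook
      }

      public void beforeEditsRename() throws IOException {
        // no-op in production; the mocked version throws
      }
    }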

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java Wed Oct 30 22:21:59 2013
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.junit.After;
@@ -72,7 +73,7 @@ public class TestClusterId {
   public void setUp() throws IOException {
     ExitUtil.disableSystemExit();
 
-    String baseDir = System.getProperty("test.build.data", "build/test/data");
+    String baseDir = PathUtils.getTestDirName(getClass());
 
     hdfsDir = new File(baseDir, "dfs/name");
     if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java Wed Oct 30 22:21:59 2013
@@ -18,15 +18,6 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyBoolean;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.spy;
-
-import java.io.IOException;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -37,6 +28,13 @@ import org.apache.hadoop.hdfs.server.blo
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.junit.Test;
 
+import java.io.IOException;
+
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Mockito.*;
+
 /**
  * Verify that TestCommitBlockSynchronization is idempotent.
  */
@@ -177,7 +175,7 @@ public class TestCommitBlockSynchronizat
     Block block = new Block(blockId, length, genStamp);
     FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
     DatanodeID[] newTargets = new DatanodeID[]{
-        new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0)};
+        new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0, 0)};
 
     ExtendedBlock lastBlock = new ExtendedBlock();
     namesystemSpy.commitBlockSynchronization(

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed Oct 30 22:21:59 2013
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
@@ -96,9 +97,8 @@ public class TestEditLog {
   static final int NUM_TRANSACTIONS = 100;
   static final int NUM_THREADS = 100;
   
-  static final File TEST_DIR = new File(
-    System.getProperty("test.build.data","build/test/data"));
-
+  static final File TEST_DIR = PathUtils.getTestDir(TestEditLog.class);
+  
   /** An edits log with 3 edits from 0.20 - the result of
    * a fresh namesystem followed by hadoop fs -touchz /myfile */
   static final byte[] HADOOP20_SOME_EDITS =
@@ -569,6 +569,7 @@ public class TestEditLog {
       fail("should not be able to start");
     } catch (IOException e) {
       // expected
+      assertNotNull("Cause of exception should be ChecksumException", e.getCause());
       assertEquals("Cause of exception should be ChecksumException",
           ChecksumException.class, e.getCause().getClass());
     }
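
The added assertNotNull is small but real: if the IOException ever arrives without a cause, the old code died with a NullPointerException inside e.getCause().getClass() instead of a readable assertion failure. The guard, extracted as a helper:

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertNotNull;

    import java.io.IOException;
    import org.apache.hadoop.fs.ChecksumException;

    // Fail with a message rather than an NPE when the cause is absent.
    static void assertCausedByChecksum(IOException e) {
      assertNotNull("Cause of exception should be ChecksumException",
          e.getCause());
      assertEquals("Cause of exception should be ChecksumException",
          ChecksumException.class, e.getCause().getClass());
    }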

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java Wed Oct 30 22:21:59 2013
@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTru
 import java.io.File;
 import java.io.IOException;
 
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
@@ -34,8 +35,7 @@ import org.junit.Test;
  * Test the EditLogFileOutputStream
  */
 public class TestEditLogFileOutputStream {
-  private final static File TEST_DIR =
-      new File(System.getProperty("test.build.data", "/tmp"));
+  private final static File TEST_DIR = PathUtils.getTestDir(TestEditLogFileOutputStream.class);
   private static final File TEST_EDITS =
       new File(TEST_DIR, "testEditLogFileOutput.log");
   final static int MIN_PREALLOCATION_LENGTH =

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java Wed Oct 30 22:21:59 2013
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.junit.After;
 import org.junit.Before;
@@ -192,8 +193,7 @@ public class TestEditLogJournalFailures 
     Configuration conf = new HdfsConfiguration();
     String[] nameDirs = new String[4];
     for (int i = 0; i < nameDirs.length; i++) {
-      File nameDir = new File(System.getProperty("test.build.data"),
-          "name-dir" + i);
+      File nameDir = new File(PathUtils.getTestDir(getClass()), "name-dir" + i);
       nameDir.mkdirs();
       nameDirs[i] = nameDir.getAbsolutePath();
     }

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Wed Oct 30 22:21:59 2013
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
 import org.junit.Test;
 
@@ -60,8 +61,7 @@ public class TestFSEditLogLoader {
     ((Log4JLogger)FSEditLogLoader.LOG).getLogger().setLevel(Level.ALL);
   }
   
-  private static final File TEST_DIR = new File(
-      System.getProperty("test.build.data","build/test/data"));
+  private static final File TEST_DIR = PathUtils.getTestDir(TestFSEditLogLoader.class);
 
   private static final int NUM_DATA_NODES = 0;
   

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java Wed Oct 30 22:21:59 2013
@@ -20,8 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
 
 import java.io.File;
 import java.io.IOException;
@@ -142,4 +141,21 @@ public class TestFSNamesystem {
     assertTrue("Replication queues weren't being populated after entering "
       + "safemode 2nd time", fsn.isPopulatingReplQueues());
   }
+  
+  @Test
+  public void testFsLockFairness() throws IOException, InterruptedException{
+    Configuration conf = new Configuration();
+
+    FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
+    FSImage fsImage = Mockito.mock(FSImage.class);
+    Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
+
+    conf.setBoolean("dfs.namenode.fslock.fair", true);
+    FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
+    assertTrue(fsNamesystem.getFsLockForTests().isFair());
+    
+    conf.setBoolean("dfs.namenode.fslock.fair", false);
+    fsNamesystem = new FSNamesystem(conf, fsImage);
+    assertFalse(fsNamesystem.getFsLockForTests().isFair());
+  }  
 }
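
The new fairness test is easier to read knowing what the flag maps to: FSNamesystem's global lock is a ReentrantReadWriteLock, and "dfs.namenode.fslock.fair" evidently feeds its constructor. A fair lock grants access in arrival order, which prevents writer starvation at some throughput cost. A minimal sketch, assuming that wiring:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    boolean fair = conf.getBoolean("dfs.namenode.fslock.fair", true);
    ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(fair);
    // fsLock.isFair() is what getFsLockForTests().isFair() asserts above.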

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java Wed Oct 30 22:21:59 2013
@@ -87,24 +87,27 @@ public class TestFileJournalManager {
     EditLogInputStream elis = null;
     try {
       while ((elis = allStreams.poll()) != null) {
-        elis.skipUntil(txId);
-        while (true) {
-          FSEditLogOp op = elis.readOp();
-          if (op == null) {
-            break;
-          }
-          if (abortOnGap && (op.getTransactionId() != txId)) {
-            LOG.info("getNumberOfTransactions: detected gap at txId " +
-                fromTxId);
-            return numTransactions;
+        try {
+          elis.skipUntil(txId);
+          while (true) {
+            FSEditLogOp op = elis.readOp();
+            if (op == null) {
+              break;
+            }
+            if (abortOnGap && (op.getTransactionId() != txId)) {
+              LOG.info("getNumberOfTransactions: detected gap at txId "
+                  + fromTxId);
+              return numTransactions;
+            }
+            txId = op.getTransactionId() + 1;
+            numTransactions++;
           }
-          txId = op.getTransactionId() + 1;
-          numTransactions++;
+        } finally {
+          IOUtils.cleanup(LOG, elis);
         }
       }
     } finally {
       IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
-      IOUtils.cleanup(LOG, elis);
     }
     return numTransactions;
   }
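
This restructuring fixes a stream leak: the old code only cleaned up whatever elis still referenced at method exit, so every stream fully consumed earlier in the loop stayed open. The fix closes each stream as soon as it is drained (inner finally) and still sweeps anything never polled (outer finally). The same shape with illustrative types:

    import java.io.Closeable;
    import java.io.IOException;
    import java.util.Queue;

    final class StreamDrainSketch {
      static void drainAll(Queue<? extends Closeable> streams)
          throws IOException {
        Closeable current;
        try {
          while ((current = streams.poll()) != null) {
            try {
              // read from 'current' until exhausted
            } finally {
              current.close();             // runs even if reading throws
            }
          }
        } finally {
          for (Closeable left : streams) { // non-empty only on early exit
            left.close();
          }
        }
      }
    }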

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Wed Oct 30 22:21:59 2013
@@ -83,7 +83,6 @@ import org.apache.log4j.RollingFileAppen
 import org.junit.Test;
 
 import com.google.common.collect.Sets;
-import org.mockito.Mockito;
 import static org.mockito.Mockito.*;
 
 /**
@@ -91,7 +90,7 @@ import static org.mockito.Mockito.*;
  */
 public class TestFsck {
   static final String auditLogFile = System.getProperty("test.build.dir",
-      "build/test") + "/audit.log";
+      "build/test") + "/TestFsck-audit.log";
   
   // Pattern for: 
   // allowed=true ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null
@@ -159,7 +158,8 @@ public class TestFsck {
       cluster.shutdown();
       
       // restart the cluster; bring up namenode but not the data nodes
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(0).format(false).build();
       outStr = runFsck(conf, 1, true, "/");
       // expect the result is corrupt
       assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
@@ -891,6 +891,80 @@ public class TestFsck {
       }
     }
   }
+  
+  /**
+   * Tests that the # of misreplaced replicas is correct
+   * @throws IOException
+   */
+  @Test
+  public void testFsckMisPlacedReplicas() throws IOException {
+    // Desired replication factor
+    final short REPL_FACTOR = 2;
+    // Number of replicas to actually start
+    short NUM_DN = 2;
+    // Number of blocks to write
+    final short NUM_BLOCKS = 3;
+    // Set a small-ish blocksize
+    final long blockSize = 512;
+    
+    String [] racks = {"/rack1", "/rack1"};
+    String [] hosts = {"host1", "host2"};
+    
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+    
+    MiniDFSCluster cluster = null;
+    DistributedFileSystem dfs = null;
+    
+    try {
+      // Startup a minicluster
+      cluster = 
+          new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
+          .racks(racks).build();
+      assertNotNull("Failed Cluster Creation", cluster);
+      cluster.waitClusterUp();
+      dfs = (DistributedFileSystem) cluster.getFileSystem();
+      assertNotNull("Failed to get FileSystem", dfs);
+      
+      // Create a file that will be intentionally under-replicated
+      final String pathString = new String("/testfile");
+      final Path path = new Path(pathString);
+      long fileLen = blockSize * NUM_BLOCKS;
+      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
+      
+      // Create an under-replicated file
+      NameNode namenode = cluster.getNameNode();
+      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
+          .getDatanodeManager().getNetworkTopology();
+      // Add a new node on different rack, so previous blocks' replicas 
+      // are considered to be misplaced
+      nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
+      NUM_DN++;
+      
+      Map<String,String[]> pmap = new HashMap<String, String[]>();
+      Writer result = new StringWriter();
+      PrintWriter out = new PrintWriter(result, true);
+      InetAddress remoteAddress = InetAddress.getLocalHost();
+      NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
+          NUM_DN, (short)REPL_FACTOR, remoteAddress);
+      
+      // Run the fsck and check the Result
+      final HdfsFileStatus file = 
+          namenode.getRpcServer().getFileInfo(pathString);
+      assertNotNull(file);
+      Result res = new Result(conf);
+      fsck.check(pathString, file, res);
+      // check misReplicatedBlock number.
+      assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
+    } finally {
+      if(dfs != null) {
+        dfs.close();
+      }
+      if(cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 
   /** Test fsck with FileNotFound */
   @Test
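
Why all three blocks count as misplaced in the test above: with two datanodes on a single rack, both replicas of every block necessarily land on /rack1; the moment a /rack2 node joins the topology, the default placement policy expects replicas to span two racks, so every pre-existing block violates placement. The rack check at the heart of it, as an illustrative sketch rather than NamenodeFsck's exact code:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    final class PlacementSketch {
      // Misplaced: replication >= 2, cluster spans multiple racks, yet all
      // replicas of the block sit on one rack.
      static boolean isMisplaced(List<String> replicaRacks, int totalRacks) {
        Set<String> racksUsed = new HashSet<String>(replicaRacks);
        return replicaRacks.size() >= 2 && totalRacks > 1
            && racksUsed.size() == 1;
      }

      public static void main(String[] args) {
        // true once /rack2 exists: matches numMisReplicatedBlocks == 3
        System.out.println(isMisplaced(Arrays.asList("/rack1", "/rack1"), 2));
      }
    }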

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java Wed Oct 30 22:21:59 2013
@@ -31,6 +31,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -50,6 +51,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -920,7 +922,55 @@ public class TestINodeFile {
       assertTrue(parentId == status.getFileId());
       
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  @Test
+  public void testFilesInGetListingOps() throws Exception {
+    final Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      final DistributedFileSystem hdfs = cluster.getFileSystem();
+      final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
+
+      hdfs.mkdirs(new Path("/tmp"));
+      DFSTestUtil.createFile(hdfs, new Path("/tmp/f1"), 0, (short) 1, 0);
+      DFSTestUtil.createFile(hdfs, new Path("/tmp/f2"), 0, (short) 1, 0);
+      DFSTestUtil.createFile(hdfs, new Path("/tmp/f3"), 0, (short) 1, 0);
+
+      DirectoryListing dl = cluster.getNameNodeRpc().getListing("/tmp",
+          HdfsFileStatus.EMPTY_NAME, false);
+      assertTrue(dl.getPartialListing().length == 3);
+
+      String f2 = new String("f2");
+      dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false);
+      assertTrue(dl.getPartialListing().length == 1);
+
+      INode f2INode = fsdir.getINode("/tmp/f2");
+      String f2InodePath = "/.reserved/.inodes/" + f2INode.getId();
+      dl = cluster.getNameNodeRpc().getListing("/tmp", f2InodePath.getBytes(),
+          false);
+      assertTrue(dl.getPartialListing().length == 1);
+
+      // Test the deleted startAfter file
+      hdfs.delete(new Path("/tmp/f2"), false);
+      try {
+        dl = cluster.getNameNodeRpc().getListing("/tmp",
+            f2InodePath.getBytes(), false);
+        fail("Didn't get exception for the deleted startAfter token.");
+      } catch (IOException e) {
+        assertTrue(e instanceof DirectoryListingStartAfterNotFoundException);
+      }
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
     }
   }
 }
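
Background for the startAfter cases in the new listing test: HDFS addresses every inode under the reserved path /.reserved/.inodes/<inodeId>, so a paged listing can resume after a file even if it has since been renamed; if the inode is gone, getListing raises DirectoryListingStartAfterNotFoundException instead of guessing where to resume. Building such a token, with an illustrative helper name:

    import java.io.UnsupportedEncodingException;

    final class InodePathSketch {
      // A startAfter token that names a file by inode id, which survives
      // renames (but not deletion, as the test's final case shows).
      static byte[] startAfterToken(long inodeId)
          throws UnsupportedEncodingException {
        return ("/.reserved/.inodes/" + inodeId).getBytes("UTF-8");
      }
    }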

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java Wed Oct 30 22:21:59 2013
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -62,7 +63,7 @@ public class TestNameEditsConfigs {
 
   short replication = 3;
   private File base_dir = new File(
-      System.getProperty("test.build.data", "build/test/data"), "dfs/");
+      PathUtils.getTestDir(TestNameEditsConfigs.class), "dfs");
 
   @Before
   public void setUp() throws IOException {
@@ -70,7 +71,7 @@ public class TestNameEditsConfigs {
       throw new IOException("Cannot remove directory " + base_dir);
     }
   }
-
+  
   private void writeFile(FileSystem fileSys, Path name, int repl)
       throws IOException {
     FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeJspHelper.java Wed Oct 30 22:21:59 2013
@@ -17,44 +17,71 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-
-import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.*;
-import static org.mockito.Mockito.*;
+import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.LOADING_EDITS;
+import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.LOADING_FSIMAGE;
+import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.SAFEMODE;
+import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.SAVING_CHECKPOINT;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Mockito.atLeastOnce;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
+import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
 import javax.servlet.jsp.JspWriter;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
+import org.apache.hadoop.util.VersionInfo;
+import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Before;
+import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+import org.znerd.xmlenc.XMLOutputter;
 
-public class TestNameNodeJspHelper {
+import com.google.common.collect.ImmutableSet;
 
-  private MiniDFSCluster cluster = null;
-  Configuration conf = null;
+public class TestNameNodeJspHelper {
 
-  @Before
-  public void setUp() throws Exception {
+  private static final int DATA_NODES_AMOUNT = 2;
+  
+  private static MiniDFSCluster cluster;
+  private static Configuration conf;
+  private static final String NAMENODE_ATTRIBUTE_KEY = "name.node";  
+  
+  @BeforeClass
+  public static void setUp() throws Exception {
     conf = new HdfsConfiguration();
-    cluster  = new MiniDFSCluster.Builder(conf).build();
-    cluster.waitActive();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(DATA_NODES_AMOUNT).build();
+    cluster.waitClusterUp();
   }
 
-  @After
-  public void tearDown() throws Exception {
+  @AfterClass
+  public static void tearDown() throws Exception {
     if (cluster != null)
       cluster.shutdown();
   }
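
The fixture change in this hunk, from per-method @Before/@After to static @BeforeClass/@AfterClass, trades isolation for speed: one MiniDFSCluster now serves every test in the class, so its expensive startup is paid once, and in exchange the tests must treat the shared cluster as read-mostly. The JUnit shape, reduced to essentials:

    // Both lifecycle methods must be public static for JUnit 4 to run them.
    private static MiniDFSCluster cluster;

    @BeforeClass
    public static void setUp() throws Exception {
      cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
          .numDataNodes(2).build();
      cluster.waitClusterUp();   // block until the NN and DNs are serving
    }

    @AfterClass
    public static void tearDown() throws Exception {
      if (cluster != null) {
        cluster.shutdown();      // runs once, after the last test method
      }
    }
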
@@ -66,23 +93,23 @@ public class TestNameNodeJspHelper {
     UserGroupInformation ugi = UserGroupInformation.createRemoteUser("auser");
     String tokenString = NamenodeJspHelper.getDelegationToken(nn, request,
         conf, ugi);
-    //tokenString returned must be null because security is disabled
+    // tokenString returned must be null because security is disabled
     Assert.assertEquals(null, tokenString);
   }
-  
+
   @Test
-  public void  tesSecurityModeText() {
+  public void testSecurityModeText() {
     conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
     UserGroupInformation.setConfiguration(conf);
     String securityOnOff = NamenodeJspHelper.getSecurityModeText();
-    Assert.assertTrue("security mode doesn't match. Should be ON", 
+    Assert.assertTrue("security mode doesn't match. Should be ON",
         securityOnOff.contains("ON"));
-    //Security is enabled
+    // Security is enabled
     conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "simple");
     UserGroupInformation.setConfiguration(conf);
-    
+
     securityOnOff = NamenodeJspHelper.getSecurityModeText();
-    Assert.assertTrue("security mode doesn't match. Should be OFF", 
+    Assert.assertTrue("security mode doesn't match. Should be OFF",
         securityOnOff.contains("OFF"));
   }
 
@@ -107,9 +134,83 @@ public class TestNameNodeJspHelper {
     Assert.assertTrue(containsMatch(contents, SAFEMODE.getDescription()));
   }
 
+  @Test
+  public void testGetRollingUpgradeText() {
+    Assert.assertEquals("", NamenodeJspHelper.getRollingUpgradeText(null));
+  }
+
+  /**
+   * Tests for non-null, non-empty NameNode label.
+   */
+  @Test
+  public void testGetNameNodeLabel() {
+    String nameNodeLabel = NamenodeJspHelper.getNameNodeLabel(
+      cluster.getNameNode());
+    Assert.assertNotNull(nameNodeLabel);
+    Assert.assertFalse(nameNodeLabel.isEmpty());
+  }
+
+  /**
+   * Tests for non-null, non-empty NameNode label when called before
+   * initialization of the NameNode RPC server.
+   */
+  @Test
+  public void testGetNameNodeLabelNullRpcServer() {
+    NameNode nn = mock(NameNode.class);
+    when(nn.getRpcServer()).thenReturn(null);
+    String nameNodeLabel = NamenodeJspHelper.getNameNodeLabel(
+      cluster.getNameNode());
+    Assert.assertNotNull(nameNodeLabel);
+    Assert.assertFalse(nameNodeLabel.isEmpty());
+  }
+
+  /**
+   * Tests that passing a null FSNamesystem to generateSnapshotReport does not
+   * throw NullPointerException.
+   */
+  @Test
+  public void testGenerateSnapshotReportNullNamesystem() throws Exception {
+    NamenodeJspHelper.generateSnapshotReport(mock(JspWriter.class), null);
+  }
+
+  /**
+   * Tests that redirectToRandomDataNode does not throw NullPointerException if
+   * it finds a null FSNamesystem.
+   */
+  @Test(expected=IOException.class)
+  public void testRedirectToRandomDataNodeNullNamesystem() throws Exception {
+    NameNode nn = mock(NameNode.class);
+    when(nn.getNamesystem()).thenReturn(null);
+    ServletContext context = mock(ServletContext.class);
+    when(context.getAttribute("name.node")).thenReturn(nn);
+    NamenodeJspHelper.redirectToRandomDataNode(context,
+      mock(HttpServletRequest.class), mock(HttpServletResponse.class));
+  }
+
+  /**
+   * Tests that XMLBlockInfo does not throw NullPointerException if it finds a
+   * null FSNamesystem.
+   */
+  @Test
+  public void testXMLBlockInfoNullNamesystem() throws IOException {
+    XMLOutputter doc = new XMLOutputter(mock(JspWriter.class), "UTF-8");
+    new NamenodeJspHelper.XMLBlockInfo(null, 1L).toXML(doc);
+  }
+
+  /**
+   * Tests that XMLCorruptBlockInfo does not throw NullPointerException if it
+   * finds a null FSNamesystem.
+   */
+  @Test
+  public void testXMLCorruptBlockInfoNullNamesystem() throws IOException {
+    XMLOutputter doc = new XMLOutputter(mock(JspWriter.class), "UTF-8");
+    new NamenodeJspHelper.XMLCorruptBlockInfo(null, mock(Configuration.class),
+      10, 1L).toXML(doc);
+  }
+
   /**
    * Checks if the list contains any string that partially matches the regex.
-   * 
+   *
    * @param list List<String> containing strings to check
    * @param regex String regex to check
    * @return boolean true if some string in list partially matches regex
@@ -123,4 +224,149 @@ public class TestNameNodeJspHelper {
     }
     return false;
   }
+
+  @Test(timeout = 15000)
+  public void testGetRandomDatanode() {
+    NameNode nameNode = cluster.getNameNode();
+    ImmutableSet.Builder<String> builder = ImmutableSet.builder();
+    for (DataNode dataNode : cluster.getDataNodes()) {
+      builder.add(dataNode.getDisplayName());
+    }
+    ImmutableSet<String> set = builder.build();
+
+    for (int i = 0; i < 10; i++) {
+      DatanodeDescriptor dnDescriptor = NamenodeJspHelper
+          .getRandomDatanode(nameNode);
+      assertTrue("getRandomDatanode returned a node outside the cluster",
+          set.contains(dnDescriptor.toString()));
+    }
+  }
+
+  @Test(timeout = 15000)
+  public void testNamenodeJspHelperRedirectToRandomDataNode()
+      throws IOException, InterruptedException {
+    final String urlPart = "browseDirectory.jsp?namenodeInfoPort=";
+
+    ServletContext context = mock(ServletContext.class);
+    HttpServletRequest request = mock(HttpServletRequest.class);
+    HttpServletResponse resp = mock(HttpServletResponse.class);
+
+    when(request.getScheme()).thenReturn("http");
+    when(request.getParameter(UserParam.NAME)).thenReturn("localuser");
+    when(context.getAttribute(NAMENODE_ATTRIBUTE_KEY)).thenReturn(
+        cluster.getNameNode());
+    when(context.getAttribute(JspHelper.CURRENT_CONF)).thenReturn(conf);
+    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
+    doAnswer(new Answer<String>() {
+      @Override
+      public String answer(InvocationOnMock invocation) throws Throwable {
+        return null;
+      }
+    }).when(resp).sendRedirect(captor.capture());
+
+    NamenodeJspHelper.redirectToRandomDataNode(context, request, resp);
+    assertTrue(captor.getValue().contains(urlPart));
+  }
+
+  private enum DataNodeStatus {
+    LIVE("[Live Datanodes(| +):(| +)]\\d"),
+    DEAD("[Dead Datanodes(| +):(| +)]\\d");
+
+    private final Pattern pattern;
+
+    DataNodeStatus(String line) {
+      this.pattern = Pattern.compile(line);
+    }
+
+    public Pattern getPattern() {
+      return pattern;
+    }
+  }
+
+  private void checkDeadLiveNodes(NameNode nameNode, int deadCount,
+      int liveCount) {
+    FSNamesystem ns = nameNode.getNamesystem();
+    DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
+    List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+    List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+    dm.fetchDatanodes(live, dead, true);
+    assertTrue("unexpected live/dead datanode counts",
+        live.size() == liveCount && dead.size() == deadCount);
+  }
+
+  @Test(timeout = 15000)
+  public void testNodeListJspGenerateNodesList() throws IOException {
+    String output;
+    NameNode nameNode = cluster.getNameNode();
+    ServletContext context = mock(ServletContext.class);
+    when(context.getAttribute("name.node")).thenReturn(nameNode);
+    when(context.getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY))
+        .thenReturn(cluster.getNameNode().getHttpAddress());    
+    checkDeadLiveNodes(nameNode, 0, DATA_NODES_AMOUNT);
+    output = getOutputFromGeneratedNodesList(context, DataNodeStatus.LIVE);
+    assertCounts(DataNodeStatus.LIVE, output, DATA_NODES_AMOUNT);
+    output = getOutputFromGeneratedNodesList(context, DataNodeStatus.DEAD);
+    assertCounts(DataNodeStatus.DEAD, output, 0);    
+  }
+
+  private void assertCounts(DataNodeStatus dataNodeStatus, String output,
+      int expectedCount) {
+    Matcher matcher = dataNodeStatus.getPattern().matcher(output);
+    if (matcher.find()) {
+      String digitLine = output.substring(matcher.start(), matcher.end())
+          .trim();
+      assertTrue("datanode count mismatch, expected " + expectedCount,
+          Integer.parseInt(digitLine) == expectedCount);
+    } else {
+      fail("no datanode count found for " + dataNodeStatus);
+    }
+  }
+
+  private String getOutputFromGeneratedNodesList(ServletContext context,
+      DataNodeStatus dnStatus) throws IOException {
+    JspWriter out = mock(JspWriter.class);
+    ArgumentCaptor<String> captor = ArgumentCaptor.forClass(String.class);
+    NamenodeJspHelper.NodeListJsp nodelistjsp =
+        new NamenodeJspHelper.NodeListJsp();
+    final StringBuffer buffer = new StringBuffer();
+    // Accumulate everything the JSP prints so the caller can regex-match it.
+    doAnswer(new Answer<String>() {
+      @Override
+      public String answer(InvocationOnMock invocation) {
+        buffer.append((String) invocation.getArguments()[0]);
+        return null;
+      }
+    }).when(out).print(captor.capture());
+    HttpServletRequest request = mock(HttpServletRequest.class);
+    when(request.getScheme()).thenReturn("http");
+    when(request.getParameter("whatNodes")).thenReturn(dnStatus.name());
+    nodelistjsp.generateNodesList(context, out, request);
+    return buffer.toString();
+  }
+
+  @Test(timeout = 15000)
+  public void testGetInodeLimitText() {
+    NameNode nameNode = cluster.getNameNode();
+    FSNamesystem fsn = nameNode.getNamesystem();
+    ImmutableSet<String> patterns = ImmutableSet.of("files and directories",
+        "Heap Memory used", "Non Heap Memory used");
+    String line = NamenodeJspHelper.getInodeLimitText(fsn);
+    for (String pattern : patterns) {
+      assertTrue("testGetInodeLimitText error " + pattern,
+          line.contains(pattern));
+    }
+  }
+
+  @Test(timeout = 15000)
+  public void testGetVersionTable() {
+    NameNode nameNode = cluster.getNameNode();
+    FSNamesystem fsn = nameNode.getNamesystem();
+    ImmutableSet<String> patterns = ImmutableSet.of(VersionInfo.getVersion(),
+        VersionInfo.getRevision(), VersionInfo.getUser(),
+        VersionInfo.getBranch(), fsn.getClusterId(), fsn.getBlockPoolId());
+    String line = NamenodeJspHelper.getVersionTable(fsn);
+    for (String pattern : patterns) {
+      assertTrue("testGetVersionTable error " + pattern,
+          line.contains(pattern));
+    }
+  }
 }
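
Several of the new tests above share one idiom: the JSP helpers write
directly to a JspWriter, so the tests stub the writer with Mockito and
collect everything it prints. A stripped-down sketch of that idiom; the
class and method names are illustrative, not part of the patch:

    import static org.mockito.Matchers.anyString;
    import static org.mockito.Mockito.doAnswer;

    import javax.servlet.jsp.JspWriter;

    import org.mockito.invocation.InvocationOnMock;
    import org.mockito.stubbing.Answer;

    // Illustrative helper, not from the patch: collects every print(String)
    // call on a mocked JspWriter so assertions can regex the rendered page.
    public class JspCapture {
      public static StringBuffer captureOutput(JspWriter out) throws Exception {
        final StringBuffer buffer = new StringBuffer();
        doAnswer(new Answer<Void>() {
          @Override
          public Void answer(InvocationOnMock invocation) {
            buffer.append((String) invocation.getArguments()[0]);
            return null;
          }
        }).when(out).print(anyString());
        return buffer;
      }
    }

A test would call captureOutput(mock(JspWriter.class)) before invoking the
helper under test, then match expected patterns against buffer.toString().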

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java Wed Oct 30 22:21:59 2013
@@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 
@@ -57,8 +58,7 @@ import com.google.common.collect.Sets;
 public class TestNameNodeRecovery {
   private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class);
   private static StartupOption recoverStartOpt = StartupOption.RECOVER;
-  private static final File TEST_DIR = new File(
-      System.getProperty("test.build.data","build/test/data"));
+  private static final File TEST_DIR = PathUtils.getTestDir(TestNameNodeRecovery.class);
 
   static {
     recoverStartOpt.setForce(MetaRecoveryContext.FORCE_ALL);
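
The recurring cleanup in these test files replaces raw reads of the
test.build.data system property with PathUtils.getTestDir, which also gives
each test class its own directory. A minimal sketch of what such a helper
does, assuming per-class isolation is the goal; the real implementation
lives in org.apache.hadoop.test.PathUtils and may differ in detail:

    import java.io.File;

    // Sketch only: mirrors the old property lookup, but isolates each test
    // class in its own subdirectory so concurrent tests cannot collide.
    public final class TestDirs {
      public static File getTestDir(Class<?> caller) {
        File dir = new File(
            System.getProperty("test.build.data", "build/test/data"),
            caller.getSimpleName());
        dir.mkdirs();
        return dir;
      }

      private TestDirs() {}
    }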

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java Wed Oct 30 22:21:59 2013
@@ -33,12 +33,14 @@ import org.apache.hadoop.hdfs.DFSConfigK
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.CheckedVolume;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.Time;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
 public class TestNameNodeResourceChecker {
+  private final static File BASE_DIR = PathUtils.getTestDir(TestNameNodeResourceChecker.class);
   private Configuration conf;
   private File baseDir;
   private File nameDir;
@@ -46,8 +48,7 @@ public class TestNameNodeResourceChecker
   @Before
   public void setUp () throws IOException {
     conf = new Configuration();
-    baseDir = new File(System.getProperty("test.build.data"));
-    nameDir = new File(baseDir, "resource-check-name-dir");
+    nameDir = new File(BASE_DIR, "resource-check-name-dir");
     nameDir.mkdirs();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
   }
@@ -141,8 +142,8 @@ public class TestNameNodeResourceChecker
   @Test
   public void testChecking2NameDirsOnOneVolume() throws IOException {
     Configuration conf = new Configuration();
-    File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
-    File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
+    File nameDir1 = new File(BASE_DIR, "name-dir1");
+    File nameDir2 = new File(BASE_DIR, "name-dir2");
     nameDir1.mkdirs();
     nameDir2.mkdirs();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
@@ -162,7 +163,7 @@ public class TestNameNodeResourceChecker
   @Test
   public void testCheckingExtraVolumes() throws IOException {
     Configuration conf = new Configuration();
-    File nameDir = new File(System.getProperty("test.build.data"), "name-dir");
+    File nameDir = new File(BASE_DIR, "name-dir");
     nameDir.mkdirs();
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
     conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath());
@@ -182,8 +183,8 @@ public class TestNameNodeResourceChecker
   @Test
   public void testLowResourceVolumePolicy() throws IOException, URISyntaxException {
     Configuration conf = new Configuration();
-    File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
-    File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
+    File nameDir1 = new File(BASE_DIR, "name-dir1");
+    File nameDir2 = new File(BASE_DIR, "name-dir2");
     nameDir1.mkdirs();
     nameDir2.mkdirs();
     

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java Wed Oct 30 22:21:59 2013
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.na
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.util.EnumSet;
@@ -35,11 +36,15 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.ipc.ClientId;
@@ -47,7 +52,9 @@ import org.apache.hadoop.ipc.RPC.RpcKind
 import org.apache.hadoop.ipc.RetryCache.CacheEntry;
 import org.apache.hadoop.ipc.RpcConstants;
 import org.apache.hadoop.ipc.Server;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.LightWeightCache;
 import org.junit.After;
 import org.junit.Assert;
@@ -75,12 +82,13 @@ public class TestNamenodeRetryCache {
       "TestNamenodeRetryCache", null, FsPermission.getDefault());
   private static DistributedFileSystem filesystem;
   private static int callId = 100;
-  private static Configuration conf = new HdfsConfiguration();
+  private static Configuration conf;
   private static final int BlockSize = 512;
   
   /** Start a cluster */
   @Before
   public void setup() throws Exception {
+    conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
     cluster = new MiniDFSCluster.Builder(conf).build();
@@ -294,6 +302,40 @@ public class TestNamenodeRetryCache {
   }
   
   /**
+   * Make sure a retry call does not hang because of the exception thrown in the
+   * first call.
+   */
+  @Test(timeout = 60000)
+  public void testUpdatePipelineWithFailOver() throws Exception {
+    cluster.shutdown();
+    namesystem = null;
+    filesystem = null;
+    cluster = new MiniDFSCluster.Builder(conf).nnTopology(
+        MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
+    FSNamesystem ns0 = cluster.getNamesystem(0);
+    ExtendedBlock oldBlock = new ExtendedBlock();
+    ExtendedBlock newBlock = new ExtendedBlock();
+    DatanodeID[] newNodes = new DatanodeID[2];
+    
+    newCall();
+    try {
+      ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes);
+      fail("Expect StandbyException from the updatePipeline call");
+    } catch (StandbyException e) {
+      // expected, since both NNs start out in standby state
+      GenericTestUtils.assertExceptionContains(
+          HAServiceState.STANDBY.toString(), e);
+    }
+    
+    cluster.transitionToActive(0);
+    try {
+      ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes);
+    } catch (IOException e) {
+      // ignored; the retried call just must not hang
+    }
+  }
+  
+  /**
    * Test for createSnapshot
    */
   @Test
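
The failover test added above guards a subtle invariant of the RPC retry
cache: a retried call (same client ID and call ID) parks until the first
attempt completes, so a first attempt that fails with StandbyException must
still complete its cache entry or the retry waits forever. A toy model of
that invariant, using hypothetical names unrelated to Hadoop's RetryCache
API:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import java.util.concurrent.CountDownLatch;

    // Toy model: the first attempt must release waiters even when it fails.
    public class ToyRetryCache {
      private final ConcurrentMap<String, CountDownLatch> inFlight =
          new ConcurrentHashMap<String, CountDownLatch>();

      public void call(String callKey, Runnable op) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(1);
        CountDownLatch existing = inFlight.putIfAbsent(callKey, latch);
        if (existing != null) {
          existing.await(); // duplicate call: wait for the first attempt
          return;
        }
        try {
          op.run();
        } finally {
          latch.countDown();        // must run on failure too, or retries hang
          inFlight.remove(callKey);
        }
      }
    }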

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Wed Oct 30 22:21:59 2013
@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.util.MD5FileUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Logger;
 import org.junit.After;
@@ -419,8 +420,7 @@ public class TestStartup {
     Configuration conf = new Configuration();
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
-    File base_dir = new File(System.getProperty(
-        "test.build.data", "build/test/data"), "dfs/");
+    File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/");
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
         new File(base_dir, "name").getPath());
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java Wed Oct 30 22:21:59 2013
@@ -73,24 +73,28 @@ public class TestStartupProgressServlet 
       .put("phases", Arrays.<Object>asList(
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingFsImage")
+          .put("desc", "Loading fsimage")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingEdits")
+          .put("desc", "Loading edits")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SavingCheckpoint")
+          .put("desc", "Saving checkpoint")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SafeMode")
+          .put("desc", "Safe mode")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
@@ -111,11 +115,13 @@ public class TestStartupProgressServlet 
       .put("phases", Arrays.<Object>asList(
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingFsImage")
+          .put("desc", "Loading fsimage")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
             ImmutableMap.<String, Object>builder()
               .put("name", "Inodes")
+              .put("desc", "inodes")
               .put("count", 100L)
               .put("total", 100L)
               .put("percentComplete", 1.0f)
@@ -124,6 +130,7 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingEdits")
+          .put("desc", "Loading edits")
           .put("status", "RUNNING")
           .put("percentComplete", 0.5f)
           .put("steps", Collections.<Object>singletonList(
@@ -138,12 +145,14 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SavingCheckpoint")
+          .put("desc", "Saving checkpoint")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SafeMode")
+          .put("desc", "Safe mode")
           .put("status", "PENDING")
           .put("percentComplete", 0.0f)
           .put("steps", Collections.emptyList())
@@ -164,11 +173,13 @@ public class TestStartupProgressServlet 
       .put("phases", Arrays.<Object>asList(
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingFsImage")
+          .put("desc", "Loading fsimage")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
             ImmutableMap.<String, Object>builder()
               .put("name", "Inodes")
+              .put("desc", "inodes")
               .put("count", 100L)
               .put("total", 100L)
               .put("percentComplete", 1.0f)
@@ -177,6 +188,7 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "LoadingEdits")
+          .put("desc", "Loading edits")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
@@ -191,11 +203,13 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SavingCheckpoint")
+          .put("desc", "Saving checkpoint")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
             ImmutableMap.<String, Object>builder()
               .put("name", "Inodes")
+              .put("desc", "inodes")
               .put("count", 300L)
               .put("total", 300L)
               .put("percentComplete", 1.0f)
@@ -204,11 +218,13 @@ public class TestStartupProgressServlet 
           .build(),
         ImmutableMap.<String, Object>builder()
           .put("name", "SafeMode")
+          .put("desc", "Safe mode")
           .put("status", "COMPLETE")
           .put("percentComplete", 1.0f)
           .put("steps", Collections.<Object>singletonList(
             ImmutableMap.<String, Object>builder()
               .put("name", "AwaitingReportedBlocks")
+              .put("desc", "awaiting reported blocks")
               .put("count", 400L)
               .put("total", 400L)
               .put("percentComplete", 1.0f)

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java Wed Oct 30 22:21:59 2013
@@ -391,7 +391,8 @@ public class TestStorageRestore {
         (new File(path3, "current").getAbsolutePath()) : path3.toString();
 
     try {
-      cluster = new MiniDFSCluster.Builder(config).numDataNodes(0)
+      cluster = new MiniDFSCluster.Builder(config)
+          .numDataNodes(0)
           .manageNameDfsDirs(false).build();
       cluster.waitActive();
 

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java Wed Oct 30 22:21:59 2013
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.http.HttpServerFunctionalTest;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -48,8 +49,7 @@ import com.google.common.collect.Immutab
 
 public class TestTransferFsImage {
 
-  private static final File TEST_DIR = new File(
-      System.getProperty("test.build.data","build/test/data"));
+  private static final File TEST_DIR = PathUtils.getTestDir(TestTransferFsImage.class);
 
   /**
    * Regression test for HDFS-1997. Test that, if an exception

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java Wed Oct 30 22:21:59 2013
@@ -56,8 +56,8 @@ public class TestBootstrapStandby {
 
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(20001))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(20002)));
     
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(topology)

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java Wed Oct 30 22:21:59 2013
@@ -70,13 +70,13 @@ public class TestDFSZKFailoverController
         CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
         0);
     
-    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10003);
-    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10004);
+    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10023);
+    conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10024);
 
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
     .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10021))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10022)));
     cluster = new MiniDFSCluster.Builder(conf)
         .nnTopology(topology)
         .numDataNodes(0)
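
Both HA tests above dodge port collisions simply by moving their hard-coded
HTTP, IPC, and ZKFC ports to new fixed values. Where fixed numbers are not
required, a common alternative (illustrative only, not what this patch does)
is to ask the OS for an ephemeral port, accepting the small race between
closing the probe socket and binding the real one:

    import java.io.IOException;
    import java.net.ServerSocket;

    // Illustrative helper: returns a port that was free at probe time; a
    // racing process could still grab it before the caller binds.
    public class Ports {
      public static int findFreePort() throws IOException {
        ServerSocket socket = new ServerSocket(0);
        try {
          socket.setReuseAddress(true);
          return socket.getLocalPort();
        } finally {
          socket.close();
        }
      }
    }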

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java Wed Oct 30 22:21:59 2013
@@ -96,8 +96,8 @@ public class TestDNFencing {
     // Increase max streams so that we re-replicate quickly.
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 1000);
     // See RandomDeleterPolicy javadoc.
-    conf.setClass("dfs.block.replicator.classname", RandomDeleterPolicy.class,
-        BlockPlacementPolicy.class); 
+    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+        RandomDeleterPolicy.class, BlockPlacementPolicy.class); 
     conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(MiniDFSNNTopology.simpleHATopology())