Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2012/04/11 07:47:46 UTC

svn commit: r1324567 [4/4] - in /hadoop/common/branches/HDFS-3042/hadoop-hdfs-project: ./ hadoop-hdfs/ hadoop-hdfs/dev-support/ hadoop-hdfs/src/contrib/ hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/ hadoop-hdfs/sr...

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java Wed Apr 11 05:47:40 2012
@@ -18,12 +18,19 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.PrintStream;
 import java.net.URI;
-import java.util.ArrayList;
+import java.security.Permission;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
@@ -40,11 +47,11 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
-
 public class TestClusterId {
   private static final Log LOG = LogFactory.getLog(TestClusterId.class);
   File hdfsDir;
-  
+  Configuration config;
+
   private String getClusterId(Configuration config) throws IOException {
     // see if cluster id not empty.
     Collection<URI> dirsToFormat = FSNamesystem.getNamespaceDirs(config);
@@ -59,33 +66,41 @@ public class TestClusterId {
     LOG.info("successfully formated : sd="+sd.getCurrentDir() + ";cid="+cid);
     return cid;
   }
-  
+
   @Before
   public void setUp() throws IOException {
+    System.setSecurityManager(new NoExitSecurityManager());
+
     String baseDir = System.getProperty("test.build.data", "build/test/data");
 
-    hdfsDir = new File(baseDir, "dfs");
-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
-      throw new IOException("Could not delete test directory '" + 
-          hdfsDir + "'");
+    hdfsDir = new File(baseDir, "dfs/name");
+    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
+      throw new IOException("Could not delete test directory '" + hdfsDir + "'");
     }
     LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
+
+    // some tests might change these values, so reset them to defaults
+    // before every test
+    StartupOption.FORMAT.setForceFormat(false);
+    StartupOption.FORMAT.setInteractiveFormat(true);
+    
+    config = new Configuration();
+    config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
   }
-  
+
   @After
   public void tearDown() throws IOException {
-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
-      throw new IOException("Could not tearDown test directory '" +
-          hdfsDir + "'");
+    System.setSecurityManager(null);
+
+    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
+      throw new IOException("Could not tearDown test directory '" + hdfsDir
+          + "'");
     }
   }
-  
+
   @Test
   public void testFormatClusterIdOption() throws IOException {
-    Configuration config = new Configuration();
     
-    config.set(DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
-
     // 1. should format without cluster id
     //StartupOption.FORMAT.setClusterId("");
     NameNode.format(config);
@@ -107,4 +122,356 @@ public class TestClusterId {
     String newCid = getClusterId(config);
     assertFalse("ClusterId should not be the same", newCid.equals(cid));
   }
-}
+
+  /**
+   * Test namenode format with -format option. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormat() throws IOException {
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format option when an empty name directory
+   * exists. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithEmptyDir() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format -force options when name directory
+   * exists. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithForce() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format", "-force" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format -force -clusterid option when name
+   * directory exists. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithForceAndClusterId() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String myId = "testFormatWithForceAndClusterId";
+    String[] argv = { "-format", "-force", "-clusterid", myId };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cId = getClusterId(config);
+    assertEquals("ClusterIds do not match", myId, cId);
+  }
+
+  /**
+   * Test namenode format with -clusterid -force option. Format command should
+   * fail as no cluster id was provided.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithInvalidClusterIdOption() throws IOException {
+
+    String[] argv = { "-format", "-clusterid", "-force" };
+    PrintStream origErr = System.err;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdErr = new PrintStream(baos);
+    System.setErr(stdErr);
+
+    NameNode.createNameNode(argv, config);
+
+    // Check if usage is printed
+    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
+    System.setErr(origErr);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -clusterid options. Format should fail
+   * as no cluster id was provided.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNoClusterIdOption() throws IOException {
+
+    String[] argv = { "-format", "-clusterid" };
+    PrintStream origErr = System.err;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdErr = new PrintStream(baos);
+    System.setErr(stdErr);
+
+    NameNode.createNameNode(argv, config);
+
+    // Check if usage is printed
+    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
+    System.setErr(origErr);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -clusterid and empty clusterid. Format
+   * should fail as no valid id was provided.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithEmptyClusterIdOption() throws IOException {
+
+    String[] argv = { "-format", "-clusterid", "" };
+
+    PrintStream origErr = System.err;
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    PrintStream stdErr = new PrintStream(baos);
+    System.setErr(stdErr);
+
+    NameNode.createNameNode(argv, config);
+
+    // Check if usage is printed
+    assertTrue(baos.toString("UTF-8").contains("Usage: java NameNode"));
+    System.setErr(origErr);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive options when a non-empty
+   * name directory exists. Format should not succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractive() throws IOException {
+
+    // we check for a non empty dir, so create a child path
+    File data = new File(hdfsDir, "file");
+    if (!data.mkdirs()) {
+      fail("Failed to create dir " + data.getPath());
+    }
+
+    String[] argv = { "-format", "-nonInteractive" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have been aborted with exit code 1", 1,
+          e.status);
+    }
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive options when name
+   * directory does not exist. Format should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractiveNameDirDoesNotExit()
+      throws IOException {
+
+    String[] argv = { "-format", "-nonInteractive" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format -nonInteractive -force options. Format
+   * should succeed.
+   * 
+   * @throws IOException
+   */
+  @Test
+  public void testFormatWithNonInteractiveAndForce() throws IOException {
+
+    if (!hdfsDir.mkdirs()) {
+      fail("Failed to create dir " + hdfsDir.getPath());
+    }
+
+    String[] argv = { "-format", "-nonInteractive", "-force" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format option when a non-empty name directory
+   * exists. Enter Y when prompted and the format should succeed.
+   * 
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testFormatWithoutForceEnterYes() throws IOException,
+      InterruptedException {
+
+    // we check for a non empty dir, so create a child path
+    File data = new File(hdfsDir, "file");
+    if (!data.mkdirs()) {
+      fail("Failed to create dir " + data.getPath());
+    }
+
+    // capture the input stream
+    InputStream origIn = System.in;
+    ByteArrayInputStream bins = new ByteArrayInputStream("Y\n".getBytes());
+    System.setIn(bins);
+
+    String[] argv = { "-format" };
+
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should have succeeded", 0, e.status);
+    }
+
+    System.setIn(origIn);
+
+    String cid = getClusterId(config);
+    assertTrue("Didn't get new ClusterId", (cid != null && !cid.equals("")));
+  }
+
+  /**
+   * Test namenode format with -format option when a non-empty name directory
+   * exists. Enter N when prompted and format should be aborted.
+   * 
+   * @throws IOException
+   * @throws InterruptedException
+   */
+  @Test
+  public void testFormatWithoutForceEnterNo() throws IOException,
+      InterruptedException {
+
+    // we check for a non empty dir, so create a child path
+    File data = new File(hdfsDir, "file");
+    if (!data.mkdirs()) {
+      fail("Failed to create dir " + data.getPath());
+    }
+
+    // capture the input stream
+    InputStream origIn = System.in;
+    ByteArrayInputStream bins = new ByteArrayInputStream("N\n".getBytes());
+    System.setIn(bins);
+
+    String[] argv = { "-format" };
+    try {
+      NameNode.createNameNode(argv, config);
+      fail("createNameNode() did not call System.exit()");
+    } catch (ExitException e) {
+      assertEquals("Format should not have succeeded", 1, e.status);
+    }
+
+    System.setIn(origIn);
+
+    // check that the version file does not exist.
+    File version = new File(hdfsDir, "current/VERSION");
+    assertFalse("Check version should not exist", version.exists());
+  }
+
+  private static class ExitException extends SecurityException {
+    private static final long serialVersionUID = 1L;
+    public final int status;
+
+    public ExitException(int status) {
+      super("There is no escape!");
+      this.status = status;
+    }
+  }
+
+  private static class NoExitSecurityManager extends SecurityManager {
+    @Override
+    public void checkPermission(Permission perm) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkPermission(Permission perm, Object context) {
+      // allow anything.
+    }
+
+    @Override
+    public void checkExit(int status) {
+      super.checkExit(status);
+      throw new ExitException(status);
+    }
+  }
+}
\ No newline at end of file
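
The new tests above all trap System.exit() through a custom SecurityManager so the exit status of NameNode.createNameNode() can be asserted. Below is a minimal standalone sketch of that pattern; the class and method names are illustrative, a plain System.exit() call stands in for the NameNode code path, and JUnit 4 is assumed.

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.fail;

    import java.security.Permission;

    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class ExitTrappingExample {

      /** Thrown instead of letting the JVM terminate. */
      private static class ExitException extends SecurityException {
        private static final long serialVersionUID = 1L;
        final int status;
        ExitException(int status) {
          super("trapped System.exit(" + status + ")");
          this.status = status;
        }
      }

      /** Allows everything except exiting the JVM. */
      private static class NoExitSecurityManager extends SecurityManager {
        @Override
        public void checkPermission(Permission perm) { /* allow anything */ }
        @Override
        public void checkPermission(Permission perm, Object context) { /* allow anything */ }
        @Override
        public void checkExit(int status) {
          super.checkExit(status);
          throw new ExitException(status);
        }
      }

      @Before
      public void installTrap() {
        System.setSecurityManager(new NoExitSecurityManager());
      }

      @After
      public void removeTrap() {
        System.setSecurityManager(null);
      }

      @Test
      public void exitStatusIsObservable() {
        try {
          System.exit(0); // stands in for NameNode.createNameNode(argv, config)
          fail("expected the exit to be trapped");
        } catch (ExitException e) {
          assertEquals("exit status should be 0", 0, e.status);
        }
      }
    }

Installing the manager in @Before and clearing it in @After, as TestClusterId does, keeps the trap scoped to each test method and restores normal exit behaviour afterwards.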

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed Apr 11 05:47:40 2012
@@ -179,8 +179,8 @@ public class TestEditLog extends TestCas
   }
   
   private long testLoad(byte[] data, FSNamesystem namesys) throws IOException {
-    FSEditLogLoader loader = new FSEditLogLoader(namesys);
-    return loader.loadFSEdits(new EditLogByteInputStream(data), 1);
+    FSEditLogLoader loader = new FSEditLogLoader(namesys, 0);
+    return loader.loadFSEdits(new EditLogByteInputStream(data), 1, null);
   }
 
   /**
@@ -315,7 +315,7 @@ public class TestEditLog extends TestCas
       //
       for (Iterator<StorageDirectory> it = 
               fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
-        FSEditLogLoader loader = new FSEditLogLoader(namesystem);
+        FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
         
         File editFile = NNStorage.getFinalizedEditsFile(it.next(), 3,
             3 + expectedTxns - 1);
@@ -323,7 +323,7 @@ public class TestEditLog extends TestCas
         
         System.out.println("Verifying file: " + editFile);
         long numEdits = loader.loadFSEdits(
-            new EditLogFileInputStream(editFile), 3);
+            new EditLogFileInputStream(editFile), 3, null);
         int numLeases = namesystem.leaseManager.countLease();
         System.out.println("Number of outstanding leases " + numLeases);
         assertEquals(0, numLeases);
@@ -774,8 +774,8 @@ public class TestEditLog extends TestCas
     }
 
     @Override
-    public FSEditLogOp readOp() throws IOException {
-      return reader.readOp();
+    protected FSEditLogOp nextOp() throws IOException {
+      return reader.readOp(false);
     }
 
     @Override
@@ -788,16 +788,11 @@ public class TestEditLog extends TestCas
       input.close();
     }
 
-    @Override // JournalStream
+    @Override
     public String getName() {
       return "AnonEditLogByteInputStream";
     }
 
-    @Override // JournalStream
-    public JournalType getType() {
-      return JournalType.FILE;
-    }
-
     @Override
     public boolean isInProgress() {
       return true;

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java Wed Apr 11 05:47:40 2012
@@ -236,9 +236,9 @@ public class TestEditLogRace {
       File editFile = new File(sd.getCurrentDir(), logFileName);
         
       System.out.println("Verifying file: " + editFile);
-      FSEditLogLoader loader = new FSEditLogLoader(namesystem);
+      FSEditLogLoader loader = new FSEditLogLoader(namesystem, startTxId);
       long numEditsThisLog = loader.loadFSEdits(new EditLogFileInputStream(editFile), 
-          startTxId);
+          startTxId, null);
       
       System.out.println("Number of edits: " + numEditsThisLog);
       assertTrue(numEdits == -1 || numEditsThisLog == numEdits);

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Wed Apr 11 05:47:40 2012
@@ -92,8 +92,8 @@ public class TestFSEditLogLoader {
     rwf.close();
     
     StringBuilder bld = new StringBuilder();
-    bld.append("^Error replaying edit log at offset \\d+");
-    bld.append(" on transaction ID \\d+\n");
+    bld.append("^Error replaying edit log at offset \\d+.  ");
+    bld.append("Expected transaction ID was \\d+\n");
     bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
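
The updated expected-message pattern can be checked in isolation. In the sketch below the sample message is hand-built to fit the regex and is not actual FSEditLogLoader output.

    import java.util.regex.Pattern;

    public class EditLogErrorPatternCheck {
      public static void main(String[] args) {
        // Same pattern the test assembles with StringBuilder.
        String regex = "^Error replaying edit log at offset \\d+.  "
            + "Expected transaction ID was \\d+\n"
            + "Recent opcode offsets: (\\d+\\s*){4}$";

        // Constructed sample shaped to match the pattern; real loader output may differ.
        String sample = "Error replaying edit log at offset 42.  "
            + "Expected transaction ID was 7\n"
            + "Recent opcode offsets: 10 20 30 40";

        // Prints "true": the sample and the pattern agree.
        System.out.println(Pattern.compile(regex).matcher(sample).find());
      }
    }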

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java Wed Apr 11 05:47:40 2012
@@ -143,9 +143,9 @@ public class TestSecurityTokenEditLog ex
         File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 1 + expectedTransactions - 1);
         System.out.println("Verifying file: " + editFile);
         
-        FSEditLogLoader loader = new FSEditLogLoader(namesystem);        
+        FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);        
         long numEdits = loader.loadFSEdits(
-            new EditLogFileInputStream(editFile), 1);
+            new EditLogFileInputStream(editFile), 1, null);
         assertEquals("Verification for " + editFile, expectedTransactions, numEdits);
       }
     } finally {

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java Wed Apr 11 05:47:40 2012
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import java.io.ByteArrayInputStream;
 import java.io.File;
 import java.io.IOException;
 import java.net.URI;
@@ -40,6 +41,7 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.base.Suppliers;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 
@@ -170,7 +172,7 @@ public class TestBootstrapStandby {
       int rc = BootstrapStandby.run(
           new String[]{"-force"},
           cluster.getConfiguration(1));
-      assertEquals(1, rc);
+      assertEquals(BootstrapStandby.ERR_CODE_LOGS_UNAVAILABLE, rc);
     } finally {
       logs.stopCapturing();
     }
@@ -184,7 +186,7 @@ public class TestBootstrapStandby {
     int rc = BootstrapStandby.run(
         new String[]{"-nonInteractive"},
         cluster.getConfiguration(1));
-    assertEquals(1, rc);
+    assertEquals(BootstrapStandby.ERR_CODE_ALREADY_FORMATTED, rc);
 
     // Should pass with -force
     rc = BootstrapStandby.run(
@@ -192,6 +194,24 @@ public class TestBootstrapStandby {
         cluster.getConfiguration(1));
     assertEquals(0, rc);
   }
+  
+  @Test(timeout=30000)
+  public void testOtherNodeNotActive() throws Exception {
+    cluster.transitionToStandby(0);
+    int rc = BootstrapStandby.run(
+        new String[]{"-nonInteractive"},
+        cluster.getConfiguration(1));
+    assertEquals(BootstrapStandby.ERR_CODE_OTHER_NN_NOT_ACTIVE, rc);
+    
+    // Answer "yes" to the prompt about transition to active
+    System.setIn(new ByteArrayInputStream("yes\n".getBytes()));
+    rc = BootstrapStandby.run(
+        new String[]{"-force"},
+        cluster.getConfiguration(1));
+    assertEquals(0, rc);
+    
+    assertFalse(nn0.getNamesystem().isInStandbyState());
+  }
 
   private void assertNNFilesMatch() throws Exception {
     List<File> curDirs = Lists.newArrayList();
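
The "yes" answer in testOtherNodeNotActive is delivered by swapping System.in for an in-memory stream before running BootstrapStandby. A minimal sketch of that stdin-injection technique in isolation follows; the prompt-reading code here is a stand-in, not BootstrapStandby itself.

    import java.io.BufferedReader;
    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import java.io.InputStreamReader;

    public class StdinInjectionExample {
      public static void main(String[] args) throws Exception {
        InputStream origIn = System.in;
        try {
          // Pre-load the answer the code under test will read from stdin.
          System.setIn(new ByteArrayInputStream("yes\n".getBytes("UTF-8")));

          // Stand-in for the confirmation prompt inside the code under test.
          BufferedReader prompt =
              new BufferedReader(new InputStreamReader(System.in, "UTF-8"));
          String answer = prompt.readLine();
          System.out.println("prompt answered with: " + answer); // prints "yes"
        } finally {
          System.setIn(origIn); // always restore the real stdin
        }
      }
    }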

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java Wed Apr 11 05:47:40 2012
@@ -25,6 +25,8 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -117,4 +119,17 @@ public class TestInitializeSharedEdits {
     assertFalse(NameNode.initializeSharedEdits(conf, false));
     assertTrue(NameNode.initializeSharedEdits(conf, false));
   }
+  
+  @Test
+  public void testInitializeSharedEditsConfiguresGenericConfKeys() {
+    Configuration conf = new Configuration();
+    conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
+    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
+        "ns1"), "nn1,nn2");
+    conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
+        "ns1", "nn1"), "localhost:1234");
+    assertNull(conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
+    NameNode.initializeSharedEdits(conf);
+    assertNotNull(conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY));
+  }
 }

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java Wed Apr 11 05:47:40 2012
@@ -68,6 +68,7 @@ public class MiniDFSClusterManager {
   private StartupOption dfsOpts;
   private String writeConfig;
   private Configuration conf;
+  private boolean format;
   
   private static final long SLEEP_INTERVAL_MS = 1000 * 60;
 
@@ -138,6 +139,7 @@ public class MiniDFSClusterManager {
     dfs = new MiniDFSCluster.Builder(conf).nameNodePort(nameNodePort)
                                           .numDataNodes(numDataNodes)
                                           .startupOption(dfsOpts)
+                                          .format(format)
                                           .build();
     dfs.waitActive();
     
@@ -196,8 +198,13 @@ public class MiniDFSClusterManager {
     // HDFS
     numDataNodes = intArgument(cli, "datanodes", 1);
     nameNodePort = intArgument(cli, "nnport", 0);
-    dfsOpts = cli.hasOption("format") ?
-        StartupOption.FORMAT : StartupOption.REGULAR;
+    if (cli.hasOption("format")) {
+      dfsOpts = StartupOption.FORMAT;
+      format = true;
+    } else {
+      dfsOpts = StartupOption.REGULAR;
+      format = false;
+    }
 
     // Runner
     writeDetails = cli.getOptionValue("writeDetails");

Modified: hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/pom.xml?rev=1324567&r1=1324566&r2=1324567&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/pom.xml (original)
+++ hadoop/common/branches/HDFS-3042/hadoop-hdfs-project/pom.xml Wed Apr 11 05:47:40 2012
@@ -31,6 +31,7 @@
     <module>hadoop-hdfs</module>
     <module>hadoop-hdfs-httpfs</module>
     <module>hadoop-hdfs/src/contrib/bkjournal</module>
+    <module>hadoop-hdfs/src/contrib/fuse-dfs</module>
   </modules>
 
   <build>