Posted to hdfs-commits@hadoop.apache.org by el...@apache.org on 2011/06/12 20:17:05 UTC

svn commit: r1134951 [3/3] - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/cli/ src/test/hdfs/org/apache/hadoop/hdfs/ src/test/hdfs/org/apache/hadoop/hdfs/ser...

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java?rev=1134951&r1=1134950&r2=1134951&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java Sun Jun 12 18:17:05 2011
@@ -391,6 +391,7 @@ public class LeaseManager {
 
   /** Check the leases beginning from the oldest. */
   private synchronized void checkLeases() {
+    assert fsnamesystem.hasWriteLock();
     for(; sortedLeases.size() > 0; ) {
       final Lease oldest = sortedLeases.first();
       if (!oldest.expiredHardLimit()) {

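The assertion added here documents a locking invariant: checkLeases() must only
run while the FSNamesystem write lock is held. A minimal sketch of the contract,
as a hypothetical method inside LeaseManager (not part of this change):

    // Hypothetical wrapper showing the locking contract checkLeases() now asserts.
    void checkLeasesUnderLock() {
      fsnamesystem.writeLock();
      try {
        checkLeases();  // the new assert fsnamesystem.hasWriteLock() holds here
      } finally {
        fsnamesystem.writeUnlock();
      }
    }
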
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java?rev=1134951&r1=1134950&r2=1134951&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java Sun Jun 12 18:17:05 2011
@@ -56,7 +56,7 @@ public class TestHDFSCLI extends CLITest
                                                  .racks(racks)
                                                  .hosts(hosts)
                                                  .build();
-    
+    dfsCluster.waitClusterUp();
     namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
     
     username = System.getProperty("user.name");

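The added waitClusterUp() makes the test block until the mini-cluster reports
itself up before reading the default filesystem name, instead of racing
NameNode startup. The build-then-wait idiom, sketched (the datanode count here
is an assumption, not taken from this test):

    // Build a mini-cluster, then wait for it to come up before using it.
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster dfsCluster = new MiniDFSCluster.Builder(conf)
                                                  .numDataNodes(2)
                                                  .build();
    dfsCluster.waitClusterUp();  // blocks until the cluster is fully up
    FileSystem fs = dfsCluster.getFileSystem();
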
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1134951&r1=1134950&r2=1134951&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java Sun Jun 12 18:17:05 2011
@@ -555,6 +555,15 @@ public class DFSTestUtil {
     IOUtils.copyBytes(is, os, s.length(), true);
   }
 
+  /** Append the given string to the given file. */
+  public static void appendFile(FileSystem fs, Path p, String s) 
+      throws IOException {
+    assert fs.exists(p);
+    InputStream is = new ByteArrayInputStream(s.getBytes());
+    FSDataOutputStream os = fs.append(p);
+    IOUtils.copyBytes(is, os, s.length(), true);
+  }
+  
   // Returns url content as string.
   public static String urlGet(URL url) throws IOException {
     URLConnection conn = url.openConnection();

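A hypothetical usage of the new helper, driven the same way as the existing
createFile utility (path and payload are illustrative; the target cluster must
support append):

    // Create a file, then append to it via the new helper.
    Path p = new Path("/test/appendTarget");
    DFSTestUtil.createFile(fs, p, 1024, (short)1, 0);
    DFSTestUtil.appendFile(fs, p, "appended bytes");
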
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java?rev=1134951&r1=1134950&r2=1134951&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java Sun Jun 12 18:17:05 2011
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.D
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import static org.junit.Assert.*;
 
 import org.junit.After;
@@ -201,7 +202,8 @@ public class TestDecommission {
     nodes.add(nodename);
     writeConfigFile(excludeFile, nodes);
     cluster.getNamesystem(nnIndex).refreshNodes(conf);
-    DatanodeInfo ret = cluster.getNamesystem(nnIndex).getDatanode(info[index]);
+    DatanodeInfo ret = NameNodeAdapter.getDatanode(
+        cluster.getNameNode(nnIndex), info[index]);
     waitNodeState(ret, waitForState);
     return ret;
   }
@@ -371,7 +373,7 @@ public class TestDecommission {
       // Stop decommissioning and verify stats
       writeConfigFile(excludeFile, null);
       fsn.refreshNodes(conf);
-      DatanodeInfo ret = fsn.getDatanode(downnode);
+      DatanodeInfo ret = NameNodeAdapter.getDatanode(namenode, downnode);
       waitNodeState(ret, AdminStates.NORMAL);
       verifyStats(namenode, fsn, ret, false);
     }

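Both hunks replace direct FSNamesystem.getDatanode() calls with the new
NameNodeAdapter.getDatanode() helper, which performs the lookup under the
namesystem read lock (see the NameNodeAdapter hunk below). The call-site shape,
with an assumed DatanodeID variable:

    // Resolve a datanode descriptor through the adapter rather than
    // reaching into FSNamesystem from test code.
    DatanodeInfo ret = NameNodeAdapter.getDatanode(
        cluster.getNameNode(nnIndex), datanodeId);
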
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1134951&r1=1134950&r2=1134951&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java Sun Jun 12 18:17:05 2011
@@ -20,20 +20,44 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 
-import junit.framework.TestCase;
+import static org.junit.Assert.*;
+import org.junit.Before;
+import org.junit.After;
+import org.junit.Test;
 
 /**
  * Tests to verify safe mode correctness.
  */
-public class TestSafeMode extends TestCase {
-  
-  static Log LOG = LogFactory.getLog(TestSafeMode.class);
+public class TestSafeMode {
+  Configuration conf; 
+  MiniDFSCluster cluster;
+  FileSystem fs;
+  DistributedFileSystem dfs;
+
+  @Before
+  public void startUp() throws IOException {
+    conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();      
+    fs = cluster.getFileSystem();
+    dfs = (DistributedFileSystem)fs;
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    if (fs != null) {
+      fs.close();
+    }
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
 
   /**
    * This test verifies that if SafeMode is manually entered, name-node does not
@@ -51,61 +75,123 @@ public class TestSafeMode extends TestCa
    *  
    * @throws IOException
    */
-  public void testManualSafeMode() throws IOException {
-    MiniDFSCluster cluster = null;
-    DistributedFileSystem fs = null;
+  @Test
+  public void testManualSafeMode() throws IOException {      
+    fs = (DistributedFileSystem)cluster.getFileSystem();
+    Path file1 = new Path("/tmp/testManualSafeMode/file1");
+    Path file2 = new Path("/tmp/testManualSafeMode/file2");
+    
+    // create two files with one block each.
+    DFSTestUtil.createFile(fs, file1, 1000, (short)1, 0);
+    DFSTestUtil.createFile(fs, file2, 2000, (short)1, 0);
+    fs.close();
+    cluster.shutdown();
+    
+    // now bring up just the NameNode.
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
+    cluster.waitActive();
+    dfs = (DistributedFileSystem)cluster.getFileSystem();
+    
+    assertTrue("No datanode is started. Should be in SafeMode", 
+               dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+    
+    // manually set safemode.
+    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+    
+    // now bring up the datanode and wait for it to be active.
+    cluster.startDataNodes(conf, 1, true, null, null);
+    cluster.waitActive();
+    
+    // wait longer than dfs.namenode.safemode.extension
     try {
-      Configuration conf = new HdfsConfiguration();
-      // disable safemode extension to make the test run faster.
-      conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, "1");
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      cluster.waitActive();
-      
-      fs = (DistributedFileSystem)cluster.getFileSystem();
-      Path file1 = new Path("/tmp/testManualSafeMode/file1");
-      Path file2 = new Path("/tmp/testManualSafeMode/file2");
-      
-      LOG.info("Created file1 and file2.");
-      
-      // create two files with one block each.
-      DFSTestUtil.createFile(fs, file1, 1000, (short)1, 0);
-      DFSTestUtil.createFile(fs, file2, 2000, (short)1, 0);
-      fs.close();
-      cluster.shutdown();
-      
-      // now bring up just the NameNode.
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
-      cluster.waitActive();
-      fs = (DistributedFileSystem)cluster.getFileSystem();
-      
-      LOG.info("Restarted cluster with just the NameNode");
-      
-      assertTrue("No datanode is started. Should be in SafeMode", 
-                 fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
-      
-      // manually set safemode.
-      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      
-      // now bring up the datanode and wait for it to be active.
-      cluster.startDataNodes(conf, 1, true, null, null);
-      cluster.waitActive();
-      
-      LOG.info("Datanode is started.");
-
-      // wait longer than dfs.namenode.safemode.extension
-      try {
-        Thread.sleep(2000);
-      } catch (InterruptedException ignored) {}
-      
-      assertTrue("should still be in SafeMode",
-          fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
-      
-      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-      assertFalse("should not be in SafeMode",
-          fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
-    } finally {
-      if(fs != null) fs.close();
-      if(cluster!= null) cluster.shutdown();
+      Thread.sleep(2000);
+    } catch (InterruptedException ignored) {}
+
+    assertTrue("should still be in SafeMode",
+        dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+    assertFalse("should not be in SafeMode", 
+        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
+  }
+
+  public interface FSRun {
+    void run(FileSystem fs) throws IOException;
+  }
+
+  /**
+   * Assert that the given function fails to run due to a safe 
+   * mode exception.
+   */
+  public void runFsFun(String msg, FSRun f) {
+    try {
+      f.run(fs);
+      fail(msg);
+    } catch (IOException ioe) {
+      assertTrue(ioe.getMessage().contains("safe mode"));
+    }
+  }
+
+  /**
+   * Run various fs operations while the NN is in safe mode,
+   * assert that they are either allowed or fail as expected.
+   */
+  @Test
+  public void testOperationsWhileInSafeMode() throws IOException {
+    final Path file1 = new Path("/file1");
+
+    assertFalse(dfs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+    DFSTestUtil.createFile(fs, file1, 1024, (short)1, 0);
+    assertTrue("Could not enter SM", 
+        dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
+
+    runFsFun("Set quota while in SM", new FSRun() { 
+      public void run(FileSystem fs) throws IOException {
+        ((DistributedFileSystem)fs).setQuota(file1, 1, 1); 
+      }});
+
+    runFsFun("Set perm while in SM", new FSRun() {
+      public void run(FileSystem fs) throws IOException {
+        fs.setPermission(file1, FsPermission.getDefault());
+      }});
+
+    runFsFun("Set owner while in SM", new FSRun() {
+      public void run(FileSystem fs) throws IOException {
+        fs.setOwner(file1, "user", "group");
+      }});
+
+    runFsFun("Set repl while in SM", new FSRun() {
+      public void run(FileSystem fs) throws IOException {
+        fs.setReplication(file1, (short)1);
+      }});
+
+    runFsFun("Append file while in SM", new FSRun() {
+      public void run(FileSystem fs) throws IOException {
+        DFSTestUtil.appendFile(fs, file1, "new bytes");
+      }});
+
+    runFsFun("Delete file while in SM", new FSRun() {
+      public void run(FileSystem fs) throws IOException {
+        fs.delete(file1, false);
+      }});
+
+    runFsFun("Rename file while in SM", new FSRun() {
+      public void run(FileSystem fs) throws IOException {
+        fs.rename(file1, new Path("file2"));
+      }});
+
+    try {
+      fs.setTimes(file1, 0, 0);
+    } catch (IOException ioe) {
+      fail("Set times failed while in SM");
     }
+
+    try {
+      DFSTestUtil.readFile(fs, file1);
+    } catch (IOException ioe) {
+      fail("Set times failed while in SM");
+    }
+
+    assertFalse("Could not leave SM",
+        dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE));
   }
-}
+  
+}
\ No newline at end of file

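The FSRun/runFsFun pair introduced above turns each safe-mode check into a
small callback. A hypothetical additional check in the same style (assuming
mkdirs is rejected in safe mode like the other mutations exercised here):

    runFsFun("Create dir while in SM", new FSRun() {
      public void run(FileSystem fs) throws IOException {
        fs.mkdirs(new Path("/newdir"));
      }});
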
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1134951&r1=1134950&r2=1134951&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Sun Jun 12 18:17:05 2011
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 
 /**
@@ -77,4 +78,18 @@ public class NameNodeAdapter {
   public static String getLeaseHolderForPath(NameNode namenode, String path) {
     return namenode.getNamesystem().leaseManager.getLeaseByPath(path).getHolder();
   }
+
+  /**
+   * Return the datanode descriptor for the given datanode.
+   */
+  public static DatanodeDescriptor getDatanode(NameNode namenode,
+      DatanodeID id) throws IOException {
+    FSNamesystem ns = namenode.getNamesystem();
+    ns.readLock();
+    try {
+      return ns.getDatanode(id);
+    } finally {
+      ns.readUnlock();
+    }
+  }
 }

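The lock/try/finally shape in getDatanode() is the same idiom the test hunks
below adopt for their own lookups. Sketched generically (the helper name and
Callable-based signature are illustrative, not part of this change):

    // Run an arbitrary read-only namesystem query under the read lock.
    static <T> T underReadLock(FSNamesystem ns,
        java.util.concurrent.Callable<T> query) throws Exception {
      ns.readLock();
      try {
        return query.call();
      } finally {
        ns.readUnlock();
      }
    }
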
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java?rev=1134951&r1=1134950&r2=1134951&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java Sun Jun 12 18:17:05 2011
@@ -61,7 +61,14 @@ public class TestDeadDatanode {
     FSNamesystem namesystem = cluster.getNamesystem();
     String state = alive ? "alive" : "dead";
     while (System.currentTimeMillis() < stopTime) {
-      if (namesystem.getDatanode(nodeID).isAlive == alive) {
+      namesystem.readLock();
+      DatanodeDescriptor dd;
+      try {
+        dd = namesystem.getDatanode(nodeID);
+      } finally {
+        namesystem.readUnlock();
+      }
+      if (dd.isAlive == alive) {
         LOG.info("datanode " + nodeID + " is " + state);
         return;
       }

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java?rev=1134951&r1=1134950&r2=1134951&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java Sun Jun 12 18:17:05 2011
@@ -54,7 +54,13 @@ public class TestHeartbeatHandling exten
       final DatanodeRegistration nodeReg = 
         DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
         
-      DatanodeDescriptor dd = namesystem.getDatanode(nodeReg);
+      namesystem.readLock();
+      DatanodeDescriptor dd;
+      try {
+        dd = namesystem.getDatanode(nodeReg);
+      } finally {
+        namesystem.readUnlock();
+      }
       
       final int REMAINING_BLOCKS = 1;
       final int MAX_REPLICATE_LIMIT = 

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java?rev=1134951&r1=1134950&r2=1134951&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java Sun Jun 12 18:17:05 2011
@@ -24,7 +24,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
 public class TestNNThroughputBenchmark {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java?rev=1134951&r1=1134950&r2=1134951&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java Sun Jun 12 18:17:05 2011
@@ -20,109 +20,31 @@ package org.apache.hadoop.hdfs.server.na
 
 import java.io.IOException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import junit.framework.TestCase;
+
+import org.junit.Test;
+import static org.junit.Assert.*;
 
 /**
  * Tests to verify safe mode correctness.
  */
-public class TestSafeMode extends TestCase {
+public class TestSafeMode {
   
-  static Log LOG = LogFactory.getLog(TestSafeMode.class);
-
-  /**
-   * This test verifies that if SafeMode is manually entered, name-node does not
-   * come out of safe mode even after the startup safe mode conditions are met.
-   * <ol>
-   * <li>Start cluster with 1 data-node.</li>
-   * <li>Create 2 files with replication 1.</li>
-   * <li>Re-start cluster with 0 data-nodes. 
-   * Name-node should stay in automatic safe-mode.</li>
-   * <li>Enter safe mode manually.</li>
-   * <li>Start the data-node.</li>
-   * <li>Wait longer than <tt>dfs.namenode.safemode.extension</tt> and 
-   * verify that the name-node is still in safe mode.</li>
-   * </ol>
-   *  
-   * @throws IOException
-   */
-  public void testManualSafeMode() throws IOException {
-    MiniDFSCluster cluster = null;
-    DistributedFileSystem fs = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      // disable safemode extension to make the test run faster.
-      conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, "1");
-      cluster = new MiniDFSCluster.Builder(conf).build();
-      cluster.waitActive();
-      
-      fs = (DistributedFileSystem)cluster.getFileSystem();
-      Path file1 = new Path("/tmp/testManualSafeMode/file1");
-      Path file2 = new Path("/tmp/testManualSafeMode/file2");
-      
-      LOG.info("Created file1 and file2.");
-      
-      // create two files with one block each.
-      DFSTestUtil.createFile(fs, file1, 1000, (short)1, 0);
-      DFSTestUtil.createFile(fs, file2, 2000, (short)1, 0);
-      fs.close();
-      cluster.shutdown();
-      
-      // now bring up just the NameNode.
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
-      cluster.waitActive();
-      fs = (DistributedFileSystem)cluster.getFileSystem();
-      
-      LOG.info("Restarted cluster with just the NameNode");
-      
-      assertTrue("No datanode is started. Should be in SafeMode", 
-                 fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
-      
-      // manually set safemode.
-      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
-      
-      // now bring up the datanode and wait for it to be active.
-      cluster.startDataNodes(conf, 1, true, null, null);
-      cluster.waitActive();
-      
-      LOG.info("Datanode is started.");
-
-      // wait longer than dfs.namenode.safemode.extension
-      try {
-        Thread.sleep(2000);
-      } catch (InterruptedException ignored) {}
-      
-      assertTrue("should still be in SafeMode",
-          fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
-      
-      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-      assertFalse("should not be in SafeMode",
-          fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
-    } finally {
-      if(fs != null) fs.close();
-      if(cluster!= null) cluster.shutdown();
-    }
-  }
-
-
   /**
    * Verify that the NameNode stays in safemode when dfs.safemode.datanode.min
    * is set to a number greater than the number of live datanodes.
    */
+  @Test
   public void testDatanodeThreshold() throws IOException {
     MiniDFSCluster cluster = null;
     DistributedFileSystem fs = null;
     try {
-      Configuration conf = new Configuration();
+      Configuration conf = new HdfsConfiguration();
       conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
       conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
 

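Both TestSafeMode classes drop junit.framework.TestCase in favor of
annotation-driven JUnit 4, the same migration applied throughout this commit.
The pattern, reduced to a placeholder sketch:

    // Before: public class TestFoo extends TestCase { public void testBar() {...} }
    // After:
    import org.junit.Test;
    import static org.junit.Assert.*;

    public class TestFoo {
      @Test
      public void testBar() {
        assertTrue("expected condition", true);
      }
    }
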
Modified: hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java?rev=1134951&r1=1134950&r2=1134951&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java (original)
+++ hadoop/hdfs/trunk/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java Sun Jun 12 18:17:05 2011
@@ -39,7 +39,8 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.After;
-import static org.junit.Assert.assertFalse;
+
+import static org.junit.Assert.*;
 import org.junit.Before;
 import org.junit.Test;
 import static org.mockito.Matchers.anyString;
@@ -99,6 +100,17 @@ public class TestNNLeaseRecovery {
     }
   }
 
+  // Release the lease for the given file under the namesystem write lock.
+  private boolean releaseLease(FSNamesystem ns, LeaseManager.Lease lm, 
+      Path file) throws IOException {
+    ns.writeLock();
+    try {
+      return ns.internalReleaseLease(lm, file.toString(), null);
+    } finally {
+      ns.writeUnlock();
+    }
+  }
+
   /**
    * Mocks FSNamesystem instance, adds an empty file and invokes lease recovery
    * method. 
@@ -118,7 +130,7 @@ public class TestNNLeaseRecovery {
     fsn.dir.addFile(file.toString(), ps, (short)3, 1l, 
       "test", "test-machine", dnd, 1001l);
     assertTrue("True has to be returned in this case",
-      fsn.internalReleaseLease(lm, file.toString(), null));
+        releaseLease(fsn, lm, file));
   }
   
   /**
@@ -143,9 +155,9 @@ public class TestNNLeaseRecovery {
     mockFileBlocks(2, null, 
       HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
     
-    fsn.internalReleaseLease(lm, file.toString(), null);
-    assertTrue("FSNamesystem.internalReleaseLease suppose to throw " +
-      "IOException here", false);
+    releaseLease(fsn, lm, file);
+    fail("FSNamesystem.internalReleaseLease suppose to throw " +
+      "IOException here");
   }  
 
   /**
@@ -169,15 +181,14 @@ public class TestNNLeaseRecovery {
     mockFileBlocks(2, HdfsConstants.BlockUCState.COMMITTED, 
       HdfsConstants.BlockUCState.COMMITTED, file, dnd, ps, false);
 
-    fsn.internalReleaseLease(lm, file.toString(), null);
-    assertTrue("FSNamesystem.internalReleaseLease suppose to throw " +
-      "AlreadyBeingCreatedException here", false);
+    releaseLease(fsn, lm, file);
+    fail("FSNamesystem.internalReleaseLease suppose to throw " +
+      "IOException here");
   }
 
   /**
    * Mocks FSNamesystem instance, adds an empty file with 0 blocks
    * and invokes lease recovery method. 
-   * 
    */
   @Test
   public void testInternalReleaseLease_0blocks () throws IOException {
@@ -194,7 +205,7 @@ public class TestNNLeaseRecovery {
     mockFileBlocks(0, null, null, file, dnd, ps, false);
 
     assertTrue("True has to be returned in this case",
-      fsn.internalReleaseLease(lm, file.toString(), null));
+        releaseLease(fsn, lm, file));
   }
   
   /**
@@ -217,9 +228,9 @@ public class TestNNLeaseRecovery {
 
     mockFileBlocks(1, null, HdfsConstants.BlockUCState.COMMITTED, file, dnd, ps, false);
 
-    fsn.internalReleaseLease(lm, file.toString(), null);
-    assertTrue("FSNamesystem.internalReleaseLease suppose to throw " +
-      "AlreadyBeingCreatedException here", false);
+    releaseLease(fsn, lm, file);
+    fail("FSNamesystem.internalReleaseLease suppose to throw " +
+      "IOException here");
   }
 
   /**
@@ -244,7 +255,7 @@ public class TestNNLeaseRecovery {
       HdfsConstants.BlockUCState.UNDER_CONSTRUCTION, file, dnd, ps, false);
         
     assertFalse("False is expected in return in this case",
-      fsn.internalReleaseLease(lm, file.toString(), null));
+        releaseLease(fsn, lm, file));
   }
 
   @Test
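The recurring change in this file swaps assertTrue(msg, false) for the clearer
fail(msg) after a call that is expected to throw. Whether the exception is
caught locally or declared via @Test(expected=...) lies outside these hunks;
one common shape of the idiom, sketched with a hypothetical method under test:

    // If doThrowingCall() returns normally, fail() marks the test failed;
    // if it throws IOException, the expected= clause passes the test.
    @Test(expected = IOException.class)
    public void testThrows() throws IOException {
      doThrowingCall();
      fail("doThrowingCall() was supposed to throw IOException");
    }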