You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/04/27 00:44:35 UTC

svn commit: r1331138 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java

Author: szetszwo
Date: Thu Apr 26 22:44:34 2012
New Revision: 1331138

URL: http://svn.apache.org/viewvc?rev=1331138&view=rev
Log:
HDFS-3181. Fix a test case in TestLeaseRecovery2.

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1331138&r1=1331137&r2=1331138&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Apr 26 22:44:34 2012
@@ -565,6 +565,8 @@ Release 2.0.0 - UNRELEASED 
     HDFS-3319. Change DFSOutputStream to not to start a thread in constructors.
     (szetszwo)
 
+    HDFS-3181. Fix a test case in TestLeaseRecovery2.  (szetszwo)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1331138&r1=1331137&r2=1331138&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Thu Apr 26 22:44:34 2012
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.FSDataOutput
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -49,6 +50,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -90,7 +92,7 @@ public class TestLeaseRecovery2 {
 
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
     cluster.waitActive();
-    dfs = (DistributedFileSystem)cluster.getFileSystem();
+    dfs = cluster.getFileSystem();
   }
   
   /**
@@ -406,17 +408,26 @@ public class TestLeaseRecovery2 {
    */
   @Test
   public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
-    hardLeaseRecoveryRestartHelper(false);
+    hardLeaseRecoveryRestartHelper(false, -1);
   }
-  
+
+  @Test
+  public void testHardLeaseRecoveryAfterNameNodeRestart2() throws Exception {
+    hardLeaseRecoveryRestartHelper(false, 1535);
+  }
+
   @Test
   public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart()
       throws Exception {
-    hardLeaseRecoveryRestartHelper(true);
+    hardLeaseRecoveryRestartHelper(true, -1);
   }
   
-  public void hardLeaseRecoveryRestartHelper(boolean doRename)
+  public void hardLeaseRecoveryRestartHelper(boolean doRename, int size)
       throws Exception {
+    if (size < 0) {
+      size = AppendTestUtil.nextInt(FILE_SIZE + 1);
+    }
+
     //create a file
     String fileStr = "/hardLeaseRecovery";
     AppendTestUtil.LOG.info("filestr=" + fileStr);
@@ -426,7 +437,6 @@ public class TestLeaseRecovery2 {
     assertTrue(dfs.dfs.exists(fileStr));
 
     // write bytes into the file.
-    int size = AppendTestUtil.nextInt(FILE_SIZE);
     AppendTestUtil.LOG.info("size=" + size);
     stm.write(buffer, 0, size);
     
@@ -440,6 +450,11 @@ public class TestLeaseRecovery2 {
     AppendTestUtil.LOG.info("hflush");
     stm.hflush();
     
+    // check visible length
+    final HdfsDataInputStream in = (HdfsDataInputStream)dfs.open(filePath);
+    Assert.assertEquals(size, in.getVisibleLength());
+    in.close();
+    
     if (doRename) {
       fileStr += ".renamed";
       Path renamedPath = new Path(fileStr);
@@ -463,14 +478,11 @@ public class TestLeaseRecovery2 {
     // Make sure lease recovery begins.
     Thread.sleep(HdfsServerConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
     
-    assertEquals("lease holder should now be the NN", HdfsServerConstants.NAMENODE_LEASE_HOLDER,
-        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
+    checkLease(fileStr, size);
     
     cluster.restartNameNode(false);
     
-    assertEquals("lease holder should still be the NN after restart",
-        HdfsServerConstants.NAMENODE_LEASE_HOLDER,
-        NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
+    checkLease(fileStr, size);
     
     // Let the DNs send heartbeats again.
     for (DataNode dn : cluster.getDataNodes()) {
@@ -492,12 +504,12 @@ public class TestLeaseRecovery2 {
     assertEquals(size, locatedBlocks.getFileLength());
 
     // make sure that the client can't write data anymore.
-    stm.write('b');
     try {
+      stm.write('b');
       stm.hflush();
       fail("Should not be able to flush after we've lost the lease");
     } catch (IOException e) {
-      LOG.info("Expceted exception on hflush", e);
+      LOG.info("Expected exception on write/hflush", e);
     }
     
     try {
@@ -512,4 +524,16 @@ public class TestLeaseRecovery2 {
         "File size is good. Now validating sizes from datanodes...");
     AppendTestUtil.checkFullFile(dfs, filePath, size, buffer, fileStr);
   }
+  
+  static void checkLease(String f, int size) {
+    final String holder = NameNodeAdapter.getLeaseHolderForPath(
+        cluster.getNameNode(), f); 
+    if (size == 0) {
+      assertEquals("lease holder should be null, file is closed", null, holder);
+    } else {
+      assertEquals("lease holder should now be the NN",
+          HdfsServerConstants.NAMENODE_LEASE_HOLDER, holder);
+    }
+    
+  }
 }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1331138&r1=1331137&r2=1331138&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Thu Apr 26 22:44:34 2012
@@ -126,7 +126,8 @@ public class NameNodeAdapter {
   }
 
   public static String getLeaseHolderForPath(NameNode namenode, String path) {
-    return namenode.getNamesystem().leaseManager.getLeaseByPath(path).getHolder();
+    Lease l = namenode.getNamesystem().leaseManager.getLeaseByPath(path);
+    return l == null? null: l.getHolder();
   }
 
   /**