Posted to hdfs-commits@hadoop.apache.org by ha...@apache.org on 2009/10/17 02:39:44 UTC

svn commit: r826149 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/

Author: hairong
Date: Sat Oct 17 00:39:44 2009
New Revision: 826149

URL: http://svn.apache.org/viewvc?rev=826149&view=rev
Log:
HDFS-668. TestFileAppend3#TC7 sometimes hangs. Contributed by Hairong Kuang.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=826149&r1=826148&r2=826149&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Sat Oct 17 00:39:44 2009
@@ -414,6 +414,8 @@
 
     HDFS-682. Fix bugs in TestBlockUnderConstruction.  (szetszwo)
 
+    HDFS-668. TestFileAppend3#TC7 sometimes hangs. (hairong)
+
 Release 0.20.2 - Unreleased
 
   BUG FIXES

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=826149&r1=826148&r2=826149&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Sat Oct 17 00:39:44 2009
@@ -1102,7 +1102,7 @@
     // construct a RBW replica with the new GS
     File blkfile = replicaInfo.getBlockFile();
     FSVolume v = volumes.getNextVolume(estimateBlockLen);
-    File newBlkFile = v.createRbwFile(replicaInfo);
+    File newBlkFile = new File(v.rbwDir, replicaInfo.getBlockName());
     File oldmeta = replicaInfo.getMetaFile();
     ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten(
         replicaInfo.getBlockId(), replicaInfo.getNumBytes(), newGS,
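
Why this one-line change matters: judging from the surrounding code, v.createRbwFile() would create a brand-new file on disk under the rbw directory, whereas this recovery path only needs the name of the rename target, because the existing block file is moved onto it. Constructing a java.io.File is purely a path operation. A minimal sketch of the distinction (the directory and block names are illustrative, not the actual FSDataset code):

    import java.io.File;

    public class PathVsCreate {
      public static void main(String[] args) throws Exception {
        File rbwDir = new File("/tmp/rbw");          // stand-in for v.rbwDir
        rbwDir.mkdirs();

        // Purely a path: the constructor creates nothing on disk.
        File target = new File(rbwDir, "blk_1234");
        System.out.println("exists after new File(): " + target.exists());   // false

        // By contrast, createNewFile() materializes an empty file at the
        // path -- the wrong thing to do when an existing block file is
        // about to be renamed onto that very location.
        File created = new File(rbwDir, "blk_5678");
        created.createNewFile();
        System.out.println("exists after createNewFile(): " + created.exists());  // true
      }
    }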

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=826149&r1=826148&r2=826149&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Sat Oct 17 00:39:44 2009
@@ -1000,7 +1000,7 @@
                                DatanodeDescriptor node,
                                DatanodeDescriptor delNodeHint)
   throws IOException {
-    BlockInfo storedBlock = blocksMap.getStoredBlock(block);
+    BlockInfo storedBlock = findStoredBlock(block.getBlockId());
     if (storedBlock == null || storedBlock.getINode() == null) {
      // If this block does not belong to any file, then we are done.
       NameNode.stateChangeLog.info("BLOCK* NameSystem.addStoredBlock: "
@@ -1668,6 +1668,18 @@
     blocksMap.removeBlock(block);
   }
   
+  /**
+   * Update the block with the new generation stamp and new length.
+   * 
+   * @param block block
+   * @param newGS new generation stamp
+   * @param newLen new block size
+   * @return the stored block in the blocks map
+   */
+  BlockInfo updateBlock(Block block, long newGS, long newLen) {
+    return blocksMap.updateBlock(block, newGS, newLen);
+  }
+  
   int getCapacity() {
     synchronized(namesystem) {
       return blocksMap.getCapacity();

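Two things change in BlockManager. First, addStoredBlock now resolves a reported block by its ID alone instead of by the full (ID, generation stamp) key; presumably this lets a datanode report that still carries the pre-recovery generation stamp find the updated block rather than fall into the "does not belong to any file" branch. Second, updateBlock is a thin delegation so FSNamesystem never has to touch BlocksMap directly. A plausible shape for the ID-only lookup, assuming a wildcard-stamp probe key (a sketch, not the committed findStoredBlock):

    // Sketch only: build a probe block whose generation stamp is a
    // wildcard, so that map equality matches on block ID alone.
    BlockInfo findStoredBlock(long blockId) {
      Block probe = new Block(blockId, 0, GenerationStamp.WILDCARD_STAMP);
      return blocksMap.getStoredBlock(probe);
    }
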
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java?rev=826149&r1=826148&r2=826149&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java Sat Oct 17 00:39:44 2009
@@ -86,6 +86,25 @@
   }
 
   /**
+   * Update the old block with the new generation stamp and new length.
+   * 
+   * After the update, the block has a newer generation stamp, so the old
+   * entry must be removed from the map first and then reinserted.
+   * 
+   * @param block block
+   * @param newGS new generation stamp
+   * @param newLen new block size
+   * @return the stored block in the map
+   */
+  BlockInfo updateBlock(Block block, long newGS, long newLen) {
+    BlockInfo blockInfo = map.remove(block);
+    blockInfo.setGenerationStamp(newGS);
+    blockInfo.setNumBytes(newLen);
+    map.put(blockInfo, blockInfo);
+    return blockInfo;
+  }
+  
+  /**
    * Remove the block from the block map;
    * remove it from all data-node lists it belongs to;
    * and remove all data-node locations associated with the block.

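The remove-then-reinsert in updateBlock is the standard way to change a field that participates in a key's equality or hashing: mutate the key while it sits in a HashMap and the entry is stranded in its old bucket. The javadoc above implies the generation stamp is part of the key here; the hazard itself is general, as this self-contained demo shows:

    import java.util.HashMap;
    import java.util.Map;

    public class MutableKeyDemo {
      static final class Key {
        long id;
        long stamp;
        Key(long id, long stamp) { this.id = id; this.stamp = stamp; }
        public int hashCode() { return 31 * (int) (id ^ (id >>> 32)) + (int) stamp; }
        public boolean equals(Object o) {
          if (!(o instanceof Key)) return false;
          Key that = (Key) o;
          return that.id == id && that.stamp == stamp;
        }
      }

      public static void main(String[] args) {
        Map<Key, String> map = new HashMap<Key, String>();
        Key k = new Key(1234L, 1L);
        map.put(k, "replica state");

        // Wrong: mutating the key in place strands the entry, so a fresh
        // lookup under the new stamp finds nothing:
        //   k.stamp = 2L;  map.get(new Key(1234L, 2L)) == null

        // Right, and what BlocksMap.updateBlock does: remove under the old
        // key, mutate, then reinsert.
        String value = map.remove(k);
        k.stamp = 2L;
        map.put(k, value);
        System.out.println(map.get(new Key(1234L, 2L)));  // "replica state"
      }
    }
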
Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=826149&r1=826148&r2=826149&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sat Oct 17 00:39:44 2009
@@ -1399,6 +1399,9 @@
       //
       for (BlockInfo block: v.getBlocks()) {
         if (!blockManager.checkMinReplication(block)) {
+          LOG.info("BLOCK* NameSystem.checkFileProgress: "
+              + "block " + block + " has not reached minimal replication "
+              + blockManager.minReplication);
           return false;
         }
       }
@@ -1408,6 +1411,9 @@
       //
       BlockInfo b = v.getPenultimateBlock();
       if (b != null && !blockManager.checkMinReplication(b)) {
+        LOG.info("BLOCK* NameSystem.checkFileProgress: "
+            + "block " + b + " has not reached minimal replication "
+            + blockManager.minReplication);
         return false;
       }
     }
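
These log lines make the failure mode visible: complete() keeps returning false while checkFileProgress() finds a block below minimal replication, and the client polls until it succeeds. Roughly, the client side looks like this (simplified from the DFSClient of this era, not the verbatim code):

    // Client-side completion loop (simplified). If checkFileProgress()
    // never passes on the namenode, this loop never exits -- the TC7 hang.
    boolean fileComplete = false;
    while (!fileComplete) {
      fileComplete = namenode.complete(src, clientName);
      if (!fileComplete) {
        Thread.sleep(400);  // back off, then ask the namenode again
      }
    }
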
@@ -3942,6 +3948,8 @@
   synchronized void updatePipeline(String clientName, Block oldBlock, 
       Block newBlock, DatanodeID[] newNodes)
       throws IOException {
+    assert newBlock.getBlockId()==oldBlock.getBlockId() : newBlock + " and "
+    + oldBlock + " have different block identifiers";
     LOG.info("updatePipeline(block=" + oldBlock
         + ", newGenerationStamp=" + newBlock.getGenerationStamp()
         + ", newLength=" + newBlock.getNumBytes()
@@ -3952,27 +3960,24 @@
    // check the validity of the block and lease holder name
     final INodeFileUnderConstruction pendingFile = 
       checkUCBlock(oldBlock, clientName);
-    final BlockInfo oldblockinfo = pendingFile.getLastBlock();
+    final BlockInfoUnderConstruction blockinfo = pendingFile.getLastBlock();
 
     // check new GS & length: this is not expected
-    if (newBlock.getGenerationStamp() <= oldblockinfo.getGenerationStamp() ||
-        newBlock.getNumBytes() < oldblockinfo.getNumBytes()) {
+    if (newBlock.getGenerationStamp() <= blockinfo.getGenerationStamp() ||
+        newBlock.getNumBytes() < blockinfo.getNumBytes()) {
       String msg = "Update " + oldBlock + " (len = " + 
-      oldblockinfo.getNumBytes() + ") to an older state: " + newBlock + 
+      blockinfo.getNumBytes() + ") to an older state: " + newBlock + 
       " (len = " + newBlock.getNumBytes() +")";
       LOG.warn(msg);
       throw new IOException(msg);
     }
     
-    // Remove old block from blocks map. This always have to be done
+    // Remove the old block from the raw map inside the blocks map.
+    // This does not change any other field of the block info.
+    // This always has to be done
     // because the generation stamp of this block is changing.
-    blockManager.removeBlockFromMap(oldblockinfo);
-
-    // update last block, construct newblockinfo and add it to the blocks map
-    BlockInfoUnderConstruction newblockinfo = 
-      new BlockInfoUnderConstruction(
-          newBlock, pendingFile.getReplication());
-    blockManager.addINode(newblockinfo, pendingFile);
+    blockManager.updateBlock(oldBlock, 
+        newBlock.getGenerationStamp(), newBlock.getNumBytes());
 
     // find the DatanodeDescriptor objects
     DatanodeDescriptor[] descriptors = null;
@@ -3982,8 +3987,7 @@
         descriptors[i] = getDatanode(newNodes[i]);
       }
     }
-    // add locations into the INodeUnderConstruction
-    pendingFile.setLastBlock(newblockinfo, descriptors);
+    blockinfo.setExpectedLocations(descriptors);
 
     // persist blocks only if append is supported
     String src = leaseManager.findPath(pendingFile);
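
This hunk is the heart of the fix: instead of replacing the last block's BlockInfo wholesale, the same object is updated in place, which preserves its replica bookkeeping; losing that state is presumably what left TC7 waiting forever in checkFileProgress(). Condensed from the hunk above, the two code paths are:

    // Before (simplified): drop the old BlockInfo and register a fresh one.
    // The new object starts with no replica state, and the swap can orphan
    // reports still keyed to the old block.
    blockManager.removeBlockFromMap(oldblockinfo);
    BlockInfoUnderConstruction newblockinfo =
        new BlockInfoUnderConstruction(newBlock, pendingFile.getReplication());
    blockManager.addINode(newblockinfo, pendingFile);
    pendingFile.setLastBlock(newblockinfo, descriptors);

    // After (simplified): keep the same BlockInfo object, re-key it in the
    // blocks map with the new generation stamp and length, and only reset
    // the expected pipeline locations.
    blockManager.updateBlock(oldBlock,
        newBlock.getGenerationStamp(), newBlock.getNumBytes());
    blockinfo.setExpectedLocations(descriptors);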

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=826149&r1=826148&r2=826149&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java Sat Oct 17 00:39:44 2009
@@ -24,6 +24,7 @@
 import junit.framework.Test;
 import junit.framework.TestSuite;
 
+import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
@@ -33,9 +34,23 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+import org.apache.log4j.Level;
 
 /** This class implements some of tests posted in HADOOP-2658. */
 public class TestFileAppend3 extends junit.framework.TestCase {
+  {
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
+  }
+
   static final long BLOCK_SIZE = 64 * 1024;
   static final short REPLICATION = 3;
   static final int DATANODE_NUM = 5;