Posted to common-commits@hadoop.apache.org by ha...@apache.org on 2008/11/11 01:07:39 UTC

svn commit: r712881 - in /hadoop/core/trunk: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Author: hairong
Date: Mon Nov 10 16:07:38 2008
New Revision: 712881

URL: http://svn.apache.org/viewvc?rev=712881&view=rev
Log:
HADOOP-4556. Block went missing. Contributed by Hairong Kuang.
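
The changes below suggest the fix: when a datanode is removed (or its block list is reset), the namenode now also purges that node's pending block-invalidation state, so stale deletion requests cannot later be issued against replicas the namenode still needs. The following is a minimal, self-contained sketch of that cleanup pattern only; the class, field, and method names here (PendingInvalidations, pending, add, removeNode) are simplified stand-ins for illustration, not the actual Hadoop types.

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;

    // Simplified model of a per-datanode invalidation queue. The real code
    // keys recentInvalidateSets by storage ID inside FSNamesystem; plain
    // String keys and String block names stand in for those types here.
    public class PendingInvalidations {
      private final Map<String, Collection<String>> pending =
          new HashMap<String, Collection<String>>();

      // Queue a block for deletion on the given datanode.
      public void add(String storageId, String blockName) {
        Collection<String> set = pending.get(storageId);
        if (set == null) {
          set = new HashSet<String>();
          pending.put(storageId, set);
        }
        set.add(blockName);
      }

      // Cleanup performed when a datanode is removed: without this step,
      // a node that later re-registers could be told to delete blocks the
      // namenode still counts on -- the "missing block" hazard this commit
      // appears to address.
      public void removeNode(String storageId) {
        pending.remove(storageId);
      }

      public static void main(String[] args) {
        PendingInvalidations inv = new PendingInvalidations();
        inv.add("storage-1", "blk_1001");
        inv.removeNode("storage-1");      // node removed: stale request discarded
        System.out.println(inv.pending);  // prints {}
      }
    }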

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=712881&r1=712880&r2=712881&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Nov 10 16:07:38 2008
@@ -1098,6 +1098,8 @@
     HADOOP-3883. Limit namenode to assign at most one generation stamp for
     a particular block within a short period. (szetszwo)
 
+    HADOOP-4556. Block went missing. (hairong)
+
 Release 0.18.2 - 2008-11-03
 
   BUG FIXES

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java?rev=712881&r1=712880&r2=712881&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java Mon Nov 10 16:07:38 2008
@@ -211,6 +211,7 @@
     this.dfsUsed = 0;
     this.xceiverCount = 0;
     this.blockList = null;
+    this.invalidateBlocks.clear();
   }
 
   public int numBlocks() {

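The hunk above makes DatanodeDescriptor.resetBlocks() also empty the descriptor's own invalidateBlocks queue. A rough stand-in for the state touched by that hunk is sketched below; the field types (long, int, Object, Set) are assumptions made only so the sketch compiles, and statements of the real method that do not appear as context in the hunk are omitted.

    import java.util.HashSet;
    import java.util.Set;

    // Minimal stand-in for the DatanodeDescriptor fields visible in the hunk.
    class DatanodeState {
      long dfsUsed = 0;
      int xceiverCount = 0;
      Object blockList = null;  // head of the node's block list (type assumed)
      final Set<String> invalidateBlocks = new HashSet<String>();

      // Mirrors resetBlocks() after the patch: besides wiping the usage
      // counters and the block list, the pending invalidation queue is now
      // emptied, so a node that re-registers does not carry over stale
      // deletion requests.
      void resetBlocks() {
        this.dfsUsed = 0;
        this.xceiverCount = 0;
        this.blockList = null;
        this.invalidateBlocks.clear();  // the line added by this commit
      }
    }
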
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=712881&r1=712880&r2=712881&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Nov 10 16:07:38 2008
@@ -947,7 +947,7 @@
       LOG.info("Reducing replication for file " + src 
                + ". New replication is " + replication);
       for(int idx = 0; idx < fileBlocks.length; idx++)
-        proccessOverReplicatedBlock(fileBlocks[idx], replication, null, null);
+        processOverReplicatedBlock(fileBlocks[idx], replication, null, null);
     }
     return true;
   }
@@ -1474,20 +1474,40 @@
   }
 
   /**
+   * Remove a datanode from the invalidatesSet
+   * @param n datanode
+   */
+  private void removeFromInvalidates(DatanodeInfo n) {
+    recentInvalidateSets.remove(n.getStorageID());
+  }
+
+  /**
    * Adds block to list of blocks which will be invalidated on 
-   * specified datanode.
+   * specified datanode and log the move
+   * @param b block
+   * @param n datanode
    */
   private void addToInvalidates(Block b, DatanodeInfo n) {
+    addToInvalidatesNoLog(b, n);
+    NameNode.stateChangeLog.info("BLOCK* NameSystem.addToInvalidates: "
+        + b.getBlockName() + " is added to invalidSet of " + n.getName());
+  }
+
+  /**
+   * Adds block to list of blocks which will be invalidated on 
+   * specified datanode
+   * @param b block
+   * @param n datanode
+   */
+  private void addToInvalidatesNoLog(Block b, DatanodeInfo n) {
     Collection<Block> invalidateSet = recentInvalidateSets.get(n.getStorageID());
     if (invalidateSet == null) {
       invalidateSet = new HashSet<Block>();
       recentInvalidateSets.put(n.getStorageID(), invalidateSet);
     }
     invalidateSet.add(b);
-    NameNode.stateChangeLog.info("BLOCK* NameSystem.delete: "
-        + b.getBlockName() + " is added to invalidSet of " + n.getName());
   }
-
+  
   /**
    * Adds block to list of blocks which will be invalidated on 
    * all its datanodes.
@@ -2639,6 +2659,7 @@
 
   void unprotectedRemoveDatanode(DatanodeDescriptor nodeDescr) {
     nodeDescr.resetBlocks();
+    removeFromInvalidates(nodeDescr);
     NameNode.stateChangeLog.debug(
                                   "BLOCK* NameSystem.unprotectedRemoveDatanode: "
                                   + nodeDescr.getName() + " is out of service now.");
@@ -2929,7 +2950,7 @@
       updateNeededReplications(block, curReplicaDelta, 0);
     }
     if (numCurrentReplica > fileReplication) {
-      proccessOverReplicatedBlock(block, fileReplication, node, delNodeHint);
+      processOverReplicatedBlock(block, fileReplication, node, delNodeHint);
     }
     // If the file replication has reached desired value
     // we can remove any corrupt replicas the block may have
@@ -3010,7 +3031,7 @@
       if (numCurrentReplica > expectedReplication) {
         // over-replicated block
         nrOverReplicated++;
-        proccessOverReplicatedBlock(block, expectedReplication, null, null);
+        processOverReplicatedBlock(block, expectedReplication, null, null);
       }
     }
     LOG.info("Total number of blocks = " + blocksMap.size());
@@ -3024,7 +3045,7 @@
    * If there are any extras, call chooseExcessReplicates() to
    * mark them in the excessReplicateMap.
    */
-  private void proccessOverReplicatedBlock(Block block, short replication, 
+  private void processOverReplicatedBlock(Block block, short replication, 
       DatanodeDescriptor addedNode, DatanodeDescriptor delNodeHint) {
     if(addedNode == delNodeHint) {
       delNodeHint = null;
@@ -3156,14 +3177,9 @@
       // should be deleted.  Items are removed from the invalidate list
       // upon giving instructions to the namenode.
       //
-      Collection<Block> invalidateSet = recentInvalidateSets.get(cur.getStorageID());
-      if (invalidateSet == null) {
-        invalidateSet = new ArrayList<Block>();
-        recentInvalidateSets.put(cur.getStorageID(), invalidateSet);
-      }
-      invalidateSet.add(b);
-      NameNode.stateChangeLog.debug("BLOCK* NameSystem.chooseExcessReplicates: "
-                                    +"("+cur.getName()+", "+b+") is added to recentInvalidateSets");
+      addToInvalidatesNoLog(b, cur);
+      NameNode.stateChangeLog.info("BLOCK* NameSystem.chooseExcessReplicates: "
+                +"("+cur.getName()+", "+b+") is added to recentInvalidateSets");
     }
   }
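
The FSNamesystem hunks above split the old addToInvalidates into a logging wrapper plus addToInvalidatesNoLog, and chooseExcessReplicates now reuses the NoLog variant while writing its own, more specific info-level message instead of open-coding the map insertion. A compact sketch of that wrapper-plus-worker pattern, with simplified String keys standing in for the Block and DatanodeInfo types and System.out standing in for the state-change log, is:

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;

    // Simplified illustration of the split introduced above: the shared
    // insertion logic lives in addToInvalidatesNoLog(), while callers that
    // want the generic log line go through addToInvalidates(). Callers such
    // as the excess-replica path can use the NoLog variant and log their
    // own message.
    class InvalidateQueue {
      private final Map<String, Collection<String>> recentInvalidateSets =
          new HashMap<String, Collection<String>>();

      void addToInvalidates(String blockName, String storageId, String nodeName) {
        addToInvalidatesNoLog(blockName, storageId);
        System.out.println("BLOCK* addToInvalidates: " + blockName
            + " is added to invalidSet of " + nodeName);
      }

      void addToInvalidatesNoLog(String blockName, String storageId) {
        Collection<String> invalidateSet = recentInvalidateSets.get(storageId);
        if (invalidateSet == null) {
          invalidateSet = new HashSet<String>();
          recentInvalidateSets.put(storageId, invalidateSet);
        }
        invalidateSet.add(blockName);
      }
    }

One side effect visible in the hunk: the excess-replica path previously built its per-node set as an ArrayList, while the shared helper uses a HashSet, so duplicate entries for the same block and node are now collapsed; the log level for that path also moves from debug to info.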
 
@@ -3205,6 +3221,7 @@
         excessReplicateMap.remove(node.getStorageID());
       }
     }
+    
     // Remove the replica from corruptReplicas
     corruptReplicas.removeFromCorruptReplicasMap(block, node);
   }