You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ha...@apache.org on 2008/11/14 01:57:30 UTC

svn commit: r713890 - in /hadoop/core/branches/branch-0.19: ./ src/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/test/org/apache/hadoop/hdfs/ src/test/org/apache/hadoop/hdfs/server/namenode/

Author: hairong
Date: Thu Nov 13 16:57:29 2008
New Revision: 713890

URL: http://svn.apache.org/viewvc?rev=713890&view=rev
Log:
Merge -r 713887:713888 from trunk to branch-0.19 to move the change of HADOOP-4643 into branch 0.19

Added:
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
      - copied unchanged from r713888, hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
Modified:
    hadoop/core/branches/branch-0.19/   (props changed)
    hadoop/core/branches/branch-0.19/CHANGES.txt   (contents, props changed)
    hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

Propchange: hadoop/core/branches/branch-0.19/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Nov 13 16:57:29 2008
@@ -1 +1 @@
-/hadoop/core/trunk:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,709040,709303,712881
+/hadoop/core/trunk:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,709040,709303,712881,713888

Modified: hadoop/core/branches/branch-0.19/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/CHANGES.txt?rev=713890&r1=713889&r2=713890&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/CHANGES.txt (original)
+++ hadoop/core/branches/branch-0.19/CHANGES.txt Thu Nov 13 16:57:29 2008
@@ -1001,6 +1001,9 @@
 
     HADOOP-4556. Block went missing. (hairong)
 
+    HADOOP-4643. NameNode should exclude excessive replicas when counting
+    live replicas for a block. (hairong)
+
 Release 0.18.2 - 2008-11-03
 
   BUG FIXES

Propchange: hadoop/core/branches/branch-0.19/CHANGES.txt
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Thu Nov 13 16:57:29 2008
@@ -1 +1 @@
-/hadoop/core/trunk/CHANGES.txt:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,708723,709040,709303,711717,712881
+/hadoop/core/trunk/CHANGES.txt:697306,698176,699056,699098,699415,699424,699444,699490,699517,700163,700628,700923,701273,701398,703923,704203,704261,704701,704703,704707,704712,704732,704748,704989,705391,705420,705430,705762,706350,706707,706719,706796,706802,707258,707262,708623,708641,708710,708723,709040,709303,711717,712881,713888

Modified: hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=713890&r1=713889&r2=713890&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/core/branches/branch-0.19/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Nov 13 16:57:29 2008
@@ -184,7 +184,7 @@
   // eventually remove these extras.
   // Mapping: StorageID -> TreeSet<Block>
   //
-  private Map<String, Collection<Block>> excessReplicateMap = 
+  Map<String, Collection<Block>> excessReplicateMap = 
     new TreeMap<String, Collection<Block>>();
 
   //
@@ -2428,7 +2428,7 @@
           replIndex--;
           NameNode.stateChangeLog.info("BLOCK* "
               + "Removing block " + block
-              + " from neededReplications as it does not belong to any file.");
+              + " from neededReplications as it has enough replicas.");
           continue;
         }
 
@@ -2502,26 +2502,30 @@
     int live = 0;
     int decommissioned = 0;
     int corrupt = 0;
+    int excess = 0;
     Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
+    Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(block);
     while(it.hasNext()) {
       DatanodeDescriptor node = it.next();
-      Collection<DatanodeDescriptor> nodes = corruptReplicas.getNodes(block);
-      if ((nodes != null) && (nodes.contains(node)))
+      Collection<Block> excessBlocks = 
+        excessReplicateMap.get(node.getStorageID());
+      if ((nodesCorrupt != null) && (nodesCorrupt.contains(node)))
         corrupt++;
-      else if(!node.isDecommissionInProgress() && !node.isDecommissioned())
-        live++;
-      else
+      else if (node.isDecommissionInProgress() || node.isDecommissioned())
         decommissioned++;
+      else if (excessBlocks != null && excessBlocks.contains(block)) {
+        excess++;
+      } else {
+        live++;
+      }
       containingNodes.add(node);
       // Check if this replica is corrupt
       // If so, do not select the node as src node
-      if ((nodes != null) && nodes.contains(node))
+      if ((nodesCorrupt != null) && nodesCorrupt.contains(node))
         continue;
       if(node.getNumberOfBlocksToBeReplicated() >= maxReplicationStreams)
         continue; // already reached replication limit
       // the block must not be scheduled for removal on srcNode
-      Collection<Block> excessBlocks = 
-        excessReplicateMap.get(node.getStorageID());
       if(excessBlocks != null && excessBlocks.contains(block))
         continue;
       // never use already decommissioned nodes
@@ -2541,7 +2545,7 @@
         srcNode = node;
     }
     if(numReplicas != null)
-      numReplicas.initialize(live, decommissioned, corrupt);
+      numReplicas.initialize(live, decommissioned, corrupt, excess);
     return srcNode;
   }
 
@@ -3511,23 +3515,25 @@
 * An immutable object that stores the number of live replicas and
 * the number of decommissioned replicas.
    */
-  private static class NumberReplicas {
+  static class NumberReplicas {
     private int liveReplicas;
     private int decommissionedReplicas;
     private int corruptReplicas;
+    private int excessReplicas;
 
     NumberReplicas() {
-      initialize(0, 0, 0);
+      initialize(0, 0, 0, 0);
     }
 
-    NumberReplicas(int live, int decommissioned, int corrupt) {
-      initialize(live, decommissioned, corrupt);
+    NumberReplicas(int live, int decommissioned, int corrupt, int excess) {
+      initialize(live, decommissioned, corrupt, excess);
     }
 
-    void initialize(int live, int decommissioned, int corrupt) {
+    void initialize(int live, int decommissioned, int corrupt, int excess) {
       liveReplicas = live;
       decommissionedReplicas = decommissioned;
       corruptReplicas = corrupt;
+      excessReplicas = excess;
     }
 
     int liveReplicas() {
@@ -3539,6 +3545,9 @@
     int corruptReplicas() {
       return corruptReplicas;
     }
+    int excessReplicas() {
+      return excessReplicas;
+    }
   } 
 
   /**
@@ -3550,6 +3559,7 @@
     int count = 0;
     int live = 0;
     int corrupt = 0;
+    int excess = 0;
     Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(b);
     while ( nodeIter.hasNext() ) {
       DatanodeDescriptor node = nodeIter.next();
@@ -3559,17 +3569,23 @@
       else if (node.isDecommissionInProgress() || node.isDecommissioned()) {
         count++;
       }
-      else {
-        live++;
+      else  {
+        Collection<Block> blocksExcess = 
+          excessReplicateMap.get(node.getStorageID());
+        if (blocksExcess != null && blocksExcess.contains(b)) {
+          excess++;
+        } else {
+          live++;
+        }
       }
     }
-    return new NumberReplicas(live, count, corrupt);
+    return new NumberReplicas(live, count, corrupt, excess);
   }
 
   /**
    * Return the number of nodes that are live and decommissioned.
    */
-  private NumberReplicas countNodes(Block b) {
+  NumberReplicas countNodes(Block b) {
     return countNodes(b, blocksMap.nodeIterator(b));
   }
 

Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=713890&r1=713889&r2=713890&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu Nov 13 16:57:29 2008
@@ -51,7 +51,7 @@
  */
 public class MiniDFSCluster {
 
-  private class DataNodeProperties {
+  public class DataNodeProperties {
     DataNode datanode;
     Configuration conf;
     String[] dnArgs;
@@ -602,50 +602,55 @@
   /*
    * Shutdown a particular datanode
    */
-  boolean stopDataNode(int i) {
+  DataNodeProperties stopDataNode(int i) {
     if (i < 0 || i >= dataNodes.size()) {
-      return false;
+      return null;
     }
-    DataNode dn = dataNodes.remove(i).datanode;
+    DataNodeProperties dnprop = dataNodes.remove(i);
+    DataNode dn = dnprop.datanode;
     System.out.println("MiniDFSCluster Stopping DataNode " + 
                        dn.dnRegistration.getName() +
                        " from a total of " + (dataNodes.size() + 1) + 
                        " datanodes.");
     dn.shutdown();
     numDataNodes--;
-    return true;
+    return dnprop;
   }
 
-  /*
-   * Restart a particular datanode
+  /**
+   * Restart a datanode
+   * @param dnprop datanode's property
+   * @return true if restarting is successful
+   * @throws IOException
    */
-  synchronized boolean restartDataNode(int i) throws IOException {
-    if (i < 0 || i >= dataNodes.size()) {
-      return false;
-    }
-    DataNodeProperties dnprop = dataNodes.remove(i);
-    DataNode dn = dnprop.datanode;
+  public synchronized boolean restartDataNode(DataNodeProperties dnprop)
+  throws IOException {
     Configuration conf = dnprop.conf;
     String[] args = dnprop.dnArgs;
-    System.out.println("MiniDFSCluster Restart DataNode " + 
-                       dn.dnRegistration.getName() +
-                       " from a total of " + (dataNodes.size() + 1) + 
-                       " datanodes.");
-    dn.shutdown();
-
-    // recreate new datanode with the same configuration as the one
-    // that was stopped.
     Configuration newconf = new Configuration(conf); // save cloned config
     dataNodes.add(new DataNodeProperties(
                      DataNode.createDataNode(args, conf), 
                      newconf, args));
+    numDataNodes++;
     return true;
+
+  }
+  /*
+   * Restart a particular datanode
+   */
+  synchronized boolean restartDataNode(int i) throws IOException {
+    DataNodeProperties dnprop = stopDataNode(i);
+    if (dnprop == null) {
+      return false;
+    } else {
+      return restartDataNode(dnprop);
+    }
   }
 
   /*
    * Shutdown a datanode by name.
    */
-  synchronized boolean stopDataNode(String name) {
+  public synchronized DataNodeProperties stopDataNode(String name) {
     int i;
     for (i = 0; i < dataNodes.size(); i++) {
       DataNode dn = dataNodes.get(i).datanode;

Modified: hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=713890&r1=713889&r2=713890&view=diff
==============================================================================
--- hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/core/branches/branch-0.19/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Thu Nov 13 16:57:29 2008
@@ -367,7 +367,7 @@
     // corruptReplicasMap
     corruptReplicaSize = cluster.getNameNode().namesystem.
                           corruptReplicas.numCorruptReplicas(blk);
-    while (corruptReplicaSize != 0) {
+    while (corruptReplicaSize != 0 || replicaCount != numReplicas) {
       try {
         LOG.info("Looping until corrupt replica is invalidated");
         Thread.sleep(1000);