You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by ha...@apache.org on 2011/01/11 00:19:57 UTC

svn commit: r1057414 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/

Author: hairong
Date: Mon Jan 10 23:19:57 2011
New Revision: 1057414

URL: http://svn.apache.org/viewvc?rev=1057414&view=rev
Log:
HDFS-1536. Improve HDFS WebUI. Contributed by Hairong Kuang.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1057414&r1=1057413&r2=1057414&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Mon Jan 10 23:19:57 2011
@@ -7,6 +7,8 @@ Trunk (unreleased changes)
     HDFS-1526. Dfs client name for a map/reduce task should be unique
     among threads. (hairong)
 
+    HDFS-1536. Improve HDFS WebUI. (hairong)
+
   NEW FEATURES
 
     HDFS-1482. Add listCorruptFileBlocks to DistributedFileSystem.

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=1057414&r1=1057413&r2=1057414&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Mon Jan 10 23:19:57 2011
@@ -118,8 +118,6 @@ public class BlockManager {
    * Last block index used for replication work.
    */
   private int replIndex = 0;
-  private long missingBlocksInCurIter = 0;
-  private long missingBlocksInPrevIter = 0;
   Random r = new Random();
 
   // for block replicas placement
@@ -668,6 +666,11 @@ public class BlockManager {
     corruptReplicaBlocksCount = corruptReplicas.size();
   }
 
+  /** Return number of under-replicated but not missing blocks */
+  int getUnderReplicatedNotMissingBlocks() {
+    return neededReplications.getUnderReplicatedBlockCount();
+  }
+  
   /**
    * Schedule blocks for deletion at datanodes
    * @param nodesToProcess number of datanodes to schedule deletion work
@@ -749,8 +752,6 @@ public class BlockManager {
     try {
       synchronized (neededReplications) {
         if (neededReplications.size() == 0) {
-          missingBlocksInCurIter = 0;
-          missingBlocksInPrevIter = 0;
           return blocksToReplicate;
         }
 
@@ -769,8 +770,6 @@ public class BlockManager {
           if (!neededReplicationsIterator.hasNext()) {
             // start from the beginning
             replIndex = 0;
-            missingBlocksInPrevIter = missingBlocksInCurIter;
-            missingBlocksInCurIter = 0;
             blocksToProcess = Math.min(blocksToProcess, neededReplications
                 .size());
             if (blkCnt >= blocksToProcess)
@@ -827,10 +826,6 @@ public class BlockManager {
         containingNodes = new ArrayList<DatanodeDescriptor>();
         NumberReplicas numReplicas = new NumberReplicas();
         srcNode = chooseSourceDatanode(block, containingNodes, numReplicas);
-        if ((numReplicas.liveReplicas() + numReplicas.decommissionedReplicas())
-            <= 0) {
-          missingBlocksInCurIter++;
-        }
         if(srcNode == null) // block can not be replicated from any node
           return false;
 
@@ -1724,7 +1719,7 @@ public class BlockManager {
   
   long getMissingBlocksCount() {
     // not locking
-    return Math.max(missingBlocksInPrevIter, missingBlocksInCurIter);
+    return this.neededReplications.getCorruptBlockSize();
   }
 
   BlockInfo addINode(BlockInfo block, INodeFile iNode) {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1057414&r1=1057413&r2=1057414&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Jan 10 23:19:57 2011
@@ -4488,6 +4488,11 @@ public class FSNamesystem implements FSC
     return blockManager.underReplicatedBlocksCount;
   }
 
+  /** Return number of under-replicated but not missing blocks */
+  public long getUnderReplicatedNotMissingBlocks() {
+    return blockManager.getUnderReplicatedNotMissingBlocks();
+  }
+
   /** Returns number of blocks with corrupt replicas */
   public long getCorruptReplicaBlocks() {
     return blockManager.corruptReplicaBlocksCount;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1057414&r1=1057413&r2=1057414&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Mon Jan 10 23:19:57 2011
@@ -137,7 +137,7 @@ class NamenodeJspHelper {
     // Ideally this should be displayed in RED
     long missingBlocks = fsn.getMissingBlocksCount();
     if (missingBlocks > 0) {
-      return "<br> WARNING :" + " There are about " + missingBlocks
+      return "<br> WARNING :" + " There are " + missingBlocks
           + " missing blocks. Please check the log or run fsck. <br><br>";
     }
     return "";
@@ -159,6 +159,10 @@ class NamenodeJspHelper {
       return "<td id=\"col" + ++colNum + "\"> ";
     }
 
+    private String colTxt(String title) {
+      return "<td id=\"col" + ++colNum + "\" title=\"" + title + "\"> ";
+    }
+
     private void counterReset() {
       colNum = 0;
       rowNum = 0;
@@ -294,9 +298,9 @@ class NamenodeJspHelper {
           + "<a href=\"dfsnodelist.jsp?whatNodes=DECOMMISSIONING\">"
           + "Decommissioning Nodes</a> "
           + colTxt() + ":" + colTxt() + decommissioning.size() 
-          + rowTxt() + colTxt()
+          + rowTxt() + colTxt("Excludes missing blocks.")
           + "Number of Under-Replicated Blocks" + colTxt() + ":" + colTxt()
-          + fsn.getUnderReplicatedBlocks()
+          + fsn.getUnderReplicatedNotMissingBlocks()
           + "</table></div><br>\n");
 
       if (live.isEmpty() && dead.isEmpty()) {

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java?rev=1057414&r1=1057413&r2=1057414&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java Mon Jan 10 23:19:57 2011
@@ -26,8 +26,8 @@ import org.apache.hadoop.hdfs.protocol.B
 * Blocks that have only one replica have the highest
  */
 class UnderReplicatedBlocks implements Iterable<Block> {
-  static final int LEVEL = 4;
-  static public final int QUEUE_WITH_CORRUPT_BLOCKS = 2;
+  static final int LEVEL = 5;
+  static public final int QUEUE_WITH_CORRUPT_BLOCKS = 4;
   private List<TreeSet<Block>> priorityQueues = new ArrayList<TreeSet<Block>>();
       
   /* constructor */
@@ -55,6 +55,20 @@ class UnderReplicatedBlocks implements I
     return size;
   }
 
+  /* Return the number of under-replicated blocks excluding corrupt blocks */
+  synchronized int getUnderReplicatedBlockCount() {
+    int size = 0;
+    for (int i=0; i<QUEUE_WITH_CORRUPT_BLOCKS; i++) {
+      size += priorityQueues.get(i).size();
+    }
+    return size;
+  }
+  
+  /** Return the number of corrupt blocks */
+  synchronized int getCorruptBlockSize() {
+    return priorityQueues.get(QUEUE_WITH_CORRUPT_BLOCKS).size();
+  }
+  
   /* Check if a block is in the neededReplication queue */
   synchronized boolean contains(Block block) {
     for(TreeSet<Block> set:priorityQueues) {

Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java?rev=1057414&r1=1057413&r2=1057414&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java Mon Jan 10 23:19:57 2011
@@ -49,6 +49,7 @@ public class TestMissingBlocksAlert exte
       //minimize test delay
       conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 0);
       int fileLen = 10*1024;
+      conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, fileLen/2);
 
       //start a cluster with single datanode
       cluster = new MiniDFSCluster.Builder(conf).build();
@@ -83,13 +84,16 @@ public class TestMissingBlocksAlert exte
         Thread.sleep(100);
       }
       assertTrue(dfs.getMissingBlocksCount() == 1);
+      assertEquals(4, dfs.getUnderReplicatedBlocksCount());
+      assertEquals(3, 
+          cluster.getNamesystem().getUnderReplicatedNotMissingBlocks());
 
 
       // Now verify that it shows up on webui
       URL url = new URL("http://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY) + 
                         "/dfshealth.jsp");
       String dfsFrontPage = DFSTestUtil.urlGet(url);
-      String warnStr = "WARNING : There are about ";
+      String warnStr = "WARNING : There are ";
       assertTrue("HDFS Front page does not contain expected warning", 
                  dfsFrontPage.contains(warnStr + "1 missing blocks"));
 
@@ -103,6 +107,10 @@ public class TestMissingBlocksAlert exte
         Thread.sleep(100);
       }
 
+      assertEquals(2, dfs.getUnderReplicatedBlocksCount());
+      assertEquals(2, 
+          cluster.getNamesystem().getUnderReplicatedNotMissingBlocks());
+
       // and make sure WARNING disappears
       // Now verify that it shows up on webui
       dfsFrontPage = DFSTestUtil.urlGet(url);