Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2009/08/28 19:23:39 UTC

svn commit: r808962 - in /hadoop/hdfs/trunk: ./ src/java/org/apache/hadoop/hdfs/server/datanode/ src/java/org/apache/hadoop/hdfs/server/namenode/ src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/ src/webapps/hdfs/

Author: szetszwo
Date: Fri Aug 28 17:23:38 2009
New Revision: 808962

URL: http://svn.apache.org/viewvc?rev=808962&view=rev
Log:
HDFS-492. Add two XML JSP pages to the Namenode for providing corrupt blocks/replicas information.  Contributed by Bill Zeller

Added:
    hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptReplicaInfo.java
    hadoop/hdfs/trunk/src/webapps/hdfs/block_info_xml.jsp
    hadoop/hdfs/trunk/src/webapps/hdfs/corrupt_replicas_xml.jsp
Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=808962&r1=808961&r2=808962&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Fri Aug 28 17:23:38 2009
@@ -19,6 +19,9 @@
 
     HDFS-461. Tool to analyze file size distribution in HDFS. (shv)
 
+    HDFS-492. Add two XML JSP pages to the Namenode for providing corrupt
+    blocks/replicas information.  (Bill Zeller via szetszwo)
+
   IMPROVEMENTS
 
     HDFS-381. Remove blocks from DataNode maps when corresponding file

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java?rev=808962&r1=808961&r2=808962&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java Fri Aug 28 17:23:38 2009
@@ -258,6 +258,10 @@
     out.print("<B>Total number of blocks: " + blocks.size() + "</B><br>");
     // generate a table and dump the info
     out.println("\n<table>");
+    
+    String namenodeHost = datanode.getNameNodeAddr().getHostName();
+    String namenodeHostName = InetAddress.getByName(namenodeHost).getCanonicalHostName();
+    
     for (LocatedBlock cur : blocks) {
       out.print("<tr>");
       final String blockidstring = Long.toString(cur.getBlock().getBlockId());
@@ -277,14 +281,18 @@
             + "&genstamp=" + cur.getBlock().getGenerationStamp()
             + "&namenodeInfoPort=" + namenodeInfoPort
             + "&chunkSizeToView=" + chunkSizeToView;
+
+        String blockInfoUrl = "http://" + namenodeHostName + ":"
+            + namenodeInfoPort
+            + "/block_info_xml.jsp?blockId=" + blockidstring;
         out.print("<td>&nbsp</td><td><a href=\"" + blockUrl + "\">"
-            + datanodeAddr + "</a></td>");
+            + datanodeAddr + "</a></td><td>"
+            + "<a href=\"" + blockInfoUrl + "\">View Block Info</a></td>");
       }
       out.println("</tr>");
     }
     out.println("</table>");
     out.print("<hr>");
-    String namenodeHost = datanode.getNameNodeAddr().getHostName();
     out.print("<br><a href=\"http://"
         + InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":"
         + namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
@@ -577,4 +585,4 @@
     out.print("</textarea>");
     dfs.close();
   }
-}
\ No newline at end of file
+}

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java?rev=808962&r1=808961&r2=808962&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java Fri Aug 28 17:23:38 2009
@@ -1419,4 +1419,26 @@
   float getLoadFactor() {
     return blocksMap.getLoadFactor();
   }
+  
+  
+  /**
+   * Return a range of corrupt replica block ids. Up to numExpectedBlocks 
+   * blocks starting at the next block after startingBlockId are returned
+   * (fewer if numExpectedBlocks blocks are unavailable). If startingBlockId 
+   * is null, up to numExpectedBlocks blocks are returned from the beginning.
+   * If startingBlockId cannot be found, null is returned.
+   *
+   * @param numExpectedBlocks Number of block ids to return.
+   *  0 <= numExpectedBlocks <= 100
+   * @param startingBlockId Block id from which to start. If null, start at
+   *  beginning.
+   * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
+   *
+   */
+  long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
+                                   Long startingBlockId) {
+    return corruptReplicas.getCorruptReplicaBlockIds(numExpectedBlocks,
+                                                     startingBlockId);
+  }  
+  
 }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java?rev=808962&r1=808961&r2=808962&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java Fri Aug 28 17:23:38 2009
@@ -33,7 +33,7 @@
 
 public class CorruptReplicasMap{
 
-  private Map<Block, Collection<DatanodeDescriptor>> corruptReplicasMap =
+  private SortedMap<Block, Collection<DatanodeDescriptor>> corruptReplicasMap =
     new TreeMap<Block, Collection<DatanodeDescriptor>>();
   
   /**
@@ -126,4 +126,59 @@
   public int size() {
     return corruptReplicasMap.size();
   }
+
+  /**
+   * Return a range of corrupt replica block ids. Up to numExpectedBlocks 
+   * blocks starting at the next block after startingBlockId are returned
+   * (fewer if numExpectedBlocks blocks are unavailable). If startingBlockId 
+   * is null, up to numExpectedBlocks blocks are returned from the beginning.
+   * If startingBlockId cannot be found, null is returned.
+   *
+   * @param numExpectedBlocks Number of block ids to return.
+   *  0 <= numExpectedBlocks <= 100
+   * @param startingBlockId Block id from which to start. If null, start at
+   *  beginning.
+   * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
+   *
+   */
+  long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
+                                   Long startingBlockId) {
+    if (numExpectedBlocks < 0 || numExpectedBlocks > 100) {
+      return null;
+    }
+    
+    Iterator<Block> blockIt = corruptReplicasMap.keySet().iterator();
+    
+    // if the starting block id was specified, iterate over keys until
+    // we find the matching block. If we find a matching block, break
+    // to leave the iterator on the next block after the specified block. 
+    if (startingBlockId != null) {
+      boolean isBlockFound = false;
+      while (blockIt.hasNext()) {
+        Block b = blockIt.next();
+        if (b.getBlockId() == startingBlockId) {
+          isBlockFound = true;
+          break; 
+        }
+      }
+      
+      if (!isBlockFound) {
+        return null;
+      }
+    }
+
+    ArrayList<Long> corruptReplicaBlockIds = new ArrayList<Long>();
+
+    // append up to numExpectedBlocks blockIds to our list
+    for(int i=0; i<numExpectedBlocks && blockIt.hasNext(); i++) {
+      corruptReplicaBlockIds.add(blockIt.next().getBlockId());
+    }
+    
+    long[] ret = new long[corruptReplicaBlockIds.size()];
+    for(int i=0; i<ret.length; i++) {
+      ret[i] = corruptReplicaBlockIds.get(i);
+    }
+    
+    return ret;
+  }  
 }
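
For illustration, a minimal sketch of how a caller could page through the
corrupt replica block ids with this method. The populated CorruptReplicasMap
"crm" and the helper name are assumptions, not part of this commit, and the
package-private method requires same-package access (as in the test below):

    // Sketch: walk all corrupt block ids, at most 100 per call.
    static void printAllCorruptIds(CorruptReplicasMap crm) {
      Long cursor = null;                // null: start from the beginning
      while (true) {
        long[] page = crm.getCorruptReplicaBlockIds(100, cursor);
        if (page == null || page.length == 0) {
          break;                         // cursor not found, or no ids left
        }
        for (long id : page) {
          System.out.println("corrupt block id: " + id);
        }
        cursor = page[page.length - 1];  // next page starts after this id
      }
    }

Because the underlying map is a TreeMap, each call resumes at the block after
the cursor, so the loop visits each corrupt block id once (absent concurrent
modification).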

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=808962&r1=808961&r2=808962&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Aug 28 17:23:38 2009
@@ -3720,4 +3720,25 @@
   DatanodeDescriptor getDatanode(String nodeID) {
     return datanodeMap.get(nodeID);
   }
+
+  /**
+   * Return a range of corrupt replica block ids. Up to numExpectedBlocks 
+   * blocks starting at the next block after startingBlockId are returned
+   * (fewer if numExpectedBlocks blocks are unavailable). If startingBlockId 
+   * is null, up to numExpectedBlocks blocks are returned from the beginning.
+   * If startingBlockId cannot be found, null is returned.
+   *
+   * @param numExpectedBlocks Number of block ids to return.
+   *  0 <= numExpectedBlocks <= 100
+   * @param startingBlockId Block id from which to start. If null, start at
+   *  beginning.
+   * @return Up to numExpectedBlocks blocks from startingBlockId if it exists
+   *
+   */
+  long[] getCorruptReplicaBlockIds(int numExpectedBlocks,
+                                   Long startingBlockId) {  
+    return blockManager.getCorruptReplicaBlockIds(numExpectedBlocks,
+                                                  startingBlockId);
+  }
+
 }

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=808962&r1=808961&r2=808962&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Fri Aug 28 17:23:38 2009
@@ -28,6 +28,8 @@
 import javax.servlet.http.HttpServletResponse;
 import javax.servlet.jsp.JspWriter;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
@@ -38,6 +40,8 @@
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.VersionInfo;
 
+import org.znerd.xmlenc.*;
+
 class NamenodeJspHelper {
   static String getSafeModeText(FSNamesystem fsn) {
     if (!fsn.isInSafeMode())
@@ -449,4 +453,195 @@
       }
     }
   }
+  
+  // utility class used in block_info_xml.jsp
+  static class XMLBlockInfo {
+    final Block block;
+    final INodeFile inode;
+    final FSNamesystem fsn;
+    
+    public XMLBlockInfo(FSNamesystem fsn, Long blockId) {
+      this.fsn = fsn;
+      if (blockId == null) {
+        this.block = null;
+        this.inode = null;
+      } else {
+        this.block = new Block(blockId);
+        this.inode = fsn.blockManager.getINode(block);
+      }
+    }
+
+    private String getLocalParentDir(INode inode) {
+      StringBuilder pathBuf = new StringBuilder();
+      INode node = inode;
+      
+      // walk up to the root, prepending each name; the root's empty name yields the leading '/'
+      while ((node = node.getParent()) != null) {
+        pathBuf.insert(0, '/').insert(0, node.getLocalName());
+      }
+
+      return pathBuf.toString();
+    }
+
+    public void toXML(XMLOutputter doc) throws IOException {
+      doc.startTag("block_info");
+      if (block == null) {
+        doc.startTag("error");
+        doc.pcdata("blockId must be a Long");
+        doc.endTag();
+      } else {
+        doc.startTag("block_id");
+        doc.pcdata(""+block.getBlockId());
+        doc.endTag();
+
+        doc.startTag("block_name");
+        doc.pcdata(block.getBlockName());
+        doc.endTag();
+
+        if (inode != null) {
+          doc.startTag("file");
+
+          doc.startTag("local_name");
+          doc.pcdata(inode.getLocalName());
+          doc.endTag();
+
+          doc.startTag("local_directory");
+          doc.pcdata(getLocalParentDir(inode));
+          doc.endTag();
+
+          doc.startTag("user_name");
+          doc.pcdata(inode.getUserName());
+          doc.endTag();
+
+          doc.startTag("group_name");
+          doc.pcdata(inode.getGroupName());
+          doc.endTag();
+
+          doc.startTag("is_directory");
+          doc.pcdata(""+inode.isDirectory());
+          doc.endTag();
+
+          doc.startTag("access_time");
+          doc.pcdata(""+inode.getAccessTime());
+          doc.endTag();
+
+          doc.startTag("is_under_construction");
+          doc.pcdata(""+inode.isUnderConstruction());
+          doc.endTag();
+
+          doc.startTag("ds_quota");
+          doc.pcdata(""+inode.getDsQuota());
+          doc.endTag();
+
+          doc.startTag("permission_status");
+          doc.pcdata(inode.getPermissionStatus().toString());
+          doc.endTag();
+
+          doc.startTag("replication");
+          doc.pcdata(""+inode.getReplication());
+          doc.endTag();
+
+          doc.startTag("disk_space_consumed");
+          doc.pcdata(""+inode.diskspaceConsumed());
+          doc.endTag();
+
+          doc.startTag("preferred_block_size");
+          doc.pcdata(""+inode.getPreferredBlockSize());
+          doc.endTag();
+
+          doc.endTag(); // </file>
+        } 
+
+        doc.startTag("replicas");
+       
+        if (fsn.blockManager.blocksMap.contains(block)) {
+          Iterator<DatanodeDescriptor> it =
+            fsn.blockManager.blocksMap.nodeIterator(block);
+
+          while (it.hasNext()) {
+            doc.startTag("replica");
+
+            DatanodeDescriptor dd = it.next();
+
+            doc.startTag("host_name");
+            doc.pcdata(dd.getHostName());
+            doc.endTag();
+
+            boolean isCorrupt = fsn.getCorruptReplicaBlockIds(0,
+                                  block.getBlockId()) != null;
+            
+            doc.startTag("is_corrupt");
+            doc.pcdata(""+isCorrupt);
+            doc.endTag();
+            
+            doc.endTag(); // </replica>
+          }
+
+        } 
+        doc.endTag(); // </replicas>
+                
+      }
+      
+      doc.endTag(); // </block_info>
+      
+    }
+  }
+  
+  // utility class used in corrupt_replicas_xml.jsp
+  static class XMLCorruptBlockInfo {
+    final FSNamesystem fsn;
+    final Configuration conf;
+    final Long startingBlockId;
+    final int numCorruptBlocks;
+    
+    public XMLCorruptBlockInfo(FSNamesystem fsn, Configuration conf,
+                               int numCorruptBlocks, Long startingBlockId) {
+      this.fsn = fsn;
+      this.conf = conf;
+      this.numCorruptBlocks = numCorruptBlocks;
+      this.startingBlockId = startingBlockId;
+    }
+
+
+    public void toXML(XMLOutputter doc) throws IOException {
+      
+      doc.startTag("corrupt_block_info");
+      
+      if (numCorruptBlocks < 0 || numCorruptBlocks > 100) {
+        doc.startTag("error");
+        doc.pcdata("numCorruptBlocks must be >= 0 and <= 100");
+        doc.endTag();
+      }
+      
+      doc.startTag("dfs_replication");
+      doc.pcdata(""+conf.getInt("dfs.replication", 3));
+      doc.endTag();
+      
+      doc.startTag("num_missing_blocks");
+      doc.pcdata(""+fsn.getMissingBlocksCount());
+      doc.endTag();
+      
+      doc.startTag("num_corrupt_replica_blocks");
+      doc.pcdata(""+fsn.getCorruptReplicaBlocks());
+      doc.endTag();
+     
+      doc.startTag("corrupt_replica_block_ids");
+      long[] corruptBlockIds
+        = fsn.getCorruptReplicaBlockIds(numCorruptBlocks,
+                                        startingBlockId);
+      if (corruptBlockIds != null) {
+        for (Long blockId: corruptBlockIds) {
+          doc.startTag("block_id");
+          doc.pcdata(""+blockId);
+          doc.endTag();
+        }
+      }
+      
+      doc.endTag(); // </corrupt_replica_block_ids>
+
+      doc.endTag(); // </corrupt_block_info>
+      
+      doc.getWriter().flush();
+    }
+  }    
 }
\ No newline at end of file

Added: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptReplicaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptReplicaInfo.java?rev=808962&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptReplicaInfo.java (added)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptReplicaInfo.java Fri Aug 28 17:23:38 2009
@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.*;
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap;
+import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
+
+
+/**
+ * This test makes sure that 
+ *   CorruptReplicasMap::numBlocksWithCorruptReplicas and
+ *   CorruptReplicasMap::getCorruptReplicaBlockIds
+ *   return the correct values
+ */
+public class TestCorruptReplicaInfo extends TestCase {
+  
+  private static final Log LOG = 
+                           LogFactory.getLog(TestCorruptReplicaInfo.class);
+  
+  private Map<Long, Block> block_map =
+    new HashMap<Long, Block>();  
+    
+  // Allow easy block creation by block id
+  // Return existing block if one with same block id already exists
+  private Block getBlock(Long block_id) {
+    if (!block_map.containsKey(block_id)) {
+      block_map.put(block_id, new Block(block_id,0,0));
+    }
+    
+    return block_map.get(block_id);
+  }
+  
+  private Block getBlock(int block_id) {
+    return getBlock((long)block_id);
+  }
+  
+  public void testCorruptReplicaInfo() throws IOException, 
+                                       InterruptedException {
+    
+      CorruptReplicasMap crm = new CorruptReplicasMap();
+      
+      // Make sure initial values are returned correctly
+      assertEquals("Number of corrupt blocks must initially be 0", 0, crm.size());
+      assertNull("Param n cannot be less than 0", crm.getCorruptReplicaBlockIds(-1, null));
+      assertNull("Param n cannot be greater than 100", crm.getCorruptReplicaBlockIds(101, null));
+      long[] l = crm.getCorruptReplicaBlockIds(0, null);
+      assertNotNull("n = 0 must return non-null", l);
+      assertEquals("n = 0 must return an empty list", 0, l.length);
+
+      // create a list of block_ids. A list is used to allow easy validation of the
+      // output of getCorruptReplicaBlockIds
+      int NUM_BLOCK_IDS = 140;
+      List<Long> block_ids = new LinkedList<Long>();
+      for (int i=0;i<NUM_BLOCK_IDS;i++) {
+        block_ids.add((long)i);
+      }
+      
+      DatanodeDescriptor dn1 = new DatanodeDescriptor();
+      DatanodeDescriptor dn2 = new DatanodeDescriptor();
+      DatanodeDescriptor dn3 = new DatanodeDescriptor();
+      
+      crm.addToCorruptReplicasMap(getBlock(0), dn1);
+      assertEquals("Number of corrupt blocks not returning correctly",
+                   1, crm.size());
+      crm.addToCorruptReplicasMap(getBlock(1), dn1);
+      assertEquals("Number of corrupt blocks not returning correctly",
+                   2, crm.size());
+      
+      crm.addToCorruptReplicasMap(getBlock(1), dn2);
+      assertEquals("Number of corrupt blocks not returning correctly",
+                   2, crm.size());
+      
+      crm.removeFromCorruptReplicasMap(getBlock(1));
+      assertEquals("Number of corrupt blocks not returning correctly",
+                   1, crm.size());
+      
+      crm.removeFromCorruptReplicasMap(getBlock(0));
+      assertEquals("Number of corrupt blocks not returning correctly",
+                   0, crm.size());
+      
+      for (Long block_id: block_ids) {
+        crm.addToCorruptReplicasMap(getBlock(block_id), dn1);
+      }
+            
+      assertEquals("Number of corrupt blocks not returning correctly",
+                   NUM_BLOCK_IDS, crm.size());
+      
+      assertTrue("First five block ids not returned correctly ",
+                Arrays.equals(new long[]{0,1,2,3,4},
+                              crm.getCorruptReplicaBlockIds(5, null)));
+                              
+      LOG.info(Arrays.toString(crm.getCorruptReplicaBlockIds(10, 7L)));
+      LOG.info(block_ids.subList(7, 18));
+
+      assertTrue("10 blocks after 7 not returned correctly ",
+                Arrays.equals(new long[]{8,9,10,11,12,13,14,15,16,17},
+                              crm.getCorruptReplicaBlockIds(10, 7L)));
+      
+  }
+}

Added: hadoop/hdfs/trunk/src/webapps/hdfs/block_info_xml.jsp
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/webapps/hdfs/block_info_xml.jsp?rev=808962&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/webapps/hdfs/block_info_xml.jsp (added)
+++ hadoop/hdfs/trunk/src/webapps/hdfs/block_info_xml.jsp Fri Aug 28 17:23:38 2009
@@ -0,0 +1,97 @@
+<?xml version="1.0" encoding="UTF-8"?><%!
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file 
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+ /*
+ 
+  This script outputs information about a block (as XML). The script accepts a 
+  GET parameter named blockId, which should be a block id (as a long).
+
+  Example output is below (the blockId was 8888705098093096373):
+    <block_info>
+      <block_id>8888705098093096373</block_id>
+      <block_name>blk_8888705098093096373</block_name>
+      <file>
+        <local_name>some_file_name</local_name>
+        <local_directory>/input/</local_directory>
+        <user_name>user_name</user_name>
+        <group_name>supergroup</group_name>
+        <is_directory>false</is_directory>
+        <access_time>1251166313680</access_time>
+        <is_under_construction>false</is_under_construction>
+        <ds_quota>-1</ds_quota>
+        <permission_status>user_name:supergroup:rw-r--r--</permission_status>
+        <replication>1</replication>
+        <disk_space_consumed>2815</disk_space_consumed>
+        <preferred_block_size>67108864</preferred_block_size>
+      </file>
+      <replicas>
+        <replica>
+          <host_name>hostname</host_name>
+          <is_corrupt>false</is_corrupt>
+        </replica>
+      </replicas>
+    </block_info> 
+
+  Notes:
+    - block_info/file will only exist if the file can be found
+    - block_info/replicas can contain 0 or more children 
+    - If an error exists, block_info/error will exist and contain a human
+      readable error message
+ 
+*/
+ 
+%>
+<%@ page
+  contentType="application/xml"
+  import="java.io.IOException"
+  import="java.util.Iterator"
+  import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hadoop.hdfs.protocol.Block"
+  import="org.apache.hadoop.hdfs.server.namenode.INode"
+  import="org.apache.hadoop.hdfs.server.namenode.BlocksMap"
+  import="org.apache.hadoop.hdfs.server.namenode.BlockInfo"
+  import="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
+  import="org.apache.hadoop.hdfs.server.namenode.NamenodeJspHelper.XMLBlockInfo"
+  import="org.apache.hadoop.hdfs.server.common.JspHelper"
+  import="org.apache.hadoop.util.ServletUtil"
+  import="org.znerd.xmlenc.*"
+  
+%>
+<%!
+  //for java.io.Serializable
+  private static final long serialVersionUID = 1L;  
+%>
+<%
+NameNode nn = (NameNode)application.getAttribute("name.node");
+String namenodeRole = nn.getRole().toString();
+FSNamesystem fsn = nn.getNamesystem();
+
+Long blockId = null;
+try {
+  blockId = JspHelper.validateLong(request.getParameter("blockId"));
+} catch(NumberFormatException e) {
+  blockId = null;
+}
+
+
+XMLBlockInfo bi = new XMLBlockInfo(fsn, blockId);
+XMLOutputter doc = new XMLOutputter(out, "UTF-8");
+bi.toXML(doc);
+
+%>
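
As a usage illustration (not part of this commit), the page can be fetched and
parsed with the standard JDK XML APIs; the host name "namenode" and HTTP port
50070 below are assumptions for an example cluster:

    // Sketch: fetch block info from the NameNode web UI and read one field.
    import java.net.URL;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.Document;

    public class BlockInfoClient {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://namenode:50070/block_info_xml.jsp"
            + "?blockId=8888705098093096373");
        Document doc = DocumentBuilderFactory.newInstance()
            .newDocumentBuilder().parse(url.openStream());
        // Prints e.g. "blk_8888705098093096373", per the sample output above.
        System.out.println(doc.getElementsByTagName("block_name")
            .item(0).getTextContent());
      }
    }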

Added: hadoop/hdfs/trunk/src/webapps/hdfs/corrupt_replicas_xml.jsp
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/webapps/hdfs/corrupt_replicas_xml.jsp?rev=808962&view=auto
==============================================================================
--- hadoop/hdfs/trunk/src/webapps/hdfs/corrupt_replicas_xml.jsp (added)
+++ hadoop/hdfs/trunk/src/webapps/hdfs/corrupt_replicas_xml.jsp Fri Aug 28 17:23:38 2009
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="UTF-8"?><%!
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file 
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+ 
+ /*
+ 
+  This script outputs information about corrupt replicas on the system (as XML). 
+  
+  The script takes two GET parameters:
+    - numCorruptBlocks The number of corrupt blocks to return. Must be >= 0 and
+      <= 100. Defaults to 10.
+    - startingBlockId The block id (as a long) from which to begin iterating. 
+      Output does not include the starting block id (it begins at the following
+      block id). If not given, iteration starts from the beginning.
+
+  Example output is below:
+      <corrupt_block_info>
+        <dfs_replication>1</dfs_replication>
+        <num_missing_blocks>1</num_missing_blocks>
+        <num_corrupt_replica_blocks>1</num_corrupt_replica_blocks>
+        <corrupt_replica_block_ids>
+          <block_id>-2207002825050436217</block_id>
+        </corrupt_replica_block_ids>
+      </corrupt_block_info>
+
+  Notes:
+    - corrupt_block_info/corrupt_replica_block_ids will have 0 to numCorruptBlocks
+      children
+    - If an error exists, corrupt_block_info/error will exist and
+      contain a human readable error message
+ 
+*/
+ 
+%>
+<%@ page
+  contentType="application/xml"
+  import="java.io.IOException"
+  import="java.util.List"
+  import="org.apache.hadoop.conf.Configuration"
+  import="org.apache.hadoop.hdfs.server.common.JspHelper"
+  import="org.apache.hadoop.hdfs.server.namenode.NamenodeJspHelper.XMLCorruptBlockInfo"
+  import="org.apache.hadoop.util.ServletUtil"
+  import="org.znerd.xmlenc.*"
+%>
+<%!
+  private static final long serialVersionUID = 1L;
+%>
+<%
+
+  NameNode nn = (NameNode)application.getAttribute("name.node");
+  FSNamesystem fsn = nn.getNamesystem();
+
+  Integer numCorruptBlocks = 10;
+  try {
+    Long l = JspHelper.validateLong(request.getParameter("numCorruptBlocks"));
+    if (l != null) {
+      numCorruptBlocks = l.intValue();
+    }
+  } catch(NumberFormatException e) {
+    
+  }
+
+  Long startingBlockId = null;
+  try {
+    startingBlockId =
+      JspHelper.validateLong(request.getParameter("startingBlockId"));
+  } catch(NumberFormatException e) { 
+  }  
+
+  XMLCorruptBlockInfo cbi = new XMLCorruptBlockInfo(fsn,
+                                                    new Configuration(),
+                                                    numCorruptBlocks,
+                                                    startingBlockId);
+  XMLOutputter doc = new XMLOutputter(out, "UTF-8");
+  cbi.toXML(doc);
+%>
\ No newline at end of file
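
Likewise for illustration only, a client can walk the complete corrupt-block
list by feeding the last block id of each response back as startingBlockId;
again, "namenode" and port 50070 are assumptions:

    // Sketch: page through all corrupt replica block ids over HTTP.
    import java.net.URL;
    import javax.xml.parsers.DocumentBuilderFactory;
    import org.w3c.dom.NodeList;

    public class CorruptBlocksClient {
      public static void main(String[] args) throws Exception {
        String base = "http://namenode:50070/corrupt_replicas_xml.jsp"
            + "?numCorruptBlocks=100";
        Long cursor = null;
        while (true) {
          String u = base + (cursor == null ? "" : "&startingBlockId=" + cursor);
          NodeList ids = DocumentBuilderFactory.newInstance()
              .newDocumentBuilder().parse(new URL(u).openStream())
              .getElementsByTagName("block_id");
          if (ids.getLength() == 0) {
            break;                       // no more corrupt block ids
          }
          for (int i = 0; i < ids.getLength(); i++) {
            System.out.println(ids.item(i).getTextContent());
          }
          cursor = Long.valueOf(ids.item(ids.getLength() - 1).getTextContent());
        }
      }
    }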