Posted to common-commits@hadoop.apache.org by sz...@apache.org on 2008/12/09 03:19:03 UTC

svn commit: r724578 - in /hadoop/core/trunk: CHANGES.txt src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java src/webapps/hdfs/dfshealth.jsp src/webapps/hdfs/dfsnodelist.jsp

Author: szetszwo
Date: Mon Dec  8 18:19:03 2008
New Revision: 724578

URL: http://svn.apache.org/viewvc?rev=724578&view=rev
Log:
HADOOP-4029. Add NameNode storage information to the dfshealth page and move DataNode information to a separate page. (Boris Shkolnik via szetszwo)
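
In short: FSImage now remembers name-node storage directories that are dropped after an I/O error instead of discarding them silently, dfshealth.jsp renders both the active and the failed directories, and the per-datanode tables move to the new dfsnodelist.jsp. A minimal, self-contained sketch of the tracking pattern follows; the class and field names (Dir, StorageTracker) are illustrative stand-ins, not the Hadoop types:

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    // Illustrative stand-in for Storage.StorageDirectory; not the Hadoop type.
    class Dir {
      final String root;
      Dir(String root) { this.root = root; }
    }

    public class StorageTracker {
      private final List<Dir> active = new ArrayList<Dir>();
      private final List<Dir> removed = new ArrayList<Dir>();

      void add(Dir d) { active.add(d); }

      // Mirrors the shape of FSImage.processIOError in the diff below:
      // a failed directory moves to the removed list rather than
      // vanishing, so a status page can still report it.
      void processIOError(String failedRoot) {
        for (Iterator<Dir> it = active.iterator(); it.hasNext();) {
          Dir d = it.next();
          if (d.root.equals(failedRoot)) {
            removed.add(d);
            it.remove();
          }
        }
      }

      List<Dir> getRemoved() { return removed; }

      public static void main(String[] args) {
        StorageTracker t = new StorageTracker();
        t.add(new Dir("/data/dfs/name1"));
        t.add(new Dir("/data/dfs/name2"));
        t.processIOError("/data/dfs/name2");
        System.out.println(t.getRemoved().size()); // prints 1
      }
    }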

Added:
    hadoop/core/trunk/src/webapps/hdfs/dfsnodelist.jsp
Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
    hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=724578&r1=724577&r2=724578&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Mon Dec  8 18:19:03 2008
@@ -78,6 +78,10 @@
     HADOOP-4179. Add Vaidya tool to analyze map/reduce job logs for performance
     problems. (Suhas Gogate via omalley)
 
+    HADOOP-4029. Add NameNode storage information to the dfshealth page and
+    move DataNode information to a separate page. (Boris Shkolnik via
+    szetszwo)
+
   IMPROVEMENTS
 
     HADOOP-4234. Fix KFS "glue" layer to allow applications to interface

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java?rev=724578&r1=724577&r2=724578&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSImage.java Mon Dec  8 18:19:03 2008
@@ -33,6 +33,7 @@
 import java.util.Collection;
 import java.util.Date;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Properties;
 import java.util.Random;
 import java.util.Map;
@@ -111,6 +112,12 @@
   protected long checkpointTime = -1L;
   private FSEditLog editLog = null;
   private boolean isUpgradeFinalized = false;
+  
+  /**
+   * list of failed (and thus removed) storages
+   */
+  protected List<StorageDirectory> removedStorageDirs = new ArrayList<StorageDirectory>();
+  
   /**
    * Directories for importing an image from a checkpoint.
    */
@@ -163,6 +170,7 @@
                         Collection<File> fsEditsDirs
                              ) throws IOException {
     this.storageDirs = new ArrayList<StorageDirectory>();
+    this.removedStorageDirs = new ArrayList<StorageDirectory>();
    // Add all name dirs with appropriate NameNodeDirType 
     for (File dirName : fsNameDirs) {
       boolean isAlsoEdits = false;
@@ -196,6 +204,10 @@
     return new File(sd.getCurrentDir(), type.getName());
   }
   
+  List<StorageDirectory> getRemovedStorageDirs() {
+    return this.removedStorageDirs;
+  }
+  
   File getEditFile(StorageDirectory sd) {
     return getImageFile(sd, NameNodeFile.EDITS);
   }
@@ -616,6 +628,9 @@
         // Close any edits stream associated with this dir and remove directory
      if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS))
        editLog.processIOError(sd);
+
+      // Add this storage directory to the removed list.
+      removedStorageDirs.add(sd);
      it.remove();
       }
     }
@@ -627,10 +642,14 @@
   
   void processIOError(File dirName) {
     for (Iterator<StorageDirectory> it = 
-            dirIterator(); it.hasNext();) {
+      dirIterator(); it.hasNext();) {
       StorageDirectory sd = it.next();
-      if (sd.getRoot().getPath().equals(dirName.getParent()))
+      if (sd.getRoot().getPath().equals(dirName.getPath())) {
+        // Add this storage directory to the removed list.
+        LOG.info("Removing " + dirName.getPath());
+        removedStorageDirs.add(sd);
         it.remove();
+      }
     }
   }
 
@@ -1304,6 +1323,8 @@
           // Close edit stream, if this directory is also used for edits
           if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS))
             editLog.processIOError(sd);
+          // Add this storage directory to the removed list.
+          removedStorageDirs.add(sd);
           it.remove();
         }
       }
@@ -1335,6 +1356,8 @@
         // Close edit stream, if this directory is also used for edits
         if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS))
           editLog.processIOError(sd);
+        // Add this storage directory to the removed list.
+        removedStorageDirs.add(sd);
         it.remove();
       }
     }
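
Besides the bookkeeping, note the one-line behavioral fix in processIOError(File): the storage root is now compared against dirName.getPath() rather than dirName.getParent(), so a caller passing the root itself gets a match. A standalone illustration (the paths are hypothetical):

    import java.io.File;

    public class PathCompare {
      public static void main(String[] args) {
        File root = new File("/data/dfs/name1");    // a configured storage root
        File dirName = new File("/data/dfs/name1"); // what a caller passes in

        // Old comparison: true only if dirName were a direct child of root.
        System.out.println(root.getPath().equals(dirName.getParent())); // false

        // New comparison: matches the root itself.
        System.out.println(root.getPath().equals(dirName.getPath()));   // true
      }
    }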

Modified: hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp?rev=724578&r1=724577&r2=724578&view=diff
==============================================================================
--- hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp (original)
+++ hadoop/core/trunk/src/webapps/hdfs/dfshealth.jsp Mon Dec  8 18:19:03 2008
@@ -8,6 +8,8 @@
   import="org.apache.hadoop.hdfs.*"
   import="org.apache.hadoop.hdfs.server.namenode.*"
   import="org.apache.hadoop.hdfs.server.datanode.*"
+  import="org.apache.hadoop.hdfs.server.common.Storage"
+  import="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"
   import="org.apache.hadoop.hdfs.protocol.*"
   import="org.apache.hadoop.util.*"
   import="java.text.DateFormat"
@@ -116,6 +118,41 @@
               "<td title=" + "\"blocks scheduled : " + d.getBlocksScheduled() + 
               "\" class=\"blocks\">" + d.numBlocks() + "\n");
   }
+
+  public void generateConfReport( JspWriter out,
+                                  FSNamesystem fsn,
+                                  HttpServletRequest request)
+  throws IOException {
+    FSImage fsImage = fsn.getFSImage();
+    List<Storage.StorageDirectory> removedStorageDirs = fsImage.getRemovedStorageDirs();
+
+    // FS image storage configuration
+    out.print("<h3> NameNode Storage: </h3>");
+    out.print("<div id=\"dfstable\"> <table border=1 cellpadding=10 cellspacing=0 title=\"NameNode Storage\">\n" +
+      "<thead><tr><td><b>Storage Directory</b></td><td><b>Type</b></td><td><b>State</b></td></tr></thead>");
+
+    // Active storage directories.
+    for (Iterator<StorageDirectory> it = fsImage.dirIterator(); it.hasNext();) {
+      StorageDirectory st = it.next();
+      String dir = "" + st.getRoot();
+      String type = "" + st.getStorageDirType();
+      out.print("<tr><td>" + dir + "</td><td>" + type + "</td><td>Active</td></tr>");
+    }
+
+    // Storage directories removed after an I/O error.
+    for (StorageDirectory st : removedStorageDirs) {
+      String dir = "" + st.getRoot();
+      String type = "" + st.getStorageDirType();
+      out.print("<tr><td>" + dir + "</td><td>" + type + "</td><td><font color=red>Failed</font></td></tr>");
+    }
+
+    out.print("</table></div><br>\n");
+  }
+
 
   public void generateDFSHealthReport(JspWriter out,
                                       NameNode nn,
@@ -173,75 +210,16 @@
 	       rowTxt() + colTxt() + "DFS Remaining%" + colTxt() + ":" + colTxt() +
 	       StringUtils.limitDecimalTo2(percentRemaining) + " %" +
 	       rowTxt() + colTxt() +
-               "<a href=\"#LiveNodes\">Live Nodes</a> " +
-               colTxt() + ":" + colTxt() + live.size() +
+               "<a href=\"dfsnodelist.jsp?whatNodes=LIVE\">Live Nodes</a> " +
+               colTxt() + ":" + colTxt() + live.size() +
 	       rowTxt() + colTxt() +
-               "<a href=\"#DeadNodes\">Dead Nodes</a> " +
-               colTxt() + ":" + colTxt() + dead.size() +
-               "</table></div><br><hr>\n" );
+               "<a href=\"dfsnodelist.jsp?whatNodes=DEAD\">Dead Nodes</a> " +
+               colTxt() + ":" + colTxt() + dead.size() +
+               "</table></div><br>\n" );
     
     if (live.isEmpty() && dead.isEmpty()) {
         out.print("There are no datanodes in the cluster");
     }
-    else {
-        
-	out.print( "<div id=\"dfsnodetable\"> "+
-                   "<a name=\"LiveNodes\" id=\"title\">" +
-                   "Live Datanodes : " + live.size() + "</a>" +
-                   "<br><br>\n<table border=1 cellspacing=0>\n" );
-
-        counterReset();
-        
-  int nnHttpPort = nn.getHttpAddress().getPort();
-	if ( live.size() > 0 ) {
-            
-            if ( live.get(0).getCapacity() > 1024 * diskBytes ) {
-                diskBytes *= 1024;
-                diskByteStr = "TB";
-            }
-
-      out.print( "<tr class=\"headerRow\"> <th " +
-                 NodeHeaderStr("name") + "> Node <th " +
-                 NodeHeaderStr("lastcontact") + "> Last <br>Contact <th " +
-                 NodeHeaderStr("adminstate") + "> Admin State <th " +
-                 NodeHeaderStr("capacity") + "> Configured <br>Capacity (" + 
-                 diskByteStr + ") <th " + 
-                 NodeHeaderStr("used") + "> Used <br>(" + 
-                 diskByteStr + ") <th " + 
-                 NodeHeaderStr("nondfsused") + "> Non DFS <br>Used (" + 
-                 diskByteStr + ") <th " + 
-                 NodeHeaderStr("remaining") + "> Remaining <br>(" + 
-                 diskByteStr + ") <th " + 
-                 NodeHeaderStr("pcused") + "> Used <br>(%) <th " + 
-                 NodeHeaderStr("pcused") + "> Used <br>(%) <th " +
-                 NodeHeaderStr("pcremaining") + "> Remaining <br>(%) <th " +
-                 NodeHeaderStr("blocks") + "> Blocks\n" );
-            
-      jspHelper.sortNodeList(live, sorterField, sorterOrder);
-      for ( int i=0; i < live.size(); i++ ) {
-        generateNodeData(out, live.get(i), port_suffix, true, nnHttpPort);
-      }
-    }
-    out.print("</table>\n");
-    
-    counterReset();
-	
-	out.print("<br> <a name=\"DeadNodes\" id=\"title\"> " +
-                  " Dead Datanodes : " +dead.size() + "</a><br><br>\n");
-
-	if ( dead.size() > 0 ) {
-	    out.print( "<table border=1 cellspacing=0> <tr id=\"row1\"> " +
-		       "<td> Node \n" );
-	    
-      jspHelper.sortNodeList(dead, "name", "ASC");
-	    for ( int i=0; i < dead.size() ; i++ ) {
-        generateNodeData(out, dead.get(i), port_suffix, false, nnHttpPort);
-	    }
-	    
-	    out.print("</table>\n");
-	}
-	out.print("</div>");
-    }
   }%>
 
 <%
@@ -276,7 +254,10 @@
 <%
     generateDFSHealthReport(out, nn, request); 
 %>
-
+<hr>
+<%
+  generateConfReport(out, fsn, request);
+%>
 <%
 out.println(ServletUtil.htmlFooter());
 %>
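
The health page now links out instead of rendering the node tables inline; dfsnodelist.jsp reads three query parameters, whatNodes plus the sorter/field and sorter/order pair added by the sortable column headers (order toggles between ASC and DSC). A small sketch of the links involved; the host and port are placeholders:

    public class NodeListLinks {
      public static void main(String[] args) {
        String base = "http://namenode.example.com:50070/dfsnodelist.jsp";
        // The two links dfshealth.jsp now emits:
        System.out.println(base + "?whatNodes=LIVE");
        System.out.println(base + "?whatNodes=DEAD");
        // What a column-header click appends on the node list page:
        System.out.println(base + "?whatNodes=LIVE&sorter/field=used&sorter/order=DSC");
      }
    }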

Added: hadoop/core/trunk/src/webapps/hdfs/dfsnodelist.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/webapps/hdfs/dfsnodelist.jsp?rev=724578&view=auto
==============================================================================
--- hadoop/core/trunk/src/webapps/hdfs/dfsnodelist.jsp (added)
+++ hadoop/core/trunk/src/webapps/hdfs/dfsnodelist.jsp Mon Dec  8 18:19:03 2008
@@ -0,0 +1,265 @@
+<%@ page
+  contentType="text/html; charset=UTF-8"
+  import="javax.servlet.*"
+  import="javax.servlet.http.*"
+  import="java.io.*"
+  import="java.util.*"
+  import="org.apache.hadoop.fs.*"
+  import="org.apache.hadoop.hdfs.*"
+  import="org.apache.hadoop.hdfs.server.common.*"
+  import="org.apache.hadoop.hdfs.server.namenode.*"
+  import="org.apache.hadoop.hdfs.server.datanode.*"
+  import="org.apache.hadoop.hdfs.protocol.*"
+  import="org.apache.hadoop.util.*"
+  import="java.text.DateFormat"
+  import="java.lang.Math"
+  import="java.net.URLEncoder"
+%>
+<%!
+	JspHelper jspHelper = new JspHelper();
+
+	int rowNum = 0;
+	int colNum = 0;
+
+	String rowTxt() {
+		colNum = 0;
+		return "<tr class=\"" + (((rowNum++)%2 == 0) ? "rowNormal" : "rowAlt") + "\"> ";
+	}
+	String colTxt() { return "<td id=\"col" + ++colNum + "\"> "; }
+	void counterReset() { colNum = 0; rowNum = 0; }
+
+	long diskBytes = 1024 * 1024 * 1024;
+	String diskByteStr = "GB";
+
+	String sorterField = null;
+	String sorterOrder = null;
+	String whatNodes = "LIVE";
+
+String NodeHeaderStr(String name) {
+	String ret = "class=header";
+	String order = "ASC";
+	if ( name.equals( sorterField ) ) {
+		ret += sorterOrder;
+		if ( sorterOrder.equals("ASC") )
+			order = "DSC";
+	}
+	ret += " onClick=\"window.document.location=" +
+	"'/dfsnodelist.jsp?whatNodes="+whatNodes+"&sorter/field=" + name + "&sorter/order=" +
+	order + "'\" title=\"sort on this column\"";
+
+	return ret;
+}
+
+public void generateNodeData( JspWriter out, DatanodeDescriptor d,
+		String suffix, boolean alive,
+		int nnHttpPort )
+throws IOException {
+
+	/* Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5.
+	   We use:
+	   1) d.getHostName():d.getPort() to display.
+	      Domain and port are stripped if they are common across the nodes,
+	      i.e. "dn1".
+	   2) d.getHost():d.getPort() for "title",
+	      i.e. "192.168.0.5:50010".
+	   3) d.getHostName():d.getInfoPort() for url,
+	      i.e. "http://dn1.hadoop.apache.org:50075/...".
+	   Note that "d.getHost():d.getPort()" is what DFS clients use
+	   to interact with datanodes.
+	 */
+	// from nn_browsedfscontent.jsp:
+	String url = "http://" + d.getHostName() + ":" + d.getInfoPort() +
+	"/browseDirectory.jsp?namenodeInfoPort=" +
+	nnHttpPort + "&dir=" +
+	URLEncoder.encode("/", "UTF-8");
+
+	String name = d.getHostName() + ":" + d.getPort();
+	if ( !name.matches( "\\d+\\.\\d+\\.\\d+\\.\\d+.*" ) )
+		name = name.replaceAll( "\\.[^.:]*", "" );
+	int idx = (suffix != null && name.endsWith( suffix )) ?
+		name.indexOf( suffix ) : -1;
+
+	out.print( rowTxt() + "<td class=\"name\"><a title=\""
+		+ d.getHost() + ":" + d.getPort() +
+		"\" href=\"" + url + "\">" +
+		(( idx > 0 ) ? name.substring(0, idx) : name) + "</a>" +
+		(( alive ) ? "" : "\n") );
+	if ( !alive )
+		return;
+
+	long c = d.getCapacity();
+	long u = d.getDfsUsed();
+	long nu = d.getNonDfsUsed();
+	long r = d.getRemaining();
+	String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent());
+	String percentRemaining = StringUtils.limitDecimalTo2(d.getRemainingPercent());
+
+	String adminState = (d.isDecommissioned() ? "Decommissioned" :
+		(d.isDecommissionInProgress() ? "Decommission In Progress" :
+		"In Service"));
+
+	long timestamp = d.getLastUpdate();
+	long currentTime = System.currentTimeMillis();
+	out.print("<td class=\"lastcontact\"> " +
+		((currentTime - timestamp)/1000) +
+		"<td class=\"adminstate\">" +
+		adminState +
+		"<td align=\"right\" class=\"capacity\">" +
+		StringUtils.limitDecimalTo2(c*1.0/diskBytes) +
+		"<td align=\"right\" class=\"used\">" +
+		StringUtils.limitDecimalTo2(u*1.0/diskBytes) +
+		"<td align=\"right\" class=\"nondfsused\">" +
+		StringUtils.limitDecimalTo2(nu*1.0/diskBytes) +
+		"<td align=\"right\" class=\"remaining\">" +
+		StringUtils.limitDecimalTo2(r*1.0/diskBytes) +
+		"<td align=\"right\" class=\"pcused\">" + percentUsed +
+		"<td class=\"pcused\">" +
+		ServletUtil.percentageGraph( (int)Double.parseDouble(percentUsed), 100) +
+		"<td align=\"right\" class=\"pcremaining\">" + percentRemaining +
+		"<td title=" + "\"blocks scheduled : " + d.getBlocksScheduled() +
+		"\" class=\"blocks\">" + d.numBlocks() + "\n");
+}
+
+
+
+public void generateDFSNodesList(JspWriter out, 
+		NameNode nn,
+		HttpServletRequest request)
+throws IOException {
+	ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();    
+	ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+	jspHelper.DFSNodesStatus(live, dead);
+
+	whatNodes = request.getParameter("whatNodes"); // show only live or only dead nodes
+	if ( whatNodes == null )
+		whatNodes = "LIVE"; // default to the live-node view when the parameter is absent
+	sorterField = request.getParameter("sorter/field");
+	sorterOrder = request.getParameter("sorter/order");
+	if ( sorterField == null )
+		sorterField = "name";
+	if ( sorterOrder == null )
+		sorterOrder = "ASC";
+
+	jspHelper.sortNodeList(live, sorterField, sorterOrder);
+	jspHelper.sortNodeList(dead, "name", "ASC");
+
+	// Find out common suffix. Should this be before or after the sort?
+	String port_suffix = null;
+	if ( live.size() > 0 ) {
+		String name = live.get(0).getName();
+		int idx = name.indexOf(':');
+		if ( idx > 0 ) {
+			port_suffix = name.substring( idx );
+		}
+
+		for ( int i=1; port_suffix != null && i < live.size(); i++ ) {
+			if ( live.get(i).getName().endsWith( port_suffix ) == false ) {
+				port_suffix = null;
+				break;
+			}
+		}
+	}
+
+	counterReset();
+
+
+	if (live.isEmpty() && dead.isEmpty()) {
+		out.print("There are no datanodes in the cluster");
+	}
+	else {
+
+		int nnHttpPort = nn.getHttpAddress().getPort();
+		out.print( "<div id=\"dfsnodetable\"> ");
+		if(whatNodes.equals("LIVE")) {
+
+			out.print( 
+					"<a name=\"LiveNodes\" id=\"title\">" +
+					"Live Datanodes : " + live.size() + "</a>" +
+			"<br><br>\n<table border=1 cellspacing=0>\n" );
+
+			counterReset();
+
+			if ( live.size() > 0 ) {
+
+				if ( live.get(0).getCapacity() > 1024 * diskBytes ) {
+					diskBytes *= 1024;
+					diskByteStr = "TB";
+				}
+
+				out.print( "<tr class=\"headerRow\"> <th " +
+						NodeHeaderStr("name") + "> Node <th " +
+						NodeHeaderStr("lastcontact") + "> Last <br>Contact <th " +
+						NodeHeaderStr("adminstate") + "> Admin State <th " +
+						NodeHeaderStr("capacity") + "> Configured <br>Capacity (" + 
+						diskByteStr + ") <th " + 
+						NodeHeaderStr("used") + "> Used <br>(" + 
+						diskByteStr + ") <th " + 
+						NodeHeaderStr("nondfsused") + "> Non DFS <br>Used (" + 
+						diskByteStr + ") <th " + 
+						NodeHeaderStr("remaining") + "> Remaining <br>(" + 
+						diskByteStr + ") <th " + 
+						NodeHeaderStr("pcused") + "> Used <br>(%) <th " + 
+						NodeHeaderStr("pcused") + "> Used <br>(%) <th " +
+						NodeHeaderStr("pcremaining") + "> Remaining <br>(%) <th " +
+						NodeHeaderStr("blocks") + "> Blocks\n" );
+
+				jspHelper.sortNodeList(live, sorterField, sorterOrder);
+				for ( int i=0; i < live.size(); i++ ) {
+					generateNodeData(out, live.get(i), port_suffix, true, nnHttpPort);
+				}
+			}
+			out.print("</table>\n");
+		} else {
+
+			out.print("<br> <a name=\"DeadNodes\" id=\"title\"> " +
+					" Dead Datanodes : " +dead.size() + "</a><br><br>\n");
+
+			if ( dead.size() > 0 ) {
+				out.print( "<table border=1 cellspacing=0> <tr id=\"row1\"> " +
+				"<td> Node \n" );
+
+				jspHelper.sortNodeList(dead, "name", "ASC");
+				for ( int i=0; i < dead.size() ; i++ ) {
+					generateNodeData(out, dead.get(i), port_suffix, false, nnHttpPort);
+				}
+
+				out.print("</table>\n");
+			}
+		}
+		out.print("</div>");
+	}
+}%>
+
+<%
+NameNode nn = (NameNode)application.getAttribute("name.node");
+FSNamesystem fsn = nn.getNamesystem();
+String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
+%>
+
+<html>
+
+<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
+<title>Hadoop NameNode <%=namenodeLabel%></title>
+  
+<body>
+<h1>NameNode '<%=namenodeLabel%>'</h1>
+
+
+<div id="dfstable"> <table>	  
+<tr> <td id="col1"> Started: <td> <%= fsn.getStartTime()%>
+<tr> <td id="col1"> Version: <td> <%= VersionInfo.getVersion()%>, r<%= VersionInfo.getRevision()%>
+<tr> <td id="col1"> Compiled: <td> <%= VersionInfo.getDate()%> by <%= VersionInfo.getUser()%>
+<tr> <td id="col1"> Upgrades: <td> <%= jspHelper.getUpgradeStatusText()%>
+</table></div><br>				      
+
+<b><a href="/nn_browsedfscontent.jsp">Browse the filesystem</a></b><br>
+<b><a href="/logs/">Namenode Logs</a></b><br>
+<b><a href="/dfshealth.jsp"> Go back to DFS home</a></b>
+<hr>
+<%
+	generateDFSNodesList(out, nn, request); 
+%>
+
+<%
+out.println(ServletUtil.htmlFooter());
+%>
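
One subtle piece of the new page is the common-suffix computation in generateDFSNodesList above: if every live datanode reports the same ":port" suffix, it is stripped from the displayed names. The same logic, extracted into a runnable sketch with made-up node names:

    import java.util.Arrays;
    import java.util.List;

    public class PortSuffix {
      public static void main(String[] args) {
        List<String> names = Arrays.asList(
            "dn1.example.com:50010", "dn2.example.com:50010");

        // Take the ":port" suffix of the first node, if any.
        String suffix = null;
        int idx = names.get(0).indexOf(':');
        if (idx > 0)
          suffix = names.get(0).substring(idx);

        // Keep it only if every other node shares it.
        for (int i = 1; suffix != null && i < names.size(); i++) {
          if (!names.get(i).endsWith(suffix)) {
            suffix = null;
            break;
          }
        }

        // Prints ":50010" when all ports agree; null otherwise.
        System.out.println(suffix);
      }
    }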