Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2011/06/03 00:28:35 UTC

svn commit: r1130855 - in /hadoop/hdfs/trunk: CHANGES.txt src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java src/webapps/hdfs/dfsclusterhealth.xsl

Author: suresh
Date: Thu Jun  2 22:28:34 2011
New Revision: 1130855

URL: http://svn.apache.org/viewvc?rev=1130855&view=rev
Log:
HDFS-1995. Federation: Minor bug fixes and modifications to the cluster web UI. Contributed by Tanping Wang.

Modified:
    hadoop/hdfs/trunk/CHANGES.txt
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java
    hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
    hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.xsl

Modified: hadoop/hdfs/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/CHANGES.txt?rev=1130855&r1=1130854&r2=1130855&view=diff
==============================================================================
--- hadoop/hdfs/trunk/CHANGES.txt (original)
+++ hadoop/hdfs/trunk/CHANGES.txt Thu Jun  2 22:28:34 2011
@@ -672,6 +672,9 @@ Trunk (unreleased changes)
     HDFS-2014. Change HDFS scripts to work in developer enviroment post
     RPM packaging changes. (Eric Yang via suresh)
 
+    HDFS-1995. Federation: Minor bug fixes and modification cluster web UI.
+    (Tanping Wang via suresh)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java?rev=1130855&r1=1130854&r2=1130855&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java Thu Jun  2 22:28:34 2011
@@ -369,11 +369,10 @@ class ClusterJspHelper {
       nn.filesAndDirectories = mxbeanProxy.getTotalFiles();
       nn.capacity = mxbeanProxy.getTotal();
       nn.free = mxbeanProxy.getFree();
-      nn.dfsUsed = mxbeanProxy.getUsed();
+      nn.bpUsed = mxbeanProxy.getBlockPoolUsedSpace();
       nn.nonDfsUsed = mxbeanProxy.getNonDfsUsedSpace();
       nn.blocksCount = mxbeanProxy.getTotalBlocks();
       nn.missingBlocksCount = mxbeanProxy.getNumberOfMissingBlocks();
-      nn.capacity = mxbeanProxy.getTotal();
       nn.free = mxbeanProxy.getFree();
       nn.httpAddress = DFSUtil.getInfoServer(rpcAddress, conf);
       getLiveNodeCount(mxbeanProxy.getLiveNodes(), nn);
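
The hunk above switches the per-namenode figure from getUsed(), which covers DFS space consumed on the shared datanodes across all block pools, to getBlockPoolUsedSpace(), which covers only this namenode's block pool and is the meaningful per-namespace number under federation. A minimal standalone sketch of reading both values from a namenode's NameNodeMXBean over JMX; the host, port, connector setup and object name below are illustrative assumptions, not the exact mechanism ClusterJspHelper uses to obtain its proxy:

    import javax.management.JMX;
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    import org.apache.hadoop.hdfs.server.namenode.NameNodeMXBean;

    public class BlockPoolUsedProbe {
      public static void main(String[] args) throws Exception {
        // Hypothetical JMX endpoint for one namenode; host and port are placeholders.
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://nn1.example.com:8004/jmxrmi");
        JMXConnector connector = JMXConnectorFactory.connect(url);
        try {
          MBeanServerConnection mbsc = connector.getMBeanServerConnection();
          // Object name assumed to be the one the namenode registers its MXBean under.
          NameNodeMXBean nn = JMX.newMXBeanProxy(mbsc,
              new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo"),
              NameNodeMXBean.class);

          // getUsed(): DFS space consumed on the shared datanodes across all block pools.
          // getBlockPoolUsedSpace(): space consumed by this namenode's block pool only,
          // which is the value the federated cluster page now reports per namenode.
          System.out.println("Capacity        : " + nn.getTotal());
          System.out.println("DFS used (all)  : " + nn.getUsed());
          System.out.println("Block pool used : " + nn.getBlockPoolUsedSpace());
        } finally {
          connector.close();
        }
      }
    }
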
@@ -535,9 +534,9 @@ class ClusterJspHelper {
     String clusterid = "";
     long total_sum = 0;
     long free_sum = 0;
-    long used = 0;
+    long clusterDfsUsed = 0;
     long nonDfsUsed_sum = 0;
-    long totalFilesAndBlocks = 0;
+    long totalFilesAndDirectories = 0;
     
     /** List of namenodes in the cluster */
     final List<NamenodeStatus> nnList = new ArrayList<NamenodeStatus>();
@@ -553,10 +552,10 @@ class ClusterJspHelper {
       nnList.add(nn);
       
       // Add namenode status to cluster status
-      totalFilesAndBlocks += nn.filesAndDirectories;
+      totalFilesAndDirectories += nn.filesAndDirectories;
       total_sum += nn.capacity;
       free_sum += nn.free;
-      used += nn.dfsUsed;
+      clusterDfsUsed += nn.bpUsed;
       nonDfsUsed_sum += nn.nonDfsUsed;
     }
 
@@ -580,7 +579,7 @@ class ClusterJspHelper {
         total = total_sum / size;
         free = free_sum / size;
         nonDfsUsed = nonDfsUsed_sum / size;
-        dfsUsedPercent = DFSUtil.getPercentUsed(used, total_sum);
+        dfsUsedPercent = DFSUtil.getPercentUsed(clusterDfsUsed, total);
         dfsRemainingPercent = DFSUtil.getPercentRemaining(free, total);
       }
     
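In the summary hunk above, the per-namenode capacity, free and non-DFS figures are averaged over the number of namenodes (size), which is consistent with each federated namenode reporting the full capacity of the same shared datanodes, while the block-pool usages are disjoint and simply summed. The fix therefore measures the summed usage against the de-duplicated capacity (total) rather than the raw sum (total_sum). A small self-contained illustration, with every number hypothetical:

    public class ClusterUsedPercentExample {
      public static void main(String[] args) {
        final long TB = 1L << 40;
        // Two federated namenodes; all figures here are hypothetical.
        long[] capacity = { 100 * TB, 100 * TB }; // each reports the same shared 100 TB
        long[] bpUsed   = {  10 * TB,  20 * TB }; // block-pool usage is disjoint

        long totalSum = 0, clusterDfsUsed = 0;
        for (int i = 0; i < capacity.length; i++) {
          totalSum += capacity[i];
          clusterDfsUsed += bpUsed[i];
        }
        long total = totalSum / capacity.length; // shared capacity counted once: 100 TB

        // Old formula, clusterDfsUsed / totalSum: 30 TB / 200 TB = 15% (double-counted).
        // New formula, clusterDfsUsed / total:    30 TB / 100 TB = 30%.
        System.out.printf("DFS Used%% = %.2f%%%n", clusterDfsUsed * 100.0 / total);
      }
    }
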
@@ -589,23 +588,23 @@ class ClusterJspHelper {
     
       doc.startTag("storage");
     
-      toXmlItemBlock(doc, "Total Files And Blocks",
-          Long.toString(totalFilesAndBlocks));
+      toXmlItemBlock(doc, "Total Files And Directories",
+          Long.toString(totalFilesAndDirectories));
     
       toXmlItemBlock(doc, "Configured Capacity", StringUtils.byteDesc(total));
     
-      toXmlItemBlock(doc, "Used", StringUtils.byteDesc(used));
+      toXmlItemBlock(doc, "DFS Used", StringUtils.byteDesc(clusterDfsUsed));
     
       toXmlItemBlock(doc, "Non DFS Used", StringUtils.byteDesc(nonDfsUsed));
     
-      toXmlItemBlock(doc, "Remaining", StringUtils.byteDesc(free));
+      toXmlItemBlock(doc, "DFS Remaining", StringUtils.byteDesc(free));
     
       // dfsUsedPercent
-      toXmlItemBlock(doc, "Used%", StringUtils.limitDecimalTo2(dfsUsedPercent)
-          + "%");
+      toXmlItemBlock(doc, "DFS Used%", 
+          StringUtils.limitDecimalTo2(dfsUsedPercent)+ "%");
     
       // dfsRemainingPercent
-      toXmlItemBlock(doc, "Remaining%",
+      toXmlItemBlock(doc, "DFS Remaining%",
           StringUtils.limitDecimalTo2(dfsRemainingPercent) + "%");
     
       doc.endTag(); // storage
@@ -617,8 +616,8 @@ class ClusterJspHelper {
       for (NamenodeStatus nn : nnList) {
         doc.startTag("node");
         toXmlItemBlockWithLink(doc, nn.host, nn.httpAddress, "NameNode");
-        toXmlItemBlock(doc, "Used",
-            StringUtils.byteDesc(nn.dfsUsed));
+        toXmlItemBlock(doc, "Blockpool Used",
+            StringUtils.byteDesc(nn.bpUsed));
         toXmlItemBlock(doc, "Files And Directories",
             Long.toString(nn.filesAndDirectories));
         toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
@@ -648,7 +647,7 @@ class ClusterJspHelper {
     String host = "";
     long capacity = 0L;
     long free = 0L;
-    long dfsUsed = 0L;
+    long bpUsed = 0L;
     long nonDfsUsed = 0L;
     long filesAndDirectories = 0L;
     long blocksCount = 0L;

Modified: hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java?rev=1130855&r1=1130854&r2=1130855&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java (original)
+++ hadoop/hdfs/trunk/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java Thu Jun  2 22:28:34 2011
@@ -268,7 +268,7 @@ class NamenodeJspHelper {
       long nonDFS = total - remaining - used;
       nonDFS = nonDFS < 0 ? 0 : nonDFS;
       float percentUsed = DFSUtil.getPercentUsed(used, total);
-      float percentRemaining = DFSUtil.getPercentRemaining(used, total);
+      float percentRemaining = DFSUtil.getPercentRemaining(remaining, total);
       float median = 0;
       float max = 0;
       float min = 0;

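The one-line fix above passes the remaining bytes, rather than the used bytes, to DFSUtil.getPercentRemaining. A minimal sketch of the intended arithmetic, with the two percentage helpers restated locally as illustrative stand-ins for the DFSUtil methods:

    public class PercentRemainingExample {
      // Illustrative stand-ins for DFSUtil.getPercentUsed / getPercentRemaining;
      // the real implementations live in org.apache.hadoop.hdfs.DFSUtil.
      static float percentUsed(long used, long capacity) {
        return capacity <= 0 ? 100 : (used * 100.0f) / capacity;
      }

      static float percentRemaining(long remaining, long capacity) {
        return capacity <= 0 ? 0 : (remaining * 100.0f) / capacity;
      }

      public static void main(String[] args) {
        // Hypothetical datanode figures: total = used + remaining + non-DFS.
        long total = 1000, used = 250, remaining = 700;

        System.out.println("Used%      = " + percentUsed(used, total));           // 25.0
        // Before the fix the page passed `used` here and would have shown 25.0
        // instead of 70.0.
        System.out.println("Remaining% = " + percentRemaining(remaining, total)); // 70.0
      }
    }
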
Modified: hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.xsl
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.xsl?rev=1130855&r1=1130854&r2=1130855&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.xsl (original)
+++ hadoop/hdfs/trunk/src/webapps/hdfs/dfsclusterhealth.xsl Thu Jun  2 22:28:34 2011
@@ -29,6 +29,7 @@
     <html>
       <head>
         <link rel="stylesheet" type="text/css" href="static/hadoop.css" />
+        <style type="text/css">th,span {width:8em;}</style>
         <title>
           Hadoop cluster
           <xsl:value-of select="cluster/@clusterId" />
@@ -102,7 +103,7 @@
               <thead>
                 <xsl:for-each select="cluster/namenodes/node[1]/item">
                   <th>
-                    <xsl:value-of select="@label" />
+                    <SPAN><xsl:value-of select="@label" /></SPAN>
                   </th>
                 </xsl:for-each>
               </thead>