Posted to common-commits@hadoop.apache.org by om...@apache.org on 2011/03/04 05:38:50 UTC

svn commit: r1077635 - in /hadoop/common/branches/branch-0.20-security-patches/src: hdfs/org/apache/hadoop/hdfs/server/datanode/ hdfs/org/apache/hadoop/hdfs/server/namenode/ test/org/apache/hadoop/hdfs/server/datanode/ test/org/apache/hadoop/hdfs/serve...

Author: omalley
Date: Fri Mar  4 04:38:50 2011
New Revision: 1077635

URL: http://svn.apache.org/viewvc?rev=1077635&view=rev
Log:
commit 7c6ab75ce0b1adee2509d21d6a7d628f9c20ee67
Author: Tanping Wang <ta...@yahoo-inc.com>
Date:   Tue Aug 3 15:51:50 2010 -0700

    HADOOP:1318 from https://issues.apache.org/jira/secure/attachment/12451080/HDFS-1318.y20.2.patch

Added:
    hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
    hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
Modified:
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1077635&r1=1077634&r2=1077635&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Mar  4 04:38:50 2011
@@ -35,6 +35,7 @@ import java.security.SecureRandom;
 import java.util.AbstractList;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.LinkedList;
@@ -71,7 +72,7 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeInstrumentation;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -117,7 +118,6 @@ import java.lang.management.ManagementFa
 import javax.management.InstanceAlreadyExistsException;
 import javax.management.MBeanRegistrationException;
 import javax.management.MBeanServer; 
-import javax.management.ObjectInstance;
 import javax.management.ObjectName;
 
 /**********************************************************
@@ -360,8 +360,8 @@ public class DataNode extends Configured
       this.data = new FSDataset(storage, conf);
     }
       
-    // register datanode MBean
-    registerMBean();
+    // register datanode MXBean
+    registerMXBean();
     
     // find free port or use privileged port provided
     ServerSocket ss;
@@ -464,14 +464,14 @@ public class DataNode extends Configured
     LOG.info("dnRegistration = " + dnRegistration);
   }
 
-  private void registerMBean() {
+  private void registerMXBean() {
     // register MXBean
     MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
     try {
-      ObjectName mxbeanName = new ObjectName("hadoop:type=DataNodeInfo");
+      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=DataNodeInfo");
       mbs.registerMBean(this, mxbeanName);
-    } catch (javax.management.JMException e1) {
-      LOG.warn("Failed to register DataNode MBean");
+    } catch (javax.management.JMException e) {
+      LOG.warn("Failed to register DataNode MXBean", e);
     }
   }
   
@@ -1829,63 +1829,44 @@ public class DataNode extends Configured
     return NetUtils.createSocketAddr(address);
   }
 
-  /**
-   * Class for representing the Datanode volume information in MBean interface
-   */
-  class VolumeInfo{
-    private final String directory;
-    private final long usedSpace;
-    private final long freeSpace;
-    private final long reservedSpace;
-    
-    VolumeInfo(String dir, long usedSpace, long freeSpace, long reservedSpace) {
-      this.directory = dir;
-      this.usedSpace = usedSpace;
-      this.freeSpace = freeSpace;
-      this.reservedSpace = reservedSpace;
-    }
-  }
-
-  @Override
+  @Override // DataNodeMXBean
   public String getVersion() {
     return VersionInfo.getVersion();
   }
   
-  @Override
+  @Override // DataNodeMXBean
   public String getRpcPort(){
     InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
         this.getConf().get("dfs.datanode.ipc.address"));
     return Integer.toString(ipcAddr.getPort());
   }
 
-  @Override
+  @Override // DataNodeMXBean
   public String getHttpPort(){
     return this.getConf().get("dfs.datanode.info.port");
   }
 
-  @Override
+  @Override // DataNodeMXBean
   public String getNamenodeAddress(){
     return nameNodeAddr.getHostName();
   }
 
-  @Override
-  public synchronized String getVolumeInfo() {
-    List<VolumeInfo> list = new ArrayList<VolumeInfo>(3);
-    FSVolume[] volumes = ((FSDataset) this.data).volumes.volumes;
-    for (int idx = 0; idx < volumes.length; idx++) {
-      try {
-        VolumeInfo info = new VolumeInfo(volumes[idx].toString(),
-                                         volumes[idx].getDfsUsed(),
-                                         volumes[idx].getAvailable(),
-                                         volumes[idx].getReserved());
-        list.add(info);
-      } catch (IOException e) {
-        LOG.warn("Exception while accessing volume info ", e);
-      }
-        
+  /**
+   * Returned information is a JSON representation of a map with the
+   * volume name as the key and a map of volume attribute names to
+   * their values as the value.
+   */
+  @Override // DataNodeMXBean
+  public String getVolumeInfo() {
+    final Map<String, Object> info = new HashMap<String, Object>();
+    Collection<VolumeInfo> volumes = ((FSDataset)this.data).getVolumeInfo();
+    for (VolumeInfo v : volumes) {
+      final Map<String, Object> innerInfo = new HashMap<String, Object>();
+      innerInfo.put("usedSpace", v.usedSpace);
+      innerInfo.put("freeSpace", v.freeSpace);
+      innerInfo.put("reservedSpace", v.reservedSpace);
+      info.put(v.directory, innerInfo);
     }
-    VolumeInfo[] result = new VolumeInfo[list.size()];
-    list.toArray(result);
-    return JSON.toString(result);
+    return JSON.toString(info);
   }
 }
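
For illustration only, the JSON string returned by the new getVolumeInfo()
has roughly the following shape (the volume path and byte counts here are
made-up sample values, not output from a real datanode):

    {"/data/1/dfs/dn/current":
        {"usedSpace":1024000,"freeSpace":512000000,"reservedSpace":0}}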

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java?rev=1077635&r1=1077634&r2=1077635&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeMXBean.java Fri Mar  4 04:38:50 2011
@@ -22,9 +22,40 @@ package org.apache.hadoop.hdfs.server.da
  * This is the JMX management interface for data node information
  */
 public interface DataNodeMXBean {
+  
+  /**
+   * Gets the version of Hadoop.
+   * 
+   * @return the version of Hadoop
+   */
   public String getVersion();
+  
+  /**
+   * Gets the rpc port.
+   * 
+   * @return the rpc port
+   */
   public String getRpcPort();
+  
+  /**
+   * Gets the http port.
+   * 
+   * @return the http port
+   */
   public String getHttpPort();
+  
+  /**
+   * Gets the namenode host name.
+   * 
+   * @return the namenode host name
+   */
   public String getNamenodeAddress();
+  
+  /**
+   * Gets the information of each volume on the Datanode. Please
+   * see the implementation for the format of returned information.
+   * 
+   * @return the volume info
+   */
   public String getVolumeInfo();
 }
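
As a minimal usage sketch, the interface above can be read back through the
platform MBean server when the caller runs in the same JVM as the datanode
(the approach the new TestDataNodeMXBean later in this commit also takes).
The probe class below is illustrative only and not part of this patch; the
object name matches the one registered in DataNode.registerMXBean():

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class DataNodeJmxProbe {
      public static void main(String[] args) throws Exception {
        // Works only in a JVM where a datanode has registered its MXBean.
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName("HadoopInfo:type=DataNodeInfo");
        // JMX derives attribute names from the MXBean getter names.
        System.out.println(mbs.getAttribute(name, "Version"));
        System.out.println(mbs.getAttribute(name, "VolumeInfo"));
      }
    }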

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=1077635&r1=1077634&r2=1077635&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Fri Mar  4 04:38:50 2011
@@ -26,6 +26,7 @@ import java.io.InputStream;
 import java.io.RandomAccessFile;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -611,6 +612,7 @@ public class FSDataset implements FSCons
       
       return removed_vols;
     }
+    
       
     public String toString() {
       StringBuffer sb = new StringBuffer();
@@ -1577,4 +1579,46 @@ public class FSDataset implements FSCons
   public String getStorageInfo() {
     return toString();
   }
+  
+  /**
+   * Class for representing the Datanode volume information
+   */
+  static class VolumeInfo {
+    final String directory;
+    final long usedSpace;
+    final long freeSpace;
+    final long reservedSpace;
+
+    VolumeInfo(String dir, long usedSpace, long freeSpace, long reservedSpace) {
+      this.directory = dir;
+      this.usedSpace = usedSpace;
+      this.freeSpace = freeSpace;
+      this.reservedSpace = reservedSpace;
+    }
+  }  
+  
+  synchronized Collection<VolumeInfo> getVolumeInfo() {
+    Collection<VolumeInfo> info = new ArrayList<VolumeInfo>();
+    synchronized(volumes.volumes) {
+      for (FSVolume volume : volumes.volumes) {
+        long used = 0;
+        try {
+          used = volume.getDfsUsed();
+        } catch (IOException e) {
+          DataNode.LOG.warn(e.getMessage());
+        }
+        
+        long free = 0;
+        try {
+          free = volume.getAvailable();
+        } catch (IOException e) {
+          DataNode.LOG.warn(e.getMessage());
+        }
+        
+        info.add(new VolumeInfo(volume.toString(), used, free, 
+            volume.getReserved()));
+      }
+      return info;
+    }
+  }
 }

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1077635&r1=1077634&r2=1077635&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Fri Mar  4 04:38:50 2011
@@ -82,7 +82,6 @@ import java.util.Map.Entry;
 
 import javax.management.MBeanServer;
 import javax.management.NotCompliantMBeanException;
-import javax.management.ObjectInstance;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
@@ -307,7 +306,7 @@ public class FSNamesystem implements FSC
 
   // precision of access times.
   private long accessTimePrecision = 0;
-
+  
   /**
    * FSNamesystem constructor.
    */
@@ -380,14 +379,7 @@ public class FSNamesystem implements FSC
       dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts()));
     }
     
-    // regist MXBean
-    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
-    try{
-      ObjectName mxbeanName = new ObjectName("hadoop:type=NameNodeInfo");
-      ObjectInstance oi = mbs.registerMBean(this, mxbeanName);
-    }catch(Exception e){
-      LOG.warn("caught exception while registrate MXBean for NameNodeInfo : " + e.getMessage());
-    }
+    registerMXBean();
   }
 
   public static Collection<File> getNamespaceDirs(Configuration conf) {
@@ -5138,165 +5130,147 @@ public class FSNamesystem implements FSC
     return authMethod;
   }
 
-  //implement NameNodeMXBean
   /**
-   * Class representing Namenode information for JMX interfaces
+   * Register NameNodeMXBean
    */
-  class NodeInfo{
-    private Map<String, Map<String,String>> ni;
-
-    public void setNodeInfo(Map<String, Map<String,String>>  input){
-      this.ni = input;
-    }
-    Map<String, Map<String, String>> getNodeInfo(){
-      return ni;
-    }
-
-    @Override
-    public String toString(){
-      return new JSON().toJSON(ni);
+  private void registerMXBean() {
+    // register MXBean
+    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+    try {
+      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=NameNodeInfo");
+      mbs.registerMBean(this, mxbeanName);
+    } catch (javax.management.JMException e) {
+      LOG.warn("Failed to register NameNode MXBean", e);
     }
   }
-  @Override
+
+  @Override // NameNodeMXBean
   public String getVersion() {
     return VersionInfo.getVersion();
   }
 
-  @Override
-  public String getUsed(){
-    return Long.toString(this.getCapacityUsed());
+  @Override // NameNodeMXBean
+  public long getUsed() {
+    return this.getCapacityUsed();
   }
 
-  @Override
-  public String getFree(){
-    return Long.toString(this.getCapacityRemaining());
+  @Override // NameNodeMXBean
+  public long getFree() {
+    return this.getCapacityRemaining();
   }
 
-  @Override
-  public String getTotal(){
-    return Long.toString(this.getCapacityTotal());
+  @Override // NameNodeMXBean
+  public long getTotal() {
+    return this.getCapacityTotal();
   }
 
-  @Override
+  @Override // NameNodeMXBean
   public String getSafemode() {
     if (!this.isInSafeMode())
       return "";
     return "Safe mode is ON." + this.getSafeModeTip();
   }
 
-  @Override
-  public boolean isUpgradeFinalized(){
+  @Override // NameNodeMXBean
+  public boolean isUpgradeFinalized() {
     return this.getFSImage().isUpgradeFinalized();
   }
-  
-  @Override
-  public String getNondfs(){
-    return Long.toString(getCapacityUsedNonDFS());
+
+  @Override // NameNodeMXBean
+  public long getNonDfsUsedSpace() {
+    return getCapacityUsedNonDFS();
   }
-  
-  @Override
-  public String getPercentused(){
-    return Float.toString(getCapacityUsedPercent());
+
+  @Override // NameNodeMXBean
+  public float getPercentUsed() {
+    return getCapacityUsedPercent();
   }
-  
-  @Override
-  public String getPercentRemaining(){
-    return Float.toString(getCapacityRemainingPercent());
+
+  @Override // NameNodeMXBean
+  public float getPercentRemaining() {
+    return getCapacityRemainingPercent();
   }
-  
-  @Override
-  public String getTotalblocks(){
-    return Long.toString(getBlocksTotal());
+
+  @Override // NameNodeMXBean
+  public long getTotalBlocks() {
+    return getBlocksTotal();
   }
-  
-  @Override
-  public String getTotalfiles(){
-    return Long.toString(getFilesTotal());
+
+  @Override // NameNodeMXBean
+  public long getTotalFiles() {
+    return getFilesTotal();
   }
-  
-  @Override
-  public String getAliveNodeInfo(){
-    NodeInfo ni = new NodeInfo();
-    ArrayList<DatanodeDescriptor> aliveNodeList = new ArrayList<DatanodeDescriptor>();
-    ArrayList<DatanodeDescriptor> deadNodeList = new ArrayList<DatanodeDescriptor>();
-    this.DFSNodesStatus(aliveNodeList, deadNodeList);
-    Map<String, Map<String,String>>  info = new HashMap<String, Map<String,String>>();
-    for (DatanodeDescriptor node : aliveNodeList ){
-      // key -- hostname
-      String hostname = node.getHostName();
-      // value -- Map<String, String> innerinfo
-      Map<String, String> innerinfo = new HashMap<String, String>();
-      // lastcontact
-      String lastContactKey = "lastcontact";
-      String lastContactValue = new Long(getLastContact(node)).toString();
-      innerinfo.put(lastContactKey, lastContactValue);
-      // usedspace
-      String usedspaceKey = "usedspace";
-      String usedspaceValue = getDfsUsed(node);
-      innerinfo.put(usedspaceKey, usedspaceValue);
-      info.put(hostname, innerinfo);
+
+  @Override // NameNodeMXBean
+  public int getThreads() {
+    return ManagementFactory.getThreadMXBean().getThreadCount();
+  }
+
+  /**
+   * Returned information is a JSON representation of a map with the host name
+   * as the key and a map of live node attribute names to their values as the
+   * value.
+   */
+  @Override // NameNodeMXBean
+  public String getLiveNodes() {
+    final Map<String, Object> info = new HashMap<String, Object>();
+    final ArrayList<DatanodeDescriptor> aliveNodeList =
+      this.getDatanodeListForReport(DatanodeReportType.LIVE); 
+    for (DatanodeDescriptor node : aliveNodeList) {
+      final Map<String, Object> innerinfo = new HashMap<String, Object>();
+      innerinfo.put("lastContact", getLastContact(node));
+      innerinfo.put("usedSpace", getDfsUsed(node));
+      info.put(node.getHostName(), innerinfo);
     }
-    ni.setNodeInfo(info);
-    return ni.toString();
+    return JSON.toString(info);
   }
-  
-  @Override
-  public String getDeadNodeInfo(){
-    NodeInfo ni = new NodeInfo();
-    ArrayList<DatanodeDescriptor> aliveNodeList = new ArrayList<DatanodeDescriptor>();
-    ArrayList<DatanodeDescriptor> deadNodeList = new ArrayList<DatanodeDescriptor>();
-    this.DFSNodesStatus(aliveNodeList, deadNodeList);
-    Map<String, Map<String,String>>  info = new HashMap<String, Map<String,String>>();
-    for (DatanodeDescriptor node : deadNodeList ){
-      // key -- hostname
-      String hostname = node.getHostName();
-      // value -- Map<String, String> innerinfo
-      Map<String, String> innerinfo = new HashMap<String, String>();
-      // lastcontact
-      String lastContactKey = "lastcontact";
-      String lastContactValue = new Long(getLastContact(node)).toString();
-      innerinfo.put(lastContactKey, lastContactValue);
-      info.put(hostname, innerinfo);
-    }
-    ni.setNodeInfo(info);
-    return ni.toString();
-  }
-
-  @Override
-  public String getDecomNodeInfo(){
-    NodeInfo ni = new NodeInfo();
-    ArrayList<DatanodeDescriptor> decomNodeList = new ArrayList<DatanodeDescriptor>();
-    decomNodeList = this.getDecommissioningNodes();
-    Map<String, Map<String,String>>  info = new HashMap<String, Map<String,String>>();
-    for (DatanodeDescriptor node : decomNodeList ){
-      // key -- hostname
-      String hostname = node.getHostName();
-      // value -- Map<String, String> innerinfo
-      Map<String, String> innerinfo = new HashMap<String, String>();
-      // UnderReplicatedBlocks 
-      String underReplicatedBlocksKey = "underReplicatedBlocksValue";
-      String underReplicatedBlocksValue = new Integer(node.decommissioningStatus.getUnderReplicatedBlocks()).toString() ;
-      innerinfo.put(underReplicatedBlocksKey, underReplicatedBlocksValue);
-      // decommissionOnlyReplicas
-      String decommissionOnlyReplicasKey = "decommissionOnlyReplicas";
-      String decommissionOnlyReplicasValue = new Integer(node.decommissioningStatus.getDecommissionOnlyReplicas() ).toString() ;
-      innerinfo.put(decommissionOnlyReplicasKey, decommissionOnlyReplicasValue);
-      // decommissionOnlyReplicas
-      String underReplicatedInOpenFilesKey = "underReplicatedInOpenFiles";
-      String underReplicatedInOpenFilesValue = new Integer(node.decommissioningStatus.getUnderReplicatedInOpenFiles()  ).toString() ;
-      innerinfo.put(underReplicatedInOpenFilesKey, underReplicatedInOpenFilesValue);
-      info.put(hostname, innerinfo);
+
+  /**
+   * Returned information is a JSON representation of map with host name as the
+   * key and value is a map of dead node attribute keys to its values
+   */
+  @Override // NameNodeMXBean
+  public String getDeadNodes() {
+    final Map<String, Object> info = new HashMap<String, Object>();
+    final ArrayList<DatanodeDescriptor> deadNodeList =
+      this.getDatanodeListForReport(DatanodeReportType.DEAD); 
+    for (DatanodeDescriptor node : deadNodeList) {
+      final Map<String, Object> innerinfo = new HashMap<String, Object>();
+      innerinfo.put("lastContact", getLastContact(node));
+      info.put(node.getHostName(), innerinfo);
     }
-    ni.setNodeInfo(info);
-    return ni.toString();
+    return JSON.toString(info);
   }
-  
-  private String getLastContact (DatanodeDescriptor alivenode) {
-    return Long.toString((System.currentTimeMillis() - alivenode.getLastUpdate())/1000);    
+
+  /**
+   * Returned information is a JSON representation of a map with the host name
+   * as the key and a map of decommissioning node attribute names to their
+   * values as the value.
+   */
+  @Override // NameNodeMXBean
+  public String getDecomNodes() {
+    final Map<String, Object> info = new HashMap<String, Object>();
+    final ArrayList<DatanodeDescriptor> decomNodeList = 
+      this.getDecommissioningNodes();
+    for (DatanodeDescriptor node : decomNodeList) {
+      final Map<String, Object> innerinfo = new HashMap<String, Object>();
+      innerinfo.put("underReplicatedBlocks", node.decommissioningStatus
+          .getUnderReplicatedBlocks());
+      innerinfo.put("decommissionOnlyReplicas", node.decommissioningStatus
+          .getDecommissionOnlyReplicas());
+      innerinfo.put("underReplicateInOpenFiles", node.decommissioningStatus
+          .getUnderReplicatedInOpenFiles());
+      info.put(node.getHostName(), innerinfo);
+    }
+    return JSON.toString(info);
   }
 
-  private String getDfsUsed(DatanodeDescriptor alivenode){
-    return Long.toString(alivenode.getDfsUsed());
+  private long getLastContact(DatanodeDescriptor alivenode) {
+    return (System.currentTimeMillis() - alivenode.getLastUpdate())/1000;
   }
 
+  private long getDfsUsed(DatanodeDescriptor alivenode) {
+    return alivenode.getDfsUsed();
+  }
 }
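
For illustration only, the three node-info getters above serialize to JSON
of roughly the following shape (host names and figures are made-up sample
values):

    getLiveNodes():  {"dn1.example.com":{"lastContact":2,"usedSpace":1024000}}
    getDeadNodes():  {"dn2.example.com":{"lastContact":86400}}
    getDecomNodes(): {"dn3.example.com":{"underReplicatedBlocks":5,
                      "decommissionOnlyReplicas":1,"underReplicatedInOpenFiles":0}}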

Modified: hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java?rev=1077635&r1=1077634&r2=1077635&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java (original)
+++ hadoop/common/branches/branch-0.20-security-patches/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java Fri Mar  4 04:38:50 2011
@@ -23,21 +23,111 @@ package org.apache.hadoop.hdfs.server.na
  */
 public interface NameNodeMXBean {
 
+  /**
+   * Gets the version of Hadoop.
+   * 
+   * @return the version
+   */
   public String getVersion();
-  public String getUsed();
-  public String getFree();
-  public String getTotal();
+  
+  /**
+   * Gets the used space by data nodes.
+   * 
+   * @return the used space by data nodes
+   */
+  public long getUsed();
+  
+  /**
+   * Gets the total unused raw bytes.
+   * 
+   * @return the total unused raw bytes
+   */
+  public long getFree();
+  
+  /**
+   * Gets total raw bytes including non-dfs used space.
+   * 
+   * @return the total raw bytes including non-dfs used space
+   */
+  public long getTotal();
+  
+  /**
+   * Gets the safemode status.
+   * 
+   * @return the safemode status
+   */
   public String getSafemode();
+  
+  /**
+   * Checks if upgrade is finalized.
+   * 
+   * @return true, if upgrade is finalized
+   */
   public boolean isUpgradeFinalized();
-  public String getNondfs();
-  public String getPercentused();
-  public String getPercentRemaining();
-  public String getTotalblocks();
-  public String getTotalfiles();
+  
+  /**
+   * Gets the total space used by data nodes for non-DFS purposes, such as
+   * storing temporary files on the local file system.
+   * 
+   * @return the non dfs space of the cluster
+   */
+  public long getNonDfsUsedSpace();
+  
+  /**
+   * Gets the total space used by data nodes as a percentage of the total
+   * capacity.
+   * 
+   * @return the percentage of used space on the cluster.
+   */
+  public float getPercentUsed();
+  
+  /**
+   * Gets the total space remaining on data nodes as a percentage of the
+   * total capacity.
+   * 
+   * @return the percentage of the remaining space on the cluster
+   */
+  public float getPercentRemaining();
+  
+  /**
+   * Gets the total number of blocks on the cluster.
+   * 
+   * @return the total number of blocks of the cluster
+   */
+  public long getTotalBlocks();
+  
+  /**
+   * Gets the total number of files on the cluster.
+   * 
+   * @return the total number of files on the cluster
+   */
+  public long getTotalFiles();
+  
+  /**
+   * Gets the number of threads.
+   * 
+   * @return the number of threads
+   */
+  public int getThreads();
 
-
-  // each data node 
-  public String getAliveNodeInfo();
-  public String getDeadNodeInfo();
-  public String getDecomNodeInfo();
+  /**
+   * Gets the live node information of the cluster.
+   * 
+   * @return the live node information
+   */
+  public String getLiveNodes();
+  
+  /**
+   * Gets the dead node information of the cluster.
+   * 
+   * @return the dead node information
+   */
+  public String getDeadNodes();
+  
+  /**
+   * Gets the decommissioning node information of the cluster.
+   * 
+   * @return the decommissioning node information
+   */
+  public String getDecomNodes();
 }
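
As a minimal usage sketch, the same attributes can also be read from outside
the namenode process through a remote JMX connector. This assumes the
namenode JVM was started with the standard com.sun.management.jmxremote
options (not something this patch configures), and the host and port below
are placeholders:

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class NameNodeJmxProbe {
      public static void main(String[] args) throws Exception {
        // nn.example.com:8004 is a placeholder for the host and port set via
        // com.sun.management.jmxremote.port on the namenode JVM.
        JMXServiceURL url = new JMXServiceURL(
            "service:jmx:rmi:///jndi/rmi://nn.example.com:8004/jmxrmi");
        JMXConnector connector = JMXConnectorFactory.connect(url);
        try {
          MBeanServerConnection mbs = connector.getMBeanServerConnection();
          ObjectName name = new ObjectName("HadoopInfo:type=NameNodeInfo");
          System.out.println("used:      " + mbs.getAttribute(name, "Used"));
          System.out.println("liveNodes: " + mbs.getAttribute(name, "LiveNodes"));
        } finally {
          connector.close();
        }
      }
    }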

Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java?rev=1077635&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java Fri Mar  4 04:38:50 2011
@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.lang.management.ManagementFactory;
+import java.util.List;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+import junit.framework.Assert;
+
+/**
+ * Class for testing {@link DataNodeMXBean} implementation
+ */
+public class TestDataNodeMXBean {
+  @Test
+  public void testDataNodeMXBean() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+
+    try {
+      List<DataNode> datanodes = cluster.getDataNodes();
+      Assert.assertEquals(1, datanodes.size());
+      DataNode datanode = datanodes.get(0);
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
+      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=DataNodeInfo");
+      // get attribute "Version"
+      String version = (String)mbs.getAttribute(mxbeanName, "Version");
+      Assert.assertEquals(datanode.getVersion(), version);
+      // get attribute "RpcPort"
+      String rpcPort = (String)mbs.getAttribute(mxbeanName, "RpcPort");
+      Assert.assertEquals(datanode.getRpcPort(), rpcPort);
+      // get attribute "HttpPort"
+      String httpPort = (String)mbs.getAttribute(mxbeanName, "HttpPort");
+      Assert.assertEquals(datanode.getHttpPort(), httpPort);
+      // get attribute "NamenodeAddress"
+      String namenodeAddress = (String)mbs.getAttribute(mxbeanName,
+          "NamenodeAddress");
+      Assert.assertEquals(datanode.getNamenodeAddress(), namenodeAddress);
+      // get attribute "VolumeInfo"
+      String volumeInfo = (String)mbs.getAttribute(mxbeanName, "VolumeInfo");
+      Assert.assertEquals(datanode.getVolumeInfo(), volumeInfo);
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+}

Added: hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1077635&view=auto
==============================================================================
--- hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (added)
+++ hadoop/common/branches/branch-0.20-security-patches/src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Fri Mar  4 04:38:50 2011
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.lang.management.ManagementFactory;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+
+import org.junit.Test;
+import junit.framework.Assert;
+
+/**
+ * Class for testing {@link NameNodeMXBean} implementation
+ */
+public class TestNameNodeMXBean {
+  @Test
+  public void testNameNodeMXBeanInfo() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+
+    try {
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster.waitActive();
+
+      FSNamesystem fsn = cluster.getNameNode().namesystem;
+
+      MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+      ObjectName mxbeanName = new ObjectName("HadoopInfo:type=NameNodeInfo");
+      // get attribute "Version"
+      String version = (String) mbs.getAttribute(mxbeanName, "Version");
+      Assert.assertEquals(fsn.getVersion(), version);
+      // get attribute "Used"
+      Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
+      Assert.assertEquals(fsn.getUsed(), used.longValue());
+      // get attribute "Total"
+      Long total = (Long) mbs.getAttribute(mxbeanName, "Total");
+      Assert.assertEquals(fsn.getTotal(), total.longValue());
+      // get attribute "safemode"
+      String safemode = (String) mbs.getAttribute(mxbeanName, "Safemode");
+      Assert.assertEquals(fsn.getSafemode(), safemode);
+      // get attribute "NonDfsUsedSpace"
+      Long nondfs = (Long) (mbs.getAttribute(mxbeanName, "NonDfsUsedSpace"));
+      Assert.assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
+      // get attribute "PercentRemaining"
+      Float percentremaining = (Float) (mbs.getAttribute(mxbeanName,
+          "PercentRemaining"));
+      Assert.assertEquals(fsn.getPercentRemaining(), percentremaining
+          .floatValue());
+      // get attribute "TotalBlocks"
+      Long totalblocks = (Long) (mbs.getAttribute(mxbeanName, "TotalBlocks"));
+      Assert.assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
+      // get attribute "LiveNodes"
+      String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName,
+          "LiveNodes"));
+      Assert.assertEquals(fsn.getLiveNodes(), alivenodeinfo);
+      // get attribute "DeadNodes"
+      String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
+          "DeadNodes"));
+      Assert.assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}