Posted to hdfs-commits@hadoop.apache.org by wh...@apache.org on 2014/08/08 06:27:47 UTC

svn commit: r1616669 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: CHANGES.txt src/main/webapps/hdfs/dfshealth.html src/main/webapps/hdfs/dfshealth.js src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

Author: wheat9
Date: Fri Aug  8 04:27:47 2014
New Revision: 1616669

URL: http://svn.apache.org/r1616669
Log:
HDFS-6722. Display readable last contact time for dead nodes on NN webUI. Contributed by Ming Ma.

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1616669&r1=1616668&r2=1616669&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Aug  8 04:27:47 2014
@@ -376,6 +376,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6740. Make FSDataset support adding data volumes dynamically. (Lei
     Xu via atm)
 
+    HDFS-6722. Display readable last contact time for dead nodes on NN webUI.
+    (Ming Ma via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html?rev=1616669&r1=1616668&r2=1616669&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html Fri Aug  8 04:27:47 2014
@@ -281,7 +281,7 @@
   {#DeadNodes}
   <tr class="danger">
     <td>{name} ({xferaddr})</td>
-    <td>{lastContact}</td>
+    <td>{#helper_lastcontact_tostring value="{lastContact}"/}</td>
     <td>Dead{?decommissioned}, Decommissioned{/decommissioned}</td>
     <td>-</td>
     <td>-</td>

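The template change above stops printing the raw lastContact value (which the NameNode reports as seconds since the last heartbeat) and instead routes it through a Dust.js context helper that renders it as an absolute date. A minimal sketch of the conversion the helper performs, in plain JavaScript (the function name is illustrative, not part of the commit):

    // lastContact arrives as seconds since the last heartbeat, so the
    // wall-clock time of that heartbeat is "now" minus lastContact seconds.
    function lastContactToString(lastContactSeconds) {
      // Date.now() is in milliseconds; lastContact is in seconds.
      return '' + new Date(Date.now() - 1000 * Number(lastContactSeconds));
    }

    // Example: a node last heard from two minutes ago.
    console.log(lastContactToString(120));
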
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js?rev=1616669&r1=1616668&r2=1616669&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js Fri Aug  8 04:27:47 2014
@@ -139,6 +139,14 @@
   }
 
   function load_datanode_info() {
+
+    var HELPERS = {
+      'helper_lastcontact_tostring' : function (chunk, ctx, bodies, params) {
+        var value = dust.helpers.tap(params.value, chunk, ctx);
+        return chunk.write('' + new Date(Date.now()-1000*Number(value)));
+      }
+    };
+
     function workaround(r) {
       function node_map_to_array(nodes) {
         var res = [];
@@ -160,7 +168,8 @@
       '/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo',
       guard_with_startup_progress(function (resp) {
         var data = workaround(resp.beans[0]);
-        dust.render('datanode-info', data, function(err, out) {
+        var base = dust.makeBase(HELPERS);
+        dust.render('datanode-info', base.push(data), function(err, out) {
           $('#tab-datanode').html(out);
           $('#ui-tabs a[href="#tab-datanode"]').tab('show');
         });

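The hunk above defines that helper and registers it through dust.makeBase, so the JMX data pushed on top of the base context can reach it from the template. A standalone sketch of the same render pattern, assuming the dustjs-linkedin and dustjs-helpers npm packages (the package wiring and the 'demo' template name are assumptions for illustration, not part of the commit):

    // dustjs-helpers wraps dustjs-linkedin and provides dust.helpers.tap.
    var dust = require('dustjs-helpers');

    var HELPERS = {
      'helper_lastcontact_tostring' : function (chunk, ctx, bodies, params) {
        // tap() resolves the interpolated "{lastContact}" parameter value.
        var value = dust.helpers.tap(params.value, chunk, ctx);
        return chunk.write('' + new Date(Date.now() - 1000 * Number(value)));
      }
    };

    // Compile and register a one-line template mirroring dfshealth.html.
    dust.loadSource(dust.compile(
      '{#helper_lastcontact_tostring value="{lastContact}"/}', 'demo'));

    // makeBase() builds a base context carrying the helpers; push() layers
    // the data on top, matching what load_datanode_info() now does.
    var base = dust.makeBase(HELPERS);
    dust.render('demo', base.push({ lastContact: 120 }), function (err, out) {
      console.log(out); // the last heartbeat time as a readable date string
    });
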
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1616669&r1=1616668&r2=1616669&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Fri Aug  8 04:27:47 2014
@@ -30,9 +30,13 @@ import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
 import org.apache.hadoop.util.VersionInfo;
@@ -58,11 +62,14 @@ public class TestNameNodeMXBean {
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-      NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+        NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
+    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
+
     MiniDFSCluster cluster = null;
 
     try {
-      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       cluster.waitActive();
 
       FSNamesystem fsn = cluster.getNameNode().namesystem;
@@ -70,6 +77,29 @@ public class TestNameNodeMXBean {
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
       ObjectName mxbeanName = new ObjectName(
           "Hadoop:service=NameNode,name=NameNodeInfo");
+
+      // Define include file to generate deadNodes metrics
+      FileSystem localFileSys = FileSystem.getLocal(conf);
+      Path workingDir = localFileSys.getWorkingDirectory();
+      Path dir = new Path(workingDir,
+          "build/test/data/temp/TestNameNodeMXBean");
+      Path includeFile = new Path(dir, "include");
+      assertTrue(localFileSys.mkdirs(dir));
+      StringBuilder includeHosts = new StringBuilder();
+      for(DataNode dn : cluster.getDataNodes()) {
+        includeHosts.append(dn.getDisplayName()).append("\n");
+      }
+      DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
+      conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
+      fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
+
+      cluster.stopDataNode(0);
+      while (fsn.getNumDatanodesInService() != 2) {
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {}
+      }
+
       // get attribute "ClusterId"
       String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
       assertEquals(fsn.getClusterId(), clusterId);
@@ -121,6 +151,15 @@ public class TestNameNodeMXBean {
       String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
           "DeadNodes"));
       assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+      Map<String, Map<String, Object>> deadNodes =
+          (Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
+      assertTrue(deadNodes.size() > 0);
+      for (Map<String, Object> deadNode : deadNodes.values()) {
+        assertTrue(deadNode.containsKey("lastContact"));
+        assertTrue(deadNode.containsKey("decommissioned"));
+        assertTrue(deadNode.containsKey("xferaddr"));
+      }
+
       // get attribute NodeUsage
       String nodeUsage = (String) (mbs.getAttribute(mxbeanName,
           "NodeUsage"));
@@ -181,7 +220,7 @@ public class TestNameNodeMXBean {
       assertEquals(1, statusMap.get("active").size());
       assertEquals(1, statusMap.get("failed").size());
       assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
-      assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() * 
+      assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
           cluster.getDataNodes().size(),
               mbs.getAttribute(mxbeanName, "CacheCapacity"));
     } finally {
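
For reference, the DeadNodes attribute that the new assertions parse is a JSON map keyed by node name, carrying the same fields the dfshealth.html template consumes. A sketch of its shape in JavaScript (the hostname and transfer address are made-up values, not from a real cluster):

    var deadNodes = JSON.parse(
      '{"host1.example.com":{"lastContact":42,' +
      '"decommissioned":false,"xferaddr":"10.0.0.1:50010"}}');

    Object.keys(deadNodes).forEach(function (name) {
      var node = deadNodes[name];
      // The three fields the new test asserts are present for each node.
      console.log(name, node.lastContact, node.decommissioned, node.xferaddr);
    });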