Posted to hdfs-commits@hadoop.apache.org by at...@apache.org on 2012/02/25 23:14:53 UTC

svn commit: r1293707 - in /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/org/apache/hadoop/hdfs/server/namenode/ src/test/java/org/apache/hadoop/hdfs/server/namenode/

Author: atm
Date: Sat Feb 25 22:14:53 2012
New Revision: 1293707

URL: http://svn.apache.org/viewvc?rev=1293707&view=rev
Log:
HDFS-2978. The NameNode should expose name dir statuses via JMX. Contributed by Aaron T. Myers.

Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1293707&r1=1293706&r2=1293707&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Sat Feb 25 22:14:53 2012
@@ -228,7 +228,9 @@ Release 0.23.3 - UNRELEASED 
 
   INCOMPATIBLE CHANGES
 
-  NEW FEATURES                                                                    
+  NEW FEATURES
+
+    HDFS-2978. The NameNode should expose name dir statuses via JMX. (atm)
   
   IMPROVEMENTS
 

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1293707&r1=1293706&r2=1293707&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Sat Feb 25 22:14:53 2012
@@ -141,6 +141,8 @@ import org.apache.hadoop.hdfs.server.com
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.common.Util;
@@ -4496,6 +4498,30 @@ public class FSNamesystem implements Nam
   public String getBlockPoolId() {
     return blockPoolId;
   }
+  
+  @Override  // NameNodeMXBean
+  public String getNameDirStatuses() {
+    Map<String, Map<File, StorageDirType>> statusMap =
+      new HashMap<String, Map<File, StorageDirType>>();
+    
+    Map<File, StorageDirType> activeDirs = new HashMap<File, StorageDirType>();
+    for (Iterator<StorageDirectory> it
+        = getFSImage().getStorage().dirIterator(); it.hasNext();) {
+      StorageDirectory st = it.next();
+      activeDirs.put(st.getRoot(), st.getStorageDirType());
+    }
+    statusMap.put("active", activeDirs);
+    
+    List<Storage.StorageDirectory> removedStorageDirs
+        = getFSImage().getStorage().getRemovedStorageDirs();
+    Map<File, StorageDirType> failedDirs = new HashMap<File, StorageDirType>();
+    for (StorageDirectory st : removedStorageDirs) {
+      failedDirs.put(st.getRoot(), st.getStorageDirType());
+    }
+    statusMap.put("failed", failedDirs);
+    
+    return JSON.toString(statusMap);
+  }
 
   /** @return the block manager. */
   public BlockManager getBlockManager() {

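[Editor's note, not part of the commit: a minimal sketch of the JSON shape that getNameDirStatuses() above would produce and how a caller could parse it with the same Jetty JSON utility. The directory paths and storage-type values shown are hypothetical examples, not taken from any real deployment.]

import java.util.Map;
import org.mortbay.util.ajax.JSON;

public class NameDirStatusesShape {
  public static void main(String[] args) {
    // Hypothetical sample of the string returned by getNameDirStatuses():
    // two top-level keys, "active" and "failed", each mapping storage roots
    // to their storage directory type.
    String sample =
        "{\"active\":{\"/data/1/dfs/name\":\"IMAGE_AND_EDITS\"}," +
        "\"failed\":{\"/data/2/dfs/name\":\"IMAGE_AND_EDITS\"}}";
    @SuppressWarnings("unchecked")
    Map<String, Map<String, String>> statusMap =
        (Map<String, Map<String, String>>) JSON.parse(sample);
    System.out.println("Active dirs: " + statusMap.get("active").keySet());
    System.out.println("Failed dirs: " + statusMap.get("failed").keySet());
  }
}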
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java?rev=1293707&r1=1293706&r2=1293707&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java Sat Feb 25 22:14:53 2012
@@ -166,4 +166,12 @@ public interface NameNodeMXBean {
    * @return the block pool id
    */
   public String getBlockPoolId();
+
+  /**
+   * Get status information about the directories storing image and edits logs
+   * of the NN.
+   * 
+   * @return the name dir status information, as a JSON string.
+   */
+  public String getNameDirStatuses();
 }

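[Editor's note, not part of the commit: as a usage illustration, a remote client could read the new attribute over JMX roughly as follows. The JMX service URL is a placeholder, and the "Hadoop:service=NameNode,name=NameNodeInfo" bean name is an assumption mirroring the MXBean name used in TestNameNodeMXBean; verify both against your deployment.]

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class NameDirStatusesClient {
  public static void main(String[] args) throws Exception {
    // Hypothetical JMX endpoint; adjust host/port to however the NN exposes JMX.
    JMXServiceURL url =
        new JMXServiceURL("service:jmx:rmi:///jndi/rmi://nn-host:8004/jmxrmi");
    JMXConnector connector = JMXConnectorFactory.connect(url);
    try {
      MBeanServerConnection mbs = connector.getMBeanServerConnection();
      // Assumed bean name for the NameNode's NameNodeMXBean registration.
      ObjectName name =
          new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
      String statuses = (String) mbs.getAttribute(name, "NameDirStatuses");
      System.out.println(statuses);
    } finally {
      connector.close();
    }
  }
}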
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1293707&r1=1293706&r2=1293707&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Sat Feb 25 22:14:53 2012
@@ -17,23 +17,33 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.junit.Assert.*;
+
+import java.io.File;
 import java.lang.management.ManagementFactory;
+import java.net.URI;
+import java.util.Collection;
+import java.util.Map;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.util.VersionInfo;
 
 import org.junit.Test;
+import org.mortbay.util.ajax.JSON;
+
 import junit.framework.Assert;
 
 /**
  * Class for testing {@link NameNodeMXBean} implementation
  */
 public class TestNameNodeMXBean {
+  @SuppressWarnings({ "unchecked", "deprecation" })
   @Test
   public void testNameNodeMXBeanInfo() throws Exception {
     Configuration conf = new Configuration();
@@ -88,8 +98,46 @@ public class TestNameNodeMXBean {
       String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
           "DeadNodes"));
       Assert.assertEquals(fsn.getDeadNodes(), deadnodeinfo);
+      // get attribute NameDirStatuses
+      String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
+          "NameDirStatuses"));
+      Assert.assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
+      Map<String, Map<String, String>> statusMap =
+        (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
+      Collection<URI> nameDirUris = cluster.getNameDirs(0);
+      for (URI nameDirUri : nameDirUris) {
+        File nameDir = new File(nameDirUri);
+        System.out.println("Checking for the presence of " + nameDir +
+            " in active name dirs.");
+        assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
+      }
+      assertEquals(2, statusMap.get("active").size());
+      assertEquals(0, statusMap.get("failed").size());
+      
+      // This will cause the first dir to fail.
+      File failedNameDir = new File(nameDirUris.toArray(new URI[0])[0]);
+      assertEquals(0, FileUtil.chmod(failedNameDir.getAbsolutePath(), "000"));
+      cluster.getNameNodeRpc().rollEditLog();
+      
+      nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
+          "NameDirStatuses"));
+      statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
+      for (URI nameDirUri : nameDirUris) {
+        File nameDir = new File(nameDirUri);
+        String expectedStatus =
+            nameDir.equals(failedNameDir) ? "failed" : "active";
+        System.out.println("Checking for the presence of " + nameDir +
+            " in " + expectedStatus + " name dirs.");
+        assertTrue(statusMap.get(expectedStatus).containsKey(
+            nameDir.getAbsolutePath()));
+      }
+      assertEquals(1, statusMap.get("active").size());
+      assertEquals(1, statusMap.get("failed").size());
     } finally {
       if (cluster != null) {
+        for (URI dir : cluster.getNameDirs(0)) {
+          FileUtil.chmod(new File(dir).toString(), "700");
+        }
         cluster.shutdown();
       }
     }
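
[Editor's note, not part of the commit: besides a JMX client, the attribute should also be reachable through the NameNode's HTTP /jmx servlet. A rough sketch, assuming a hypothetical host, the era's default web port 50070, and the same NameNodeInfo bean name as above:]

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;

public class NameDirStatusesHttp {
  public static void main(String[] args) throws Exception {
    // Hypothetical NN web address; the qry parameter filters to the NameNodeInfo bean.
    URL url = new URL(
        "http://nn-host:50070/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo");
    BufferedReader in = new BufferedReader(
        new InputStreamReader(url.openStream(), "UTF-8"));
    try {
      String line;
      while ((line = in.readLine()) != null) {
        // NameDirStatuses appears as a JSON-encoded string field in the response.
        System.out.println(line);
      }
    } finally {
      in.close();
    }
  }
}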