Posted to hdfs-commits@hadoop.apache.org by ar...@apache.org on 2013/11/20 06:31:04 UTC

svn commit: r1543710 - in /hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/java/ src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/apache/hadoop/hdfs/qjournal/server/ src/main/java/org/apache/hadoop/hdfs/ser...

Author: arp
Date: Wed Nov 20 05:31:02 2013
New Revision: 1543710

URL: http://svn.apache.org/r1543710
Log:
Merging r1543613 through r1543709 from trunk to branch HDFS-2832

Modified:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1543510-1543709

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Nov 20 05:31:02 2013
@@ -199,6 +199,9 @@ Trunk (Unreleased)
 
     HDFS-5366. recaching improvements (cmccabe)
 
+    HDFS-5511. improve CacheManipulator interface to allow better unit testing
+    (cmccabe)
+
   OPTIMIZATIONS
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
 
@@ -379,7 +382,9 @@ Trunk (Unreleased)
     nextEntryId (cmccabe)
 
     HDFS-5512. CacheAdmin -listPools fails with NPE when user lacks permissions
-    to view all pools (awang via cmccabe)
+    to view all pools (wang via cmccabe)
+
+    HDFS-5513. CacheAdmin commands fail when using . as the path. (wang)
 
 Release 2.3.0 - UNRELEASED
 
@@ -506,6 +511,8 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5073. TestListCorruptFileBlocks fails intermittently. (Arpit Agarwal)
 
+    HDFS-1386. TestJMXGet fails in jdk7 (jeagles)
+
   OPTIMIZATIONS
 
     HDFS-5239.  Allow FSNamesystem lock fairness to be configurable (daryn)

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1543510-1543709

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/PathBasedCacheDirective.java Wed Nov 20 05:31:02 2013
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
-import java.net.URI;
-
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -61,9 +59,7 @@ public class PathBasedCacheDirective {
      */
     public Builder(PathBasedCacheDirective directive) {
       this.id = directive.getId();
-      // deep-copy URI
-      URI uri = directive.getPath().toUri();
-      this.path = new Path(uri.getScheme(), uri.getAuthority(), uri.getPath());
+      this.path = directive.getPath();
       this.replication = directive.getReplication();
       this.pool = directive.getPool();
     }
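
The hunk above stops round-tripping the directive's path through a java.net.URI when copying it into a new Builder: org.apache.hadoop.fs.Path is immutable, so sharing the reference is safe, and the URI reconstruction evidently mangled relative paths such as "." (HDFS-5513; see the new test in TestPathBasedCacheRequests below). A minimal usage sketch of the copy-builder, using only setters that appear elsewhere in this commit:

    // Build a directive with a relative path, then copy-and-modify it.
    // The pool name "pool1" is borrowed from the tests in this commit.
    PathBasedCacheDirective original = new PathBasedCacheDirective.Builder()
        .setPath(new Path("."))
        .setPool("pool1")
        .build();
    PathBasedCacheDirective copy = new PathBasedCacheDirective.Builder(original)
        .setReplication((short) 2)    // remaining fields carry over unchanged
        .build();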

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java Wed Nov 20 05:31:02 2013
@@ -24,6 +24,8 @@ import java.net.InetSocketAddress;
 import java.util.HashMap;
 import java.util.Map;
 
+import javax.management.ObjectName;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -61,6 +63,7 @@ public class JournalNode implements Tool
   private JournalNodeRpcServer rpcServer;
   private JournalNodeHttpServer httpServer;
   private Map<String, Journal> journalsById = Maps.newHashMap();
+  private ObjectName journalNodeInfoBeanName;
 
   private File localDir;
 
@@ -181,6 +184,11 @@ public class JournalNode implements Tool
     for (Journal j : journalsById.values()) {
       IOUtils.cleanup(LOG, j);
     }
+
+    if (journalNodeInfoBeanName != null) {
+      MBeans.unregister(journalNodeInfoBeanName);
+      journalNodeInfoBeanName = null;
+    }
   }
 
   /**
@@ -256,7 +264,7 @@ public class JournalNode implements Tool
    * Register JournalNodeMXBean
    */
   private void registerJNMXBean() {
-    MBeans.register("JournalNode", "JournalNodeInfo", this);
+    journalNodeInfoBeanName = MBeans.register("JournalNode", "JournalNodeInfo", this);
   }
   
   private class ErrorReporter implements StorageErrorReporter {
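
Taken together, the JournalNode hunks keep the javax.management.ObjectName that MBeans.register() returns and unregister it in stop(); nulling the field afterwards (the same guard is added to DataNode.shutdown() and FSNamesystem.shutdown() below) makes shutdown idempotent, so stale JournalNodeInfo beans no longer accumulate when several instances start and stop in one JVM, which is the leak TestJMXGet now asserts against. A minimal sketch of the lifecycle in a hypothetical daemon class (MBeans is the Hadoop helper used in the diff, from org.apache.hadoop.metrics2.util):

    import javax.management.ObjectName;
    import org.apache.hadoop.metrics2.util.MBeans;

    class ExampleDaemon {               // hypothetical, for illustration only
      private ObjectName beanName;

      void start() {
        // register() returns the ObjectName needed for a later unregister.
        beanName = MBeans.register("Example", "ExampleInfo", this);
      }

      void stop() {
        if (beanName != null) {
          MBeans.unregister(beanName);
          beanName = null;              // makes a second stop() a no-op
        }
      }
    }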

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Wed Nov 20 05:31:02 2013
@@ -664,8 +664,9 @@ class BlockReceiver implements Closeable
         //                     
         long dropPos = lastCacheManagementOffset - CACHE_DROP_LAG_BYTES;
         if (dropPos > 0 && dropCacheBehindWrites) {
-          NativeIO.POSIX.posixFadviseIfPossible(block.getBlockName(),
-              outFd, 0, dropPos, NativeIO.POSIX.POSIX_FADV_DONTNEED);
+          NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+              block.getBlockName(), outFd, 0, dropPos,
+              NativeIO.POSIX.POSIX_FADV_DONTNEED);
         }
         lastCacheManagementOffset = offsetInBlock;
       }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Wed Nov 20 05:31:02 2013
@@ -375,8 +375,9 @@ class BlockSender implements java.io.Clo
         ((dropCacheBehindAllReads) ||
          (dropCacheBehindLargeReads && isLongRead()))) {
       try {
-        NativeIO.POSIX.posixFadviseIfPossible(block.getBlockName(),
-            blockInFd, lastCacheDropOffset, offset - lastCacheDropOffset,
+        NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+            block.getBlockName(), blockInFd, lastCacheDropOffset,
+            offset - lastCacheDropOffset,
             NativeIO.POSIX.POSIX_FADV_DONTNEED);
       } catch (Exception e) {
         LOG.warn("Unable to drop cache on file close", e);
@@ -674,8 +675,9 @@ class BlockSender implements java.io.Clo
 
     if (isLongRead() && blockInFd != null) {
       // Advise that this file descriptor will be accessed sequentially.
-      NativeIO.POSIX.posixFadviseIfPossible(block.getBlockName(),
-          blockInFd, 0, 0, NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
+      NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+          block.getBlockName(), blockInFd, 0, 0,
+          NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
     }
     
     // Trigger readahead of beginning of file if configured.
@@ -761,9 +763,9 @@ class BlockSender implements java.io.Clo
       long nextCacheDropOffset = lastCacheDropOffset + CACHE_DROP_INTERVAL_BYTES;
       if (offset >= nextCacheDropOffset) {
         long dropLength = offset - lastCacheDropOffset;
-        NativeIO.POSIX.posixFadviseIfPossible(block.getBlockName(),
-            blockInFd, lastCacheDropOffset, dropLength,
-            NativeIO.POSIX.POSIX_FADV_DONTNEED);
+        NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+            block.getBlockName(), blockInFd, lastCacheDropOffset,
+            dropLength, NativeIO.POSIX.POSIX_FADV_DONTNEED);
         lastCacheDropOffset = offset;
       }
     }
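
The BlockReceiver and BlockSender hunks above are the heart of HDFS-5511: rather than calling NativeIO.POSIX.posixFadviseIfPossible() directly, the datanode now fetches the current CacheManipulator and calls through it, giving tests a seam to observe or suppress native posix_fadvise calls. A minimal sketch of installing a stub from a test setup method, with the override signature copied from this commit (the do-nothing body is an assumption; the stock CacheManipulator would issue the real syscall):

    import java.io.FileDescriptor;
    import org.apache.hadoop.io.nativeio.NativeIO;
    import org.apache.hadoop.io.nativeio.NativeIOException;

    NativeIO.POSIX.setCacheManipulator(new NativeIO.POSIX.CacheManipulator() {
      @Override
      public void posixFadviseIfPossible(String name, FileDescriptor fd,
          long offset, long len, int flags) throws NativeIOException {
        // Swallow the advice: no native call, nothing dropped from cache.
      }
    });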

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Nov 20 05:31:02 2013
@@ -95,6 +95,8 @@ import java.security.PrivilegedException
 import java.util.*;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import javax.management.ObjectName;
+
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
@@ -209,6 +211,7 @@ public class DataNode extends Configured
   private boolean connectToDnViaHostname;
   ReadaheadPool readaheadPool;
   private final boolean getHdfsBlockLocationsEnabled;
+  private ObjectName dataNodeInfoBeanName;
 
   /**
    * Create the DataNode given a configuration, an array of dataDirs,
@@ -658,7 +661,7 @@ public class DataNode extends Configured
             " size (%s) is greater than zero and native code is not available.",
             DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
       }
-      long ulimit = NativeIO.getMemlockLimit();
+      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
       if (dnConf.maxLockedMemory > ulimit) {
       throw new RuntimeException(String.format(
           "Cannot start datanode because the configured max locked memory" +
@@ -890,7 +893,7 @@ public class DataNode extends Configured
   }
   
   private void registerMXBean() {
-    MBeans.register("DataNode", "DataNodeInfo", this);
+    dataNodeInfoBeanName = MBeans.register("DataNode", "DataNodeInfo", this);
   }
   
   @VisibleForTesting
@@ -1215,6 +1218,10 @@ public class DataNode extends Configured
     if (metrics != null) {
       metrics.shutdown();
     }
+    if (dataNodeInfoBeanName != null) {
+      MBeans.unregister(dataNodeInfoBeanName);
+      dataNodeInfoBeanName = null;
+    }
   }
   
   

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java Wed Nov 20 05:31:02 2013
@@ -163,7 +163,8 @@ public class FsDatasetCache {
   private final UsedBytesCount usedBytesCount;
 
   public static class PageRounder {
-    private final long osPageSize = NativeIO.getOperatingSystemPageSize();
+    private final long osPageSize =
+        NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
 
     /**
      * Round up a number to the operating system page size.
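
PageRounder now obtains the page size from the CacheManipulator as well, so a test manipulator can report a deterministic value instead of forcing tests to skip on hosts with unusual page sizes (the assumeTrue(osPageSize == 4096) removed from TestFsDatasetCache below). The rounding itself is the standard round-up-to-a-multiple idiom; a sketch consistent with the field above (the method name and body are assumptions, since the implementation is outside this diff):

    // Round count up to the nearest multiple of osPageSize. With 4096-byte
    // pages, the 15000-byte files in TestFsDatasetCache occupy 4 pages,
    // i.e. 16384 bytes of cache.
    public long round(long count) {
      return ((count + osPageSize - 1) / osPageSize) * osPageSize;
    }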

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java Wed Nov 20 05:31:02 2013
@@ -82,7 +82,7 @@ public class MappableBlock implements Cl
         throw new IOException("Block InputStream has no FileChannel.");
       }
       mmap = blockChannel.map(MapMode.READ_ONLY, 0, length);
-      NativeIO.POSIX.cacheManipulator.mlock(blockFileName, mmap, length);
+      NativeIO.POSIX.getCacheManipulator().mlock(blockFileName, mmap, length);
       verifyChecksum(length, metaIn, blockChannel, blockFileName);
       mappableBlock = new MappableBlock(mmap, length);
     } finally {

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Wed Nov 20 05:31:02 2013
@@ -5513,6 +5513,7 @@ public class FSNamesystem implements Nam
   }
   
   private ObjectName mbeanName;
+  private ObjectName mxbeanName;
 
   /**
    * Register the FSNamesystem MBean using the name
@@ -5536,6 +5537,11 @@ public class FSNamesystem implements Nam
   void shutdown() {
     if (mbeanName != null) {
       MBeans.unregister(mbeanName);
+      mbeanName = null;
+    }
+    if (mxbeanName != null) {
+      MBeans.unregister(mxbeanName);
+      mxbeanName = null;
     }
     if (dir != null) {
       dir.shutdown();
@@ -6345,7 +6351,7 @@ public class FSNamesystem implements Nam
    * Register NameNodeMXBean
    */
   private void registerMXBean() {
-    MBeans.register("NameNode", "NameNodeInfo", this);
+    mxbeanName = MBeans.register("NameNode", "NameNodeInfo", this);
   }
 
   /**

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Nov 20 05:31:02 2013
@@ -26,6 +26,9 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+
+import javax.management.ObjectName;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -261,6 +264,7 @@ public class NameNode implements NameNod
   private NameNodeRpcServer rpcServer;
 
   private JvmPauseMonitor pauseMonitor;
+  private ObjectName nameNodeStatusBeanName;
   
   /** Format a new filesystem.  Destroys any filesystem that may already
    * exist at this location.  **/
@@ -745,6 +749,10 @@ public class NameNode implements NameNod
       if (namesystem != null) {
         namesystem.shutdown();
       }
+      if (nameNodeStatusBeanName != null) {
+        MBeans.unregister(nameNodeStatusBeanName);
+        nameNodeStatusBeanName = null;
+      }
     }
   }
 
@@ -1414,7 +1422,7 @@ public class NameNode implements NameNod
    * Register NameNodeStatusMXBean
    */
   private void registerNNSMXBean() {
-    MBeans.register("NameNode", "NameNodeStatus", this);
+    nameNodeStatusBeanName = MBeans.register("NameNode", "NameNodeStatus", this);
   }
 
   @Override // NameNodeStatusMXBean

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java Wed Nov 20 05:31:02 2013
@@ -113,7 +113,8 @@ public class TestDatanodeConfig {
   @Test(timeout=60000)
   public void testMemlockLimit() throws Exception {
     assumeTrue(NativeIO.isAvailable());
-    final long memlockLimit = NativeIO.getMemlockLimit();
+    final long memlockLimit =
+        NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
 
     // Can't increase the memlock limit past the maximum.
     assumeTrue(memlockLimit != Long.MAX_VALUE);

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java Wed Nov 20 05:31:02 2013
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import java.io.FileDescriptor;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Map;
@@ -36,7 +37,8 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
-import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheTracker;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
+import org.apache.hadoop.io.nativeio.NativeIOException;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -54,7 +56,7 @@ public class TestCachingStrategy {
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
 
     // Track calls to posix_fadvise.
-    NativeIO.POSIX.cacheTracker = tracker;
+    NativeIO.POSIX.setCacheManipulator(tracker);
     
     // Normally, we wait for a few megabytes of data to be read or written 
     // before dropping the cache.  This is to avoid an excessive number of
@@ -106,12 +108,13 @@ public class TestCachingStrategy {
     }
   }
 
-  private static class TestRecordingCacheTracker implements CacheTracker {
+  private static class TestRecordingCacheTracker extends CacheManipulator {
     private final Map<String, Stats> map = new TreeMap<String, Stats>();
 
     @Override
-    synchronized public void fadvise(String name,
-        long offset, long len, int flags) {
+    public void posixFadviseIfPossible(String name,
+      FileDescriptor fd, long offset, long len, int flags)
+          throws NativeIOException {
       if ((len < 0) || (len > Integer.MAX_VALUE)) {
         throw new RuntimeException("invalid length of " + len +
             " passed to posixFadviseIfPossible");
@@ -126,6 +129,7 @@ public class TestCachingStrategy {
         map.put(name, stats);
       }
       stats.fadvise((int)offset, (int)len, flags);
+      super.posixFadviseIfPossible(name, fd, offset, len, flags);
     }
 
     synchronized void clear() {
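
The recorder now extends the real CacheManipulator and overrides posixFadviseIfPossible() instead of implementing the old CacheTracker interface, and the new super call at the end means the stats get recorded while the genuine fadvise behavior still runs. A short sketch of how the test wires it in, using only calls visible in this commit:

    // In test setup: route every datanode fadvise call through the recorder
    // (see the setCacheManipulator(tracker) hunk above).
    TestRecordingCacheTracker tracker = new TestRecordingCacheTracker();
    NativeIO.POSIX.setCacheManipulator(tracker);
    // ... run reads and writes; tracker's map then holds one Stats entry per
    // block file name for assertions to inspect, and clear() resets it.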

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Wed Nov 20 05:31:02 2013
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.MetricsAsserts;
@@ -99,7 +100,6 @@ public class TestFsDatasetCache {
   @Before
   public void setUp() throws Exception {
     assumeTrue(!Path.WINDOWS);
-    assumeTrue(NativeIO.getMemlockLimit() >= CACHE_CAPACITY);
     conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY, true);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS,
@@ -122,18 +122,8 @@ public class TestFsDatasetCache {
 
     spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);
 
-    prevCacheManipulator = NativeIO.POSIX.cacheManipulator;
-
-    // Save the current CacheManipulator and replace it at the end of the test
-    // Stub out mlock calls to avoid failing when not enough memory is lockable
-    // by the operating system.
-    NativeIO.POSIX.cacheManipulator = new CacheManipulator() {
-      @Override
-      public void mlock(String identifier,
-          ByteBuffer mmap, long length) throws IOException {
-        LOG.info("mlocking " + identifier);
-      }
-    };
+    prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
+    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
   }
 
   @After
@@ -145,7 +135,7 @@ public class TestFsDatasetCache {
       cluster.shutdown();
     }
     // Restore the original CacheManipulator
-    NativeIO.POSIX.cacheManipulator = prevCacheManipulator;
+    NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
   }
 
   private static void setHeartbeatResponse(DatanodeCommand[] cmds)
@@ -222,7 +212,8 @@ public class TestFsDatasetCache {
           if (tries++ > 10) {
             LOG.info("verifyExpectedCacheUsage: expected " +
                 expected + ", got " + curDnCacheUsed + "; " +
-                "memlock limit = " + NativeIO.getMemlockLimit() +
+                "memlock limit = " +
+                NativeIO.POSIX.getCacheManipulator().getMemlockLimit() +
                 ".  Waiting...");
           }
           return false;
@@ -297,40 +288,31 @@ public class TestFsDatasetCache {
    */
   @Test(timeout=600000)
   public void testCacheAndUncacheBlockWithRetries() throws Exception {
-    CacheManipulator prevCacheManipulator = NativeIO.POSIX.cacheManipulator;
-    
-    try {
-      NativeIO.POSIX.cacheManipulator = new CacheManipulator() {
-        private final Set<String> seenIdentifiers = new HashSet<String>();
-        
-        @Override
-        public void mlock(String identifier,
-            ByteBuffer mmap, long length) throws IOException {
-          if (seenIdentifiers.contains(identifier)) {
-            // mlock succeeds the second time.
-            LOG.info("mlocking " + identifier);
-            return;
-          }
-          seenIdentifiers.add(identifier);
-          throw new IOException("injecting IOException during mlock of " +
-              identifier);
+    // We don't have to save the previous cacheManipulator
+    // because it will be reinstalled by the @After function.
+    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
+      private final Set<String> seenIdentifiers = new HashSet<String>();
+      
+      @Override
+      public void mlock(String identifier,
+          ByteBuffer mmap, long length) throws IOException {
+        if (seenIdentifiers.contains(identifier)) {
+          // mlock succeeds the second time.
+          LOG.info("mlocking " + identifier);
+          return;
         }
-      };
-      testCacheAndUncacheBlock();
-    } finally {
-      NativeIO.POSIX.cacheManipulator = prevCacheManipulator;
-    }
+        seenIdentifiers.add(identifier);
+        throw new IOException("injecting IOException during mlock of " +
+            identifier);
+      }
+    });
+    testCacheAndUncacheBlock();
   }
 
   @Test(timeout=600000)
   public void testFilesExceedMaxLockedMemory() throws Exception {
     LOG.info("beginning testFilesExceedMaxLockedMemory");
 
-    // We don't want to deal with page rounding issues, so skip this
-    // test if page size is weird
-    long osPageSize = NativeIO.getOperatingSystemPageSize();
-    assumeTrue(osPageSize == 4096); 
-
     // Create some test files that will exceed total cache capacity
     final int numFiles = 5;
     final long fileSize = 15000;
@@ -411,7 +393,7 @@ public class TestFsDatasetCache {
     assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
     assertEquals("Unexpected amount of cache used", current, cacheUsed);
 
-    NativeIO.POSIX.cacheManipulator = new NativeIO.POSIX.CacheManipulator() {
+    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
       @Override
       public void mlock(String identifier,
           ByteBuffer mmap, long length) throws IOException {
@@ -422,7 +404,7 @@ public class TestFsDatasetCache {
           Assert.fail();
         }
       }
-    };
+    });
     // Starting caching each block in succession.  The usedBytes amount
     // should increase, even though caching doesn't complete on any of them.
     for (int i=0; i<NUM_BLOCKS; i++) {
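
The retry test now starts from NoMlockCacheManipulator, the stock stub this commit imports, and overrides only mlock(); the first call per block throws to exercise the datanode's retry path, and the old try/finally is gone because the @After method restores the saved manipulator. A condensed sketch of the injection pattern used above:

    // Fail the first mlock per block identifier, succeed on the retry.
    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
      private final Set<String> seen = new HashSet<String>();

      @Override
      public void mlock(String identifier, ByteBuffer mmap, long length)
          throws IOException {
        if (!seen.add(identifier)) {
          return;                 // already seen: pretend the mlock succeeded
        }
        throw new IOException("injected IOException during mlock of "
            + identifier);
      }
    });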

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java Wed Nov 20 05:31:02 2013
@@ -31,9 +31,8 @@ import static org.junit.Assert.assertTru
 import static org.junit.Assert.fail;
 
 import java.io.IOException;
-import java.nio.MappedByteBuffer;
-import java.security.PrivilegedExceptionAction;
 import java.nio.ByteBuffer;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.LinkedList;
@@ -59,16 +58,15 @@ import org.apache.hadoop.hdfs.protocol.C
 import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.MappableBlock;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.GSet;
 import org.junit.After;
-import org.junit.Assume;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -100,18 +98,8 @@ public class TestPathBasedCacheRequests 
     cluster.waitActive();
     dfs = cluster.getFileSystem();
     proto = cluster.getNameNodeRpc();
-    prevCacheManipulator = NativeIO.POSIX.cacheManipulator;
-
-    // Save the current CacheManipulator and replace it at the end of the test
-    // Stub out mlock calls to avoid failing when not enough memory is lockable
-    // by the operating system.
-    NativeIO.POSIX.cacheManipulator = new CacheManipulator() {
-      @Override
-      public void mlock(String identifier,
-          ByteBuffer mmap, long length) throws IOException {
-        LOG.info("mlocking " + identifier);
-      }
-    };
+    prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
+    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
   }
 
   @After
@@ -120,7 +108,7 @@ public class TestPathBasedCacheRequests 
       cluster.shutdown();
     }
     // Restore the original CacheManipulator
-    NativeIO.POSIX.cacheManipulator = prevCacheManipulator;
+    NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
   }
 
   @Test(timeout=60000)
@@ -482,6 +470,15 @@ public class TestPathBasedCacheRequests 
     dfs.removePathBasedCacheDirective(relativeId);
     iter = dfs.listPathBasedCacheDirectives(null);
     assertFalse(iter.hasNext());
+
+    // Verify that PBCDs with path "." work correctly
+    PathBasedCacheDirective directive =
+        new PathBasedCacheDirective.Builder().setPath(new Path("."))
+            .setPool("pool1").build();
+    long id = dfs.addPathBasedCacheDirective(directive);
+    dfs.modifyPathBasedCacheDirective(new PathBasedCacheDirective.Builder(
+        directive).setId(id).setReplication((short)2).build());
+    dfs.removePathBasedCacheDirective(id);
   }
 
   @Test(timeout=60000)
@@ -647,20 +644,6 @@ public class TestPathBasedCacheRequests 
   // Most Linux installs will allow non-root users to lock 64KB.
   private static final long CACHE_CAPACITY = 64 * 1024 / NUM_DATANODES;
 
-  /**
-   * Return true if we can test DN caching.
-   */
-  private static boolean canTestDatanodeCaching() {
-    if (!NativeIO.isAvailable()) {
-      // Need NativeIO in order to cache blocks on the DN.
-      return false;
-    }
-    if (NativeIO.getMemlockLimit() < CACHE_CAPACITY) {
-      return false;
-    }
-    return true;
-  }
-
   private static HdfsConfiguration createCachingConf() {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -674,7 +657,6 @@ public class TestPathBasedCacheRequests 
 
   @Test(timeout=120000)
   public void testWaitForCachedReplicas() throws Exception {
-    Assume.assumeTrue(canTestDatanodeCaching());
     HdfsConfiguration conf = createCachingConf();
     FileSystemTestHelper helper = new FileSystemTestHelper();
     MiniDFSCluster cluster =
@@ -732,7 +714,6 @@ public class TestPathBasedCacheRequests 
   @Test(timeout=120000)
   public void testAddingPathBasedCacheDirectivesWhenCachingIsDisabled()
       throws Exception {
-    Assume.assumeTrue(canTestDatanodeCaching());
     HdfsConfiguration conf = createCachingConf();
     conf.setBoolean(DFS_NAMENODE_CACHING_ENABLED_KEY, false);
     MiniDFSCluster cluster =
@@ -780,7 +761,6 @@ public class TestPathBasedCacheRequests 
 
   @Test(timeout=120000)
   public void testWaitForCachedReplicasInDirectory() throws Exception {
-    Assume.assumeTrue(canTestDatanodeCaching());
     HdfsConfiguration conf = createCachingConf();
     MiniDFSCluster cluster =
       new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
@@ -832,7 +812,6 @@ public class TestPathBasedCacheRequests 
    */
   @Test(timeout=120000)
   public void testReplicationFactor() throws Exception {
-    Assume.assumeTrue(canTestDatanodeCaching());
     HdfsConfiguration conf = createCachingConf();
     MiniDFSCluster cluster =
       new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
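
With mlock stubbed out globally in setUp(), the per-test canTestDatanodeCaching() guard and its Assume calls become unnecessary, so the caching tests above now run even where only a little memory is lockable. The save-and-restore pattern in setUp()/tearDown() is the piece worth reusing; a sketch using only calls from this commit:

    // setUp(): remember the real manipulator, then install the no-op stub so
    // mlock never fails for lack of lockable memory.
    prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
    NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());

    // tearDown(): always restore, so later tests see the real native path.
    NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);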

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java?rev=1543710&r1=1543709&r2=1543710&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java Wed Nov 20 05:31:02 2013
@@ -28,7 +28,12 @@ import java.io.IOException;
 import java.io.PipedInputStream;
 import java.io.PipedOutputStream;
 import java.io.PrintStream;
+import java.lang.management.ManagementFactory;
 import java.util.Random;
+import java.util.Set;
+
+import javax.management.MBeanServerConnection;
+import javax.management.ObjectName;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -92,9 +97,8 @@ public class TestJMXGet {
         fileSize, fileSize, blockSize, (short) 2, seed);
 
     JMXGet jmx = new JMXGet();
-    //jmx.setService("*"); // list all hadoop services
-    //jmx.init();
-    //jmx = new JMXGet();
+    String serviceName = "NameNode";
+    jmx.setService(serviceName);
     jmx.init(); // default lists namenode mbeans only
     assertTrue("error printAllValues", checkPrintAllValues(jmx));
 
@@ -107,6 +111,10 @@ public class TestJMXGet {
         jmx.getValue("NumOpenConnections")));
 
     cluster.shutdown();
+    MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();
+    ObjectName query = new ObjectName("Hadoop:service=" + serviceName + ",*");
+    Set<ObjectName> names = mbsc.queryNames(query, null);
+    assertTrue("No beans should be registered for " + serviceName, names.isEmpty());
   }
   
   private static boolean checkPrintAllValues(JMXGet jmx) throws Exception {
@@ -140,13 +148,15 @@ public class TestJMXGet {
         fileSize, fileSize, blockSize, (short) 2, seed);
 
     JMXGet jmx = new JMXGet();
-    //jmx.setService("*"); // list all hadoop services
-    //jmx.init();
-    //jmx = new JMXGet();
-    jmx.setService("DataNode");
+    String serviceName = "DataNode";
+    jmx.setService(serviceName);
     jmx.init();
     assertEquals(fileSize, Integer.parseInt(jmx.getValue("BytesWritten")));
 
     cluster.shutdown();
+    MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer();
+    ObjectName query = new ObjectName("Hadoop:service=" + serviceName + ",*");
+    Set<ObjectName> names = mbsc.queryNames(query, null);
+    assertTrue("No beans should be registered for " + serviceName, names.isEmpty());
   }
 }