Posted to hdfs-commits@hadoop.apache.org by cn...@apache.org on 2014/07/11 05:09:48 UTC

svn commit: r1609619 - in /hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs: ./ src/main/bin/ src/main/java/org/apache/hadoop/hdfs/server/datanode/ src/site/apt/ src/test/java/org/apache/hadoop/hdfs/server/datanode/

Author: cnauroth
Date: Fri Jul 11 03:09:48 2014
New Revision: 1609619

URL: http://svn.apache.org/r1609619
Log:
HDFS-5202. Merging change r1609618 from trunk to branch-2.

Modified:
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
    hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1609619&r1=1609618&r2=1609619&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Fri Jul 11 03:09:48 2014
@@ -27,6 +27,8 @@ Release 2.6.0 - UNRELEASED
     HDFS-6640. Syntax for MKDIRS, CREATESYMLINK, and SETXATTR are given wrongly
     in WebHdfs document (missed webhdfs/v1). (Stephen Chu via jing9)
 
+    HDFS-5202. Support Centralized Cache Management on Windows. (cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd?rev=1609619&r1=1609618&r2=1609619&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd Fri Jul 11 03:09:48 2014
@@ -47,7 +47,7 @@ if "%1" == "--config" (
       goto print_usage
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
   )
@@ -146,6 +146,10 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
   goto :eof
 
+:cacheadmin
+  set CLASS=org.apache.hadoop.hdfs.tools.CacheAdmin
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (
@@ -193,6 +197,7 @@ goto :eof
   @echo                        current directory contents with a snapshot
   @echo   lsSnapshottableDir   list all snapshottable dirs owned by the current user
   @echo 						Use -help to see options
+  @echo   cacheadmin           configure the HDFS cache
   @echo.
   @echo Most commands print help when invoked w/o parameters.
 

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1609619&r1=1609618&r2=1609619&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Fri Jul 11 03:09:48 2014
@@ -752,15 +752,19 @@ public class DataNode extends Configured
             " size (%s) is greater than zero and native code is not available.",
             DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
       }
-      long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
-      if (dnConf.maxLockedMemory > ulimit) {
-      throw new RuntimeException(String.format(
-          "Cannot start datanode because the configured max locked memory" +
-          " size (%s) of %d bytes is more than the datanode's available" +
-          " RLIMIT_MEMLOCK ulimit of %d bytes.",
-          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-          dnConf.maxLockedMemory,
-          ulimit));
+      if (Path.WINDOWS) {
+        NativeIO.Windows.extendWorkingSetSize(dnConf.maxLockedMemory);
+      } else {
+        long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
+        if (dnConf.maxLockedMemory > ulimit) {
+          throw new RuntimeException(String.format(
+            "Cannot start datanode because the configured max locked memory" +
+            " size (%s) of %d bytes is more than the datanode's available" +
+            " RLIMIT_MEMLOCK ulimit of %d bytes.",
+            DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+            dnConf.maxLockedMemory,
+            ulimit));
+        }
       }
     }
     LOG.info("Starting DataNode with maxLockedMemory = " +

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm?rev=1609619&r1=1609618&r2=1609619&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/CentralizedCacheManagement.apt.vm Fri Jul 11 03:09:48 2014
@@ -270,7 +270,7 @@ Centralized Cache Management in HDFS
 ** {Native Libraries}
 
   In order to lock block files into memory, the DataNode relies on native JNI
-  code found in <<<libhadoop.so>>>. Be sure to
+  code found in <<<libhadoop.so>>> or <<<hadoop.dll>>> on Windows. Be sure to
   {{{../hadoop-common/NativeLibraries.html}enable JNI}} if you are using HDFS
   centralized cache management.
 
@@ -283,11 +283,11 @@ Centralized Cache Management in HDFS
   * dfs.datanode.max.locked.memory
 
     This determines the maximum amount of memory a DataNode will use for caching.
-    The "locked-in-memory size" ulimit (<<<ulimit -l>>>) of the DataNode user
-    also needs to be increased to match this parameter (see below section on
-    {{OS Limits}}). When setting this value, please remember that you will need
-    space in memory for other things as well, such as the DataNode and
-    application JVM heaps and the operating system page cache.
+    On Unix-like systems, the "locked-in-memory size" ulimit (<<<ulimit -l>>>) of
+    the DataNode user also needs to be increased to match this parameter (see
+    below section on {{OS Limits}}). When setting this value, please remember
+    that you will need space in memory for other things as well, such as the
+    DataNode and application JVM heaps and the operating system page cache.
 
 *** Optional
 
@@ -339,3 +339,6 @@ Centralized Cache Management in HDFS
   "unlimited," indicating that there is no limit.  Note that it's typical for
   <<<ulimit -l>>> to output the memory lock limit in KB, but
   dfs.datanode.max.locked.memory must be specified in bytes.
+
+  This information does not apply to deployments on Windows.  Windows has no
+  direct equivalent of <<<ulimit -l>>>.
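
As a hedged, worked example of the unit mismatch the documentation above warns about: if <<<ulimit -l>>> reports 131072 (kilobytes), the matching dfs.datanode.max.locked.memory value is 134217728 (bytes). The numbers in the small sketch below are illustrative, not from the patch.

    // "ulimit -l" typically prints kilobytes; the HDFS key is specified in bytes.
    long ulimitKb = 131072L;                      // output of "ulimit -l"
    long maxLockedMemoryBytes = ulimitKb * 1024L; // 134217728, value for the config key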

Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1609619&r1=1609618&r2=1609619&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Fri Jul 11 03:09:48 2014
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.da
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyInt;
 import static org.mockito.Matchers.anyLong;
@@ -68,6 +67,7 @@ import org.apache.hadoop.hdfs.server.pro
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
@@ -114,7 +114,6 @@ public class TestFsDatasetCache {
 
   @Before
   public void setUp() throws Exception {
-    assumeTrue(!Path.WINDOWS);
     conf = new HdfsConfiguration();
     conf.setLong(
         DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 100);
@@ -143,6 +142,9 @@ public class TestFsDatasetCache {
 
   @After
   public void tearDown() throws Exception {
+    // Verify that each test uncached whatever it cached.  This cleanup is
+    // required so that file descriptors are not leaked across tests.
+    DFSTestUtil.verifyExpectedCacheUsage(0, 0, fsd);
     if (fs != null) {
       fs.close();
     }
@@ -205,9 +207,16 @@ public class TestFsDatasetCache {
       String bpid = loc.getLocatedBlock().getBlock().getBlockPoolId();
       Block block = loc.getLocatedBlock().getBlock().getLocalBlock();
       ExtendedBlock extBlock = new ExtendedBlock(bpid, block);
-      FileChannel blockChannel =
-          ((FileInputStream)fsd.getBlockInputStream(extBlock, 0)).getChannel();
-      sizes[i] = blockChannel.size();
+      FileInputStream blockInputStream = null;
+      FileChannel blockChannel = null;
+      try {
+        blockInputStream =
+          (FileInputStream)fsd.getBlockInputStream(extBlock, 0);
+        blockChannel = blockInputStream.getChannel();
+        sizes[i] = blockChannel.size();
+      } finally {
+        IOUtils.cleanup(LOG, blockChannel, blockInputStream);
+      }
     }
     return sizes;
   }
@@ -571,5 +580,7 @@ public class TestFsDatasetCache {
         return true;
       }
     }, 1000, 30000);
+
+    dfs.removeCacheDirective(shortCacheDirectiveId);
   }
 }
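
The try/finally added to getBlockSizes above hands both the channel and the stream to org.apache.hadoop.io.IOUtils.cleanup, which closes each non-null Closeable and logs rather than rethrows any close failure, so the descriptors are released even when size() throws. A minimal, hedged sketch of the same idiom outside the test (the file path is made up):

    import java.io.FileInputStream;
    import java.nio.channels.FileChannel;
    import org.apache.hadoop.io.IOUtils;

    FileInputStream in = null;
    FileChannel channel = null;
    try {
      in = new FileInputStream("/tmp/example-block");  // hypothetical path
      channel = in.getChannel();
      long size = channel.size();                      // work with the channel
    } finally {
      IOUtils.cleanup(null, channel, in);              // a null Log suppresses close logging
    }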