Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2013/11/20 01:48:05 UTC
svn commit: r1543676 - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./
src/main/java/org/apache/hadoop/hdfs/server/datanode/
src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/
src/test/java/org/apache/hadoop/hdfs/ src/test...
Author: cmccabe
Date: Wed Nov 20 00:48:05 2013
New Revision: 1543676
URL: http://svn.apache.org/r1543676
Log:
HDFS-5511. improve CacheManipulator interface to allow better unit testing (cmccabe)
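
In short, this change routes the DataNode's page-cache and memlock operations through a pluggable CacheManipulator, reached via NativeIO.POSIX.getCacheManipulator() and setCacheManipulator(), so unit tests can substitute a stub instead of touching the real OS. A minimal sketch of the resulting pattern, assuming only the accessors and the NoMlockCacheManipulator visible in the diffs below (the class name CacheManipulatorSketch is hypothetical, not part of this commit):

    import java.io.IOException;
    import java.nio.ByteBuffer;

    import org.apache.hadoop.io.nativeio.NativeIO;
    import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
    import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;

    public class CacheManipulatorSketch {
      public static void main(String[] args) {
        // Save the current manipulator so it can be restored afterwards,
        // mirroring what the @Before/@After methods in the tests below do.
        CacheManipulator prev = NativeIO.POSIX.getCacheManipulator();
        try {
          // Stub out mlock so the code under test does not depend on the
          // operating system's locked-memory ulimit.
          NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
            @Override
            public void mlock(String identifier, ByteBuffer mmap, long length)
                throws IOException {
              System.out.println("stub mlock of " + identifier);
            }
          });
          // ... exercise code that calls
          // NativeIO.POSIX.getCacheManipulator().mlock(...) ...
        } finally {
          NativeIO.POSIX.setCacheManipulator(prev);
        }
      }
    }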
Modified:
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Wed Nov 20 00:48:05 2013
@@ -199,6 +199,9 @@ Trunk (Unreleased)
HDFS-5366. recaching improvements (cmccabe)
+ HDFS-5511. improve CacheManipulator interface to allow better unit testing
+ (cmccabe)
+
OPTIMIZATIONS
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java Wed Nov 20 00:48:05 2013
@@ -657,8 +657,9 @@ class BlockReceiver implements Closeable
//
long dropPos = lastCacheManagementOffset - CACHE_DROP_LAG_BYTES;
if (dropPos > 0 && dropCacheBehindWrites) {
- NativeIO.POSIX.posixFadviseIfPossible(block.getBlockName(),
- outFd, 0, dropPos, NativeIO.POSIX.POSIX_FADV_DONTNEED);
+ NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+ block.getBlockName(), outFd, 0, dropPos,
+ NativeIO.POSIX.POSIX_FADV_DONTNEED);
}
lastCacheManagementOffset = offsetInBlock;
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java Wed Nov 20 00:48:05 2013
@@ -375,8 +375,9 @@ class BlockSender implements java.io.Clo
((dropCacheBehindAllReads) ||
(dropCacheBehindLargeReads && isLongRead()))) {
try {
- NativeIO.POSIX.posixFadviseIfPossible(block.getBlockName(),
- blockInFd, lastCacheDropOffset, offset - lastCacheDropOffset,
+ NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+ block.getBlockName(), blockInFd, lastCacheDropOffset,
+ offset - lastCacheDropOffset,
NativeIO.POSIX.POSIX_FADV_DONTNEED);
} catch (Exception e) {
LOG.warn("Unable to drop cache on file close", e);
@@ -674,8 +675,9 @@ class BlockSender implements java.io.Clo
if (isLongRead() && blockInFd != null) {
// Advise that this file descriptor will be accessed sequentially.
- NativeIO.POSIX.posixFadviseIfPossible(block.getBlockName(),
- blockInFd, 0, 0, NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
+ NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+ block.getBlockName(), blockInFd, 0, 0,
+ NativeIO.POSIX.POSIX_FADV_SEQUENTIAL);
}
// Trigger readahead of beginning of file if configured.
@@ -761,9 +763,9 @@ class BlockSender implements java.io.Clo
long nextCacheDropOffset = lastCacheDropOffset + CACHE_DROP_INTERVAL_BYTES;
if (offset >= nextCacheDropOffset) {
long dropLength = offset - lastCacheDropOffset;
- NativeIO.POSIX.posixFadviseIfPossible(block.getBlockName(),
- blockInFd, lastCacheDropOffset, dropLength,
- NativeIO.POSIX.POSIX_FADV_DONTNEED);
+ NativeIO.POSIX.getCacheManipulator().posixFadviseIfPossible(
+ block.getBlockName(), blockInFd, lastCacheDropOffset,
+ dropLength, NativeIO.POSIX.POSIX_FADV_DONTNEED);
lastCacheDropOffset = offset;
}
}
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java Wed Nov 20 00:48:05 2013
@@ -667,7 +667,7 @@ public class DataNode extends Configured
" size (%s) is greater than zero and native code is not available.",
DFS_DATANODE_MAX_LOCKED_MEMORY_KEY));
}
- long ulimit = NativeIO.getMemlockLimit();
+ long ulimit = NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
if (dnConf.maxLockedMemory > ulimit) {
throw new RuntimeException(String.format(
"Cannot start datanode because the configured max locked memory" +
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java Wed Nov 20 00:48:05 2013
@@ -163,7 +163,8 @@ public class FsDatasetCache {
private final UsedBytesCount usedBytesCount;
public static class PageRounder {
- private final long osPageSize = NativeIO.getOperatingSystemPageSize();
+ private final long osPageSize =
+ NativeIO.POSIX.getCacheManipulator().getOperatingSystemPageSize();
/**
* Round up a number to the operating system page size.
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlock.java Wed Nov 20 00:48:05 2013
@@ -82,7 +82,7 @@ public class MappableBlock implements Cl
throw new IOException("Block InputStream has no FileChannel.");
}
mmap = blockChannel.map(MapMode.READ_ONLY, 0, length);
- NativeIO.POSIX.cacheManipulator.mlock(blockFileName, mmap, length);
+ NativeIO.POSIX.getCacheManipulator().mlock(blockFileName, mmap, length);
verifyChecksum(length, metaIn, blockChannel, blockFileName);
mappableBlock = new MappableBlock(mmap, length);
} finally {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java Wed Nov 20 00:48:05 2013
@@ -113,7 +113,8 @@ public class TestDatanodeConfig {
@Test(timeout=60000)
public void testMemlockLimit() throws Exception {
assumeTrue(NativeIO.isAvailable());
- final long memlockLimit = NativeIO.getMemlockLimit();
+ final long memlockLimit =
+ NativeIO.POSIX.getCacheManipulator().getMemlockLimit();
// Can't increase the memlock limit past the maximum.
assumeTrue(memlockLimit != Long.MAX_VALUE);
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestCachingStrategy.java Wed Nov 20 00:48:05 2013
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
+import java.io.FileDescriptor;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
@@ -36,7 +37,8 @@ import org.apache.hadoop.hdfs.protocol.E
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.nativeio.NativeIO;
-import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheTracker;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
+import org.apache.hadoop.io.nativeio.NativeIOException;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -54,7 +56,7 @@ public class TestCachingStrategy {
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
// Track calls to posix_fadvise.
- NativeIO.POSIX.cacheTracker = tracker;
+ NativeIO.POSIX.setCacheManipulator(tracker);
// Normally, we wait for a few megabytes of data to be read or written
// before dropping the cache. This is to avoid an excessive number of
@@ -106,12 +108,13 @@ public class TestCachingStrategy {
}
}
- private static class TestRecordingCacheTracker implements CacheTracker {
+ private static class TestRecordingCacheTracker extends CacheManipulator {
private final Map<String, Stats> map = new TreeMap<String, Stats>();
@Override
- synchronized public void fadvise(String name,
- long offset, long len, int flags) {
+ public void posixFadviseIfPossible(String name,
+ FileDescriptor fd, long offset, long len, int flags)
+ throws NativeIOException {
if ((len < 0) || (len > Integer.MAX_VALUE)) {
throw new RuntimeException("invalid length of " + len +
" passed to posixFadviseIfPossible");
@@ -126,6 +129,7 @@ public class TestCachingStrategy {
map.put(name, stats);
}
stats.fadvise((int)offset, (int)len, flags);
+ super.posixFadviseIfPossible(name, fd, offset, len, flags);
}
synchronized void clear() {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Wed Nov 20 00:48:05 2013
@@ -63,6 +63,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.MetricsAsserts;
@@ -99,7 +100,6 @@ public class TestFsDatasetCache {
@Before
public void setUp() throws Exception {
assumeTrue(!Path.WINDOWS);
- assumeTrue(NativeIO.getMemlockLimit() >= CACHE_CAPACITY);
conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY, true);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS,
@@ -122,18 +122,8 @@ public class TestFsDatasetCache {
spyNN = DataNodeTestUtils.spyOnBposToNN(dn, nn);
- prevCacheManipulator = NativeIO.POSIX.cacheManipulator;
-
- // Save the current CacheManipulator and replace it at the end of the test
- // Stub out mlock calls to avoid failing when not enough memory is lockable
- // by the operating system.
- NativeIO.POSIX.cacheManipulator = new CacheManipulator() {
- @Override
- public void mlock(String identifier,
- ByteBuffer mmap, long length) throws IOException {
- LOG.info("mlocking " + identifier);
- }
- };
+ prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
+ NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
}
@After
@@ -145,7 +135,7 @@ public class TestFsDatasetCache {
cluster.shutdown();
}
// Restore the original CacheManipulator
- NativeIO.POSIX.cacheManipulator = prevCacheManipulator;
+ NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
}
private static void setHeartbeatResponse(DatanodeCommand[] cmds)
@@ -222,7 +212,8 @@ public class TestFsDatasetCache {
if (tries++ > 10) {
LOG.info("verifyExpectedCacheUsage: expected " +
expected + ", got " + curDnCacheUsed + "; " +
- "memlock limit = " + NativeIO.getMemlockLimit() +
+ "memlock limit = " +
+ NativeIO.POSIX.getCacheManipulator().getMemlockLimit() +
". Waiting...");
}
return false;
@@ -297,40 +288,31 @@ public class TestFsDatasetCache {
*/
@Test(timeout=600000)
public void testCacheAndUncacheBlockWithRetries() throws Exception {
- CacheManipulator prevCacheManipulator = NativeIO.POSIX.cacheManipulator;
-
- try {
- NativeIO.POSIX.cacheManipulator = new CacheManipulator() {
- private final Set<String> seenIdentifiers = new HashSet<String>();
-
- @Override
- public void mlock(String identifier,
- ByteBuffer mmap, long length) throws IOException {
- if (seenIdentifiers.contains(identifier)) {
- // mlock succeeds the second time.
- LOG.info("mlocking " + identifier);
- return;
- }
- seenIdentifiers.add(identifier);
- throw new IOException("injecting IOException during mlock of " +
- identifier);
+ // We don't have to save the previous cacheManipulator
+ // because it will be reinstalled by the @After function.
+ NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
+ private final Set<String> seenIdentifiers = new HashSet<String>();
+
+ @Override
+ public void mlock(String identifier,
+ ByteBuffer mmap, long length) throws IOException {
+ if (seenIdentifiers.contains(identifier)) {
+ // mlock succeeds the second time.
+ LOG.info("mlocking " + identifier);
+ return;
}
- };
- testCacheAndUncacheBlock();
- } finally {
- NativeIO.POSIX.cacheManipulator = prevCacheManipulator;
- }
+ seenIdentifiers.add(identifier);
+ throw new IOException("injecting IOException during mlock of " +
+ identifier);
+ }
+ });
+ testCacheAndUncacheBlock();
}
@Test(timeout=600000)
public void testFilesExceedMaxLockedMemory() throws Exception {
LOG.info("beginning testFilesExceedMaxLockedMemory");
- // We don't want to deal with page rounding issues, so skip this
- // test if page size is weird
- long osPageSize = NativeIO.getOperatingSystemPageSize();
- assumeTrue(osPageSize == 4096);
-
// Create some test files that will exceed total cache capacity
final int numFiles = 5;
final long fileSize = 15000;
@@ -411,7 +393,7 @@ public class TestFsDatasetCache {
assertEquals("Unexpected cache capacity", CACHE_CAPACITY, cacheCapacity);
assertEquals("Unexpected amount of cache used", current, cacheUsed);
- NativeIO.POSIX.cacheManipulator = new NativeIO.POSIX.CacheManipulator() {
+ NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator() {
@Override
public void mlock(String identifier,
ByteBuffer mmap, long length) throws IOException {
@@ -422,7 +404,7 @@ public class TestFsDatasetCache {
Assert.fail();
}
}
- };
+ });
// Starting caching each block in succession. The usedBytes amount
// should increase, even though caching doesn't complete on any of them.
for (int i=0; i<NUM_BLOCKS; i++) {
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java?rev=1543676&r1=1543675&r2=1543676&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java Wed Nov 20 00:48:05 2013
@@ -61,12 +61,12 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.GSet;
import org.junit.After;
-import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
@@ -98,18 +98,8 @@ public class TestPathBasedCacheRequests
cluster.waitActive();
dfs = cluster.getFileSystem();
proto = cluster.getNameNodeRpc();
- prevCacheManipulator = NativeIO.POSIX.cacheManipulator;
-
- // Save the current CacheManipulator and replace it at the end of the test
- // Stub out mlock calls to avoid failing when not enough memory is lockable
- // by the operating system.
- NativeIO.POSIX.cacheManipulator = new CacheManipulator() {
- @Override
- public void mlock(String identifier,
- ByteBuffer mmap, long length) throws IOException {
- LOG.info("mlocking " + identifier);
- }
- };
+ prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
+ NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
}
@After
@@ -118,7 +108,7 @@ public class TestPathBasedCacheRequests
cluster.shutdown();
}
// Restore the original CacheManipulator
- NativeIO.POSIX.cacheManipulator = prevCacheManipulator;
+ NativeIO.POSIX.setCacheManipulator(prevCacheManipulator);
}
@Test(timeout=60000)
@@ -654,20 +644,6 @@ public class TestPathBasedCacheRequests
// Most Linux installs will allow non-root users to lock 64KB.
private static final long CACHE_CAPACITY = 64 * 1024 / NUM_DATANODES;
- /**
- * Return true if we can test DN caching.
- */
- private static boolean canTestDatanodeCaching() {
- if (!NativeIO.isAvailable()) {
- // Need NativeIO in order to cache blocks on the DN.
- return false;
- }
- if (NativeIO.getMemlockLimit() < CACHE_CAPACITY) {
- return false;
- }
- return true;
- }
-
private static HdfsConfiguration createCachingConf() {
HdfsConfiguration conf = new HdfsConfiguration();
conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
@@ -681,7 +657,6 @@ public class TestPathBasedCacheRequests
@Test(timeout=120000)
public void testWaitForCachedReplicas() throws Exception {
- Assume.assumeTrue(canTestDatanodeCaching());
HdfsConfiguration conf = createCachingConf();
FileSystemTestHelper helper = new FileSystemTestHelper();
MiniDFSCluster cluster =
@@ -739,7 +714,6 @@ public class TestPathBasedCacheRequests
@Test(timeout=120000)
public void testAddingPathBasedCacheDirectivesWhenCachingIsDisabled()
throws Exception {
- Assume.assumeTrue(canTestDatanodeCaching());
HdfsConfiguration conf = createCachingConf();
conf.setBoolean(DFS_NAMENODE_CACHING_ENABLED_KEY, false);
MiniDFSCluster cluster =
@@ -787,7 +761,6 @@ public class TestPathBasedCacheRequests
@Test(timeout=120000)
public void testWaitForCachedReplicasInDirectory() throws Exception {
- Assume.assumeTrue(canTestDatanodeCaching());
HdfsConfiguration conf = createCachingConf();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
@@ -839,7 +812,6 @@ public class TestPathBasedCacheRequests
*/
@Test(timeout=120000)
public void testReplicationFactor() throws Exception {
- Assume.assumeTrue(canTestDatanodeCaching());
HdfsConfiguration conf = createCachingConf();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();