Posted to hdfs-commits@hadoop.apache.org by cm...@apache.org on 2013/11/27 18:55:53 UTC
svn commit: r1546143 [2/2] - in
/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs: ./ dev-support/
src/main/java/org/apache/hadoop/hdfs/
src/main/java/org/apache/hadoop/hdfs/client/
src/main/java/org/apache/hadoop/hdfs/protocol/ src/main/java/org/ap...
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java?rev=1546143&r1=1546142&r2=1546143&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java Wed Nov 27 17:55:52 2013
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.HdfsConfig
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
@@ -263,8 +264,8 @@ public class TestCacheDirectives {
setOwnerName(ownerName).setGroupName(groupName).
setMode(mode).setWeight(weight));
- RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
- CachePoolInfo info = iter.next();
+ RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
+ CachePoolInfo info = iter.next().getInfo();
assertEquals(poolName, info.getPoolName());
assertEquals(ownerName, info.getOwnerName());
assertEquals(groupName, info.getGroupName());
@@ -278,7 +279,7 @@ public class TestCacheDirectives {
setMode(mode).setWeight(weight));
iter = dfs.listCachePools();
- info = iter.next();
+ info = iter.next().getInfo();
assertEquals(poolName, info.getPoolName());
assertEquals(ownerName, info.getOwnerName());
assertEquals(groupName, info.getGroupName());
@@ -507,9 +508,9 @@ public class TestCacheDirectives {
.setGroupName(groupName)
.setMode(mode)
.setWeight(weight));
- RemoteIterator<CachePoolInfo> pit = dfs.listCachePools();
+ RemoteIterator<CachePoolEntry> pit = dfs.listCachePools();
assertTrue("No cache pools found", pit.hasNext());
- CachePoolInfo info = pit.next();
+ CachePoolInfo info = pit.next().getInfo();
assertEquals(pool, info.getPoolName());
assertEquals(groupName, info.getGroupName());
assertEquals(mode, info.getMode());
@@ -542,7 +543,7 @@ public class TestCacheDirectives {
// Check that state came back up
pit = dfs.listCachePools();
assertTrue("No cache pools found", pit.hasNext());
- info = pit.next();
+ info = pit.next().getInfo();
assertEquals(pool, info.getPoolName());
assertEquals(pool, info.getPoolName());
assertEquals(groupName, info.getGroupName());
@@ -713,7 +714,16 @@ public class TestCacheDirectives {
try {
cluster.waitActive();
DistributedFileSystem dfs = cluster.getFileSystem();
- NameNode namenode = cluster.getNameNode();
+ final NameNode namenode = cluster.getNameNode();
+ GenericTestUtils.waitFor(new Supplier<Boolean>() {
+ @Override
+ public Boolean get() {
+ return ((namenode.getNamesystem().getCacheCapacity() ==
+ (NUM_DATANODES * CACHE_CAPACITY)) &&
+ (namenode.getNamesystem().getCacheUsed() == 0));
+ }
+ }, 500, 60000);
+
NamenodeProtocols nnRpc = namenode.getRpcServer();
Path rootDir = helper.getDefaultWorkingDirectory(dfs);
// Create the pool
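
The wait introduced in this hunk polls the namesystem until every datanode has
registered its cache capacity and nothing is cached yet, before the test proceeds.
A minimal sketch of the same GenericTestUtils.waitFor idiom, assuming the Guava
Supplier used by these tests and final NUM_DATANODES / CACHE_CAPACITY constants
as in the test class:

    import com.google.common.base.Supplier;
    import org.apache.hadoop.test.GenericTestUtils;

    // Poll every 500 ms; fail if the condition never holds within 60 s.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        // All datanode cache capacity reported, nothing cached yet.
        return namenode.getNamesystem().getCacheCapacity() ==
                   (NUM_DATANODES * CACHE_CAPACITY) &&
               namenode.getNamesystem().getCacheUsed() == 0;
      }
    }, 500, 60000);
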
@@ -967,8 +977,8 @@ public class TestCacheDirectives {
dfs.addCachePool(new CachePoolInfo(poolName)
.setMode(new FsPermission((short)0700)));
// Should only see partial info
- RemoteIterator<CachePoolInfo> it = myDfs.listCachePools();
- CachePoolInfo info = it.next();
+ RemoteIterator<CachePoolEntry> it = myDfs.listCachePools();
+ CachePoolInfo info = it.next().getInfo();
assertFalse(it.hasNext());
assertEquals("Expected pool name", poolName, info.getPoolName());
assertNull("Unexpected owner name", info.getOwnerName());
@@ -981,7 +991,7 @@ public class TestCacheDirectives {
.setWeight(99));
// Should see full info
it = myDfs.listCachePools();
- info = it.next();
+ info = it.next().getInfo();
assertFalse(it.hasNext());
assertEquals("Expected pool name", poolName, info.getPoolName());
assertEquals("Mismatched owner name", myUser.getShortUserName(),
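
Every hunk in this file tracks the same API change: DistributedFileSystem.listCachePools()
now returns a RemoteIterator<CachePoolEntry> rather than RemoteIterator<CachePoolInfo>, so
callers unwrap the info with getInfo(). A minimal sketch of the updated call pattern (the
helper name is illustrative, not part of the commit):

    import java.io.IOException;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
    import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

    /** Illustrative helper: print every cache pool visible to the caller. */
    static void printCachePools(DistributedFileSystem dfs) throws IOException {
      RemoteIterator<CachePoolEntry> it = dfs.listCachePools();
      while (it.hasNext()) {
        // Each entry wraps the pool's CachePoolInfo; unwrap it before reading fields.
        CachePoolInfo info = it.next().getInfo();
        System.out.println("pool=" + info.getPoolName()
            + " owner=" + info.getOwnerName()
            + " mode=" + info.getMode());
      }
    }
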
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1546143&r1=1546142&r2=1546143&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Wed Nov 27 17:55:52 2013
@@ -31,7 +31,10 @@ import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
import org.apache.hadoop.util.VersionInfo;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
@@ -46,10 +49,16 @@ public class TestNameNodeMXBean {
*/
private static final double DELTA = 0.000001;
+ static {
+ NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
+ }
+
@SuppressWarnings({ "unchecked" })
@Test
public void testNameNodeMXBeanInfo() throws Exception {
Configuration conf = new Configuration();
+ conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+ NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
MiniDFSCluster cluster = null;
try {
@@ -171,6 +180,10 @@ public class TestNameNodeMXBean {
}
assertEquals(1, statusMap.get("active").size());
assertEquals(1, statusMap.get("failed").size());
+ assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
+ assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
+ cluster.getDataNodes().size(),
+ mbs.getAttribute(mxbeanName, "CacheCapacity"));
} finally {
if (cluster != null) {
for (URI dir : cluster.getNameDirs(0)) {
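
The changes to this test enable datanode cache reporting where mlock is unavailable: a
no-op cache manipulator is installed, the locked-memory limit is advertised through the
configuration, and the NameNode MXBean is then expected to report CacheUsed == 0 and
CacheCapacity == memlockLimit * number-of-datanodes. A minimal sketch of that setup,
assuming the MiniDFSCluster builder API and an illustrative single-datanode cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.io.nativeio.NativeIO;
    import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;

    static void startClusterWithCaching() throws Exception {
      // Replace mlock/munlock with no-ops so the test runs on any platform.
      NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());

      Configuration conf = new Configuration();
      // Advertise a non-zero locked-memory limit so datanodes report cache capacity.
      conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          NativeIO.POSIX.getCacheManipulator().getMemlockLimit());

      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(1).build();
      try {
        cluster.waitActive();
        // CacheCapacity on the NameNode MXBean should now equal
        // memlockLimit * cluster.getDataNodes().size(), with CacheUsed at 0.
      } finally {
        cluster.shutdown();
      }
    }
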
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1546143&r1=1546142&r2=1546143&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Wed Nov 27 17:55:52 2013
@@ -54,6 +54,7 @@ import org.apache.hadoop.hdfs.MiniDFSNNT
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -904,7 +905,7 @@ public class TestRetryCacheWithHA {
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
for (int i = 0; i < CHECKTIMES; i++) {
- RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
+ RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
if (iter.hasNext()) {
return true;
}
@@ -941,8 +942,8 @@ public class TestRetryCacheWithHA {
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
for (int i = 0; i < CHECKTIMES; i++) {
- RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
- if (iter.hasNext() && iter.next().getWeight() == 99) {
+ RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
+ if (iter.hasNext() && iter.next().getInfo().getWeight() == 99) {
return true;
}
Thread.sleep(1000);
@@ -978,7 +979,7 @@ public class TestRetryCacheWithHA {
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
for (int i = 0; i < CHECKTIMES; i++) {
- RemoteIterator<CachePoolInfo> iter = dfs.listCachePools();
+ RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
if (!iter.hasNext()) {
return true;
}