You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by wa...@apache.org on 2013/10/24 02:08:16 UTC
svn commit: r1535217 - in
/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs: ./
src/test/java/org/apache/hadoop/hdfs/server/datanode/
src/test/java/org/apache/hadoop/hdfs/server/namenode/
Author: wang
Date: Thu Oct 24 00:08:15 2013
New Revision: 1535217
URL: http://svn.apache.org/r1535217
Log:
HDFS-5404. Resolve regressions in Windows compatibility on HDFS-4949 branch. Contributed by Chris Nauroth.
Modified:
hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt
hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt?rev=1535217&r1=1535216&r2=1535217&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt Thu Oct 24 00:08:15 2013
@@ -115,3 +115,6 @@ HDFS-4949 (Unreleased)
HDFS-5385. Caching RPCs are AtMostOnce, but do not persist client ID and
call ID to edit log. (Chris Nauroth via Colin Patrick McCabe)
+ HDFS-5404. Resolve regressions in Windows compatibility on HDFS-4949
+ branch. (Chris Nauroth via Andrew Wang)
+
Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java?rev=1535217&r1=1535216&r2=1535217&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestFsDatasetCache.java Thu Oct 24 00:08:15 2013
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.server.datanode;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assume.assumeTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyInt;
@@ -50,6 +51,7 @@ import org.apache.hadoop.hdfs.server.pro
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.log4j.Logger;
import org.junit.After;
import org.junit.Before;
@@ -72,6 +74,8 @@ public class TestFsDatasetCache {
@Before
public void setUp() throws Exception {
+ assumeTrue(!Path.WINDOWS);
+ assumeTrue(NativeIO.isAvailable());
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java?rev=1535217&r1=1535216&r2=1535217&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPathBasedCacheRequests.java Thu Oct 24 00:08:15 2013
@@ -447,77 +447,74 @@ public class TestPathBasedCacheRequests
@Test(timeout=60000)
public void testCacheManagerRestart() throws Exception {
+ cluster.shutdown();
+ cluster = null;
HdfsConfiguration conf = createCachingConf();
- MiniDFSCluster cluster =
- new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
- try {
- cluster.waitActive();
- DistributedFileSystem dfs = cluster.getFileSystem();
+ cluster.waitActive();
+ DistributedFileSystem dfs = cluster.getFileSystem();
- // Create and validate a pool
- final String pool = "poolparty";
- String groupName = "partygroup";
- FsPermission mode = new FsPermission((short)0777);
- int weight = 747;
- dfs.addCachePool(new CachePoolInfo(pool)
- .setGroupName(groupName)
- .setMode(mode)
- .setWeight(weight));
- RemoteIterator<CachePoolInfo> pit = dfs.listCachePools();
- assertTrue("No cache pools found", pit.hasNext());
- CachePoolInfo info = pit.next();
- assertEquals(pool, info.getPoolName());
- assertEquals(groupName, info.getGroupName());
- assertEquals(mode, info.getMode());
- assertEquals(weight, (int)info.getWeight());
- assertFalse("Unexpected # of cache pools found", pit.hasNext());
+ // Create and validate a pool
+ final String pool = "poolparty";
+ String groupName = "partygroup";
+ FsPermission mode = new FsPermission((short)0777);
+ int weight = 747;
+ dfs.addCachePool(new CachePoolInfo(pool)
+ .setGroupName(groupName)
+ .setMode(mode)
+ .setWeight(weight));
+ RemoteIterator<CachePoolInfo> pit = dfs.listCachePools();
+ assertTrue("No cache pools found", pit.hasNext());
+ CachePoolInfo info = pit.next();
+ assertEquals(pool, info.getPoolName());
+ assertEquals(groupName, info.getGroupName());
+ assertEquals(mode, info.getMode());
+ assertEquals(weight, (int)info.getWeight());
+ assertFalse("Unexpected # of cache pools found", pit.hasNext());
- // Create some cache entries
- int numEntries = 10;
- String entryPrefix = "/party-";
- for (int i=0; i<numEntries; i++) {
- dfs.addPathBasedCacheDirective(
- new PathBasedCacheDirective.Builder().
- setPath(new Path(entryPrefix + i)).setPool(pool).build());
- }
- RemoteIterator<PathBasedCacheDescriptor> dit
- = dfs.listPathBasedCacheDescriptors(null, null);
- for (int i=0; i<numEntries; i++) {
- assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
- PathBasedCacheDescriptor cd = dit.next();
- assertEquals(i+1, cd.getEntryId());
- assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
- assertEquals(pool, cd.getPool());
- }
- assertFalse("Unexpected # of cache descriptors found", dit.hasNext());
+ // Create some cache entries
+ int numEntries = 10;
+ String entryPrefix = "/party-";
+ for (int i=0; i<numEntries; i++) {
+ dfs.addPathBasedCacheDirective(
+ new PathBasedCacheDirective.Builder().
+ setPath(new Path(entryPrefix + i)).setPool(pool).build());
+ }
+ RemoteIterator<PathBasedCacheDescriptor> dit
+ = dfs.listPathBasedCacheDescriptors(null, null);
+ for (int i=0; i<numEntries; i++) {
+ assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
+ PathBasedCacheDescriptor cd = dit.next();
+ assertEquals(i+1, cd.getEntryId());
+ assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
+ assertEquals(pool, cd.getPool());
+ }
+ assertFalse("Unexpected # of cache descriptors found", dit.hasNext());
- // Restart namenode
- cluster.restartNameNode();
+ // Restart namenode
+ cluster.restartNameNode();
- // Check that state came back up
- pit = dfs.listCachePools();
- assertTrue("No cache pools found", pit.hasNext());
- info = pit.next();
- assertEquals(pool, info.getPoolName());
- assertEquals(pool, info.getPoolName());
- assertEquals(groupName, info.getGroupName());
- assertEquals(mode, info.getMode());
- assertEquals(weight, (int)info.getWeight());
- assertFalse("Unexpected # of cache pools found", pit.hasNext());
+ // Check that state came back up
+ pit = dfs.listCachePools();
+ assertTrue("No cache pools found", pit.hasNext());
+ info = pit.next();
+ assertEquals(pool, info.getPoolName());
+ assertEquals(pool, info.getPoolName());
+ assertEquals(groupName, info.getGroupName());
+ assertEquals(mode, info.getMode());
+ assertEquals(weight, (int)info.getWeight());
+ assertFalse("Unexpected # of cache pools found", pit.hasNext());
- dit = dfs.listPathBasedCacheDescriptors(null, null);
- for (int i=0; i<numEntries; i++) {
- assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
- PathBasedCacheDescriptor cd = dit.next();
- assertEquals(i+1, cd.getEntryId());
- assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
- assertEquals(pool, cd.getPool());
- }
- assertFalse("Unexpected # of cache descriptors found", dit.hasNext());
- } finally {
- cluster.shutdown();
+ dit = dfs.listPathBasedCacheDescriptors(null, null);
+ for (int i=0; i<numEntries; i++) {
+ assertTrue("Unexpected # of cache entries: " + i, dit.hasNext());
+ PathBasedCacheDescriptor cd = dit.next();
+ assertEquals(i+1, cd.getEntryId());
+ assertEquals(entryPrefix + i, cd.getPath().toUri().getPath());
+ assertEquals(pool, cd.getPool());
}
+ assertFalse("Unexpected # of cache descriptors found", dit.hasNext());
}
private static void waitForCachedBlocks(NameNode nn,