Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2012/09/27 00:55:20 UTC
svn commit: r1390763 [4/4] - in
/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project:
hadoop-hdfs-httpfs/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/
hadoop-hdfs-httpfs/src/site/apt/ hadoop-hdfs-httpfs/src/test/java/org/apac...
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java Wed Sep 26 22:55:00 2012
@@ -22,7 +22,9 @@ import static org.junit.Assert.assertEqu
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
-
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.inOrder;
+import static org.mockito.Mockito.mock;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
@@ -54,6 +56,7 @@ import org.apache.hadoop.util.DataChecks
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Test;
+import org.mockito.InOrder;
public class TestDistributedFileSystem {
private static final Random RAN = new Random();
@@ -117,17 +120,38 @@ public class TestDistributedFileSystem {
DFSTestUtil.readFile(fileSys, p);
DFSClient client = ((DistributedFileSystem)fileSys).dfs;
- SocketCache cache = client.socketCache;
- assertEquals(1, cache.size());
fileSys.close();
- assertEquals(0, cache.size());
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
+
+ @Test
+ public void testDFSCloseOrdering() throws Exception {
+ DistributedFileSystem fs = new MyDistributedFileSystem();
+ Path path = new Path("/a");
+ fs.deleteOnExit(path);
+ fs.close();
+
+ InOrder inOrder = inOrder(fs.dfs);
+ inOrder.verify(fs.dfs).closeOutputStreams(eq(false));
+ inOrder.verify(fs.dfs).delete(eq(path.toString()), eq(true));
+ inOrder.verify(fs.dfs).close();
+ }
+ private static class MyDistributedFileSystem extends DistributedFileSystem {
+ MyDistributedFileSystem() {
+ statistics = new FileSystem.Statistics("myhdfs"); // can't mock finals
+ dfs = mock(DFSClient.class);
+ }
+ @Override
+ public boolean exists(Path p) {
+ return true; // fool deleteOnExit's existence check
+ }
+ }
+
@Test
public void testDFSSeekExceptions() throws IOException {
Configuration conf = getTestConfiguration();
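The new testDFSCloseOrdering above leans on Mockito's InOrder verification to pin down the close() sequence (output streams, deleteOnExit paths, then the client). A minimal standalone sketch of that pattern, against a hypothetical Resource interface that is not part of Hadoop:

    import static org.mockito.Mockito.inOrder;
    import static org.mockito.Mockito.mock;

    import org.mockito.InOrder;

    public class InOrderSketch {
      interface Resource {            // hypothetical, for illustration only
        void flush();
        void close();
      }

      public static void main(String[] args) {
        Resource r = mock(Resource.class);
        r.flush();
        r.close();

        // Verification fails if close() had run before flush().
        InOrder inOrder = inOrder(r);
        inOrder.verify(r).flush();
        inOrder.verify(r).close();
      }
    }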
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileStatus.java Wed Sep 26 22:55:00 2012
@@ -25,6 +25,7 @@ import static org.junit.Assert.fail;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Random;
+import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
@@ -100,7 +101,7 @@ public class TestFileStatus {
}
private void checkFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
+ throws IOException, InterruptedException, TimeoutException {
DFSTestUtil.waitReplication(fileSys, name, (short) repl);
}
@@ -129,7 +130,7 @@ public class TestFileStatus {
/** Test the FileStatus obtained calling getFileStatus on a file */
@Test
- public void testGetFileStatusOnFile() throws IOException {
+ public void testGetFileStatusOnFile() throws Exception {
checkFile(fs, file1, 1);
// test getFileStatus on a file
FileStatus status = fs.getFileStatus(file1);
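The signature churn in this file follows from DFSTestUtil.waitReplication now declaring InterruptedException and TimeoutException, so callers either propagate them (as checkFile does above) or absorb them. A sketch of a call site that absorbs them instead, assuming only the throws clause visible in this diff:

    try {
      DFSTestUtil.waitReplication(fileSys, name, (short) repl);
    } catch (TimeoutException e) {
      fail("Replication did not reach " + repl + " in time: " + e);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      fail("Interrupted while waiting for replication");
    }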
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java Wed Sep 26 22:55:00 2012
@@ -17,8 +17,7 @@
*/
package org.apache.hadoop.hdfs;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -28,48 +27,178 @@ import java.util.Map;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.util.Time;
import org.junit.Test;
+
/**
- * This class tests if block replacement request to data nodes work correctly.
+ * This class tests if the getBlocks request works correctly.
*/
public class TestGetBlocks {
+ private static final int blockSize = 8192;
+ private static final String racks[] = new String[] { "/d1/r1", "/d1/r1",
+ "/d1/r2", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3" };
+ private static final int numDatanodes = racks.length;
+
+ /**
+ * Stop the heartbeat of a datanode in the MiniDFSCluster
+ *
+ * @param cluster
+ * The MiniDFSCluster
+ * @param hostName
+ * The hostName of the datanode to be stopped
+ * @return The DataNode whose heartbeat has been stopped
+ */
+ private DataNode stopDataNodeHeartbeat(MiniDFSCluster cluster, String hostName) {
+ for (DataNode dn : cluster.getDataNodes()) {
+ if (dn.getDatanodeId().getHostName().equals(hostName)) {
+ DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
+ return dn;
+ }
+ }
+ return null;
+ }
+
+ /**
+ * Test if the datanodes returned by
+ * {@link ClientProtocol#getBlockLocations(String, long, long)} are correct
+ * when stale-node checking is enabled. Also test the scenario in which 1)
+ * stale-node checking is enabled, 2) a write is in progress, and 3) a
+ * datanode becomes stale, all happening at the same time.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testReadSelectNonStaleDatanode() throws Exception {
+ HdfsConfiguration conf = new HdfsConfiguration();
+ conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_CHECK_STALE_DATANODE_KEY, true);
+ long staleInterval = 30 * 1000 * 60;
+ conf.setLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
+ staleInterval);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(numDatanodes).racks(racks).build();
+
+ cluster.waitActive();
+ InetSocketAddress addr = new InetSocketAddress("localhost",
+ cluster.getNameNodePort());
+ DFSClient client = new DFSClient(addr, conf);
+ List<DatanodeDescriptor> nodeInfoList = cluster.getNameNode()
+ .getNamesystem().getBlockManager().getDatanodeManager()
+ .getDatanodeListForReport(DatanodeReportType.LIVE);
+ assertEquals("Unexpected number of datanodes", numDatanodes,
+ nodeInfoList.size());
+ FileSystem fileSys = cluster.getFileSystem();
+ FSDataOutputStream stm = null;
+ try {
+ // write the data but do not close the FSDataOutputStream,
+ // in order to mimic an ongoing write
+ final Path fileName = new Path("/file1");
+ stm = fileSys.create(
+ fileName,
+ true,
+ fileSys.getConf().getInt(
+ CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
+ (short) 3, blockSize);
+ stm.write(new byte[(blockSize * 3) / 2]);
+ // We do not close the stream, so that
+ // the write appears to be still in progress
+ stm.hflush();
+
+ LocatedBlocks blocks = client.getNamenode().getBlockLocations(
+ fileName.toString(), 0, blockSize);
+ DatanodeInfo[] nodes = blocks.get(0).getLocations();
+ assertEquals(nodes.length, 3);
+ DataNode staleNode = null;
+ DatanodeDescriptor staleNodeInfo = null;
+ // stop the heartbeat of the first node
+ staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName());
+ assertNotNull(staleNode);
+ // set the first node as stale
+ staleNodeInfo = cluster.getNameNode().getNamesystem().getBlockManager()
+ .getDatanodeManager()
+ .getDatanode(staleNode.getDatanodeId());
+ staleNodeInfo.setLastUpdate(Time.now() - staleInterval - 1);
+
+ LocatedBlocks blocksAfterStale = client.getNamenode().getBlockLocations(
+ fileName.toString(), 0, blockSize);
+ DatanodeInfo[] nodesAfterStale = blocksAfterStale.get(0).getLocations();
+ assertEquals(nodesAfterStale.length, 3);
+ assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName());
+
+ // restart the staleNode's heartbeat
+ DataNodeTestUtils.setHeartbeatsDisabledForTests(staleNode, false);
+ // reset the first node as non-stale, so as to avoid two stale nodes
+ staleNodeInfo.setLastUpdate(Time.now());
+
+ LocatedBlock lastBlock = client.getLocatedBlocks(fileName.toString(), 0,
+ Long.MAX_VALUE).getLastLocatedBlock();
+ nodes = lastBlock.getLocations();
+ assertEquals(nodes.length, 3);
+ // stop the heartbeat of the first node for the last block
+ staleNode = this.stopDataNodeHeartbeat(cluster, nodes[0].getHostName());
+ assertNotNull(staleNode);
+ // set the node as stale
+ cluster.getNameNode().getNamesystem().getBlockManager()
+ .getDatanodeManager()
+ .getDatanode(staleNode.getDatanodeId())
+ .setLastUpdate(Time.now() - staleInterval - 1);
+
+ LocatedBlock lastBlockAfterStale = client.getLocatedBlocks(
+ fileName.toString(), 0, Long.MAX_VALUE).getLastLocatedBlock();
+ nodesAfterStale = lastBlockAfterStale.getLocations();
+ assertEquals(nodesAfterStale.length, 3);
+ assertEquals(nodesAfterStale[2].getHostName(), nodes[0].getHostName());
+ } finally {
+ if (stm != null) {
+ stm.close();
+ }
+ cluster.shutdown();
+ }
+ }
+
/** test getBlocks */
@Test
public void testGetBlocks() throws Exception {
final Configuration CONF = new HdfsConfiguration();
- final short REPLICATION_FACTOR = (short)2;
+ final short REPLICATION_FACTOR = (short) 2;
final int DEFAULT_BLOCK_SIZE = 1024;
final Random r = new Random();
-
+
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
- MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
- .numDataNodes(REPLICATION_FACTOR)
- .build();
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
+ REPLICATION_FACTOR).build();
try {
cluster.waitActive();
-
+
// create a file with two blocks
FileSystem fs = cluster.getFileSystem();
FSDataOutputStream out = fs.create(new Path("/tmp.txt"),
REPLICATION_FACTOR);
- byte [] data = new byte[1024];
- long fileLen = 2*DEFAULT_BLOCK_SIZE;
+ byte[] data = new byte[1024];
+ long fileLen = 2 * DEFAULT_BLOCK_SIZE;
long bytesToWrite = fileLen;
- while( bytesToWrite > 0 ) {
+ while (bytesToWrite > 0) {
r.nextBytes(data);
- int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
+ int bytesToWriteNext = (1024 < bytesToWrite) ? 1024
+ : (int) bytesToWrite;
out.write(data, 0, bytesToWriteNext);
bytesToWrite -= bytesToWriteNext;
}
@@ -77,27 +206,28 @@ public class TestGetBlocks {
// get blocks & data nodes
List<LocatedBlock> locatedBlocks;
- DatanodeInfo[] dataNodes=null;
+ DatanodeInfo[] dataNodes = null;
boolean notWritten;
do {
- final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF), CONF);
- locatedBlocks = dfsclient.getNamenode().
- getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
+ final DFSClient dfsclient = new DFSClient(NameNode.getAddress(CONF),
+ CONF);
+ locatedBlocks = dfsclient.getNamenode()
+ .getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
assertEquals(2, locatedBlocks.size());
notWritten = false;
- for(int i=0; i<2; i++) {
+ for (int i = 0; i < 2; i++) {
dataNodes = locatedBlocks.get(i).getLocations();
- if(dataNodes.length != REPLICATION_FACTOR) {
+ if (dataNodes.length != REPLICATION_FACTOR) {
notWritten = true;
try {
Thread.sleep(10);
- } catch(InterruptedException e) {
+ } catch (InterruptedException e) {
}
break;
}
}
- } while(notWritten);
-
+ } while (notWritten);
+
// get RPC client to namenode
InetSocketAddress addr = new InetSocketAddress("localhost",
cluster.getNameNodePort());
@@ -122,7 +252,7 @@ public class TestGetBlocks {
assertEquals(locs[0].getStorageIDs().length, 2);
// get blocks of size 0 from dataNodes[0]
- getBlocksWithException(namenode, dataNodes[0], 0);
+ getBlocksWithException(namenode, dataNodes[0], 0);
// get blocks of size -1 from dataNodes[0]
getBlocksWithException(namenode, dataNodes[0], -1);
@@ -136,46 +266,39 @@ public class TestGetBlocks {
}
private void getBlocksWithException(NamenodeProtocol namenode,
- DatanodeInfo datanode,
- long size) throws IOException {
+ DatanodeInfo datanode, long size) throws IOException {
boolean getException = false;
try {
- namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2);
- } catch(RemoteException e) {
+ namenode.getBlocks(DFSTestUtil.getLocalDatanodeInfo(), 2);
+ } catch (RemoteException e) {
getException = true;
assertTrue(e.getClassName().contains("HadoopIllegalArgumentException"));
}
assertTrue(getException);
}
-
+
@Test
public void testBlockKey() {
Map<Block, Long> map = new HashMap<Block, Long>();
final Random RAN = new Random();
final long seed = RAN.nextLong();
- System.out.println("seed=" + seed);
+ System.out.println("seed=" + seed);
RAN.setSeed(seed);
- long[] blkids = new long[10];
- for(int i = 0; i < blkids.length; i++) {
+ long[] blkids = new long[10];
+ for (int i = 0; i < blkids.length; i++) {
blkids[i] = 1000L + RAN.nextInt(100000);
map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
}
System.out.println("map=" + map.toString().replace(",", "\n "));
-
- for(int i = 0; i < blkids.length; i++) {
- Block b = new Block(blkids[i], 0, GenerationStamp.GRANDFATHER_GENERATION_STAMP);
+
+ for (int i = 0; i < blkids.length; i++) {
+ Block b = new Block(blkids[i], 0,
+ GenerationStamp.GRANDFATHER_GENERATION_STAMP);
Long v = map.get(b);
System.out.println(b + " => " + v);
assertEquals(blkids[i], v.longValue());
}
}
- /**
- * @param args
- */
- public static void main(String[] args) throws Exception {
- (new TestGetBlocks()).testGetBlocks();
- }
-
}
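The heart of the new testReadSelectNonStaleDatanode is the stale-marking idiom: stop a datanode's heartbeats, then backdate its last-update time past the stale interval so the NameNode orders it last in each block's location list. A condensed sketch, using only the calls that appear in the test above:

    // Freeze heartbeats, then make the node look stale to the NameNode.
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
    DatanodeDescriptor desc = cluster.getNameNode().getNamesystem()
        .getBlockManager().getDatanodeManager()
        .getDatanode(dn.getDatanodeId());
    desc.setLastUpdate(Time.now() - staleInterval - 1);
    // getBlockLocations should now place this node at the end of the list.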
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSTrash.java Wed Sep 26 22:55:00 2012
@@ -23,11 +23,6 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TestTrash;
-import org.apache.hadoop.fs.Trash;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@@ -62,53 +57,4 @@ public class TestHDFSTrash {
conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
TestTrash.trashNonDefaultFS(conf);
}
-
- /** Clients should always use trash if enabled server side */
- @Test
- public void testTrashEnabledServerSide() throws IOException {
- Configuration serverConf = new HdfsConfiguration();
- Configuration clientConf = new Configuration();
-
- // Enable trash on the server and client
- serverConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
- clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
-
- MiniDFSCluster cluster2 = null;
- try {
- cluster2 = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
- FileSystem fs = cluster2.getFileSystem();
- assertTrue(new Trash(fs, clientConf).isEnabled());
-
- // Disabling trash on the client is ignored
- clientConf.setLong(FS_TRASH_INTERVAL_KEY, 0);
- assertTrue(new Trash(fs, clientConf).isEnabled());
- } finally {
- if (cluster2 != null) cluster2.shutdown();
- }
- }
-
- /** Clients should always use trash if enabled client side */
- @Test
- public void testTrashEnabledClientSide() throws IOException {
- Configuration serverConf = new HdfsConfiguration();
- Configuration clientConf = new Configuration();
-
- // Disable server side
- serverConf.setLong(FS_TRASH_INTERVAL_KEY, 0);
-
- MiniDFSCluster cluster2 = null;
- try {
- cluster2 = new MiniDFSCluster.Builder(serverConf).numDataNodes(1).build();
-
- // Client side is disabled by default
- FileSystem fs = cluster2.getFileSystem();
- assertFalse(new Trash(fs, clientConf).isEnabled());
-
- // Enabling on the client works even though its disabled on the server
- clientConf.setLong(FS_TRASH_INTERVAL_KEY, 1);
- assertTrue(new Trash(fs, clientConf).isEnabled());
- } finally {
- if (cluster2 != null) cluster2.shutdown();
- }
- }
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpDelegationToken.java Wed Sep 26 22:55:00 2012
@@ -19,13 +19,11 @@
package org.apache.hadoop.hdfs;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertSame;
-
+import static org.junit.Assert.*;
import java.io.IOException;
import java.lang.reflect.Field;
+import java.net.ServerSocket;
+import java.net.Socket;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
@@ -55,7 +53,7 @@ public class TestHftpDelegationToken {
new Text("127.0.0.1:8020"));
user.addToken(token);
Token<?> token2 = new Token<TokenIdentifier>
- (null, null, new Text("other token"), new Text("127.0.0.1:8020"));
+ (null, null, new Text("other token"), new Text("127.0.0.1:8021"));
user.addToken(token2);
assertEquals("wrong tokens in user", 2, user.getTokens().size());
FileSystem fs =
@@ -138,6 +136,53 @@ public class TestHftpDelegationToken {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_KEY, 5);
}
+
+ @Test
+ public void testInsecureRemoteCluster() throws Exception {
+ final ServerSocket socket = new ServerSocket(0); // just reserve a port
+ socket.close();
+ Configuration conf = new Configuration();
+ URI fsUri = URI.create("hsftp://localhost:"+socket.getLocalPort());
+ assertNull(FileSystem.newInstance(fsUri, conf).getDelegationToken(null));
+ }
+
+ @Test
+ public void testSecureClusterError() throws Exception {
+ final ServerSocket socket = new ServerSocket(0);
+ Thread t = new Thread() {
+ @Override
+ public void run() {
+ while (true) { // fetching does a few retries
+ try {
+ Socket s = socket.accept();
+ s.getOutputStream().write(1234);
+ s.shutdownOutput();
+ } catch (Exception e) {
+ break;
+ }
+ }
+ }
+ };
+ t.start();
+
+ try {
+ Configuration conf = new Configuration();
+ URI fsUri = URI.create("hsftp://localhost:"+socket.getLocalPort());
+ Exception ex = null;
+ try {
+ FileSystem.newInstance(fsUri, conf).getDelegationToken(null);
+ } catch (Exception e) {
+ ex = e;
+ }
+ assertNotNull(ex);
+ assertNotNull(ex.getCause());
+ assertEquals("Unexpected end of file from server",
+ ex.getCause().getMessage());
+ } finally {
+ t.interrupt();
+ }
+ }
+
private void checkTokenSelection(HftpFileSystem fs,
int port,
Configuration conf) throws IOException {
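testSecureClusterError above works by standing up a throwaway server that answers every connection with a junk byte, so the hsftp client's token fetch fails with "Unexpected end of file from server". A standalone sketch of that throwaway-server idiom (the class name is illustrative only):

    import java.net.ServerSocket;
    import java.net.Socket;

    public class JunkServer {
      public static void main(String[] args) throws Exception {
        ServerSocket socket = new ServerSocket(0); // kernel picks a free port
        System.out.println("listening on " + socket.getLocalPort());
        while (true) {                    // clients retry, so keep answering
          Socket s = socket.accept();
          s.getOutputStream().write(1234); // low byte only; not valid HTTP
          s.shutdownOutput();
          s.close();
        }
      }
    }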
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java Wed Sep 26 22:55:00 2012
@@ -27,6 +27,7 @@ import java.io.RandomAccessFile;
import java.net.InetSocketAddress;
import java.util.Iterator;
import java.util.Random;
+import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -420,8 +421,8 @@ public class TestReplication {
}
}
- private void changeBlockLen(MiniDFSCluster cluster,
- int lenDelta) throws IOException, InterruptedException {
+ private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
+ throws IOException, InterruptedException, TimeoutException {
final Path fileName = new Path("/file1");
final short REPLICATION_FACTOR = (short)1;
final FileSystem fs = cluster.getFileSystem();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Wed Sep 26 22:55:00 2012
@@ -224,7 +224,8 @@ public class TestShortCircuitLocalRead {
@Test
public void testGetBlockLocalPathInfo() throws IOException, InterruptedException {
final Configuration conf = new Configuration();
- conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, "alloweduser");
+ conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
+ "alloweduser1,alloweduser2");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
cluster.waitActive();
@@ -232,8 +233,10 @@ public class TestShortCircuitLocalRead {
FileSystem fs = cluster.getFileSystem();
try {
DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
- UserGroupInformation aUgi = UserGroupInformation
- .createRemoteUser("alloweduser");
+ UserGroupInformation aUgi1 =
+ UserGroupInformation.createRemoteUser("alloweduser1");
+ UserGroupInformation aUgi2 =
+ UserGroupInformation.createRemoteUser("alloweduser2");
LocatedBlocks lb = cluster.getNameNode().getRpcServer()
.getBlockLocations("/tmp/x", 0, 16);
// Create a new block object, because the block inside LocatedBlock at
@@ -241,7 +244,7 @@ public class TestShortCircuitLocalRead {
ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
- ClientDatanodeProtocol proxy = aUgi
+ ClientDatanodeProtocol proxy = aUgi1
.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
@Override
public ClientDatanodeProtocol run() throws Exception {
@@ -250,13 +253,29 @@ public class TestShortCircuitLocalRead {
}
});
- //This should succeed
+ // This should succeed
BlockLocalPathInfo blpi = proxy.getBlockLocalPathInfo(blk, token);
Assert.assertEquals(
DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
blpi.getBlockPath());
- // Now try with a not allowed user.
+ // Try with the other allowed user
+ proxy = aUgi2
+ .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
+ @Override
+ public ClientDatanodeProtocol run() throws Exception {
+ return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf,
+ 60000, false);
+ }
+ });
+
+ // This should succeed as well
+ blpi = proxy.getBlockLocalPathInfo(blk, token);
+ Assert.assertEquals(
+ DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
+ blpi.getBlockPath());
+
+ // Now try with a disallowed user
UserGroupInformation bUgi = UserGroupInformation
.createRemoteUser("notalloweduser");
proxy = bUgi
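The updated test exercises DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY as a comma-separated list of users, each acting through UserGroupInformation.doAs. A sketch of that doAs idiom, reusing dnInfo and conf from the surrounding test and only calls shown in this diff:

    UserGroupInformation ugi =
        UserGroupInformation.createRemoteUser("alloweduser2");
    ClientDatanodeProtocol proxy =
        ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
          @Override
          public ClientDatanodeProtocol run() throws Exception {
            // Everything in run() executes as the remote user above.
            return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf,
                60000, false);
          }
        });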
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java Wed Sep 26 22:55:00 2012
@@ -80,9 +80,11 @@ public class TestClientProtocolWithDeleg
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
3600000, mockNameSys);
sm.startThreads();
- final Server server = RPC.getServer(ClientProtocol.class, mockNN, ADDRESS,
- 0, 5, true, conf, sm);
-
+ final Server server = new RPC.Builder(conf)
+ .setProtocol(ClientProtocol.class).setInstance(mockNN)
+ .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
+ .setSecretManager(sm).build();
+
server.start();
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
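The change above (and the matching one in TestBlockToken below) migrates from the positional RPC.getServer(...) overload to the fluent RPC.Builder. A sketch of the mapping, using only the setters that appear in this diff:

    final Server server = new RPC.Builder(conf)
        .setProtocol(ClientProtocol.class) // was the first positional argument
        .setInstance(mockNN)               // the protocol implementation
        .setBindAddress(ADDRESS)
        .setPort(0)                        // 0 = bind any free port
        .setNumHandlers(5)
        .setVerbose(true)
        .setSecretManager(sm)
        .build();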
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Wed Sep 26 22:55:00 2012
@@ -231,8 +231,9 @@ public class TestBlockToken {
ProtobufRpcEngine.class);
BlockingService service = ClientDatanodeProtocolService
.newReflectiveBlockingService(mockDN);
- return RPC.getServer(ClientDatanodeProtocolPB.class, service, ADDRESS, 0, 5,
- true, conf, sm);
+ return new RPC.Builder(conf).setProtocol(ClientDatanodeProtocolPB.class)
+ .setInstance(service).setBindAddress(ADDRESS).setPort(0)
+ .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
}
@Test
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Wed Sep 26 22:55:00 2012
@@ -88,7 +88,7 @@ public class TestBalancer {
/* create a file with a length of <code>fileLen</code> */
static void createFile(MiniDFSCluster cluster, Path filePath, long fileLen,
short replicationFactor, int nnIndex)
- throws IOException {
+ throws IOException, InterruptedException, TimeoutException {
FileSystem fs = cluster.getFileSystem(nnIndex);
DFSTestUtil.createFile(fs, filePath, fileLen,
replicationFactor, r.nextLong());
@@ -100,7 +100,7 @@ public class TestBalancer {
* whose used space to be <code>size</code>
*/
private ExtendedBlock[] generateBlocks(Configuration conf, long size,
- short numNodes) throws IOException {
+ short numNodes) throws IOException, InterruptedException, TimeoutException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
try {
cluster.waitActive();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java Wed Sep 26 22:55:00 2012
@@ -23,6 +23,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Random;
+import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -96,7 +97,7 @@ public class TestBalancerWithMultipleNam
/* create a file with a length of <code>fileLen</code> */
private static void createFile(Suite s, int index, long len
- ) throws IOException {
+ ) throws IOException, InterruptedException, TimeoutException {
final FileSystem fs = s.cluster.getFileSystem(index);
DFSTestUtil.createFile(fs, FILE_PATH, len, s.replication, RANDOM.nextLong());
DFSTestUtil.waitReplication(fs, FILE_PATH, s.replication);
@@ -106,7 +107,7 @@ public class TestBalancerWithMultipleNam
* whose used space to be <code>size</code>
*/
private static ExtendedBlock[][] generateBlocks(Suite s, long size
- ) throws IOException {
+ ) throws IOException, InterruptedException, TimeoutException {
final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
for(int n = 0; n < s.clients.length; n++) {
final long fileLen = size/s.replication;
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java Wed Sep 26 22:55:00 2012
@@ -53,7 +53,7 @@ public class TestOverReplicatedBlocks {
* corrupt ones.
*/
@Test
- public void testProcesOverReplicateBlock() throws IOException {
+ public void testProcesOverReplicateBlock() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(
@@ -141,7 +141,7 @@ public class TestOverReplicatedBlocks {
* send heartbeats.
*/
@Test
- public void testChooseReplicaToDelete() throws IOException {
+ public void testChooseReplicaToDelete() throws Exception {
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Wed Sep 26 22:55:00 2012
@@ -111,30 +111,30 @@ public class TestReplicationPolicy {
HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename,
- 0, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename,
- 1, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 1);
assertEquals(targets[0], dataNodes[0]);
targets = replicator.chooseTarget(filename,
- 2, dataNodes[0], BLOCK_SIZE);
+ 2, dataNodes[0], new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 2);
assertEquals(targets[0], dataNodes[0]);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename,
- 3, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 3);
assertEquals(targets[0], dataNodes[0]);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
- targets = replicator.chooseTarget(filename,
- 4, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 4);
assertEquals(targets[0], dataNodes[0]);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]) ||
@@ -249,30 +249,30 @@ public class TestReplicationPolicy {
(HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename,
- 0, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename,
- 1, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 1);
assertEquals(targets[0], dataNodes[1]);
- targets = replicator.chooseTarget(filename,
- 2, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 2);
assertEquals(targets[0], dataNodes[1]);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename,
- 3, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 3);
assertEquals(targets[0], dataNodes[1]);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename,
- 4, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 4);
assertEquals(targets[0], dataNodes[1]);
for(int i=1; i<4; i++) {
@@ -305,23 +305,23 @@ public class TestReplicationPolicy {
}
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename,
- 0, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename,
- 1, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 1);
assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
- targets = replicator.chooseTarget(filename,
- 2, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 2);
assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename,
- 3, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 3);
for(int i=0; i<3; i++) {
assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0]));
@@ -350,21 +350,21 @@ public class TestReplicationPolicy {
DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename,
- 0, writerDesc, BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 0, writerDesc,
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 0);
-
- targets = replicator.chooseTarget(filename,
- 1, writerDesc, BLOCK_SIZE);
+
+ targets = replicator.chooseTarget(filename, 1, writerDesc,
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 1);
-
- targets = replicator.chooseTarget(filename,
- 2, writerDesc, BLOCK_SIZE);
+
+ targets = replicator.chooseTarget(filename, 2, writerDesc,
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 2);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-
- targets = replicator.chooseTarget(filename,
- 3, writerDesc, BLOCK_SIZE);
+
+ targets = replicator.chooseTarget(filename, 3, writerDesc,
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 3);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
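Every chooseTarget call site in this file (and in TestReplicationPolicyWithNodeGroup below) now passes an explicit List&lt;DatanodeDescriptor&gt; as the fourth argument; the tests always pass a fresh empty list. What the list carries is not spelled out in this diff, but the call shape is:

    List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
    DatanodeDescriptor[] targets = replicator.chooseTarget(
        filename, 3, dataNodes[0], chosenNodes, BLOCK_SIZE);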
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java Wed Sep 26 22:55:00 2012
@@ -114,31 +114,31 @@ public class TestReplicationPolicyWithNo
HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 4, 0); // overloaded
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename,
- 0, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename,
- 1, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 1);
assertEquals(targets[0], dataNodes[0]);
- targets = replicator.chooseTarget(filename,
- 2, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 2);
assertEquals(targets[0], dataNodes[0]);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename,
- 3, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 3);
assertEquals(targets[0], dataNodes[0]);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
assertFalse(cluster.isOnSameNodeGroup(targets[1], targets[2]));
- targets = replicator.chooseTarget(filename,
- 4, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 4);
assertEquals(targets[0], dataNodes[0]);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]) ||
@@ -220,30 +220,30 @@ public class TestReplicationPolicyWithNo
(HdfsConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0L, 0, 0); // no space
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename,
- 0, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename,
- 1, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 1);
assertEquals(targets[0], dataNodes[1]);
- targets = replicator.chooseTarget(filename,
- 2, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 2);
assertEquals(targets[0], dataNodes[1]);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename,
- 3, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 3);
assertEquals(targets[0], dataNodes[1]);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename,
- 4, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 4, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 4);
assertEquals(targets[0], dataNodes[1]);
assertTrue(cluster.isNodeGroupAware());
@@ -275,23 +275,23 @@ public class TestReplicationPolicyWithNo
}
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename,
- 0, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 0, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 0);
- targets = replicator.chooseTarget(filename,
- 1, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 1, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 1);
assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
- targets = replicator.chooseTarget(filename,
- 2, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 2, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 2);
assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
- targets = replicator.chooseTarget(filename,
- 3, dataNodes[0], BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 3, dataNodes[0],
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 3);
for(int i=0; i<3; i++) {
assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0]));
@@ -313,21 +313,21 @@ public class TestReplicationPolicyWithNo
public void testChooseTarget5() throws Exception {
setupDataNodeCapacity();
DatanodeDescriptor[] targets;
- targets = replicator.chooseTarget(filename,
- 0, NODE, BLOCK_SIZE);
+ targets = replicator.chooseTarget(filename, 0, NODE,
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 0);
-
- targets = replicator.chooseTarget(filename,
- 1, NODE, BLOCK_SIZE);
+
+ targets = replicator.chooseTarget(filename, 1, NODE,
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 1);
-
- targets = replicator.chooseTarget(filename,
- 2, NODE, BLOCK_SIZE);
+
+ targets = replicator.chooseTarget(filename, 2, NODE,
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 2);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
-
- targets = replicator.chooseTarget(filename,
- 3, NODE, BLOCK_SIZE);
+
+ targets = replicator.chooseTarget(filename, 3, NODE,
+ new ArrayList<DatanodeDescriptor>(), BLOCK_SIZE);
assertEquals(targets.length, 3);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java Wed Sep 26 22:55:00 2012
@@ -114,6 +114,12 @@ public class DataNodeTestUtils {
dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
}
+ public static void runBlockScannerForBlock(DataNode dn, ExtendedBlock b) {
+ DataBlockScanner scanner = dn.getBlockScanner();
+ BlockPoolSliceScanner bpScanner = scanner.getBPScanner(b.getBlockPoolId());
+ bpScanner.verifyBlock(b);
+ }
+
public static void shutdownBlockScanner(DataNode dn) {
if (dn.blockScanner != null) {
dn.blockScanner.shutdown();
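The new runBlockScannerForBlock helper lets a test verify one block synchronously instead of waiting out the scanner's period. A hypothetical usage sketch (DFSTestUtil.getFirstBlock is assumed here, not shown in this diff):

    ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, path); // assumed helper
    DataNode dn = cluster.getDataNodes().get(0);
    DataNodeTestUtils.runBlockScannerForBlock(dn, blk);      // scan just blk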
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Wed Sep 26 22:55:00 2012
@@ -89,7 +89,7 @@ public class TestBlockReplacement {
}
@Test
- public void testBlockReplacement() throws IOException, TimeoutException {
+ public void testBlockReplacement() throws Exception {
final Configuration CONF = new HdfsConfiguration();
final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
final String[] NEW_RACKS = {"/RACK2"};
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Wed Sep 26 22:55:00 2012
@@ -27,6 +27,9 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeoutException;
+
+import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -65,7 +68,7 @@ import org.mockito.invocation.Invocation
/**
* This test simulates a variety of situations when blocks are being
- * intentionally orrupted, unexpectedly modified, and so on before a block
+ * intentionally corrupted, unexpectedly modified, and so on before a block
* report is happening
*/
public class TestBlockReport {
@@ -316,7 +319,7 @@ public class TestBlockReport {
* @throws IOException in case of an error
*/
@Test
- public void blockReport_06() throws IOException {
+ public void blockReport_06() throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
@@ -353,7 +356,7 @@ public class TestBlockReport {
@Test
// Currently this test is failing as expected 'cause the correct behavior is
// not yet implemented (9/15/09)
- public void blockReport_07() throws IOException {
+ public void blockReport_07() throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
Path filePath = new Path("/" + METHOD_NAME + ".dat");
final int DN_N1 = DN_N0 + 1;
@@ -670,21 +673,24 @@ public class TestBlockReport {
}
private void startDNandWait(Path filePath, boolean waitReplicas)
- throws IOException {
- if(LOG.isDebugEnabled()) {
+ throws IOException, InterruptedException, TimeoutException {
+ if (LOG.isDebugEnabled()) {
LOG.debug("Before next DN start: " + cluster.getDataNodes().size());
}
cluster.startDataNodes(conf, 1, true, null, null);
+ cluster.waitClusterUp();
ArrayList<DataNode> datanodes = cluster.getDataNodes();
assertEquals(datanodes.size(), 2);
- if(LOG.isDebugEnabled()) {
+ if (LOG.isDebugEnabled()) {
int lastDn = datanodes.size() - 1;
LOG.debug("New datanode "
+ cluster.getDataNodes().get(lastDn).getDisplayName()
+ " has been started");
}
- if (waitReplicas) DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
+ if (waitReplicas) {
+ DFSTestUtil.waitReplication(fs, filePath, REPL_FACTOR);
+ }
}
private ArrayList<Block> prepareForRide(final Path filePath,
@@ -836,8 +842,9 @@ public class TestBlockReport {
public void run() {
try {
startDNandWait(filePath, true);
- } catch (IOException e) {
- LOG.warn("Shouldn't happen", e);
+ } catch (Exception e) {
+ e.printStackTrace();
+ Assert.fail("Failed to start BlockChecker: " + e);
}
}
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java Wed Sep 26 22:55:00 2012
@@ -105,7 +105,7 @@ public class TestDataNodeVolumeFailure {
* failure if the configuration parameter allows this.
*/
@Test
- public void testVolumeFailure() throws IOException {
+ public void testVolumeFailure() throws Exception {
FileSystem fs = cluster.getFileSystem();
dataDir = new File(cluster.getDataDirectory());
System.out.println("Data dir: is " + dataDir.getPath());
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestMultipleNNDataBlockScanner.java Wed Sep 26 22:55:00 2012
@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs.server.da
import java.io.IOException;
+import junit.framework.Assert;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
@@ -31,7 +34,13 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner;
+import static org.apache.hadoop.hdfs.server.datanode.DataBlockScanner.SLEEP_PERIOD_MS;
+import org.apache.log4j.Level;
+import org.apache.log4j.Logger;
import org.junit.Test;
+import org.junit.Ignore;
+import static org.junit.Assert.fail;
public class TestMultipleNNDataBlockScanner {
@@ -166,4 +175,75 @@ public class TestMultipleNNDataBlockScan
cluster.shutdown();
}
}
+
+ @Test
+ public void test2NNBlockRescanInterval() throws IOException {
+ ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
+ Configuration conf = new HdfsConfiguration();
+ cluster = new MiniDFSCluster.Builder(conf)
+ .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
+ .build();
+
+ try {
+ FileSystem fs = cluster.getFileSystem(1);
+ Path file2 = new Path("/test/testBlockScanInterval");
+ DFSTestUtil.createFile(fs, file2, 30, (short) 1, 0);
+
+ fs = cluster.getFileSystem(0);
+ Path file1 = new Path("/test/testBlockScanInterval");
+ DFSTestUtil.createFile(fs, file1, 30, (short) 1, 0);
+ for (int i = 0; i < 8; i++) {
+ LOG.info("Verifying that the blockscanner scans exactly once");
+ waitAndScanBlocks(1, 1);
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * Regression test for HDFS-3828: DN rescans blocks too frequently.
+ *
+ * @throws IOException
+ */
+ @Test
+ public void testBlockRescanInterval() throws IOException {
+ ((Log4JLogger)BlockPoolSliceScanner.LOG).getLogger().setLevel(Level.ALL);
+ Configuration conf = new HdfsConfiguration();
+ cluster = new MiniDFSCluster.Builder(conf).build();
+
+ try {
+ FileSystem fs = cluster.getFileSystem();
+ Path file1 = new Path("/test/testBlockScanInterval");
+ DFSTestUtil.createFile(fs, file1, 30, (short) 1, 0);
+ for (int i = 0; i < 4; i++) {
+ LOG.info("Verifying that the blockscanner scans exactly once");
+ waitAndScanBlocks(1, 1);
+ }
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ void waitAndScanBlocks(long scansLastRun, long scansTotal)
+ throws IOException {
+ // The DataBlockScanner wakes up every SLEEP_PERIOD_MS (5 seconds), so
+ // poll at that interval, retrying a bounded number of times.
+ int n = 5;
+ String bpid = cluster.getNamesystem(0).getBlockPoolId();
+ DataNode dn = cluster.getDataNodes().get(0);
+ long blocksScanned, total;
+ do {
+ try {
+ Thread.sleep(SLEEP_PERIOD_MS);
+ } catch (InterruptedException e) {
+ fail("Interrupted: " + e);
+ }
+ blocksScanned = dn.blockScanner.getBlocksScannedInLastRun(bpid);
+ total = dn.blockScanner.getTotalScans(bpid);
+ LOG.info("bpid = " + bpid + " blocksScanned = " + blocksScanned + " total=" + total);
+ } while (n-- > 0 && (blocksScanned != scansLastRun || scansTotal != total));
+ Assert.assertEquals(scansTotal, total);
+ Assert.assertEquals(scansLastRun, blocksScanned);
+ }
}
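
The waitAndScanBlocks() helper above polls a fixed five times at the scanner's wake-up period before asserting. A deadline-based variant (a sketch; the helper names are hypothetical and not part of this patch) is less sensitive to scheduling jitter on slow machines:

    import java.io.IOException;

    public class WaitFor {
      public interface Check {
        boolean ok() throws IOException;
      }

      // Poll until the condition holds or the deadline passes.
      public static void waitFor(Check check, long timeoutMs, long pollMs)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!check.ok()) {
          if (System.currentTimeMillis() > deadline) {
            throw new AssertionError(
                "condition not met within " + timeoutMs + " ms");
          }
          Thread.sleep(pollMs);
        }
      }
    }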
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestDatanodeRestart.java Wed Sep 26 22:55:00 2012
@@ -137,7 +137,7 @@ public class TestDatanodeRestart {
}
// test recovering unlinked tmp replicas
- @Test public void testRecoverReplicas() throws IOException {
+ @Test public void testRecoverReplicas() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Wed Sep 26 22:55:00 2012
@@ -61,7 +61,7 @@ public class NameNodeAdapter {
public static HdfsFileStatus getFileInfo(NameNode namenode, String src,
boolean resolveLink) throws AccessControlException, UnresolvedLinkException,
- StandbyException {
+ StandbyException, IOException {
return namenode.getNamesystem().getFileInfo(src, resolveLink);
}
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java Wed Sep 26 22:55:00 2012
@@ -32,13 +32,17 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.HftpFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
@@ -83,6 +87,7 @@ public class TestAuditLogs {
final long precision = 1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
+ conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
setNumFiles(20).build();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
@@ -115,6 +120,18 @@ public class TestAuditLogs {
assertTrue("failed to read from file", val > 0);
}
+ /** test that allowed stat puts proper entry in audit log */
+ @Test
+ public void testAuditAllowedStat() throws Exception {
+ final Path file = new Path(fnames[0]);
+ FileSystem userfs = DFSTestUtil.getFileSystemAs(userGroupInfo, conf);
+
+ setupAuditLogs();
+ FileStatus st = userfs.getFileStatus(file);
+ verifyAuditLogs(true);
+ assertTrue("failed to stat file", st != null && st.isFile());
+ }
+
/** test that denied operation puts proper entry in audit log */
@Test
public void testAuditDenied() throws Exception {
@@ -135,6 +152,85 @@ public class TestAuditLogs {
verifyAuditLogs(false);
}
+ /** test that access via webhdfs puts proper entry in audit log */
+ @Test
+ public void testAuditWebHdfs() throws Exception {
+ final Path file = new Path(fnames[0]);
+
+ fs.setPermission(file, new FsPermission((short)0644));
+ fs.setOwner(file, "root", null);
+
+ setupAuditLogs();
+
+ WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+ InputStream istream = webfs.open(file);
+ int val = istream.read();
+ istream.close();
+
+ verifyAuditLogsRepeat(true, 3);
+ assertTrue("failed to read from file", val > 0);
+ }
+
+ /** test that stat via webhdfs puts proper entry in audit log */
+ @Test
+ public void testAuditWebHdfsStat() throws Exception {
+ final Path file = new Path(fnames[0]);
+
+ fs.setPermission(file, new FsPermission((short)0644));
+ fs.setOwner(file, "root", null);
+
+ setupAuditLogs();
+
+ WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+ FileStatus st = webfs.getFileStatus(file);
+
+ verifyAuditLogs(true);
+ assertTrue("failed to stat file", st != null && st.isFile());
+ }
+
+ /** test that access via Hftp puts proper entry in audit log */
+ @Test
+ public void testAuditHftp() throws Exception {
+ final Path file = new Path(fnames[0]);
+
+ final String hftpUri =
+ "hftp://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+
+ HftpFileSystem hftpFs = null;
+
+ setupAuditLogs();
+ try {
+ hftpFs = (HftpFileSystem) new Path(hftpUri).getFileSystem(conf);
+ InputStream istream = hftpFs.open(file);
+ int val = istream.read();
+ istream.close();
+
+ verifyAuditLogs(true);
+ } finally {
+ if (hftpFs != null) hftpFs.close();
+ }
+ }
+
+ /** test that denied access via webhdfs puts proper entry in audit log */
+ @Test
+ public void testAuditWebHdfsDenied() throws Exception {
+ final Path file = new Path(fnames[0]);
+
+ fs.setPermission(file, new FsPermission((short)0600));
+ fs.setOwner(file, "root", null);
+
+ setupAuditLogs();
+ try {
+ WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf);
+ InputStream istream = webfs.open(file);
+ int val = istream.read();
+ fail("open+read must not succeed, got " + val);
+ } catch (AccessControlException e) {
+ System.out.println("got access denied, as expected.");
+ }
+ verifyAuditLogsRepeat(false, 2);
+ }
+
/** Sets up log4j logger for auditlogs */
private void setupAuditLogs() throws IOException {
File file = new File(auditLogFile);
@@ -148,19 +244,34 @@ public class TestAuditLogs {
logger.addAppender(appender);
}
+ // Ensure audit log has only one entry
private void verifyAuditLogs(boolean expectSuccess) throws IOException {
+ verifyAuditLogsRepeat(expectSuccess, 1);
+ }
+
+ // Ensure audit log has exactly N entries
+ private void verifyAuditLogsRepeat(boolean expectSuccess, int ndupe)
+ throws IOException {
// Turn off the logs
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
logger.setLevel(Level.OFF);
- // Ensure audit log has only one entry
BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
- String line = reader.readLine();
- assertNotNull(line);
- assertTrue("Expected audit event not found in audit log",
- auditPattern.matcher(line).matches());
- assertTrue("Expected success=" + expectSuccess,
- successPattern.matcher(line).matches() == expectSuccess);
- assertNull("Unexpected event in audit log", reader.readLine());
+ String line = null;
+ boolean ret = true;
+
+ try {
+ for (int i = 0; i < ndupe; i++) {
+ line = reader.readLine();
+ assertNotNull(line);
+ assertTrue("Expected audit event not found in audit log",
+ auditPattern.matcher(line).matches());
+ ret &= successPattern.matcher(line).matches();
+ }
+ assertNull("Unexpected event in audit log", reader.readLine());
+ assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
+ } finally {
+ reader.close();
+ }
}
}
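
One subtlety in verifyAuditLogsRepeat() above: ret &= folds all ndupe lines into a single boolean, so with expectSuccess == false the assertion passes as long as any one entry is a denial, even if another entry reports success. A stricter per-line variant is sketched below, assuming the same auditLogFile, auditPattern, and successPattern fields (and static imports) as the test class:

    // Sketch: assert every audit entry individually (not part of this patch).
    private void verifyAuditLogsStrict(boolean expectSuccess, int ndupe)
        throws IOException {
      BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
      try {
        for (int i = 0; i < ndupe; i++) {
          String line = reader.readLine();
          assertNotNull(line);
          assertTrue("Expected audit event not found in audit log",
              auditPattern.matcher(line).matches());
          assertEquals("Expected success=" + expectSuccess + " on entry " + i,
              expectSuccess, successPattern.matcher(line).matches());
        }
        assertNull("Unexpected event in audit log", reader.readLine());
      } finally {
        reader.close();
      }
    }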
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Wed Sep 26 22:55:00 2012
@@ -31,9 +31,7 @@ import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
-import java.io.PrintWriter;
import java.io.RandomAccessFile;
-import java.io.StringWriter;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
@@ -1238,10 +1236,8 @@ public class TestEditLog {
}
} catch (IOException e) {
} catch (Throwable t) {
- StringWriter sw = new StringWriter();
- t.printStackTrace(new PrintWriter(sw));
- fail("caught non-IOException throwable with message " +
- t.getMessage() + "\nstack trace\n" + sw.toString());
+ fail("Caught non-IOException throwable " +
+ StringUtils.stringifyException(t));
}
} finally {
if ((elfos != null) && (elfos.isOpen()))
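
For reference, StringUtils here is org.apache.hadoop.util.StringUtils, whose stringifyException() renders a Throwable's full stack trace into a String, which is what the removed StringWriter/PrintWriter pair did by hand. Approximately (a sketch, not the Hadoop source):

    import java.io.PrintWriter;
    import java.io.StringWriter;

    public class Stringify {
      // Render a Throwable's stack trace into a String.
      static String stringifyException(Throwable t) {
        StringWriter sw = new StringWriter();
        t.printStackTrace(new PrintWriter(sw, true));
        return sw.toString();
      }
    }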
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java Wed Sep 26 22:55:00 2012
@@ -116,7 +116,7 @@ public class TestFSEditLogLoader {
* automatically bumped up to the new minimum upon restart.
*/
@Test
- public void testReplicationAdjusted() throws IOException {
+ public void testReplicationAdjusted() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
// Replicate and heartbeat fast to shave a few seconds off test
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Wed Sep 26 22:55:00 2012
@@ -95,6 +95,12 @@ public class TestFsck {
"ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
"cmd=fsck\\ssrc=\\/\\sdst=null\\s" +
"perm=null");
+ static final Pattern getfileinfoPattern = Pattern.compile(
+ "allowed=.*?\\s" +
+ "ugi=.*?\\s" +
+ "ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
+ "cmd=getfileinfo\\ssrc=\\/\\sdst=null\\s" +
+ "perm=null");
static final Pattern numCorruptBlocksPattern = Pattern.compile(
".*Corrupt blocks:\t\t([0123456789]*).*");
@@ -180,10 +186,14 @@ public class TestFsck {
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
logger.setLevel(Level.OFF);
- // Ensure audit log has only one for FSCK
+ // Audit log should contain one getfileinfo and one fsck
BufferedReader reader = new BufferedReader(new FileReader(auditLogFile));
String line = reader.readLine();
assertNotNull(line);
+ assertTrue("Expected getfileinfo event not found in audit log",
+ getfileinfoPattern.matcher(line).matches());
+ line = reader.readLine();
+ assertNotNull(line);
assertTrue("Expected fsck event not found in audit log",
fsckPattern.matcher(line).matches());
assertNull("Unexpected event in audit log", reader.readLine());
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java Wed Sep 26 22:55:00 2012
@@ -53,7 +53,7 @@ public class TestProcessCorruptBlocks {
* replicas (2) is equal to replication factor (2))
*/
@Test
- public void testWhenDecreasingReplication() throws IOException {
+ public void testWhenDecreasingReplication() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -108,7 +108,7 @@ public class TestProcessCorruptBlocks {
*
*/
@Test
- public void testByAddingAnExtraDataNode() throws IOException {
+ public void testByAddingAnExtraDataNode() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -159,7 +159,7 @@ public class TestProcessCorruptBlocks {
* replicas (1) is equal to replication factor (1))
*/
@Test
- public void testWithReplicationFactorAsOne() throws IOException {
+ public void testWithReplicationFactorAsOne() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
@@ -208,7 +208,7 @@ public class TestProcessCorruptBlocks {
* Verify that all replicas are corrupt and 3 replicas are present.
*/
@Test
- public void testWithAllCorruptReplicas() throws IOException {
+ public void testWithAllCorruptReplicas() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java Wed Sep 26 22:55:00 2012
@@ -17,17 +17,27 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
+import java.net.SocketTimeoutException;
+import java.net.URL;
import java.util.Collections;
import java.util.List;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.http.HttpServerFunctionalTest;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
@@ -100,4 +110,45 @@ public class TestTransferFsImage {
cluster.shutdown();
}
}
+
+ /**
+ * Verify that an image transfer fails with a read timeout when the
+ * server stalls for longer than TransferFsImage.timeout.
+ */
+ @Test(timeout = 5000)
+ public void testImageTransferTimeout() throws Exception {
+ HttpServer testServer = HttpServerFunctionalTest.createServer("hdfs");
+ try {
+ testServer.addServlet("GetImage", "/getimage", TestGetImageServlet.class);
+ testServer.start();
+ URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
+ TransferFsImage.timeout = 2000;
+ try {
+ TransferFsImage.getFileClient(serverURL.getAuthority(), "txid=1", null,
+ null, false);
+ fail("TransferImage Should fail with timeout");
+ } catch (SocketTimeoutException e) {
+ assertEquals("Read should timeout", "Read timed out", e.getMessage());
+ }
+ } finally {
+ if (testServer != null) {
+ testServer.stop();
+ }
+ }
+ }
+
+ public static class TestGetImageServlet extends HttpServlet {
+ private static final long serialVersionUID = 1L;
+
+ @Override
+ protected void doGet(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException {
+ synchronized (this) {
+ try {
+ wait(5000);
+ } catch (InterruptedException e) {
+ // Ignore
+ }
+ }
+ }
+ }
}
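
The servlet above simply sleeps past the 2-second client timeout, so getFileClient() fails with a SocketTimeoutException. Outside of TransferFsImage, the same behavior can be reproduced with a bare HttpURLConnection (a sketch; the URL is hypothetical and "Read timed out" is the usual JDK wording the test asserts on):

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.SocketTimeoutException;
    import java.net.URL;

    public class ReadTimeoutSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:8080/getimage?txid=1");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setReadTimeout(2000); // mirrors TransferFsImage.timeout above
        try {
          InputStream in = conn.getInputStream();
          in.read(); // blocks until the server responds or the timeout fires
        } catch (SocketTimeoutException e) {
          System.out.println("timed out: " + e.getMessage());
        }
      }
    }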
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java Wed Sep 26 22:55:00 2012
@@ -89,7 +89,6 @@ public class TestWebHdfsDataLocality {
//set client address to a particular datanode
final DataNode dn = cluster.getDataNodes().get(i);
final String ipAddr = dm.getDatanode(dn.getDatanodeId()).getIpAddr();
- NamenodeWebHdfsMethods.setRemoteAddress(ipAddr);
//The chosen datanode must be the same as the client address
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java Wed Sep 26 22:55:00 2012
@@ -80,7 +80,7 @@ public class TestWebHDFS {
}
}
- @Test
+ @Test(timeout=300000)
public void testLargeFile() throws Exception {
largeFileTest(200L << 20); //200MB file length
}
@@ -202,7 +202,7 @@ public class TestWebHDFS {
}
/** Test client retry with namenode restarting. */
- @Test
+ @Test(timeout=300000)
public void testNamenodeRestart() throws Exception {
((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
final Configuration conf = WebHdfsTestUtil.createConf();
Modified: hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1390763&r1=1390762&r2=1390763&view=diff
==============================================================================
--- hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/branches/branch-trunk-win/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Wed Sep 26 22:55:00 2012
@@ -15885,6 +15885,23 @@
</comparators>
</test>
+ <!-- Test for rollEdits -->
+ <test> <!-- TESTED -->
+ <description>rollEdits: test rollEdits admin command</description>
+ <test-commands>
+ <dfs-admin-command>-fs NAMENODE -rollEdits</dfs-admin-command>
+ </test-commands>
+ <cleanup-commands>
+ <!-- no cleanup -->
+ </cleanup-commands>
+ <comparators>
+ <comparator>
+ <type>RegexpComparator</type>
+ <expected-output>New segment starts at txid \d+</expected-output>
+ </comparator>
+ </comparators>
+ </test>
+
<!-- Test for refreshNodes -->
<test> <!-- TESTED -->
<description>refreshNodes: to refresh the nodes</description>