Posted to hdfs-commits@hadoop.apache.org by sz...@apache.org on 2012/10/19 04:28:07 UTC
svn commit: r1399950 [21/27] - in
/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project: ./
hadoop-hdfs-httpfs/ hadoop-hdfs-httpfs/dev-support/
hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/
hadoop-hdfs-httpfs/src/main/java/org/apac...
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java Fri Oct 19 02:25:55 2012
@@ -28,7 +28,6 @@ import org.apache.hadoop.fs.FSDataOutput
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java Fri Oct 19 02:25:55 2012
@@ -17,37 +17,38 @@
*/
package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.net.InetSocketAddress;
import java.util.Iterator;
-import java.util.Random;
-
-import junit.framework.TestCase;
+import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.util.Time;
+import org.junit.Test;
/**
* This class tests the replication of a DFS file.
*/
-public class TestReplication extends TestCase {
+public class TestReplication {
private static final long seed = 0xDEADBEEFL;
private static final int blockSize = 8192;
private static final int fileSize = 16384;
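[Editor's note] The pattern in this first hunk recurs through the entire commit: test classes stop extending junit.framework.TestCase and instead annotate each test method with JUnit 4's @Test, pulling assertions in as static imports. A minimal before/after sketch of the migration (class and method names are illustrative, not taken from this commit):

    // JUnit 3: discovery by inheritance, methods had to be named test*.
    //   public class TestFoo extends TestCase {
    //     public void testBar() { assertTrue(...); }
    //   }

    // JUnit 4: annotation-driven, no base class required.
    import static org.junit.Assert.assertTrue;
    import org.junit.Test;

    public class TestFoo {
      @Test
      public void testBar() {          // @Test marks it; the name no longer matters
        assertTrue(1 + 1 == 2);
      }
    }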
@@ -57,19 +58,6 @@ public class TestReplication extends Tes
private static final int numDatanodes = racks.length;
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestReplication");
-
- private void writeFile(FileSystem fileSys, Path name, int repl)
- throws IOException {
- // create and write a file that contains three blocks of data
- FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
- .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
- (short) repl, blockSize);
- byte[] buffer = new byte[fileSize];
- Random rand = new Random(seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
- }
/* check that at least two nodes are on the same rack */
private void checkFile(FileSystem fileSys, Path name, int repl)
@@ -148,6 +136,7 @@ public class TestReplication extends Tes
/*
* Test if Datanode reports bad blocks during replication request
*/
+ @Test
public void testBadBlockReportOnTransfer() throws Exception {
Configuration conf = new HdfsConfiguration();
FileSystem fs = null;
@@ -217,19 +206,25 @@ public class TestReplication extends Tes
FileSystem fileSys = cluster.getFileSystem();
try {
Path file1 = new Path("/smallblocktest.dat");
- writeFile(fileSys, file1, 3);
+ //writeFile(fileSys, file1, 3);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) 3, seed);
checkFile(fileSys, file1, 3);
cleanupFile(fileSys, file1);
- writeFile(fileSys, file1, 10);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) 10, seed);
checkFile(fileSys, file1, 10);
cleanupFile(fileSys, file1);
- writeFile(fileSys, file1, 4);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) 4, seed);
checkFile(fileSys, file1, 4);
cleanupFile(fileSys, file1);
- writeFile(fileSys, file1, 1);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) 1, seed);
checkFile(fileSys, file1, 1);
cleanupFile(fileSys, file1);
- writeFile(fileSys, file1, 2);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) 2, seed);
checkFile(fileSys, file1, 2);
cleanupFile(fileSys, file1);
} finally {
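[Editor's note] Each test's private writeFile helper is retired in favor of the shared DFSTestUtil.createFile overload; as used above, its arguments are (fs, path, bufferLen, fileLen, blockSize, replication, seed), so passing fileSize twice writes the whole file through one fileSize-byte buffer, with seeded random contents that stay reproducible across runs. (The commented-out //writeFile line left at the first call site is dead code the commit could have dropped.) A usage sketch mirroring the constants above:

    // Equivalent of the old writeFile(fileSys, file1, 3): fileSize random bytes,
    // blockSize-sized blocks, replication factor 3, reproducible via seed.
    DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
        (short) 3, seed);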
@@ -239,11 +234,13 @@ public class TestReplication extends Tes
}
+ @Test
public void testReplicationSimulatedStorag() throws IOException {
runReplication(true);
}
+ @Test
public void testReplication() throws IOException {
runReplication(false);
}
@@ -253,7 +250,7 @@ public class TestReplication extends Tes
ClientProtocol namenode,
int expected, long maxWaitSec)
throws IOException {
- long start = System.currentTimeMillis();
+ long start = Time.now();
//wait for all the blocks to be replicated;
LOG.info("Checking for block replication for " + filename);
@@ -279,7 +276,7 @@ public class TestReplication extends Tes
}
if (maxWaitSec > 0 &&
- (System.currentTimeMillis() - start) > (maxWaitSec * 1000)) {
+ (Time.now() - start) > (maxWaitSec * 1000)) {
throw new IOException("Timedout while waiting for all blocks to " +
" be replicated for " + filename);
}
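[Editor's note] System.currentTimeMillis() gives way to org.apache.hadoop.util.Time.now() here and throughout the commit; routing all wall-clock reads through one utility keeps them consistent and easier to audit or stub. The timeout loop above follows the usual shape (replicated() below is a placeholder for the real check, not code from this commit):

    long start = Time.now();
    while (!replicated()) {
      if (maxWaitSec > 0 && (Time.now() - start) > (maxWaitSec * 1000)) {
        throw new IOException("Timed out waiting for block replication");
      }
    }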
@@ -297,6 +294,7 @@ public class TestReplication extends Tes
* two of the blocks and removes one of the replicas. Expected behavior is
* that missing replica will be copied from one valid source.
*/
+ @Test
public void testPendingReplicationRetry() throws IOException {
MiniDFSCluster cluster = null;
@@ -399,6 +397,7 @@ public class TestReplication extends Tes
* Test if replication can detect mismatched length on-disk blocks
* @throws Exception
*/
+ @Test
public void testReplicateLenMismatchedBlock() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(2).build();
try {
@@ -412,8 +411,8 @@ public class TestReplication extends Tes
}
}
- private void changeBlockLen(MiniDFSCluster cluster,
- int lenDelta) throws IOException, InterruptedException {
+ private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
+ throws IOException, InterruptedException, TimeoutException {
final Path fileName = new Path("/file1");
final short REPLICATION_FACTOR = (short)1;
final FileSystem fs = cluster.getFileSystem();
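[Editor's note] changeBlockLen also widens its throws clause to include TimeoutException, presumably because the DFSTestUtil wait helpers it calls now surface timeouts as a distinct exception instead of a generic IOException:

    private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
        throws IOException, InterruptedException, TimeoutException { ... }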
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestRestartDFS.java Fri Oct 19 02:25:55 2012
@@ -18,17 +18,19 @@
package org.apache.hadoop.hdfs;
-import junit.framework.TestCase;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.junit.Test;
/**
* A JUnit test for checking if restarting DFS preserves integrity.
*/
-public class TestRestartDFS extends TestCase {
+public class TestRestartDFS {
public void runTests(Configuration conf, boolean serviceTest) throws Exception {
MiniDFSCluster cluster = null;
DFSTestUtil files = new DFSTestUtil.Builder().setName("TestRestartDFS").
@@ -110,6 +112,7 @@ public class TestRestartDFS extends Test
}
}
/** check if DFS remains in proper condition after a restart */
+ @Test
public void testRestartDFS() throws Exception {
final Configuration conf = new HdfsConfiguration();
runTests(conf, false);
@@ -118,6 +121,7 @@ public class TestRestartDFS extends Test
/** check if DFS remains in proper condition after a restart
* this rerun is with 2 ports enabled for RPC in the namenode
*/
+ @Test
public void testRestartDualPortDFS() throws Exception {
final Configuration conf = new HdfsConfiguration();
runTests(conf, true);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSafeMode.java Fri Oct 19 02:25:55 2012
@@ -18,30 +18,33 @@
package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
-
-import static org.junit.Assert.*;
-import org.junit.Before;
import org.junit.After;
+import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Supplier;
@@ -278,36 +281,43 @@ public class TestSafeMode {
dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER));
runFsFun("Set quota while in SM", new FSRun() {
+ @Override
public void run(FileSystem fs) throws IOException {
((DistributedFileSystem)fs).setQuota(file1, 1, 1);
}});
runFsFun("Set perm while in SM", new FSRun() {
+ @Override
public void run(FileSystem fs) throws IOException {
fs.setPermission(file1, FsPermission.getDefault());
}});
runFsFun("Set owner while in SM", new FSRun() {
+ @Override
public void run(FileSystem fs) throws IOException {
fs.setOwner(file1, "user", "group");
}});
runFsFun("Set repl while in SM", new FSRun() {
+ @Override
public void run(FileSystem fs) throws IOException {
fs.setReplication(file1, (short)1);
}});
runFsFun("Append file while in SM", new FSRun() {
+ @Override
public void run(FileSystem fs) throws IOException {
DFSTestUtil.appendFile(fs, file1, "new bytes");
}});
runFsFun("Delete file while in SM", new FSRun() {
+ @Override
public void run(FileSystem fs) throws IOException {
fs.delete(file1, false);
}});
runFsFun("Rename file while in SM", new FSRun() {
+ @Override
public void run(FileSystem fs) throws IOException {
fs.rename(file1, new Path("file2"));
}});
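[Editor's note] The only change in this hunk is adding @Override to each anonymous FSRun callback, so the compiler will flag any drift from the interface's method signature. FSRun and runFsFun are defined elsewhere in TestSafeMode and do not appear in this diff; inferred from the call sites, their shape is presumably:

    // Hypothetical sketch, inferred from the call sites above:
    interface FSRun {
      void run(FileSystem fs) throws IOException;
    }

    // runFsFun(message, f) presumably invokes f.run(fs) and asserts that the
    // operation is rejected (SafeModeException) while the namenode is in safe mode.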
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSeekBug.java Fri Oct 19 02:25:55 2012
@@ -17,37 +17,29 @@
*/
package org.apache.hadoop.hdfs;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Random;
-import junit.framework.TestCase;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumFileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
+import org.junit.Test;
/**
* This class tests the presence of seek bug as described
* in HADOOP-508
*/
-public class TestSeekBug extends TestCase {
+public class TestSeekBug {
static final long seed = 0xDEADBEEFL;
static final int ONEMB = 1 << 20;
- private void writeFile(FileSystem fileSys, Path name) throws IOException {
- // create and write a file that contains 1MB
- DataOutputStream stm = fileSys.create(name);
- byte[] buffer = new byte[ONEMB];
- Random rand = new Random(seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
- }
-
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
assertEquals(message+" byte "+(from+idx)+" differs. expected "+
@@ -123,13 +115,16 @@ public class TestSeekBug extends TestCas
/**
* Test if the seek bug exists in FSDataInputStream in DFS.
*/
+ @Test
public void testSeekBugDFS() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fileSys = cluster.getFileSystem();
try {
Path file1 = new Path("seektest.dat");
- writeFile(fileSys, file1);
+ DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB,
+ fileSys.getDefaultBlockSize(file1),
+ fileSys.getDefaultReplication(file1), seed);
seekReadFile(fileSys, file1);
smallReadSeek(fileSys, file1);
cleanupFile(fileSys, file1);
@@ -142,12 +137,15 @@ public class TestSeekBug extends TestCas
/**
* Tests if the seek bug exists in FSDataInputStream in LocalFS.
*/
+ @Test
public void testSeekBugLocalFS() throws IOException {
Configuration conf = new HdfsConfiguration();
FileSystem fileSys = FileSystem.getLocal(conf);
try {
Path file1 = new Path("build/test/data", "seektest.dat");
- writeFile(fileSys, file1);
+ DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB,
+ fileSys.getDefaultBlockSize(file1),
+ fileSys.getDefaultReplication(file1), seed);
seekReadFile(fileSys, file1);
cleanupFile(fileSys, file1);
} finally {
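[Editor's note] Besides the JUnit migration, the rewritten createFile calls use the Path-aware FileSystem.getDefaultBlockSize(Path) and getDefaultReplication(Path) overloads, which let a filesystem resolve defaults per path (useful for mount-point-style filesystems) rather than assuming one global value:

    // Ask the filesystem for the defaults that apply to this particular path:
    long blockSize = fileSys.getDefaultBlockSize(file1);
    short repl = fileSys.getDefaultReplication(file1);
    DFSTestUtil.createFile(fileSys, file1, ONEMB, ONEMB, blockSize, repl, seed);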
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java Fri Oct 19 02:25:55 2012
@@ -17,26 +17,33 @@
*/
package org.apache.hadoop.hdfs;
-import junit.framework.TestCase;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.text.SimpleDateFormat;
+import java.util.Date;
import java.util.Random;
-import java.net.*;
+
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import java.text.SimpleDateFormat;
-import java.util.Date;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.util.Time;
+import org.junit.Test;
/**
* This class tests the access time on files.
*
*/
-public class TestSetTimes extends TestCase {
+public class TestSetTimes {
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
static final int fileSize = 16384;
@@ -77,6 +84,7 @@ public class TestSetTimes extends TestCa
/**
* Tests mod & access time in DFS.
*/
+ @Test
public void testTimes() throws IOException {
Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
@@ -159,8 +167,8 @@ public class TestSetTimes extends TestCa
assertTrue(atime2 == stat.getAccessTime());
assertTrue(mtime2 == mtime3);
- long mtime4 = System.currentTimeMillis() - (3600L * 1000L);
- long atime4 = System.currentTimeMillis();
+ long mtime4 = Time.now() - (3600L * 1000L);
+ long atime4 = Time.now();
fileSys.setTimes(dir1, mtime4, atime4);
// check new modification time on file
stat = fileSys.getFileStatus(dir1);
@@ -208,6 +216,7 @@ public class TestSetTimes extends TestCa
/**
* Tests mod time change at close in DFS.
*/
+ @Test
public void testTimesAtClose() throws IOException {
Configuration conf = new HdfsConfiguration();
final int MAX_IDLE_TIME = 2000; // 2s
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepDecreasing.java Fri Oct 19 02:25:55 2012
@@ -19,9 +19,10 @@ package org.apache.hadoop.hdfs;
import java.io.IOException;
-import junit.framework.TestCase;
+import org.junit.Test;
-public class TestSetrepDecreasing extends TestCase {
+public class TestSetrepDecreasing {
+ @Test
public void testSetrepDecreasing() throws IOException {
TestSetrepIncreasing.setrep(5, 3, false);
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetrepIncreasing.java Fri Oct 19 02:25:55 2012
@@ -17,14 +17,21 @@
*/
package org.apache.hadoop.hdfs;
-import junit.framework.TestCase;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.junit.Test;
-public class TestSetrepIncreasing extends TestCase {
+public class TestSetrepIncreasing {
static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOException {
Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
@@ -68,9 +75,11 @@ public class TestSetrepIncreasing extend
}
}
+ @Test
public void testSetrepIncreasing() throws IOException {
setrep(3, 7, false);
}
+ @Test
public void testSetrepIncreasingSimulatedStorage() throws IOException {
setrep(3, 7, true);
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java Fri Oct 19 02:25:55 2012
@@ -40,8 +40,10 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
@@ -94,8 +96,7 @@ public class TestShortCircuitLocalRead {
// Now read using a different API.
actual = new byte[expected.length-readOffset];
stm = fs.open(name);
- long skipped = stm.skip(readOffset);
- Assert.assertEquals(skipped, readOffset);
+ IOUtils.skipFully(stm, readOffset);
//Read a small number of bytes first.
int nread = stm.read(actual, 0, 3);
nread += stm.read(actual, nread, 2);
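[Editor's note] The skip-based positioning was subtly racy: InputStream.skip(n) may legally skip fewer than n bytes even mid-stream, so asserting skipped == readOffset could fail spuriously. IOUtils.skipFully instead loops until the requested count is consumed, throwing if the stream ends first. A rough sketch of the guarantee (not the exact library code):

    // Keep skipping until readOffset bytes are gone or the stream runs out.
    long remaining = readOffset;
    while (remaining > 0) {
      long skipped = stm.skip(remaining);
      if (skipped <= 0) {
        throw new IOException("Premature EOF while skipping");
      }
      remaining -= skipped;
    }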
@@ -114,6 +115,14 @@ public class TestShortCircuitLocalRead {
stm.close();
}
+ private static byte [] arrayFromByteBuffer(ByteBuffer buf) {
+ ByteBuffer alt = buf.duplicate();
+ alt.clear();
+ byte[] arr = new byte[alt.remaining()];
+ alt.get(arr);
+ return arr;
+ }
+
/**
* Verifies that reading a file with the direct read(ByteBuffer) api gives the expected set of bytes.
*/
@@ -121,10 +130,9 @@ public class TestShortCircuitLocalRead {
int readOffset) throws IOException {
HdfsDataInputStream stm = (HdfsDataInputStream)fs.open(name);
- ByteBuffer actual = ByteBuffer.allocate(expected.length - readOffset);
+ ByteBuffer actual = ByteBuffer.allocateDirect(expected.length - readOffset);
- long skipped = stm.skip(readOffset);
- Assert.assertEquals(skipped, readOffset);
+ IOUtils.skipFully(stm, readOffset);
actual.limit(3);
@@ -136,7 +144,8 @@ public class TestShortCircuitLocalRead {
// Read across chunk boundary
actual.limit(Math.min(actual.capacity(), nread + 517));
nread += stm.read(actual);
- checkData(actual.array(), readOffset, expected, nread, "A few bytes");
+ checkData(arrayFromByteBuffer(actual), readOffset, expected, nread,
+ "A few bytes");
//Now read rest of it
actual.limit(actual.capacity());
while (actual.hasRemaining()) {
@@ -147,7 +156,7 @@ public class TestShortCircuitLocalRead {
}
nread += nbytes;
}
- checkData(actual.array(), readOffset, expected, "Read 3");
+ checkData(arrayFromByteBuffer(actual), readOffset, expected, "Read 3");
stm.close();
}
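[Editor's note] Switching to ByteBuffer.allocateDirect exercises the direct-buffer read path, but a direct buffer has no backing array: calling actual.array() throws UnsupportedOperationException. That is what the new arrayFromByteBuffer helper is for; it copies through a duplicate(), so the original buffer's position and limit are untouched, and the clear() on the duplicate makes it a full-capacity copy. Usage within the test class:

    ByteBuffer direct = ByteBuffer.allocateDirect(64);
    // direct.array() would throw UnsupportedOperationException here.
    byte[] copy = arrayFromByteBuffer(direct);  // full-capacity copy; 'direct' unchanged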
@@ -224,7 +233,8 @@ public class TestShortCircuitLocalRead {
@Test
public void testGetBlockLocalPathInfo() throws IOException, InterruptedException {
final Configuration conf = new Configuration();
- conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY, "alloweduser");
+ conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
+ "alloweduser1,alloweduser2");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(true).build();
cluster.waitActive();
@@ -232,8 +242,10 @@ public class TestShortCircuitLocalRead {
FileSystem fs = cluster.getFileSystem();
try {
DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
- UserGroupInformation aUgi = UserGroupInformation
- .createRemoteUser("alloweduser");
+ UserGroupInformation aUgi1 =
+ UserGroupInformation.createRemoteUser("alloweduser1");
+ UserGroupInformation aUgi2 =
+ UserGroupInformation.createRemoteUser("alloweduser2");
LocatedBlocks lb = cluster.getNameNode().getRpcServer()
.getBlockLocations("/tmp/x", 0, 16);
// Create a new block object, because the block inside LocatedBlock at
@@ -241,22 +253,38 @@ public class TestShortCircuitLocalRead {
ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
- ClientDatanodeProtocol proxy = aUgi
+ ClientDatanodeProtocol proxy = aUgi1
.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
@Override
public ClientDatanodeProtocol run() throws Exception {
return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf,
- 60000);
+ 60000, false);
}
});
- //This should succeed
+ // This should succeed
BlockLocalPathInfo blpi = proxy.getBlockLocalPathInfo(blk, token);
Assert.assertEquals(
DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
blpi.getBlockPath());
- // Now try with a not allowed user.
+ // Try with the other allowed user
+ proxy = aUgi2
+ .doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
+ @Override
+ public ClientDatanodeProtocol run() throws Exception {
+ return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf,
+ 60000, false);
+ }
+ });
+
+ // This should succeed as well
+ blpi = proxy.getBlockLocalPathInfo(blk, token);
+ Assert.assertEquals(
+ DataNodeTestUtils.getFSDataset(dn).getBlockLocalPathInfo(blk).getBlockPath(),
+ blpi.getBlockPath());
+
+ // Now try with a disallowed user
UserGroupInformation bUgi = UserGroupInformation
.createRemoteUser("notalloweduser");
proxy = bUgi
@@ -264,7 +292,7 @@ public class TestShortCircuitLocalRead {
@Override
public ClientDatanodeProtocol run() throws Exception {
return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf,
- 60000);
+ 60000, false);
}
});
try {
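[Editor's note] Two related API changes show up in this test. First, the whitelist behind DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY takes a comma-separated list of users rather than a single name, which the dual aUgi1/aUgi2 checks exercise. Second, createClientDatanodeProtocolProxy gains a trailing boolean (false here), which appears to be a connect-to-datanode-via-hostname flag introduced alongside this change set. Whitelist sketch, straight from the hunk above:

    // Multiple users may now be whitelisted for local block path access:
    conf.set(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
        "alloweduser1,alloweduser2");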
@@ -363,7 +391,7 @@ public class TestShortCircuitLocalRead {
stm.write(dataToWrite);
stm.close();
- long start = System.currentTimeMillis();
+ long start = Time.now();
final int iteration = 20;
Thread[] threads = new Thread[threadCount];
for (int i = 0; i < threadCount; i++) {
@@ -386,7 +414,7 @@ public class TestShortCircuitLocalRead {
for (int i = 0; i < threadCount; i++) {
threads[i].join();
}
- long end = System.currentTimeMillis();
+ long end = Time.now();
System.out.println("Iteration " + iteration + " took " + (end - start));
fs.delete(file1, false);
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSmallBlock.java Fri Oct 19 02:25:55 2012
@@ -17,39 +17,31 @@
*/
package org.apache.hadoop.hdfs;
-import junit.framework.TestCase;
-import java.io.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
import java.util.Random;
+
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.junit.Test;
/**
* This class tests the creation of files with block-size
* smaller than the default buffer size of 4K.
*/
-public class TestSmallBlock extends TestCase {
+public class TestSmallBlock {
static final long seed = 0xDEADBEEFL;
static final int blockSize = 1;
static final int fileSize = 20;
boolean simulatedStorage = false;
-
- private void writeFile(FileSystem fileSys, Path name) throws IOException {
- // create and write a file that contains three blocks of data
- FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
- .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
- (short) 1, blockSize);
- byte[] buffer = new byte[fileSize];
- Random rand = new Random(seed);
- rand.nextBytes(buffer);
- stm.write(buffer);
- stm.close();
- }
private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
for (int idx = 0; idx < actual.length; idx++) {
@@ -90,6 +82,7 @@ public class TestSmallBlock extends Test
/**
* Tests small block size in DFS.
*/
+ @Test
public void testSmallBlock() throws IOException {
Configuration conf = new HdfsConfiguration();
if (simulatedStorage) {
@@ -100,7 +93,8 @@ public class TestSmallBlock extends Test
FileSystem fileSys = cluster.getFileSystem();
try {
Path file1 = new Path("smallblocktest.dat");
- writeFile(fileSys, file1);
+ DFSTestUtil.createFile(fileSys, file1, fileSize, fileSize, blockSize,
+ (short) 1, seed);
checkFile(fileSys, file1);
cleanupFile(fileSys, file1);
} finally {
@@ -108,6 +102,7 @@ public class TestSmallBlock extends Test
cluster.shutdown();
}
}
+ @Test
public void testSmallBlockSimulatedStorage() throws IOException {
simulatedStorage = true;
testSmallBlock();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java Fri Oct 19 02:25:55 2012
@@ -17,12 +17,12 @@
*/
package org.apache.hadoop.hdfs;
+import java.io.OutputStream;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
-
-import java.io.OutputStream;
import org.junit.Test;
/**
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java Fri Oct 19 02:25:55 2012
@@ -20,6 +20,9 @@
package org.apache.hadoop.hdfs;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
+
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
@@ -28,24 +31,21 @@ import java.net.URI;
import java.util.Arrays;
import java.util.Collections;
import java.util.zip.CRC32;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE;
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE;
-
import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestExtendedBlock.java Fri Oct 19 02:25:55 2012
@@ -17,7 +17,8 @@
*/
package org.apache.hadoop.hdfs.protocol;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
import org.junit.Test;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/TestLayoutVersion.java Fri Oct 19 02:25:55 2012
@@ -17,11 +17,13 @@
*/
package org.apache.hadoop.hdfs.protocol;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
import java.util.EnumSet;
-import static org.junit.Assert.*;
-import org.junit.Test;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
+import org.junit.Test;
/**
* Test for {@link LayoutVersion}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Fri Oct 19 02:25:55 2012
@@ -17,7 +17,8 @@
*/
package org.apache.hadoop.hdfs.protocolPB;
-import static junit.framework.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import java.util.ArrayList;
import java.util.Arrays;
@@ -27,9 +28,9 @@ import org.apache.hadoop.hdfs.DFSTestUti
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
@@ -57,10 +58,10 @@ import org.apache.hadoop.hdfs.server.com
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -79,6 +80,12 @@ import com.google.common.collect.Lists;
* Tests for {@link PBHelper}
*/
public class TestPBHelper {
+
+ /**
+ * Used for asserting equality on doubles.
+ */
+ private static final double DELTA = 0.000001;
+
@Test
public void testConvertNamenodeRole() {
assertEquals(NamenodeRoleProto.BACKUP,
@@ -283,11 +290,12 @@ public class TestPBHelper {
private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
assertEquals(dn1.getAdminState(), dn2.getAdminState());
assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
- assertEquals(dn1.getBlockPoolUsedPercent(), dn2.getBlockPoolUsedPercent());
+ assertEquals(dn1.getBlockPoolUsedPercent(),
+ dn2.getBlockPoolUsedPercent(), DELTA);
assertEquals(dn1.getCapacity(), dn2.getCapacity());
assertEquals(dn1.getDatanodeReport(), dn2.getDatanodeReport());
assertEquals(dn1.getDfsUsed(), dn1.getDfsUsed());
- assertEquals(dn1.getDfsUsedPercent(), dn1.getDfsUsedPercent());
+ assertEquals(dn1.getDfsUsedPercent(), dn1.getDfsUsedPercent(), DELTA);
assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
assertEquals(dn1.getHostName(), dn2.getHostName());
assertEquals(dn1.getInfoPort(), dn2.getInfoPort());
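[Editor's note] JUnit 4's Assert deprecates assertEquals(double, double) without a tolerance, since exact floating-point equality is brittle; the new DELTA constant supplies an explicit epsilon:

    // Passes when |expected - actual| <= DELTA:
    assertEquals(dn1.getBlockPoolUsedPercent(),
        dn2.getBlockPoolUsedPercent(), DELTA);

Note in passing that the getDfsUsed and getDfsUsedPercent lines compare dn1 against itself on both sides, which looks like a pre-existing copy-paste slip (dn2 was presumably intended) that this commit leaves in place.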
@@ -373,14 +381,12 @@ public class TestPBHelper {
@Test
public void testConvertNamespaceInfo() {
- NamespaceInfo info = new NamespaceInfo(37, "clusterID", "bpID", 2300, 53);
+ NamespaceInfo info = new NamespaceInfo(37, "clusterID", "bpID", 2300);
NamespaceInfoProto proto = PBHelper.convert(info);
NamespaceInfo info2 = PBHelper.convert(proto);
compare(info, info2); //Compare the StorageInfo
assertEquals(info.getBlockPoolID(), info2.getBlockPoolID());
assertEquals(info.getBuildVersion(), info2.getBuildVersion());
- assertEquals(info.getDistributedUpgradeVersion(),
- info2.getDistributedUpgradeVersion());
}
private void compare(StorageInfo expected, StorageInfo actual) {
@@ -401,9 +407,9 @@ public class TestPBHelper {
@Test
public void testConvertLocatedBlock() {
DatanodeInfo [] dnInfos = {
- DFSTestUtil.getLocalDatanodeInfo("1.1.1.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
- DFSTestUtil.getLocalDatanodeInfo("2.2.2.2", "h2", AdminStates.DECOMMISSIONED),
- DFSTestUtil.getLocalDatanodeInfo("3.3.3.3", "h3", AdminStates.NORMAL)
+ DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
+ DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h2", AdminStates.DECOMMISSIONED),
+ DFSTestUtil.getLocalDatanodeInfo("127.0.0.1", "h3", AdminStates.NORMAL)
};
LocatedBlock lb = new LocatedBlock(
new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
@@ -432,7 +438,7 @@ public class TestPBHelper {
DatanodeRegistration reg2 = PBHelper.convert(proto);
compare(reg.getStorageInfo(), reg2.getStorageInfo());
compare(reg.getExportedKeys(), reg2.getExportedKeys());
- compare((DatanodeID)reg, (DatanodeID)reg2);
+ compare(reg, reg2);
assertEquals(reg.getSoftwareVersion(), reg2.getSoftwareVersion());
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestClientProtocolWithDelegationToken.java Fri Oct 19 02:25:55 2012
@@ -80,9 +80,11 @@ public class TestClientProtocolWithDeleg
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
3600000, mockNameSys);
sm.startThreads();
- final Server server = RPC.getServer(ClientProtocol.class, mockNN, ADDRESS,
- 0, 5, true, conf, sm);
-
+ final Server server = new RPC.Builder(conf)
+ .setProtocol(ClientProtocol.class).setInstance(mockNN)
+ .setBindAddress(ADDRESS).setPort(0).setNumHandlers(5).setVerbose(true)
+ .setSecretManager(sm).build();
+
server.start();
final UserGroupInformation current = UserGroupInformation.getCurrentUser();
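[Editor's note] The deprecated static RPC.getServer overloads are replaced by the fluent RPC.Builder, which reads better than a long positional argument list and lets new options be added without another overload. The shape, as used above:

    Server server = new RPC.Builder(conf)
        .setProtocol(ClientProtocol.class)
        .setInstance(mockNN)              // implementation to expose
        .setBindAddress(ADDRESS)
        .setPort(0)                       // 0 lets the OS pick a free port
        .setNumHandlers(5)
        .setVerbose(true)
        .setSecretManager(sm)
        .build();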
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java Fri Oct 19 02:25:55 2012
@@ -20,15 +20,14 @@ package org.apache.hadoop.hdfs.security;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.net.URI;
import java.security.PrivilegedExceptionAction;
-import java.util.List;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
@@ -49,6 +48,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
@@ -153,25 +153,18 @@ public class TestDelegationToken {
}
@Test
- public void testDelegationTokenDFSApi() throws Exception {
- DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
- final Token<DelegationTokenIdentifier> token = dfs.getDelegationToken("JobTracker");
- DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
- byte[] tokenId = token.getIdentifier();
- identifier.readFields(new DataInputStream(
- new ByteArrayInputStream(tokenId)));
- LOG.info("A valid token should have non-null password, and should be renewed successfully");
- Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
- dtSecretManager.renewToken(token, "JobTracker");
- UserGroupInformation.createRemoteUser("JobTracker").doAs(
- new PrivilegedExceptionAction<Object>() {
- @Override
- public Object run() throws Exception {
- token.renew(config);
- token.cancel(config);
- return null;
- }
- });
+ public void testAddDelegationTokensDFSApi() throws Exception {
+ UserGroupInformation ugi = UserGroupInformation.createRemoteUser("JobTracker");
+ DistributedFileSystem dfs = cluster.getFileSystem();
+ Credentials creds = new Credentials();
+ final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
+ Assert.assertEquals(1, tokens.length);
+ Assert.assertEquals(1, creds.numberOfTokens());
+ checkTokenIdentifier(ugi, tokens[0]);
+
+ final Token<?> tokens2[] = dfs.addDelegationTokens("JobTracker", creds);
+ Assert.assertEquals(0, tokens2.length); // already have token
+ Assert.assertEquals(1, creds.numberOfTokens());
}
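[Editor's note] getDelegationToken(renewer) is superseded by FileSystem.addDelegationTokens(renewer, Credentials), which both fetches tokens and records them in the supplied Credentials; a repeat call is a no-op for tokens the credentials already hold, which is exactly what the rewritten test asserts:

    Credentials creds = new Credentials();
    Token<?>[] fresh = dfs.addDelegationTokens("JobTracker", creds); // length 1
    Token<?>[] again = dfs.addDelegationTokens("JobTracker", creds); // length 0: cached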
@Test
@@ -190,60 +183,36 @@ public class TestDelegationToken {
}
});
- { //test getDelegationToken(..)
- final Token<DelegationTokenIdentifier> token = webhdfs
- .getDelegationToken("JobTracker");
- DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
- byte[] tokenId = token.getIdentifier();
- identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
- LOG.info("A valid token should have non-null password, and should be renewed successfully");
- Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
- dtSecretManager.renewToken(token, "JobTracker");
- ugi.doAs(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- token.renew(config);
- token.cancel(config);
- return null;
- }
- });
- }
-
- { //test getDelegationTokens(..)
- final List<Token<?>> tokenlist = webhdfs.getDelegationTokens("JobTracker");
- DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
- @SuppressWarnings("unchecked")
- final Token<DelegationTokenIdentifier> token = (Token<DelegationTokenIdentifier>)tokenlist.get(0);
- byte[] tokenId = token.getIdentifier();
- identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
- LOG.info("A valid token should have non-null password, and should be renewed successfully");
- Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
- dtSecretManager.renewToken(token, "JobTracker");
- ugi.doAs(new PrivilegedExceptionAction<Void>() {
- @Override
- public Void run() throws Exception {
- token.renew(config);
- token.cancel(config);
- return null;
- }
- });
+ { //test addDelegationTokens(..)
+ Credentials creds = new Credentials();
+ final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
+ Assert.assertEquals(1, tokens.length);
+ Assert.assertEquals(1, creds.numberOfTokens());
+ Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
+ checkTokenIdentifier(ugi, tokens[0]);
+ final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
+ Assert.assertEquals(0, tokens2.length);
}
}
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWithDoAs() throws Exception {
- final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
- final Token<DelegationTokenIdentifier> token =
- dfs.getDelegationToken("JobTracker");
+ final DistributedFileSystem dfs = cluster.getFileSystem();
+ final Credentials creds = new Credentials();
+ final Token<?> tokens[] = dfs.addDelegationTokens("JobTracker", creds);
+ Assert.assertEquals(1, tokens.length);
+ @SuppressWarnings("unchecked")
+ final Token<DelegationTokenIdentifier> token =
+ (Token<DelegationTokenIdentifier>) tokens[0];
final UserGroupInformation longUgi = UserGroupInformation
.createRemoteUser("JobTracker/foo.com@FOO.COM");
final UserGroupInformation shortUgi = UserGroupInformation
.createRemoteUser("JobTracker");
longUgi.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
public Object run() throws IOException {
- final DistributedFileSystem dfs = (DistributedFileSystem) cluster
- .getFileSystem();
+ final DistributedFileSystem dfs = cluster.getFileSystem();
try {
//try renew with long name
dfs.renewDelegationToken(token);
@@ -254,17 +223,17 @@ public class TestDelegationToken {
}
});
shortUgi.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
public Object run() throws IOException {
- final DistributedFileSystem dfs = (DistributedFileSystem) cluster
- .getFileSystem();
+ final DistributedFileSystem dfs = cluster.getFileSystem();
dfs.renewDelegationToken(token);
return null;
}
});
longUgi.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
public Object run() throws IOException {
- final DistributedFileSystem dfs = (DistributedFileSystem) cluster
- .getFileSystem();
+ final DistributedFileSystem dfs = cluster.getFileSystem();
try {
//try cancel with long name
dfs.cancelDelegationToken(token);
@@ -301,7 +270,7 @@ public class TestDelegationToken {
NameNodeAdapter.getDtSecretManager(nn.getNamesystem());
assertFalse("Secret manager should not run in safe mode", sm.isRunning());
- NameNodeAdapter.leaveSafeMode(nn, false);
+ NameNodeAdapter.leaveSafeMode(nn);
assertTrue("Secret manager should start when safe mode is exited",
sm.isRunning());
@@ -322,4 +291,33 @@ public class TestDelegationToken {
assertFalse(nn.isInSafeMode());
assertTrue(sm.isRunning());
}
+
+ @SuppressWarnings("unchecked")
+ private void checkTokenIdentifier(UserGroupInformation ugi, final Token<?> token)
+ throws Exception {
+ Assert.assertNotNull(token);
+ // should be able to use token.decodeIdentifier() but webhdfs isn't
+ // registered with the service loader for token decoding
+ DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+ byte[] tokenId = token.getIdentifier();
+ DataInputStream in = new DataInputStream(new ByteArrayInputStream(tokenId));
+ try {
+ identifier.readFields(in);
+ } finally {
+ in.close();
+ }
+ Assert.assertNotNull(identifier);
+ LOG.info("A valid token should have non-null password, and should be renewed successfully");
+ Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
+ dtSecretManager.renewToken((Token<DelegationTokenIdentifier>) token, "JobTracker");
+ ugi.doAs(
+ new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ token.renew(config);
+ token.cancel(config);
+ return null;
+ }
+ });
+ }
}
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java Fri Oct 19 02:25:55 2012
@@ -44,7 +44,6 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -136,16 +135,15 @@ public class TestDelegationTokenForProxy
final UserGroupInformation proxyUgi = UserGroupInformation
.createProxyUserForTesting(PROXY_USER, ugi, GROUP_NAMES);
try {
- Token<DelegationTokenIdentifier> token = proxyUgi
- .doAs(new PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
- public Token<DelegationTokenIdentifier> run() throws IOException {
- DistributedFileSystem dfs = (DistributedFileSystem) cluster
- .getFileSystem();
- return dfs.getDelegationToken("RenewerUser");
+ Token<?>[] tokens = proxyUgi
+ .doAs(new PrivilegedExceptionAction<Token<?>[]>() {
+ @Override
+ public Token<?>[] run() throws IOException {
+ return cluster.getFileSystem().addDelegationTokens("RenewerUser", null);
}
});
DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
- byte[] tokenId = token.getIdentifier();
+ byte[] tokenId = tokens[0].getIdentifier();
identifier.readFields(new DataInputStream(new ByteArrayInputStream(
tokenId)));
Assert.assertEquals(identifier.getUser().getUserName(), PROXY_USER);
@@ -205,7 +203,7 @@ public class TestDelegationTokenForProxy
final PutOpParam.Op op = PutOpParam.Op.CREATE;
final URL url = WebHdfsTestUtil.toUrl(webhdfs, op, f, new DoAsParam(PROXY_USER));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn = WebHdfsTestUtil.twoStepWrite(conn, op);
+ conn = WebHdfsTestUtil.twoStepWrite(webhdfs, op, conn);
final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
out.write("Hello, webhdfs user!".getBytes());
out.close();
@@ -220,7 +218,7 @@ public class TestDelegationTokenForProxy
final PostOpParam.Op op = PostOpParam.Op.APPEND;
final URL url = WebHdfsTestUtil.toUrl(webhdfs, op, f, new DoAsParam(PROXY_USER));
HttpURLConnection conn = (HttpURLConnection) url.openConnection();
- conn = WebHdfsTestUtil.twoStepWrite(conn, op);
+ conn = WebHdfsTestUtil.twoStepWrite(webhdfs, op, conn);
final FSDataOutputStream out = WebHdfsTestUtil.write(webhdfs, op, conn, 4096);
out.write("\nHello again!".getBytes());
out.close();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/SecurityTestUtil.java Fri Oct 19 02:25:55 2012
@@ -20,8 +20,6 @@ package org.apache.hadoop.hdfs.security.
import java.io.IOException;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.security.token.Token;
/** Utilities for security tests */
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Fri Oct 19 02:25:55 2012
@@ -70,9 +70,11 @@ import org.apache.hadoop.security.SaslRp
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Assume;
+import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
@@ -86,14 +88,6 @@ public class TestBlockToken {
public static final Log LOG = LogFactory.getLog(TestBlockToken.class);
private static final String ADDRESS = "0.0.0.0";
- static final String SERVER_PRINCIPAL_KEY = "test.ipc.server.principal";
- private static Configuration conf;
- static {
- conf = new Configuration();
- conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
- UserGroupInformation.setConfiguration(conf);
- }
-
static {
((Log4JLogger) Client.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) Server.LOG).getLogger().setLevel(Level.ALL);
@@ -110,6 +104,13 @@ public class TestBlockToken {
ExtendedBlock block1 = new ExtendedBlock("0", 0L);
ExtendedBlock block2 = new ExtendedBlock("10", 10L);
ExtendedBlock block3 = new ExtendedBlock("-10", -108L);
+
+ @Before
+ public void disableKerberos() {
+ Configuration conf = new Configuration();
+ conf.set(HADOOP_SECURITY_AUTHENTICATION, "simple");
+ UserGroupInformation.setConfiguration(conf);
+ }
private static class GetLengthAnswer implements
Answer<GetReplicaVisibleLengthResponseProto> {
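The static initializer deleted above set kerberos authentication once, at class-load time, through UserGroupInformation.setConfiguration(), which mutates process-global state and therefore leaked into every test in the class and any test that loaded it afterwards. The replacement @Before method resets authentication to "simple" before each test, and the RPC tests that genuinely need kerberos now opt in locally, as the later hunks in this file show. The reset pattern in isolation:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.junit.Before;

    public class AuthResetSketch {
      @Before
      public void resetAuth() {
        // UGI configuration is a process-wide singleton; start every test
        // from a known "simple" (no kerberos) baseline.
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "simple");
        UserGroupInformation.setConfiguration(conf);
      }
    }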
@@ -161,7 +162,7 @@ public class TestBlockToken {
public void testWritable() throws Exception {
TestWritable.testWritable(new BlockTokenIdentifier());
BlockTokenSecretManager sm = new BlockTokenSecretManager(
- blockKeyUpdateInterval, blockTokenLifetime, 0);
+ blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
TestWritable.testWritable(generateTokenId(sm, block1,
EnumSet.allOf(BlockTokenSecretManager.AccessMode.class)));
TestWritable.testWritable(generateTokenId(sm, block2,
@@ -200,9 +201,9 @@ public class TestBlockToken {
@Test
public void testBlockTokenSecretManager() throws Exception {
BlockTokenSecretManager masterHandler = new BlockTokenSecretManager(
- blockKeyUpdateInterval, blockTokenLifetime, 0);
+ blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
BlockTokenSecretManager slaveHandler = new BlockTokenSecretManager(
- blockKeyUpdateInterval, blockTokenLifetime);
+ blockKeyUpdateInterval, blockTokenLifetime, "fake-pool", null);
ExportedBlockKeys keys = masterHandler.exportKeys();
slaveHandler.addKeys(keys);
tokenGenerationAndVerification(masterHandler, slaveHandler);
@@ -214,8 +215,9 @@ public class TestBlockToken {
tokenGenerationAndVerification(masterHandler, slaveHandler);
}
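Every BlockTokenSecretManager constructed in this file gains two trailing arguments. Judging purely from the call sites (the class's javadoc is not in this diff), the new String is a block pool ID, for which the tests pass the placeholder "fake-pool", and the final argument, null throughout, selects an optional data-transfer encryption algorithm. The master/slave pairing the tests repeat, as a sketch with illustrative interval values:

    // Assumed parameter order, inferred from the hunks above:
    //   master: keyUpdateInterval, tokenLifetime, nnIndex, blockPoolId, encryptionAlgorithm
    //   slave:  keyUpdateInterval, tokenLifetime, blockPoolId, encryptionAlgorithm
    long interval = 10 * 60 * 1000L;  // key roll interval, ms (illustrative)
    long lifetime = 10 * 60 * 1000L;  // token lifetime, ms (illustrative)
    BlockTokenSecretManager master =
        new BlockTokenSecretManager(interval, lifetime, 0, "fake-pool", null);
    BlockTokenSecretManager slave =
        new BlockTokenSecretManager(interval, lifetime, "fake-pool", null);
    slave.addKeys(master.exportKeys());  // share keys so the slave can verify tokens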
- private Server createMockDatanode(BlockTokenSecretManager sm,
- Token<BlockTokenIdentifier> token) throws IOException, ServiceException {
+ private static Server createMockDatanode(BlockTokenSecretManager sm,
+ Token<BlockTokenIdentifier> token, Configuration conf)
+ throws IOException, ServiceException {
ClientDatanodeProtocolPB mockDN = mock(ClientDatanodeProtocolPB.class);
BlockTokenIdentifier id = sm.createIdentifier();
@@ -230,18 +232,23 @@ public class TestBlockToken {
ProtobufRpcEngine.class);
BlockingService service = ClientDatanodeProtocolService
.newReflectiveBlockingService(mockDN);
- return RPC.getServer(ClientDatanodeProtocolPB.class, service, ADDRESS, 0, 5,
- true, conf, sm);
+ return new RPC.Builder(conf).setProtocol(ClientDatanodeProtocolPB.class)
+ .setInstance(service).setBindAddress(ADDRESS).setPort(0)
+ .setNumHandlers(5).setVerbose(true).setSecretManager(sm).build();
}
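The createMockDatanode() change above is the recurring RPC migration in this commit: the positional RPC.getServer(protocol, instance, bindAddress, port, numHandlers, verbose, conf, secretManager) overload gives way to the fluent RPC.Builder, and the Configuration travels in as an explicit parameter instead of class-level state. The builder calls, lifted from the hunk into a standalone sketch (service and sm stand for the BlockingService and secret manager built earlier in the method):

    // import org.apache.hadoop.ipc.RPC;
    // import org.apache.hadoop.ipc.Server;
    Server server = new RPC.Builder(conf)
        .setProtocol(ClientDatanodeProtocolPB.class)  // PB protocol interface
        .setInstance(service)        // the reflective BlockingService
        .setBindAddress("0.0.0.0")   // ADDRESS in the test
        .setPort(0)                  // 0 = let the OS pick a free port
        .setNumHandlers(5)
        .setVerbose(true)
        .setSecretManager(sm)        // enables block-token authentication
        .build();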
@Test
public void testBlockTokenRpc() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+
BlockTokenSecretManager sm = new BlockTokenSecretManager(
- blockKeyUpdateInterval, blockTokenLifetime, 0);
+ blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
Token<BlockTokenIdentifier> token = sm.generateToken(block3,
EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
- final Server server = createMockDatanode(sm, token);
+ final Server server = createMockDatanode(sm, token, conf);
server.start();
@@ -270,13 +277,17 @@ public class TestBlockToken {
*/
@Test
public void testBlockTokenRpcLeak() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+
Assume.assumeTrue(FD_DIR.exists());
BlockTokenSecretManager sm = new BlockTokenSecretManager(
- blockKeyUpdateInterval, blockTokenLifetime, 0);
+ blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
Token<BlockTokenIdentifier> token = sm.generateToken(block3,
EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
- final Server server = createMockDatanode(sm, token);
+ final Server server = createMockDatanode(sm, token, conf);
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
@@ -300,10 +311,10 @@ public class TestBlockToken {
int fdsAtStart = countOpenFileDescriptors();
try {
- long endTime = System.currentTimeMillis() + 3000;
- while (System.currentTimeMillis() < endTime) {
+ long endTime = Time.now() + 3000;
+ while (Time.now() < endTime) {
proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
- fakeBlock);
+ false, fakeBlock);
assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
if (proxy != null) {
RPC.stopProxy(proxy);
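System.currentTimeMillis() becomes org.apache.hadoop.util.Time.now() here and throughout the balancer test below: the same wall-clock milliseconds, read through Hadoop's own Time utility so that all time reads go through one seam. The deadline-loop idiom the tests use, in isolation:

    import org.apache.hadoop.util.Time;

    class DeadlineSketch {
      // Poll a condition for up to timeoutMs milliseconds.
      static boolean waitFor(long timeoutMs) throws InterruptedException {
        long deadline = Time.now() + timeoutMs;
        while (Time.now() < deadline) {
          if (conditionHolds()) {
            return true;
          }
          Thread.sleep(100);  // back off between polls
        }
        return false;
      }

      private static boolean conditionHolds() {
        return false;  // placeholder predicate for the sketch
      }
    }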
@@ -341,9 +352,9 @@ public class TestBlockToken {
for (int i = 0; i < 10; i++) {
String bpid = Integer.toString(i);
BlockTokenSecretManager masterHandler = new BlockTokenSecretManager(
- blockKeyUpdateInterval, blockTokenLifetime, 0);
+ blockKeyUpdateInterval, blockTokenLifetime, 0, "fake-pool", null);
BlockTokenSecretManager slaveHandler = new BlockTokenSecretManager(
- blockKeyUpdateInterval, blockTokenLifetime);
+ blockKeyUpdateInterval, blockTokenLifetime, "fake-pool", null);
bpMgr.addBlockPool(bpid, slaveHandler);
ExportedBlockKeys keys = masterHandler.exportKeys();
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.balancer;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
@@ -41,12 +44,11 @@ import org.apache.hadoop.hdfs.protocol.B
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.util.Time;
import org.junit.Test;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
/**
* This class tests if a balancer schedules tasks correctly.
@@ -86,7 +88,7 @@ public class TestBalancer {
/* create a file with a length of <code>fileLen</code> */
static void createFile(MiniDFSCluster cluster, Path filePath, long fileLen,
short replicationFactor, int nnIndex)
- throws IOException {
+ throws IOException, InterruptedException, TimeoutException {
FileSystem fs = cluster.getFileSystem(nnIndex);
DFSTestUtil.createFile(fs, filePath, fileLen,
replicationFactor, r.nextLong());
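The widened throws clause here, and in generateBlocks() below and again in TestBalancerWithMultipleNameNodes, tracks DFSTestUtil.waitReplication(), which evidently now declares InterruptedException and TimeoutException so that a file that never reaches its target replication fails the test promptly instead of hanging; the helpers simply propagate. The create-then-wait pattern, assuming the signatures shown in this diff:

    import java.io.IOException;
    import java.util.concurrent.TimeoutException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DFSTestUtil;

    class CreateFileSketch {
      // Write a file of len random bytes and block until every block
      // reports repl replicas, or time out.
      static void createReplicatedFile(FileSystem fs, Path p, long len,
          short repl, long seed)
          throws IOException, InterruptedException, TimeoutException {
        DFSTestUtil.createFile(fs, p, len, repl, seed);
        DFSTestUtil.waitReplication(fs, p, repl);
      }
    }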
@@ -98,7 +100,7 @@ public class TestBalancer {
* whose used space to be <code>size</code>
*/
private ExtendedBlock[] generateBlocks(Configuration conf, long size,
- short numNodes) throws IOException {
+ short numNodes) throws IOException, InterruptedException, TimeoutException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
try {
cluster.waitActive();
@@ -221,7 +223,7 @@ public class TestBalancer {
throws IOException, TimeoutException {
long timeout = TIMEOUT;
long failtime = (timeout <= 0L) ? Long.MAX_VALUE
- : System.currentTimeMillis() + timeout;
+ : Time.now() + timeout;
while (true) {
long[] status = client.getStats();
@@ -233,7 +235,7 @@ public class TestBalancer {
&& usedSpaceVariance < CAPACITY_ALLOWED_VARIANCE)
break; //done
- if (System.currentTimeMillis() > failtime) {
+ if (Time.now() > failtime) {
throw new TimeoutException("Cluster failed to reached expected values of "
+ "totalSpace (current: " + status[0]
+ ", expected: " + expectedTotalSpace
@@ -259,7 +261,7 @@ public class TestBalancer {
throws IOException, TimeoutException {
long timeout = TIMEOUT;
long failtime = (timeout <= 0L) ? Long.MAX_VALUE
- : System.currentTimeMillis() + timeout;
+ : Time.now() + timeout;
final double avgUtilization = ((double)totalUsedSpace) / totalCapacity;
boolean balanced;
do {
@@ -272,7 +274,7 @@ public class TestBalancer {
/ datanode.getCapacity();
if (Math.abs(avgUtilization - nodeUtilization) > BALANCE_ALLOWED_VARIANCE) {
balanced = false;
- if (System.currentTimeMillis() > failtime) {
+ if (Time.now() > failtime) {
throw new TimeoutException(
"Rebalancing expected avg utilization to become "
+ avgUtilization + ", but on datanode " + datanode
@@ -370,7 +372,7 @@ public class TestBalancer {
* Test parse method in Balancer#Cli class with threshold value out of
* boundaries.
*/
- @Test
+ @Test(timeout=100000)
public void testBalancerCliParseWithThresholdOutOfBoundaries() {
String parameters[] = new String[] { "-threshold", "0" };
String reason = "IllegalArgumentException is expected when threshold value"
@@ -392,18 +394,24 @@ public class TestBalancer {
/** Test a cluster with even distribution,
* then a new empty node is added to the cluster*/
- @Test
+ @Test(timeout=100000)
public void testBalancer0() throws Exception {
- Configuration conf = new HdfsConfiguration();
+ testBalancer0Internal(new HdfsConfiguration());
+ }
+
+ void testBalancer0Internal(Configuration conf) throws Exception {
initConf(conf);
oneNodeTest(conf);
twoNodeTest(conf);
}
/** Test unevenly distributed cluster */
- @Test
+ @Test(timeout=100000)
public void testBalancer1() throws Exception {
- Configuration conf = new HdfsConfiguration();
+ testBalancer1Internal(new HdfsConfiguration());
+ }
+
+ void testBalancer1Internal(Configuration conf) throws Exception {
initConf(conf);
testUnevenDistribution(conf,
new long[] {50*CAPACITY/100, 10*CAPACITY/100},
@@ -411,9 +419,12 @@ public class TestBalancer {
new String[] {RACK0, RACK1});
}
- @Test
+ @Test(timeout=100000)
public void testBalancer2() throws Exception {
- Configuration conf = new HdfsConfiguration();
+ testBalancer2Internal(new HdfsConfiguration());
+ }
+
+ void testBalancer2Internal(Configuration conf) throws Exception {
initConf(conf);
testBalancerDefaultConstructor(conf, new long[] { CAPACITY, CAPACITY },
new String[] { RACK0, RACK1 }, CAPACITY, RACK2);
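Splitting each testBalancerN() into a trivial @Test wrapper plus a package-visible testBalancerNInternal(Configuration) hook keeps the default runs exactly as they were while letting another test class drive the same balancer scenarios under a customized Configuration. A hypothetical reuse (the configuration key is invented for illustration):

    Configuration conf = new HdfsConfiguration();
    conf.setBoolean("dfs.hypothetical.feature.enable", true);  // made-up key
    new TestBalancer().testBalancer0Internal(conf);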
@@ -456,8 +467,7 @@ public class TestBalancer {
/**
* Test parse method in Balancer#Cli class with wrong number of params
*/
-
- @Test
+ @Test(timeout=100000)
public void testBalancerCliParseWithWrongParams() {
String parameters[] = new String[] { "-threshold" };
String reason =
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java Fri Oct 19 02:25:55 2012
@@ -29,8 +29,8 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
-import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.MiniDFSNNTopology.NNConf;
+import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java Fri Oct 19 02:25:55 2012
@@ -23,6 +23,7 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Random;
+import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -40,8 +41,8 @@ import org.apache.hadoop.hdfs.protocol.B
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -96,7 +97,7 @@ public class TestBalancerWithMultipleNam
/* create a file with a length of <code>fileLen</code> */
private static void createFile(Suite s, int index, long len
- ) throws IOException {
+ ) throws IOException, InterruptedException, TimeoutException {
final FileSystem fs = s.cluster.getFileSystem(index);
DFSTestUtil.createFile(fs, FILE_PATH, len, s.replication, RANDOM.nextLong());
DFSTestUtil.waitReplication(fs, FILE_PATH, s.replication);
@@ -106,7 +107,7 @@ public class TestBalancerWithMultipleNam
* whose used space to be <code>size</code>
*/
private static ExtendedBlock[][] generateBlocks(Suite s, long size
- ) throws IOException {
+ ) throws IOException, InterruptedException, TimeoutException {
final ExtendedBlock[][] blocks = new ExtendedBlock[s.clients.length][];
for(int n = 0; n < s.clients.length; n++) {
final long fileLen = size/s.replication;
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java Fri Oct 19 02:25:55 2012
@@ -191,4 +191,12 @@ public class BlockManagerTestUtil {
"Must use default policy, got %s", bpp.getClass());
((BlockPlacementPolicyDefault)bpp).setPreferLocalNode(prefer);
}
+
+ /**
+ * Call heartbeat check function of HeartbeatManager
+ * @param bm the BlockManager to manipulate
+ */
+ public static void checkHeartbeat(BlockManager bm) {
+ bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
+ }
}
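The new checkHeartbeat() hook reaches through the DatanodeManager to invoke HeartbeatManager.heartbeatCheck() directly, so a test can force the namenode's dead-datanode processing on demand instead of waiting for the periodic monitor thread. Intended call pattern, assuming a MiniDFSCluster named cluster:

    // After e.g. stopping a datanode, make the namenode re-evaluate
    // heartbeats immediately.
    BlockManagerTestUtil.checkHeartbeat(
        cluster.getNamesystem().getBlockManager());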
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java Fri Oct 19 02:25:55 2012
@@ -17,8 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
-import org.junit.Test;
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
import java.util.ArrayList;
import java.util.Iterator;
@@ -29,7 +29,7 @@ import org.apache.commons.logging.LogFac
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.junit.Test;
/**
* This class provides tests for BlockInfo class, which is used in BlocksMap.
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Fri Oct 19 02:25:55 2012
@@ -379,7 +379,7 @@ public class TestBlockManager {
private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes) {
BlockCollection bc = Mockito.mock(BlockCollection.class);
- Mockito.doReturn((short)3).when(bc).getReplication();
+ Mockito.doReturn((short)3).when(bc).getBlockReplication();
BlockInfo blockInfo = blockOnNodes(blockId, nodes);
bm.blocksMap.addBlockCollection(blockInfo, bc);
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java Fri Oct 19 02:25:55 2012
@@ -146,7 +146,7 @@ public class TestBlockTokenWithDFS {
"test-blockpoolid", block.getBlockId());
blockReader = BlockReaderFactory.newBlockReader(
conf, s, file, block,
- lblock.getBlockToken(), 0, -1);
+ lblock.getBlockToken(), 0, -1, null);
} catch (IOException ex) {
if (ex instanceof InvalidBlockTokenException) {
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java Fri Oct 19 02:25:55 2012
@@ -17,25 +17,25 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
-import junit.framework.TestCase;
+import static org.junit.Assert.assertEquals;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.junit.Test;
/**
* Test if FSNamesystem handles heartbeat right
*/
-public class TestComputeInvalidateWork extends TestCase {
+public class TestComputeInvalidateWork {
/**
* Test if {@link FSNamesystem#computeInvalidateWork(int)}
* can schedule invalidate work correctly
*/
+ @Test
public void testCompInvalidate() throws Exception {
final Configuration conf = new HdfsConfiguration();
final int NUM_OF_DATANODES = 3;
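This file, and TestCorruptReplicaInfo below, gets the standard JUnit 3 to JUnit 4 conversion: drop the junit.framework.TestCase base class, mark each test method with @Test, and statically import the assertions instead of inheriting them. The shape of the change in miniature:

    // JUnit 3: discovered by the "test" name prefix, asserts inherited
    // from TestCase.
    //   public class TestFoo extends TestCase {
    //     public void testBar() { assertEquals(1, 1); }
    //   }

    // JUnit 4: discovered by annotation, asserts statically imported.
    import static org.junit.Assert.assertEquals;
    import org.junit.Test;

    public class TestFoo {
      @Test
      public void testBar() {
        assertEquals(1, 1);
      }
    }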
Modified: hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java?rev=1399950&r1=1399949&r2=1399950&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java (original)
+++ hadoop/common/branches/HDFS-2802/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestCorruptReplicaInfo.java Fri Oct 19 02:25:55 2012
@@ -17,6 +17,11 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
@@ -24,12 +29,11 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Map;
-import junit.framework.TestCase;
-
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.junit.Test;
/**
@@ -38,7 +42,7 @@ import org.apache.hadoop.hdfs.protocol.B
* CorruptReplicasMap::getCorruptReplicaBlockIds
* return the correct values
*/
-public class TestCorruptReplicaInfo extends TestCase {
+public class TestCorruptReplicaInfo {
private static final Log LOG =
LogFactory.getLog(TestCorruptReplicaInfo.class);
@@ -60,6 +64,7 @@ public class TestCorruptReplicaInfo exte
return getBlock((long)block_id);
}
+ @Test
public void testCorruptReplicaInfo() throws IOException,
InterruptedException {