Posted to hdfs-commits@hadoop.apache.org by to...@apache.org on 2011/06/07 03:11:17 UTC
svn commit: r1132846 [3/3] - in /hadoop/hdfs/branches/HDFS-1073: ./ bin/
src/c++/libhdfs/ src/c++/libhdfs/m4/ src/c++/libhdfs/tests/
src/contrib/hdfsproxy/ src/contrib/hdfsproxy/bin/
src/docs/src/documentation/content/xdocs/ src/java/ src/java/org/apac...
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java Tue Jun 7 01:11:15 2011
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.util.StringUtils;
/**
* This test ensures the appropriate response (successful or failure) from
@@ -89,7 +90,7 @@ public class TestDFSRollback extends Tes
* Attempts to start a NameNode with the given operation. Starting
* the NameNode should throw an exception.
*/
- void startNameNodeShouldFail(StartupOption operation) {
+ void startNameNodeShouldFail(StartupOption operation, String searchString) {
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.startupOption(operation)
@@ -99,6 +100,10 @@ public class TestDFSRollback extends Tes
.build(); // should fail
throw new AssertionError("NameNode should have failed to start");
} catch (Exception expected) {
+ if (!expected.getMessage().contains(searchString)) {
+ fail("Expected substring '" + searchString + "' in exception " +
+ "but got: " + StringUtils.stringifyException(expected));
+ }
// expected
}
}
@@ -165,7 +170,8 @@ public class TestDFSRollback extends Tes
log("NameNode rollback without existing previous dir", numDirs);
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
- startNameNodeShouldFail(StartupOption.ROLLBACK);
+ startNameNodeShouldFail(StartupOption.ROLLBACK,
+ "None of the storage directories contain previous fs state");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("DataNode rollback without existing previous dir", numDirs);
@@ -238,7 +244,8 @@ public class TestDFSRollback extends Tes
for (File f : baseDirs) {
FileUtil.fullyDelete(new File(f,"edits"));
}
- startNameNodeShouldFail(StartupOption.ROLLBACK);
+ startNameNodeShouldFail(StartupOption.ROLLBACK,
+ "Edits file is not found");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode rollback with no image file", numDirs);
@@ -247,7 +254,8 @@ public class TestDFSRollback extends Tes
for (File f : baseDirs) {
FileUtil.fullyDelete(new File(f,"fsimage"));
}
- startNameNodeShouldFail(StartupOption.ROLLBACK);
+ startNameNodeShouldFail(StartupOption.ROLLBACK,
+ "Image file is not found");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode rollback with corrupt version file", numDirs);
@@ -256,7 +264,8 @@ public class TestDFSRollback extends Tes
for (File f : baseDirs) {
UpgradeUtilities.corruptFile(new File(f,"VERSION"));
}
- startNameNodeShouldFail(StartupOption.ROLLBACK);
+ startNameNodeShouldFail(StartupOption.ROLLBACK,
+ "file VERSION has layoutVersion missing");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode rollback with old layout version in previous", numDirs);
@@ -269,7 +278,8 @@ public class TestDFSRollback extends Tes
UpgradeUtilities.createNameNodeVersionFile(conf, baseDirs,
storageInfo, UpgradeUtilities.getCurrentBlockPoolID(cluster));
- startNameNodeShouldFail(StartupOption.UPGRADE);
+ startNameNodeShouldFail(StartupOption.ROLLBACK,
+ "Cannot rollback to storage version 1 using this version");
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
} // end numDir loop
}
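
The searchString parameter added above ties each expected startup failure to a specific message instead of accepting any exception. A minimal standalone sketch of the same pattern (hypothetical helper and names, not part of this commit; unlike the committed version it also guards against a null exception message):

    // Sketch: run an action expected to fail, then assert on its message.
    static void assertFailsWith(Runnable action, String searchString) {
      try {
        action.run();
        throw new AssertionError("action should have failed");
      } catch (RuntimeException expected) {
        // String.valueOf guards against getMessage() returning null
        String msg = String.valueOf(expected.getMessage());
        if (!msg.contains(searchString)) {
          throw new AssertionError("expected substring '" + searchString
              + "' in exception, but got: " + msg);
        }
      }
    }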
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java Tue Jun 7 01:11:15 2011
@@ -378,12 +378,8 @@ public class TestDFSStorageStateRecovery
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
checkResultDataNode(baseDirs, curAfterRecover, prevAfterRecover);
} else {
- try {
- cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
- assertFalse(cluster.getDataNodes().get(0).isDatanodeUp());
- } catch (Exception expected) {
- // expected
- }
+ cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
+ assertFalse(cluster.getDataNodes().get(0).isDatanodeUp());
}
}
cluster.shutdown();
@@ -423,12 +419,8 @@ public class TestDFSStorageStateRecovery
cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
checkResultBlockPool(baseDirs, curAfterRecover, prevAfterRecover);
} else {
- try {
- cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
- assertFalse(cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
- } catch (Exception expected) {
- // expected
- }
+ cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
+ assertFalse(cluster.getDataNodes().get(0).isBPServiceAlive(bpid));
}
}
cluster.shutdown();
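
Dropping the try/catch here is deliberate: a catch block that swallows Exception can accept a startDataNodes() call that fails for an unrelated reason, while letting the exception propagate fails the test with the real stack trace. A contrast sketch with hypothetical stand-in names (MiniCluster is not a real Hadoop type):

    import static org.junit.Assert.assertFalse;

    interface MiniCluster {                // hypothetical stand-in
      void startNodes() throws Exception;
      boolean nodeIsUp(int i);
    }

    class RecoveryCheckSketch {
      void looseCheck(MiniCluster cluster) {
        try {
          cluster.startNodes();            // any exception silently "passes"
          assertFalse(cluster.nodeIsUp(0));
        } catch (Exception expected) { /* hides unrelated failures */ }
      }

      void tightCheck(MiniCluster cluster) throws Exception {
        cluster.startNodes();              // unexpected exception fails loudly
        assertFalse(cluster.nodeIsUp(0));  // the condition that matters
      }
    }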
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java Tue Jun 7 01:11:15 2011
@@ -47,10 +47,13 @@ import org.apache.commons.logging.LogFac
*/
public class TestDFSUpgradeFromImage extends TestCase {
- private static final Log LOG = LogFactory.getLog(
- "org.apache.hadoop.hdfs.TestDFSUpgradeFromImage");
+ private static final Log LOG = LogFactory
+ .getLog(TestDFSUpgradeFromImage.class);
private static File TEST_ROOT_DIR =
new File(MiniDFSCluster.getBaseDirectory());
+ private static final String HADOOP14_IMAGE = "hadoop-14-dfs-dir.tgz";
+ private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
+ private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
public int numDataNodes = 4;
@@ -64,24 +67,26 @@ public class TestDFSUpgradeFromImage ext
boolean printChecksum = false;
- protected void setUp() throws IOException {
- unpackStorage();
+ public void unpackStorage() throws IOException {
+ unpackStorage(HADOOP14_IMAGE);
}
- public void unpackStorage() throws IOException {
- String tarFile = System.getProperty("test.cache.data", "build/test/cache") +
- "/hadoop-14-dfs-dir.tgz";
+ private void unpackStorage(String tarFileName)
+ throws IOException {
+ String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+ + "/" + tarFileName;
String dataDir = System.getProperty("test.build.data", "build/test/data");
File dfsDir = new File(dataDir, "dfs");
if ( dfsDir.exists() && !FileUtil.fullyDelete(dfsDir) ) {
throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
}
+ LOG.info("Unpacking " + tarFile);
FileUtil.unTar(new File(tarFile), new File(dataDir));
//Now read the reference info
- BufferedReader reader = new BufferedReader(
- new FileReader(System.getProperty("test.cache.data", "build/test/cache") +
- "/hadoop-dfs-dir.txt"));
+ BufferedReader reader = new BufferedReader(new FileReader(
+ System.getProperty("test.cache.data", "build/test/cache")
+ + "/" + HADOOP_DFS_DIR_TXT));
String line;
while ( (line = reader.readLine()) != null ) {
@@ -177,7 +182,8 @@ public class TestDFSUpgradeFromImage ext
}
}
- public void testUpgradeFromImage() throws IOException {
+ public void testUpgradeFromRel14Image() throws IOException {
+ unpackStorage();
MiniDFSCluster cluster = null;
try {
Configuration conf = new HdfsConfiguration();
@@ -246,8 +252,40 @@ public class TestDFSUpgradeFromImage ext
.build();
fail("Was able to start NN from 0.3.0 image");
} catch (IOException ioe) {
- LOG.info("Got expected exception", ioe);
assertTrue(ioe.toString().contains("Old layout version is 'too old'"));
}
}
+
+ /**
+ * Test upgrade from 0.22 image
+ */
+ public void testUpgradeFromRel22Image() throws IOException {
+ unpackStorage(HADOOP22_IMAGE);
+ MiniDFSCluster cluster = null;
+ try {
+ Configuration conf = new HdfsConfiguration();
+ if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
+ System.setProperty("test.build.data", "build/test/data");
+ }
+ conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(numDataNodes)
+ .format(false)
+ .startupOption(StartupOption.UPGRADE)
+ .clusterId("testClusterId")
+ .build();
+ cluster.waitActive();
+ DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+ DFSClient dfsClient = dfs.dfs;
+ //Safemode will be off only after upgrade is complete. Wait for it.
+ while ( dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET) ) {
+ LOG.info("Waiting for SafeMode to be OFF.");
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException ignored) {}
+ }
+ } finally {
+ if (cluster != null) { cluster.shutdown(); }
+ }
+ }
}
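
The SAFEMODE_GET loop in testUpgradeFromRel22Image polls with no upper bound, so a stuck upgrade hangs the test rather than failing it. A hedged sketch of the same loop with a deadline (the timeout handling is an illustrative addition, not part of this commit; DFSClient.setSafeMode and FSConstants.SafeModeAction.SAFEMODE_GET are the calls used above):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.FSConstants;

    class SafeModeWaitSketch {
      /** Poll until the NameNode leaves safe mode, or fail after timeoutMs. */
      static void waitForSafeModeOff(DFSClient dfsClient, long timeoutMs)
          throws IOException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET)) {
          if (System.currentTimeMillis() > deadline) {
            throw new IOException("Still in safe mode after " + timeoutMs + " ms");
          }
          Thread.sleep(1000);
        }
      }
    }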
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java Tue Jun 7 01:11:15 2011
@@ -29,14 +29,19 @@ import java.util.Collection;
import java.util.Iterator;
import java.util.List;
+import junit.framework.Assert;
+
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
+import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
+
public class TestDFSUtil {
/**
@@ -233,4 +238,16 @@ public class TestDFSUtil {
} catch (IOException expected) {
}
}
+
+ @Test
+ public void testGetServerInfo(){
+ HdfsConfiguration conf = new HdfsConfiguration();
+ conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+ String httpsport = DFSUtil.getInfoServer(null, conf, true);
+ Assert.assertEquals("0.0.0.0:50470", httpsport);
+ String httpport = DFSUtil.getInfoServer(null, conf, false);
+ Assert.assertEquals("0.0.0.0:50070", httpport);
+ }
+
}
\ No newline at end of file
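
A stylistic aside on the test added above: it imports junit.framework.Assert into an otherwise JUnit 4 style class. An equivalent sketch using org.junit.Assert (same assertions and values as the committed test):

    import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
    import static org.junit.Assert.assertEquals;

    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.junit.Test;

    public class TestGetServerInfoSketch {
      @Test
      public void testGetServerInfo() {
        HdfsConfiguration conf = new HdfsConfiguration();
        conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
        UserGroupInformation.setConfiguration(conf);
        assertEquals("0.0.0.0:50470", DFSUtil.getInfoServer(null, conf, true));
        assertEquals("0.0.0.0:50070", DFSUtil.getInfoServer(null, conf, false));
      }
    }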
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java Tue Jun 7 01:11:15 2011
@@ -23,6 +23,8 @@ import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -31,9 +33,12 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
@@ -42,6 +47,9 @@ import org.junit.BeforeClass;
import org.junit.Test;
public class TestLeaseRecovery2 {
+
+ public static final Log LOG = LogFactory.getLog(TestLeaseRecovery2.class);
+
{
((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
@@ -232,4 +240,126 @@ public class TestLeaseRecovery2 {
"Now validating data and sizes from datanodes...");
AppendTestUtil.checkFullFile(dfs, filepath, size, buffer, filestr);
}
+
+ /**
+ * This test makes it so the client does not renew its lease and also
+ * sets the hard lease expiration period to be short, thus triggering
+ * lease expiration to happen while the client is still alive. The test
+ * also causes the NN to restart after lease recovery has begun, but before
+ * the DNs have completed the blocks. This test verifies that when the NN
+ * comes back up, the client no longer holds the lease.
+ *
+ * The test makes sure that the lease recovery completes and the client
+ * fails if it continues to write to the file, even after NN restart.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testHardLeaseRecoveryAfterNameNodeRestart() throws Exception {
+ hardLeaseRecoveryRestartHelper(false);
+ }
+
+ @Test
+ public void testHardLeaseRecoveryWithRenameAfterNameNodeRestart()
+ throws Exception {
+ hardLeaseRecoveryRestartHelper(true);
+ }
+
+ public void hardLeaseRecoveryRestartHelper(boolean doRename)
+ throws Exception {
+ //create a file
+ String fileStr = "/hardLeaseRecovery";
+ AppendTestUtil.LOG.info("filestr=" + fileStr);
+ Path filePath = new Path(fileStr);
+ FSDataOutputStream stm = dfs.create(filePath, true,
+ BUF_SIZE, REPLICATION_NUM, BLOCK_SIZE);
+ assertTrue(dfs.dfs.exists(fileStr));
+
+ // write bytes into the file.
+ int size = AppendTestUtil.nextInt(FILE_SIZE);
+ AppendTestUtil.LOG.info("size=" + size);
+ stm.write(buffer, 0, size);
+
+ String originalLeaseHolder = NameNodeAdapter.getLeaseHolderForPath(
+ cluster.getNameNode(), fileStr);
+
+ assertFalse("original lease holder should not be the NN",
+ originalLeaseHolder.equals(HdfsConstants.NAMENODE_LEASE_HOLDER));
+
+ // hflush file
+ AppendTestUtil.LOG.info("hflush");
+ stm.hflush();
+
+ if (doRename) {
+ fileStr += ".renamed";
+ Path renamedPath = new Path(fileStr);
+ assertTrue(dfs.rename(filePath, renamedPath));
+ filePath = renamedPath;
+ }
+
+ // kill the lease renewal thread
+ AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
+ dfs.dfs.leaserenewer.interruptAndJoin();
+
+ // Make sure the DNs don't send a heartbeat for a while, so the blocks
+ // won't actually get completed during lease recovery.
+ for (DataNode dn : cluster.getDataNodes()) {
+ DataNodeAdapter.setHeartbeatsDisabledForTests(dn, true);
+ }
+
+ // set the hard limit to be 1 second
+ cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
+
+ // Make sure lease recovery begins.
+ Thread.sleep(HdfsConstants.NAMENODE_LEASE_RECHECK_INTERVAL * 2);
+
+ assertEquals("lease holder should now be the NN", HdfsConstants.NAMENODE_LEASE_HOLDER,
+ NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
+
+ cluster.restartNameNode(false);
+
+ assertEquals("lease holder should still be the NN after restart",
+ HdfsConstants.NAMENODE_LEASE_HOLDER,
+ NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(), fileStr));
+
+ // Let the DNs send heartbeats again.
+ for (DataNode dn : cluster.getDataNodes()) {
+ DataNodeAdapter.setHeartbeatsDisabledForTests(dn, false);
+ }
+
+ cluster.waitActive();
+
+ // set the hard limit to be 1 second, to initiate lease recovery.
+ cluster.setLeasePeriod(LONG_LEASE_PERIOD, SHORT_LEASE_PERIOD);
+
+ // wait for lease recovery to complete
+ LocatedBlocks locatedBlocks;
+ do {
+ Thread.sleep(SHORT_LEASE_PERIOD);
+ locatedBlocks = DFSClient.callGetBlockLocations(dfs.dfs.namenode,
+ fileStr, 0L, size);
+ } while (locatedBlocks.isUnderConstruction());
+ assertEquals(size, locatedBlocks.getFileLength());
+
+ // make sure that the client can't write data anymore.
+ stm.write('b');
+ try {
+ stm.hflush();
+ fail("Should not be able to flush after we've lost the lease");
+ } catch (IOException e) {
+ LOG.info("Expceted exception on hflush", e);
+ }
+
+ try {
+ stm.close();
+ fail("Should not be able to close after we've lost the lease");
+ } catch (IOException e) {
+ LOG.info("Expected exception on close", e);
+ }
+
+ // verify data
+ AppendTestUtil.LOG.info(
+ "File size is good. Now validating sizes from datanodes...");
+ AppendTestUtil.checkFullFile(dfs, filePath, size, buffer, fileStr);
+ }
}
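
Like the safe-mode wait, the do/while in hardLeaseRecoveryRestartHelper can spin forever if block recovery stalls. A hedged variant with a poll bound (the bound is an illustrative addition; DFSClient.callGetBlockLocations and LocatedBlocks.isUnderConstruction are the calls used above):

    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

    class RecoveryWaitSketch {
      /** Poll block locations until the file is no longer under construction. */
      static LocatedBlocks waitForRecovery(ClientProtocol namenode, String path,
          long size, long sleepMs, int maxPolls) throws Exception {
        LocatedBlocks blocks;
        int polls = 0;
        do {
          Thread.sleep(sleepMs);
          blocks = DFSClient.callGetBlockLocations(namenode, path, 0L, size);
        } while (blocks.isUnderConstruction() && ++polls < maxPolls);
        if (blocks.isUnderConstruction()) {
          throw new AssertionError("Recovery incomplete after " + maxPolls + " polls");
        }
        return blocks;
      }
    }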
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteRead.java Tue Jun 7 01:11:15 2011
@@ -23,7 +23,6 @@ import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataInputStream;
@@ -32,51 +31,50 @@ import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
public class TestWriteRead {
-
- // junit test settings
- private static final int WR_NTIMES = 4;
- private static final int WR_CHUNK_SIZE = 1000;
-
- private static final int BUFFER_SIZE = 8192 * 100;
+ // Junit test settings.
+ private static final int WR_NTIMES = 350;
+ private static final int WR_CHUNK_SIZE = 10000;
+
+ private static final int BUFFER_SIZE = 8192 * 100;
private static final String ROOT_DIR = "/tmp/";
-
- // command-line options
+
+ // command-line options. Different defaults for unit test vs real cluster
String filenameOption = ROOT_DIR + "fileX1";
int chunkSizeOption = 10000;
int loopOption = 10;
-
-
+
private MiniDFSCluster cluster;
- private Configuration conf; // = new HdfsConfiguration();
- private FileSystem mfs; // = cluster.getFileSystem();
- private FileContext mfc; // = FileContext.getFileContext();
-
- // configuration
- final boolean positionRead = false; // position read vs sequential read
- private boolean useFCOption = false; // use either FileSystem or FileContext
+ private Configuration conf; // = new HdfsConfiguration();
+ private FileSystem mfs; // = cluster.getFileSystem();
+ private FileContext mfc; // = FileContext.getFileContext();
+
+ // configuration
+ private boolean useFCOption = false; // use either FileSystem or FileContext
private boolean verboseOption = true;
+ private boolean positionReadOption = false;
+ private boolean truncateOption = false;
+ private boolean abortTestOnFailure = true;
static private Log LOG = LogFactory.getLog(TestWriteRead.class);
@Before
public void initJunitModeTest() throws Exception {
LOG.info("initJunitModeTest");
-
+
conf = new HdfsConfiguration();
- conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 100); //100K blocksize
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 100); // 100K
+ // blocksize
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
-
+
mfs = cluster.getFileSystem();
mfc = FileContext.getFileContext();
@@ -91,10 +89,8 @@ public class TestWriteRead {
// Equivalence of @Before for cluster mode testing.
private void initClusterModeTest() throws IOException {
-
+
LOG = LogFactory.getLog(TestWriteRead.class);
- ((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.INFO);
- ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.INFO);
LOG.info("initClusterModeTest");
conf = new Configuration();
@@ -103,28 +99,42 @@ public class TestWriteRead {
}
/** Junit Test reading while writing. */
+
@Test
- public void TestWriteRead1() throws IOException {
+ public void testWriteReadSeq() throws IOException {
+ useFCOption = false;
+ positionReadOption = false;
String fname = filenameOption;
-
+
// need to run long enough to fail: takes 25 to 35 sec on Mac
int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE);
- Assert.assertTrue(stat == 0);
+ LOG.info("Summary status from test1: status= " + stat);
+ Assert.assertEquals(0, stat);
}
+ /** Junit Test position read while writing. */
+ @Test
+ public void testWriteReadPos() throws IOException {
+ String fname = filenameOption;
+ positionReadOption = true; // position read
+ int stat = testWriteAndRead(fname, WR_NTIMES, WR_CHUNK_SIZE);
+ Assert.assertEquals(0, stat);
+ }
+
+
// equivalent of TestWriteRead1
private int clusterTestWriteRead1() throws IOException {
- int stat = testWriteAndRead(filenameOption, loopOption, chunkSizeOption);
+ int stat = testWriteAndRead(filenameOption, loopOption, chunkSizeOption);
return stat;
}
-
+
/**
- * Open the file to read from begin to end. Then close the file.
- * Return number of bytes read.
+ * Open the file to read from beginning to end. Then close the file.
+ * Return number of bytes read.
* Support both sequential read and position read.
*/
private long readData(String fname, byte[] buffer, long byteExpected)
- throws IOException {
+ throws IOException {
long totalByteRead = 0;
long beginPosition = 0;
Path path = getFullyQualifiedPath(fname);
@@ -135,55 +145,68 @@ public class TestWriteRead {
long visibleLenFromReadStream = getVisibleFileLength(in);
+ if (visibleLenFromReadStream < byteExpected)
+ {
+ throw new IOException(visibleLenFromReadStream
+ + " = visibleLenFromReadStream < bytesExpected= "
+ + byteExpected);
+ }
+
totalByteRead = readUntilEnd(in, buffer, buffer.length, fname,
- beginPosition, visibleLenFromReadStream, positionRead);
+ beginPosition, visibleLenFromReadStream, positionReadOption);
in.close();
- return totalByteRead + beginPosition;
+ // reading more data than visibleLen is OK, but not less
+ if (totalByteRead + beginPosition < byteExpected ){
+ throw new IOException("readData mismatch in byte read: expected="
+ + byteExpected + " ; got " + (totalByteRead + beginPosition));
+ }
+ return totalByteRead + beginPosition;
} catch (IOException e) {
throw new IOException("##### Caught Exception in readData. "
- + "Total Byte Read so far = " + totalByteRead
- + " beginPosition = " + beginPosition, e);
+ + "Total Byte Read so far = " + totalByteRead + " beginPosition = "
+ + beginPosition, e);
} finally {
- if (in != null)
+ if (in != null)
in.close();
}
}
/**
- * read chunks into buffer repeatedly until total of VisibleLen byte are read
+ * read chunks into buffer repeatedly until a total of VisibleLen bytes are read.
* Return total number of bytes read
*/
- private long readUntilEnd(FSDataInputStream in, byte[] buffer, long size,String fname,
- long pos, long visibleLen, boolean positionRead) throws IOException {
+ private long readUntilEnd(FSDataInputStream in, byte[] buffer, long size,
+ String fname, long pos, long visibleLen, boolean positionReadOption)
+ throws IOException {
- if (pos >= visibleLen || visibleLen <= 0 )
+ if (pos >= visibleLen || visibleLen <= 0)
return 0;
-
+
int chunkNumber = 0;
long totalByteRead = 0;
long currentPosition = pos;
int byteRead = 0;
long byteLeftToRead = visibleLen - pos;
int byteToReadThisRound = 0;
-
- if (!positionRead){
+
+ if (!positionReadOption) {
in.seek(pos);
currentPosition = in.getPos();
- }
+ }
if (verboseOption)
- LOG.info("reader begin: position: " + pos
- + " ; currentOffset = " + currentPosition + " ; bufferSize ="
- + buffer.length + " ; Filename = " + fname);
+ LOG.info("reader begin: position: " + pos + " ; currentOffset = "
+ + currentPosition + " ; bufferSize =" + buffer.length
+ + " ; Filename = " + fname);
try {
- while (byteLeftToRead > 0 && currentPosition < visibleLen ) {
- byteToReadThisRound = (int) (byteLeftToRead >= buffer.length ?
- buffer.length : byteLeftToRead);
- if (positionRead) {
+ while (byteLeftToRead > 0 && currentPosition < visibleLen) {
+ byteToReadThisRound = (int) (byteLeftToRead >= buffer.length
+ ? buffer.length : byteLeftToRead);
+ if (positionReadOption) {
byteRead = in.read(currentPosition, buffer, 0, byteToReadThisRound);
} else {
- byteRead = in.read(buffer, 0, byteToReadThisRound);
+ byteRead = in.read(buffer, 0, byteToReadThisRound);
}
if (byteRead <= 0)
break;
@@ -191,10 +214,10 @@ public class TestWriteRead {
totalByteRead += byteRead;
currentPosition += byteRead;
byteLeftToRead -= byteRead;
-
+
if (verboseOption) {
LOG.info("reader: Number of byte read: " + byteRead
- + " ; toatlByteRead = " + totalByteRead + " ; currentPosition="
+ + " ; totalByteRead = " + totalByteRead + " ; currentPosition="
+ currentPosition + " ; chunkNumber =" + chunkNumber
+ "; File name = " + fname);
}
@@ -202,21 +225,21 @@ public class TestWriteRead {
} catch (IOException e) {
throw new IOException(
"#### Exception caught in readUntilEnd: reader currentOffset = "
- + currentPosition + " ; totalByteRead =" + totalByteRead
- + " ; latest byteRead = " + byteRead + "; visibleLen= "
- + visibleLen + " ; bufferLen = " + buffer.length
- + " ; Filename = " + fname, e);
+ + currentPosition + " ; totalByteRead =" + totalByteRead
+ + " ; latest byteRead = " + byteRead + "; visibleLen= "
+ + visibleLen + " ; bufferLen = " + buffer.length
+ + " ; Filename = " + fname, e);
}
if (verboseOption)
- LOG.info("reader end: position: " + pos
- + " ; currentOffset = " + currentPosition + " ; totalByteRead ="
- + totalByteRead + " ; Filename = " + fname);
+ LOG.info("reader end: position: " + pos + " ; currentOffset = "
+ + currentPosition + " ; totalByteRead =" + totalByteRead
+ + " ; Filename = " + fname);
return totalByteRead;
}
- private int writeData(FSDataOutputStream out, byte[] buffer, int length)
+ private void writeData(FSDataOutputStream out, byte[] buffer, int length)
throws IOException {
int totalByteWritten = 0;
@@ -229,76 +252,97 @@ public class TestWriteRead {
totalByteWritten += toWriteThisRound;
remainToWrite -= toWriteThisRound;
}
- return totalByteWritten;
+ if (totalByteWritten != length) {
+ throw new IOException("WriteData: failure in write. Attempt to write "
+ + length + " ; written=" + totalByteWritten);
+ }
}
- /**
- * Common routine to do position read while open the file for write.
- * After each iteration of write, do a read of the file from begin to end.
+ /**
+ * Common routine to do position read while the file is open for write.
+ * After each iteration of write, do a read of the file from beginning to end.
+ * Return 0 on success, else the number of failures.
*/
private int testWriteAndRead(String fname, int loopN, int chunkSize)
throws IOException {
-
+
int countOfFailures = 0;
long byteVisibleToRead = 0;
FSDataOutputStream out = null;
byte[] outBuffer = new byte[BUFFER_SIZE];
byte[] inBuffer = new byte[BUFFER_SIZE];
-
+
for (int i = 0; i < BUFFER_SIZE; i++) {
outBuffer[i] = (byte) (i & 0x00ff);
}
try {
Path path = getFullyQualifiedPath(fname);
+ long fileLengthBeforeOpen = 0;
- out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.CREATE)) :
- mfs.create(path);
+ if (ifExists(path)) {
+ if (truncateOption) {
+ out = useFCOption ? mfc.create(path,EnumSet.of(CreateFlag.OVERWRITE)):
+ mfs.create(path, truncateOption);
+ LOG.info("File already exists. File open with Truncate mode: "+ path);
+ } else {
+ out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.APPEND))
+ : mfs.append(path);
+ fileLengthBeforeOpen = getFileLengthFromNN(path);
+ LOG.info("File already exists of size " + fileLengthBeforeOpen
+ + " File open for Append mode: " + path);
+ }
+ } else {
+ out = useFCOption ? mfc.create(path, EnumSet.of(CreateFlag.CREATE))
+ : mfs.create(path);
+ }
- long totalByteWritten = 0;
- long totalByteVisible = 0;
+ long totalByteWritten = fileLengthBeforeOpen;
+ long totalByteVisible = fileLengthBeforeOpen;
long totalByteWrittenButNotVisible = 0;
- int byteWrittenThisTime;
boolean toFlush;
for (int i = 0; i < loopN; i++) {
toFlush = (i % 2) == 0;
- byteWrittenThisTime = writeData(out, outBuffer, chunkSize);
+ writeData(out, outBuffer, chunkSize);
- totalByteWritten += byteWrittenThisTime;
+ totalByteWritten += chunkSize;
if (toFlush) {
out.hflush();
- totalByteVisible += byteWrittenThisTime
- + totalByteWrittenButNotVisible;
+ totalByteVisible += chunkSize + totalByteWrittenButNotVisible;
totalByteWrittenButNotVisible = 0;
} else {
- totalByteWrittenButNotVisible += byteWrittenThisTime;
+ totalByteWrittenButNotVisible += chunkSize;
}
if (verboseOption) {
- LOG.info("TestReadWrite - Written " + byteWrittenThisTime
+ LOG.info("TestReadWrite - Written " + chunkSize
+ ". Total written = " + totalByteWritten
+ ". TotalByteVisible = " + totalByteVisible + " to file "
+ fname);
}
- byteVisibleToRead = readData(fname, inBuffer, totalByteVisible);
-
- String readmsg;
-
+ byteVisibleToRead = readData(fname, inBuffer, totalByteVisible);
+
+ String readmsg = "Written=" + totalByteWritten + " ; Expected Visible="
+ + totalByteVisible + " ; Got Visible=" + byteVisibleToRead
+ + " of file " + fname;
+
if (byteVisibleToRead >= totalByteVisible
&& byteVisibleToRead <= totalByteWritten) {
- readmsg = "pass: reader sees expected number of visible byte "
- + byteVisibleToRead + " of file " + fname + " [pass]";
+ readmsg = "pass: reader sees expected number of visible byte. "
+ + readmsg + " [pass]";
} else {
countOfFailures++;
- readmsg = "fail: reader does not see expected number of visible byte "
- + byteVisibleToRead + " of file " + fname + " [fail]";
- }
- LOG.info(readmsg);
+ readmsg = "fail: reader see different number of visible byte. "
+ + readmsg + " [fail]";
+ if (abortTestOnFailure) {
+ throw new IOException(readmsg);
+ }
+ }
+ LOG.info(readmsg);
}
// test the automatic flush after close
@@ -310,22 +354,35 @@ public class TestWriteRead {
out.close();
byteVisibleToRead = readData(fname, inBuffer, totalByteVisible);
- long lenFromFc = getFileLengthFromNN(path);
+ String readmsg2 = "Written=" + totalByteWritten + " ; Expected Visible="
+ + totalByteVisible + " ; Got Visible=" + byteVisibleToRead
+ + " of file " + fname;
String readmsg;
- if (byteVisibleToRead == totalByteVisible) {
- readmsg = "PASS: reader sees expected size of file " + fname
- + " after close. File Length from NN: " + lenFromFc + " [Pass]";
+
+ if (byteVisibleToRead >= totalByteVisible
+ && byteVisibleToRead <= totalByteWritten) {
+ readmsg = "pass: reader sees expected number of visible byte on close. "
+ + readmsg2 + " [pass]";
} else {
countOfFailures++;
- readmsg = "FAIL: reader sees is different size of file " + fname
- + " after close. File Length from NN: " + lenFromFc + " [Fail]";
+ readmsg = "fail: reader sees different number of visible byte on close. "
+ + readmsg2 + " [fail]";
+ LOG.info(readmsg);
+ if (abortTestOnFailure)
+ throw new IOException(readmsg);
}
- LOG.info(readmsg);
+ // now check if NN got the same length
+ long lenFromFc = getFileLengthFromNN(path);
+ if (lenFromFc != byteVisibleToRead){
+ readmsg = "fail: reader sees different number of visible byte from NN "
+ + readmsg2 + " [fail]";
+ throw new IOException(readmsg);
+ }
} catch (IOException e) {
throw new IOException(
- "##### Caught Exception in testAppendWriteAndRead. Close file. "
+ "##### Caught Exception in testAppendWriteAndRead. Close file. "
+ "Total Byte Read so far = " + byteVisibleToRead, e);
} finally {
if (out != null)
@@ -344,8 +401,8 @@ public class TestWriteRead {
// length of a file (path name) from NN.
private long getFileLengthFromNN(Path path) throws IOException {
- FileStatus fileStatus = useFCOption ?
- mfc.getFileStatus(path) : mfs.getFileStatus(path);
+ FileStatus fileStatus = useFCOption ? mfc.getFileStatus(path) :
+ mfs.getFileStatus(path);
return fileStatus.getLen();
}
@@ -359,54 +416,88 @@ public class TestWriteRead {
}
private Path getFullyQualifiedPath(String pathString) {
- return useFCOption ?
- mfc.makeQualified(new Path(ROOT_DIR, pathString)) :
- mfs.makeQualified(new Path(ROOT_DIR, pathString));
+ return useFCOption ? mfc.makeQualified(new Path(ROOT_DIR, pathString))
+ : mfs.makeQualified(new Path(ROOT_DIR, pathString));
}
- private void usage(){
- System.out.println("Usage: -chunkSize nn -loop ntime -f filename");
+ private void usage() {
+ LOG.info("Usage: [-useSeqRead | -usePosRead] [-append|truncate]"
+ + " -chunkSize nn -loop ntimes -f filename");
+ System.out.println("Usage: [-useSeqRead | -usePosRead] [-append|truncate]"
+ + " -chunkSize nn -loop ntimes -f filename");
+ System.out.println("Defaults: -chunkSize=10000, -loop=10, -f=/tmp/fileX1, "
+ + "use sequential read, use append mode if file already exists");
System.exit(0);
}
-
- private void getCmdLineOption(String[] args){
- for (int i = 0; i < args.length; i++){
+
+ private void dumpOptions() {
+ LOG.info(" Option setting: filenameOption = " + filenameOption);
+ LOG.info(" Option setting: chunkSizeOption = " + chunkSizeOption);
+ LOG.info(" Option setting: loopOption = " + loopOption);
+ LOG.info(" Option setting: posReadOption = " + positionReadOption);
+ LOG.info(" Option setting: truncateOption = " + truncateOption);
+ LOG.info(" Option setting: verboseOption = " + verboseOption);
+ }
+
+ private void getCmdLineOption(String[] args) {
+ for (int i = 0; i < args.length; i++) {
if (args[i].equals("-f")) {
filenameOption = args[++i];
- } else if (args[i].equals("-chunkSize")){
+ } else if (args[i].equals("-chunkSize")) {
chunkSizeOption = Integer.parseInt(args[++i]);
- } else if (args[i].equals("-loop")){
+ } else if (args[i].equals("-loop")) {
loopOption = Integer.parseInt(args[++i]);
- } else {
+ } else if (args[i].equals("-usePosRead")) {
+ positionReadOption = true;
+ } else if (args[i].equals("-useSeqRead")) {
+ positionReadOption = false;
+ } else if (args[i].equals("-truncate")) {
+ truncateOption = true;
+ } else if (args[i].equals("-append")) {
+ truncateOption = false;
+ } else if (args[i].equals("-verbose")) {
+ verboseOption = true;
+ } else if (args[i].equals("-noVerbose")) {
+ verboseOption = false;
+ } else {
usage();
}
}
+ if (verboseOption)
+ dumpOptions();
return;
}
/**
- * Entry point of the test when using a real cluster.
- * Usage: [-loop ntimes] [-chunkSize nn] [-f filename]
- * -loop: iterate ntimes: each iteration consists of a write, then a read
+ * Entry point of the test when using a real cluster.
+ * Usage: [-loop ntimes] [-chunkSize nn] [-f filename]
+ * [-useSeqRead |-usePosRead] [-append |-truncate] [-verbose |-noVerbose]
+ * -loop: iterate ntimes: each iteration consists of a write, then a read
* -chunkSize: number of byte for each write
- * -f filename: filename to write and read
- * Default: ntimes = 10; chunkSize = 10000; filename = /tmp/fileX1
+ * -f filename: filename to write and read
+ * [-useSeqRead | -usePosRead]: use Position Read, or default Sequential Read
+ * [-append | -truncate]: if file already exist, Truncate or default Append
+ * [-verbose | -noVerbose]: additional debugging messages if verbose is on
+ * Default: -loop = 10; -chunkSize = 10000; -f filename = /tmp/fileX1
+ * Use Sequential Read, Append Mode, verbose on.
*/
public static void main(String[] args) {
try {
TestWriteRead trw = new TestWriteRead();
trw.initClusterModeTest();
trw.getCmdLineOption(args);
+
int stat = trw.clusterTestWriteRead1();
-
- if (stat == 0){
- System.out.println("Status: clusterTestWriteRead1 test PASS");
+
+ if (stat == 0) {
+ System.out.println("Status: clusterTestWriteRead1 test PASS");
} else {
- System.out.println("Status: clusterTestWriteRead1 test FAIL");
+ System.out.println("Status: clusterTestWriteRead1 test FAIL with "
+ + stat + " failures");
}
System.exit(stat);
} catch (IOException e) {
- LOG.info("#### Exception in Main");
+ LOG.info("#### Exception in Main");
e.printStackTrace();
System.exit(-2);
}
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java Tue Jun 7 01:11:15 2011
@@ -36,7 +36,9 @@ import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
@@ -50,7 +52,6 @@ import org.apache.hadoop.hdfs.server.dat
import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.test.GenericTestUtils;
/**
* This class defines a number of static helper methods used by the
@@ -465,7 +466,7 @@ public class UpgradeUtilities {
public static void createBlockPoolVersionFile(File bpDir,
StorageInfo version, String bpid) throws IOException {
// Create block pool version files
- if (version.layoutVersion < Storage.LAST_PRE_FEDERATION_LAYOUT_VERSION) {
+ if (LayoutVersion.supports(Feature.FEDERATION, version.layoutVersion)) {
File bpCurDir = new File(bpDir, Storage.STORAGE_DIR_CURRENT);
BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(version,
bpid);
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/DataNodeAdapter.java Tue Jun 7 01:11:15 2011
@@ -37,4 +37,9 @@ public class DataNodeAdapter {
final long blkId) {
return ((FSDataset)dn.data).fetchReplicaInfo(bpid, blkId);
}
+
+ public static void setHeartbeatsDisabledForTests(DataNode dn,
+ boolean heartbeatsDisabledForTests) {
+ dn.setHeartbeatsDisabledForTests(heartbeatsDisabledForTests);
+ }
}
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java Tue Jun 7 01:11:15 2011
@@ -231,7 +231,8 @@ public class TestDirectoryScanner extend
fds = (FSDataset) cluster.getDataNodes().get(0).getFSDataset();
CONF.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
parallelism);
- scanner = new DirectoryScanner(fds, CONF);
+ DataNode dn = cluster.getDataNodes().get(0);
+ scanner = new DirectoryScanner(dn, fds, CONF);
scanner.setRetainDiffs(true);
// Add files with 100 blocks
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java Tue Jun 7 01:11:15 2011
@@ -73,4 +73,8 @@ public class NameNodeAdapter {
ns.readUnlock();
return r;
}
+
+ public static String getLeaseHolderForPath(NameNode namenode, String path) {
+ return namenode.getNamesystem().leaseManager.getLeaseByPath(path).getHolder();
+ }
}
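
One caveat with the new accessor: assuming LeaseManager.getLeaseByPath(path) returns null when no lease exists for the path, the one-liner above would throw NullPointerException in that case. A hypothetical null-tolerant variant (not in this commit):

    public static String getLeaseHolderForPath(NameNode namenode, String path) {
      LeaseManager.Lease lease =
          namenode.getNamesystem().leaseManager.getLeaseByPath(path);
      return lease == null ? null : lease.getHolder();
    }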
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java Tue Jun 7 01:11:15 2011
@@ -30,6 +30,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.FSImage;
@@ -38,6 +40,7 @@ import org.apache.hadoop.hdfs.server.nam
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -225,6 +228,28 @@ public class OfflineEditsViewerHelper {
// sync to disk, otherwise we parse partial edits
cluster.getNameNode().getFSImage().getEditLog().logSync();
+
+ // OP_REASSIGN_LEASE 22
+ String filePath = "/hard-lease-recovery-test";
+ byte[] bytes = "foo-bar-baz".getBytes();
+ DFSClientAdapter.stopLeaseRenewer(dfs.getClient());
+ FSDataOutputStream leaseRecoveryPath = dfs.create(new Path(filePath));
+ leaseRecoveryPath.write(bytes);
+ leaseRecoveryPath.hflush();
+ // Set the hard lease timeout to 1 second.
+ cluster.setLeasePeriod(60 * 1000, 1000);
+ // wait for lease recovery to complete
+ LocatedBlocks locatedBlocks;
+ do {
+ try {
+ Thread.sleep(1000);
+ } catch (InterruptedException e) {
+ LOG.info("Innocuous exception", e);
+ }
+ locatedBlocks = DFSClientAdapter.callGetBlockLocations(
+ cluster.getNameNode(), filePath, 0L, bytes.length);
+ } while (locatedBlocks.isUnderConstruction());
+
// Force a roll so we get an OP_END_LOG_SEGMENT txn
return cluster.getNameNode().rollEditLog();
}
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java Tue Jun 7 01:11:15 2011
@@ -62,8 +62,20 @@ public class TestAllowFormat {
throw new IOException("Could not delete hdfs directory '" + hdfsDir +
"'");
}
+
+ // Test has multiple name directories.
+ // Format should not really prompt us if one of the directories exists,
+ // but is empty. So in case the test hangs on an input, it means something
+ // could be wrong in the format prompting code. (HDFS-1636)
LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());
- config.set(DFS_NAMENODE_NAME_DIR_KEY, new File(hdfsDir, "name").getPath());
+ File nameDir1 = new File(hdfsDir, "name1");
+ File nameDir2 = new File(hdfsDir, "name2");
+
+ // To test multiple directory handling, we pre-create one of the name directories.
+ nameDir1.mkdirs();
+
+ // Set multiple name directories.
+ config.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir1.getPath() + "," + nameDir2.getPath());
config.set(DFS_DATANODE_DATA_DIR_KEY, new File(hdfsDir, "data").getPath());
config.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(hdfsDir, "secondary").getPath());
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Tue Jun 7 01:11:15 2011
@@ -506,5 +506,40 @@ public class TestFsck extends TestCase {
if (cluster != null) {cluster.shutdown();}
}
}
+
+ /**
+ * Test that running the fsck command with illegal arguments prints the
+ * proper usage.
+ */
+ public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
+ MiniDFSCluster cluster = null;
+ try {
+ // bring up a one-node cluster
+ Configuration conf = new HdfsConfiguration();
+ cluster = new MiniDFSCluster.Builder(conf).build();
+ String fileName = "/test.txt";
+ Path filePath = new Path(fileName);
+ FileSystem fs = cluster.getFileSystem();
+ // create a one-block file
+ DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
+ DFSTestUtil.waitReplication(fs, filePath, (short) 1);
+
+ // passing illegal option
+ String outStr = runFsck(conf, -1, true, fileName, "-thisIsNotAValidFlag");
+ System.out.println(outStr);
+ assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+ // passing multiple paths as arguments
+ outStr = runFsck(conf, -1, true, "/", fileName);
+ System.out.println(outStr);
+ assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+ // clean up file system
+ fs.delete(filePath, true);
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
}
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Tue Jun 7 01:11:15 2011
@@ -46,7 +46,7 @@ public class TestNameNodeMetrics extends
private static final Configuration CONF = new HdfsConfiguration();
private static final int DFS_REPLICATION_INTERVAL = 1;
private static final Path TEST_ROOT_DIR_PATH =
- new Path(System.getProperty("test.build.data", "build/test/data"));
+ new Path("/testNameNodeMetrics");
private static final String NN_METRICS = "NameNodeActivity";
private static final String NS_METRICS = "FSNamesystem";
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java Tue Jun 7 01:11:15 2011
@@ -45,7 +45,7 @@ import org.junit.Test;
*/
public class TestGetConf {
enum TestType {
- NAMENODE, BACKUP, SECONDARY
+ NAMENODE, BACKUP, SECONDARY, NNRPCADDRESSES
}
/** Setup federation nameServiceIds in the configuration */
@@ -103,6 +103,8 @@ public class TestGetConf {
return DFSUtil.getBackupNodeAddresses(conf);
case SECONDARY:
return DFSUtil.getSecondaryNameNodeAddresses(conf);
+ case NNRPCADDRESSES:
+ return DFSUtil.getNNServiceRpcAddresses(conf);
}
return null;
}
@@ -140,6 +142,9 @@ public class TestGetConf {
case SECONDARY:
args[0] = Command.SECONDARY.getName();
break;
+ case NNRPCADDRESSES:
+ args[0] = Command.NNRPCADDRESSES.getName();
+ break;
}
return runTool(conf, args, success);
}
@@ -147,9 +152,16 @@ public class TestGetConf {
/**
* Using {@link GetConf} methods get the list of given {@code type} of
* addresses
+ *
+ * @param type TestType
+ * @param conf configuration
+ * @param checkPort if checkPort is true, verify NNRPCADDRESSES whose
+ * expected value is hostname:rpc-port; if checkPort is false, the
+ * expected value is hostname only
+ * @param expected expected addresses
*/
private void getAddressListFromTool(TestType type, HdfsConfiguration conf,
- List<InetSocketAddress> expected) throws Exception {
+ boolean checkPort, List<InetSocketAddress> expected) throws Exception {
String out = getAddressListFromTool(type, conf, expected.size() != 0);
List<String> values = new ArrayList<String>();
@@ -165,15 +177,19 @@ public class TestGetConf {
int i = 0;
String[] expectedHosts = new String[expected.size()];
for (InetSocketAddress addr : expected) {
- expectedHosts[i++] = addr.getHostName();
+ if (!checkPort) {
+ expectedHosts[i++] = addr.getHostName();
+ }else {
+ expectedHosts[i++] = addr.getHostName()+":"+addr.getPort();
+ }
}
// Compare two arrays
assertTrue(Arrays.equals(expectedHosts, actual));
}
-
+
private void verifyAddresses(HdfsConfiguration conf, TestType type,
- String... expected) throws Exception {
+ boolean checkPort, String... expected) throws Exception {
// Ensure DFSUtil returned the right set of addresses
List<InetSocketAddress> list = getAddressListFromConf(type, conf);
String[] actual = toStringArray(list);
@@ -182,7 +198,7 @@ public class TestGetConf {
assertTrue(Arrays.equals(expected, actual));
// Test GetConf returned addresses
- getAddressListFromTool(type, conf, list);
+ getAddressListFromTool(type, conf, checkPort, list);
}
private static String getNameServiceId(int index) {
@@ -199,6 +215,7 @@ public class TestGetConf {
getAddressListFromTool(TestType.NAMENODE, conf, false);
System.out.println(getAddressListFromTool(TestType.BACKUP, conf, false));
getAddressListFromTool(TestType.SECONDARY, conf, false);
+ getAddressListFromTool(TestType.NNRPCADDRESSES, conf, false);
for (Command cmd : Command.values()) {
CommandHandler handler = Command.getHandler(cmd.getName());
if (handler.key != null) {
@@ -230,26 +247,29 @@ public class TestGetConf {
// Returned namenode address should match default address
conf.set(FS_DEFAULT_NAME_KEY, "hdfs://localhost:1000");
- verifyAddresses(conf, TestType.NAMENODE, "localhost:1000");
+ verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1000");
+ verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1000");
// Returned address should match backupnode RPC address
- conf.set(DFS_NAMENODE_BACKUP_ADDRESS_KEY, "localhost:1001");
- verifyAddresses(conf, TestType.BACKUP, "localhost:1001");
+ conf.set(DFS_NAMENODE_BACKUP_ADDRESS_KEY,"localhost:1001");
+ verifyAddresses(conf, TestType.BACKUP, false, "localhost:1001");
// Returned address should match secondary http address
conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "localhost:1002");
- verifyAddresses(conf, TestType.SECONDARY, "localhost:1002");
+ verifyAddresses(conf, TestType.SECONDARY, false, "localhost:1002");
// Returned namenode address should match service RPC address
conf = new HdfsConfiguration();
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "localhost:1000");
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
- verifyAddresses(conf, TestType.NAMENODE, "localhost:1000");
+ verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1000");
+ verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1000");
// Returned address should match RPC address
conf = new HdfsConfiguration();
conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
- verifyAddresses(conf, TestType.NAMENODE, "localhost:1001");
+ verifyAddresses(conf, TestType.NAMENODE, false, "localhost:1001");
+ verifyAddresses(conf, TestType.NNRPCADDRESSES, true, "localhost:1001");
}
/**
@@ -272,13 +292,15 @@ public class TestGetConf {
DFS_NAMENODE_BACKUP_ADDRESS_KEY, nsCount, 2000);
String[] secondaryAddresses = setupAddress(conf,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, nsCount, 3000);
- verifyAddresses(conf, TestType.NAMENODE, nnAddresses);
- verifyAddresses(conf, TestType.BACKUP, backupAddresses);
- verifyAddresses(conf, TestType.SECONDARY, secondaryAddresses);
-
- // Test to ensure namenode, backup and secondary namenode addresses are
- // returned from federation configuration. Returned namenode addresses are
- // based on regular RPC address in the absence of service RPC address
+ verifyAddresses(conf, TestType.NAMENODE, false, nnAddresses);
+ verifyAddresses(conf, TestType.BACKUP, false, backupAddresses);
+ verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses);
+ verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses);
+
+ // Test to ensure namenode, backup, secondary namenode addresses and
+ // namenode rpc addresses are returned from federation configuration.
+ // Returned namenode addresses are based on regular RPC address
+ // in the absence of service RPC address.
conf = new HdfsConfiguration(false);
setupNameServices(conf, nsCount);
nnAddresses = setupAddress(conf,
@@ -287,14 +309,15 @@ public class TestGetConf {
DFS_NAMENODE_BACKUP_ADDRESS_KEY, nsCount, 2000);
secondaryAddresses = setupAddress(conf,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, nsCount, 3000);
- verifyAddresses(conf, TestType.NAMENODE, nnAddresses);
- verifyAddresses(conf, TestType.BACKUP, backupAddresses);
- verifyAddresses(conf, TestType.SECONDARY, secondaryAddresses);
+ verifyAddresses(conf, TestType.NAMENODE, false, nnAddresses);
+ verifyAddresses(conf, TestType.BACKUP, false, backupAddresses);
+ verifyAddresses(conf, TestType.SECONDARY, false, secondaryAddresses);
+ verifyAddresses(conf, TestType.NNRPCADDRESSES, true, nnAddresses);
}
/**
- * Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP}
- * and {@link Command#SECONDARY}
+ * Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP},
+ * {@link Command#SECONDARY} and {@link Command#NNRPCADDRESSES}
*/
public void testTool() throws Exception {
HdfsConfiguration conf = new HdfsConfiguration(false);
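
For reference, the new NNRPCADDRESSES type exercises DFSUtil.getNNServiceRpcAddresses(conf), which, per the comments in the test above, prefers the service RPC address key and falls back to the regular RPC address. A minimal sketch (the expected result in the comment follows the test's own assertions):

    import java.net.InetSocketAddress;
    import java.util.List;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import static org.apache.hadoop.hdfs.DFSConfigKeys.*;

    class NnRpcAddressSketch {
      static void demo() throws Exception {
        HdfsConfiguration conf = new HdfsConfiguration();
        conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "localhost:1000");
        conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
        List<InetSocketAddress> addrs = DFSUtil.getNNServiceRpcAddresses(conf);
        // expected: one entry for localhost:1000 (service RPC key wins)
      }
    }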
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/test/hdfs/org/apache/hadoop/hdfs/tools/offlineEditsViewer/editsStored.xml Tue Jun 7 01:11:15 2011
@@ -1,8 +1,8 @@
<?xml version="1.0"?>
<EDITS>
- <EDITS_VERSION>-37</EDITS_VERSION>
+ <EDITS_VERSION>-38</EDITS_VERSION>
<RECORD>
- <OPCODE>23</OPCODE>
+ <OPCODE>24</OPCODE>
<DATA>
<TRANSACTION_ID>1</TRANSACTION_ID>
</DATA>
@@ -490,9 +490,40 @@
<CHECKSUM>1382094146</CHECKSUM>
</RECORD>
<RECORD>
- <OPCODE>22</OPCODE>
+ <OPCODE>0</OPCODE>
<DATA>
<TRANSACTION_ID>33</TRANSACTION_ID>
+ <LENGTH>5</LENGTH>
+ <PATH>/reassign-lease-test</PATH>
+ <REPLICATION>1</REPLICATION>
+ <MTIME>1286491964741</MTIME>
+ <ATIME>1286491964741</ATIME>
+ <BLOCKSIZE>512</BLOCKSIZE>
+ <NUMBLOCKS>0</NUMBLOCKS>
+ <PERMISSION_STATUS>
+ <USERNAME>atm</USERNAME>
+ <GROUPNAME>supergroup</GROUPNAME>
+ <FS_PERMISSIONS>420</FS_PERMISSIONS>
+ </PERMISSION_STATUS>
+ <CLIENT_NAME>DFSClient_871171074</CLIENT_NAME>
+ <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
+ </DATA>
+ <CHECKSUM>1975140107</CHECKSUM>
+ </RECORD>
+ <RECORD>
+ <OPCODE>22</OPCODE>
+ <DATA>
+ <TRANSACTION_ID>34</TRANSACTION_ID>
+ <CLIENT_NAME>DFSClient_871171074</CLIENT_NAME>
+ <PATH>/reassign-lease-test</PATH>
+ <CLIENT_NAME>HDFS_NameNode</CLIENT_NAME>
+ </DATA>
+ <CHECKSUM>1975140107</CHECKSUM>
+ </RECORD>
+ <RECORD>
+ <OPCODE>23</OPCODE>
+ <DATA>
+ <TRANSACTION_ID>35</TRANSACTION_ID>
</DATA>
<CHECKSUM>1975140107</CHECKSUM>
</RECORD>
Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jun 7 01:11:15 2011
@@ -3,4 +3,4 @@
/hadoop/hdfs/branches/HDFS-1052/src/webapps/datanode:987665-1095512
/hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
/hadoop/hdfs/branches/branch-0.21/src/webapps/datanode:820487
-/hadoop/hdfs/trunk/src/webapps/datanode:1086482-1128452
+/hadoop/hdfs/trunk/src/webapps/datanode:1086482-1132839
Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jun 7 01:11:15 2011
@@ -3,4 +3,4 @@
/hadoop/hdfs/branches/HDFS-1052/src/webapps/hdfs:987665-1095512
/hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
/hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs:820487
-/hadoop/hdfs/trunk/src/webapps/hdfs:1086482-1128452
+/hadoop/hdfs/trunk/src/webapps/hdfs:1086482-1132839
Modified: hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/dfsclusterhealth.xsl
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/dfsclusterhealth.xsl?rev=1132846&r1=1132845&r2=1132846&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/dfsclusterhealth.xsl (original)
+++ hadoop/hdfs/branches/HDFS-1073/src/webapps/hdfs/dfsclusterhealth.xsl Tue Jun 7 01:11:15 2011
@@ -29,6 +29,7 @@
<html>
<head>
<link rel="stylesheet" type="text/css" href="static/hadoop.css" />
+ <style type="text/css">th,span {width:8em;}</style>
<title>
Hadoop cluster
<xsl:value-of select="cluster/@clusterId" />
@@ -102,7 +103,7 @@
<thead>
<xsl:for-each select="cluster/namenodes/node[1]/item">
<th>
- <xsl:value-of select="@label" />
+ <SPAN><xsl:value-of select="@label" /></SPAN>
</th>
</xsl:for-each>
</thead>
Propchange: hadoop/hdfs/branches/HDFS-1073/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Jun 7 01:11:15 2011
@@ -3,4 +3,4 @@
/hadoop/hdfs/branches/HDFS-1052/src/webapps/secondary:987665-1095512
/hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
/hadoop/hdfs/branches/branch-0.21/src/webapps/secondary:820487
-/hadoop/hdfs/trunk/src/webapps/secondary:1086482-1128452
+/hadoop/hdfs/trunk/src/webapps/secondary:1086482-1132839