Posted to hdfs-commits@hadoop.apache.org by ha...@apache.org on 2009/10/01 00:57:34 UTC
svn commit: r820487 [6/6] - in /hadoop/hdfs/branches/branch-0.21: ./
.eclipse.templates/.launches/ lib/ src/contrib/block_forensics/
src/contrib/block_forensics/client/ src/contrib/block_forensics/ivy/
src/contrib/block_forensics/src/ src/contrib/block...
Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java?rev=820487&r1=820486&r2=820487&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java Wed Sep 30 22:57:30 2009
@@ -21,33 +21,30 @@
import java.util.List;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.datanode.DataBlockScanner;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
+import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.junit.Assert;
+import org.junit.Test;
/**
* This tests InterDatanodeProtocol for block handling.
*/
public class TestInterDatanodeProtocol extends junit.framework.TestCase {
- public static void checkMetaInfo(Block b, InterDatanodeProtocol idp,
- DataBlockScanner scanner) throws IOException {
- BlockMetaDataInfo metainfo = idp.getBlockMetaDataInfo(b);
+ public static void checkMetaInfo(Block b, DataNode dn) throws IOException {
+ Block metainfo = dn.data.getStoredBlock(b.getBlockId());
assertEquals(b.getBlockId(), metainfo.getBlockId());
assertEquals(b.getNumBytes(), metainfo.getNumBytes());
- if (scanner != null) {
- assertEquals(scanner.getLastScanTime(b),
- metainfo.getLastScanTime());
- }
}
public static LocatedBlock getLastLocatedBlock(
@@ -99,16 +96,155 @@
//verify the replica's meta info
Block b = locatedblock.getBlock();
InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
- checkMetaInfo(b, idp, datanode.blockScanner);
+ checkMetaInfo(b, datanode);
+ long recoveryId = b.getGenerationStamp() + 1;
+ idp.initReplicaRecovery(
+ new RecoveringBlock(b, locatedblock.getLocations(), recoveryId));
//verify updateBlock
Block newblock = new Block(
b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
- idp.updateBlock(b, newblock, false);
- checkMetaInfo(newblock, idp, datanode.blockScanner);
+ idp.updateReplicaUnderRecovery(b, recoveryId, newblock.getNumBytes());
+ checkMetaInfo(newblock, datanode);
}
finally {
if (cluster != null) {cluster.shutdown();}
}
}
+
+ private static ReplicaInfo createReplicaInfo(Block b) {
+ return new ReplicaBeingWritten(b.getBlockId(), b.getGenerationStamp(),
+ null, null);
+ }
+
+ private static void assertEquals(ReplicaInfo originalInfo, ReplicaRecoveryInfo recoveryInfo) {
+ Assert.assertEquals(originalInfo.getBlockId(), recoveryInfo.getBlockId());
+ Assert.assertEquals(originalInfo.getGenerationStamp(), recoveryInfo.getGenerationStamp());
+ Assert.assertEquals(originalInfo.getBytesOnDisk(), recoveryInfo.getNumBytes());
+ Assert.assertEquals(originalInfo.getState(), recoveryInfo.getOriginalReplicaState());
+ }
+
+ /** Test {@link FSDataset#initReplicaRecovery(ReplicasMap, Block, long)} */
+ @Test
+ public void testInitReplicaRecovery() throws IOException {
+ final long firstblockid = 10000L;
+ final long gs = 7777L;
+ final long length = 22L;
+ final ReplicasMap map = new ReplicasMap();
+ final Block[] blocks = new Block[5];
+ for(int i = 0; i < blocks.length; i++) {
+ blocks[i] = new Block(firstblockid + i, length, gs);
+ map.add(createReplicaInfo(blocks[i]));
+ }
+
+ {
+ //normal case
+ final Block b = blocks[0];
+ final ReplicaInfo originalInfo = map.get(b);
+
+ final long recoveryid = gs + 1;
+ final ReplicaRecoveryInfo recoveryInfo = FSDataset.initReplicaRecovery(map, blocks[0], recoveryid);
+ assertEquals(originalInfo, recoveryInfo);
+
+ final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery)map.get(b);
+ Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
+ Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());
+
+ //recover one more time
+ final long recoveryid2 = gs + 2;
+ final ReplicaRecoveryInfo recoveryInfo2 = FSDataset.initReplicaRecovery(map, blocks[0], recoveryid2);
+ assertEquals(originalInfo, recoveryInfo2);
+
+ final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery)map.get(b);
+ Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
+ Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
+
+ //case RecoveryInProgressException
+ try {
+ FSDataset.initReplicaRecovery(map, b, recoveryid);
+ Assert.fail();
+ }
+ catch(RecoveryInProgressException ripe) {
+ System.out.println("GOOD: getting " + ripe);
+ }
+ }
+
+ { //replica not found
+ final long recoveryid = gs + 1;
+ final Block b = new Block(firstblockid - 1, length, gs);
+ ReplicaRecoveryInfo r = FSDataset.initReplicaRecovery(map, b, recoveryid);
+ Assert.assertNull("Data-node should not have this replica.", r);
+ }
+
+ { //case "THIS IS NOT SUPPOSED TO HAPPEN"
+ final long recoveryid = gs - 1;
+ final Block b = new Block(firstblockid + 1, length, gs);
+ try {
+ FSDataset.initReplicaRecovery(map, b, recoveryid);
+ Assert.fail();
+ }
+ catch(IOException ioe) {
+ System.out.println("GOOD: getting " + ioe);
+ }
+ }
+
+ }
+
+ /** Test {@link FSDataset#updateReplicaUnderRecovery(ReplicaUnderRecovery, long, long)} */
+ @Test
+ public void testUpdateReplicaUnderRecovery() throws IOException {
+ final Configuration conf = new Configuration();
+ MiniDFSCluster cluster = null;
+
+ try {
+ cluster = new MiniDFSCluster(conf, 3, true, null);
+ cluster.waitActive();
+
+ //create a file
+ DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+ String filestr = "/foo";
+ Path filepath = new Path(filestr);
+ DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
+
+ //get block info
+ final LocatedBlock locatedblock = getLastLocatedBlock(
+ dfs.getClient().getNamenode(), filestr);
+ final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
+ Assert.assertTrue(datanodeinfo.length > 0);
+
+ //get DataNode and FSDataset objects
+ final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
+ Assert.assertTrue(datanode != null);
+ Assert.assertTrue(datanode.data instanceof FSDataset);
+ final FSDataset fsdataset = (FSDataset)datanode.data;
+
+ //initReplicaRecovery
+ final Block b = locatedblock.getBlock();
+ final long recoveryid = b.getGenerationStamp() + 1;
+ final long newlength = b.getNumBytes() - 1;
+ FSDataset.initReplicaRecovery(fsdataset.volumeMap, b, recoveryid);
+
+ //check replica
+ final ReplicaInfo replica = fsdataset.getReplica(b.getBlockId());
+ Assert.assertTrue(replica instanceof ReplicaUnderRecovery);
+ final ReplicaUnderRecovery rur = (ReplicaUnderRecovery)replica;
+
+ //check meta data before update
+ FSDataset.checkReplicaFiles(rur);
+
+ //update
+ final ReplicaInfo finalized =
+ (ReplicaInfo)fsdataset.updateReplicaUnderRecovery(
+ rur, recoveryid, newlength);
+
+ //check meta data after update
+ FSDataset.checkReplicaFiles(finalized);
+ Assert.assertEquals(b.getBlockId(), finalized.getBlockId());
+ Assert.assertEquals(recoveryid, finalized.getGenerationStamp());
+ Assert.assertEquals(newlength, finalized.getNumBytes());
+
+ } finally {
+ if (cluster != null) cluster.shutdown();
+ }
+ }
}
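
The hunks above replace the old getBlockMetaDataInfo()/updateBlock() pair with the two-step recovery handshake from the HDFS-265 append work: initReplicaRecovery() stamps the replica with a recovery id, and updateReplicaUnderRecovery() commits the agreed length under that id. Below is a toy model in plain Java (not Hadoop code; all names illustrative) of the initReplicaRecovery() contract that the new testInitReplicaRecovery() checks: a newer recovery id supersedes an older one, a stale id is rejected, and an unknown block yields null.

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    class ReplicaRecoveryModel {
      static class Replica {
        final long blockId;
        long recoveryId = -1;                  // -1 means not under recovery
        Replica(long blockId) { this.blockId = blockId; }
      }

      final Map<Long, Replica> map = new HashMap<Long, Replica>();

      /** Mark a replica as under recovery; return null if the block is unknown. */
      Replica initReplicaRecovery(long blockId, long recoveryId) throws IOException {
        Replica r = map.get(blockId);
        if (r == null) {
          return null;                         // data-node does not have this replica
        }
        if (recoveryId <= r.recoveryId) {      // stale or duplicate recovery attempt
          throw new IOException("recovery with id=" + r.recoveryId
              + " already in progress");       // stands in for RecoveryInProgressException
        }
        r.recoveryId = recoveryId;             // a newer recovery wins
        return r;
      }

      public static void main(String[] args) throws IOException {
        ReplicaRecoveryModel m = new ReplicaRecoveryModel();
        m.map.put(10000L, new Replica(10000L));
        m.initReplicaRecovery(10000L, 7778L);  // normal case
        m.initReplicaRecovery(10000L, 7779L);  // recover once more with a newer id
        try {
          m.initReplicaRecovery(10000L, 7778L);
        } catch (IOException expected) {       // stale id is rejected
          System.out.println("GOOD: getting " + expected);
        }
        System.out.println(m.initReplicaRecovery(99999L, 7778L)); // unknown block: null
      }
    }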
Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java?rev=820487&r1=820486&r2=820487&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java Wed Sep 30 22:57:30 2009
@@ -25,8 +25,10 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
import org.apache.hadoop.util.DataChecksum;
/**
@@ -62,14 +64,19 @@
int bytesAdded = 0;
for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the actual data written
- OutputStream dataOut = fsdataset.writeToBlock(b, false).dataOut;
- assertEquals(0, fsdataset.getLength(b));
- for (int j=1; j <= blockIdToLen(i); ++j) {
- dataOut.write(j);
- assertEquals(j, fsdataset.getLength(b)); // correct length even as we write
- bytesAdded++;
+ ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
+ BlockWriteStreams out = bInfo.createStreams();
+ try {
+ OutputStream dataOut = out.dataOut;
+ assertEquals(0, fsdataset.getLength(b));
+ for (int j=1; j <= blockIdToLen(i); ++j) {
+ dataOut.write(j);
+ assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
+ bytesAdded++;
+ }
+ } finally {
+ out.close();
}
- dataOut.close();
b.setNumBytes(blockIdToLen(i));
fsdataset.finalizeBlock(b);
assertEquals(blockIdToLen(i), fsdataset.getLength(b));
@@ -139,24 +146,24 @@
public void testGetBlockReport() throws IOException {
- FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
- Block[] blockReport = fsdataset.getBlockReport();
- assertEquals(0, blockReport.length);
+ SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);
+ BlockListAsLongs blockReport = fsdataset.getBlockReport();
+ assertEquals(0, blockReport.getNumberOfBlocks());
int bytesAdded = addSomeBlocks(fsdataset);
blockReport = fsdataset.getBlockReport();
- assertEquals(NUMBLOCKS, blockReport.length);
+ assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
}
}
public void testInjectionEmpty() throws IOException {
- FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
- Block[] blockReport = fsdataset.getBlockReport();
- assertEquals(0, blockReport.length);
+ SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);
+ BlockListAsLongs blockReport = fsdataset.getBlockReport();
+ assertEquals(0, blockReport.getNumberOfBlocks());
int bytesAdded = addSomeBlocks(fsdataset);
blockReport = fsdataset.getBlockReport();
- assertEquals(NUMBLOCKS, blockReport.length);
+ assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
@@ -169,7 +176,7 @@
SimulatedFSDataset sfsdataset = new SimulatedFSDataset(conf);
sfsdataset.injectBlocks(blockReport);
blockReport = sfsdataset.getBlockReport();
- assertEquals(NUMBLOCKS, blockReport.length);
+ assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
@@ -180,13 +187,13 @@
}
public void testInjectionNonEmpty() throws IOException {
- FSDatasetInterface fsdataset = new SimulatedFSDataset(conf);
+ SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);
- Block[] blockReport = fsdataset.getBlockReport();
- assertEquals(0, blockReport.length);
+ BlockListAsLongs blockReport = fsdataset.getBlockReport();
+ assertEquals(0, blockReport.getNumberOfBlocks());
int bytesAdded = addSomeBlocks(fsdataset);
blockReport = fsdataset.getBlockReport();
- assertEquals(NUMBLOCKS, blockReport.length);
+ assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
@@ -201,13 +208,13 @@
// Add some blocks whose block ids do not conflict with
// the ones we are going to inject.
bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS+1);
- Block[] blockReport2 = sfsdataset.getBlockReport();
- assertEquals(NUMBLOCKS, blockReport.length);
+ BlockListAsLongs blockReport2 = sfsdataset.getBlockReport();
+ assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
blockReport2 = sfsdataset.getBlockReport();
- assertEquals(NUMBLOCKS, blockReport.length);
+ assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
sfsdataset.injectBlocks(blockReport);
blockReport = sfsdataset.getBlockReport();
- assertEquals(NUMBLOCKS*2, blockReport.length);
+ assertEquals(NUMBLOCKS*2, blockReport.getNumberOfBlocks());
for (Block b: blockReport) {
assertNotNull(b);
assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
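
The write path in addSomeBlocks() changes from the single writeToBlock() call to the pipeline-style createRbw()/createStreams() sequence, with in-progress length now read from the replica (getBytesOnDisk()) rather than from the dataset. A minimal sketch of that sequence against the 0.21 APIs as they appear in the hunks above; the simulated-dataset property is set the way the test's setUp presumably does, and everything else is taken directly from the diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
    import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
    import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;

    public class RbwWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
        SimulatedFSDataset fsdataset = new SimulatedFSDataset(conf);

        Block b = new Block(1, 0, 0);              // expected length 0; dataset tracks actual bytes
        ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);  // replica being written
        BlockWriteStreams out = bInfo.createStreams();
        try {
          out.dataOut.write(42);                   // progress visible via bInfo.getBytesOnDisk()
        } finally {
          out.close();                             // always release both streams
        }
        b.setNumBytes(1);
        fsdataset.finalizeBlock(b);                // promote the replica to finalized
        System.out.println("final length = " + fsdataset.getLength(b));
      }
    }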
Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=820487&r1=820486&r2=820487&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Wed Sep 30 22:57:30 2009
@@ -519,7 +519,8 @@
.of(CreateFlag.OVERWRITE)), true, replication, BLOCK_SIZE);
long end = System.currentTimeMillis();
for(boolean written = !closeUponCreate; !written;
- written = nameNode.complete(fileNames[daemonId][inputIdx], clientName));
+ written = nameNode.complete(fileNames[daemonId][inputIdx],
+ clientName, null));
return end-start;
}
@@ -685,8 +686,9 @@
NamespaceInfo nsInfo;
DatanodeRegistration dnRegistration;
- Block[] blocks;
+ ArrayList<Block> blocks;
int nrBlocks; // actual number of blocks
+ long[] blockReportList;
/**
* Get data-node in the form
@@ -705,7 +707,7 @@
TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
dnRegistration = new DatanodeRegistration(getNodeName(dnIdx));
- this.blocks = new Block[blockCapacity];
+ this.blocks = new ArrayList<Block>(blockCapacity);
this.nrBlocks = 0;
}
@@ -738,19 +740,24 @@
}
boolean addBlock(Block blk) {
- if(nrBlocks == blocks.length) {
- LOG.debug("Cannot add block: datanode capacity = " + blocks.length);
+ if(nrBlocks == blocks.size()) {
+ LOG.debug("Cannot add block: datanode capacity = " + blocks.size());
return false;
}
- blocks[nrBlocks] = blk;
+ blocks.set(nrBlocks, blk);
nrBlocks++;
return true;
}
void formBlockReport() {
// fill remaining slots with blocks that do not exist
- for(int idx = blocks.length-1; idx >= nrBlocks; idx--)
- blocks[idx] = new Block(blocks.length - idx, 0, 0);
+ for(int idx = blocks.size()-1; idx >= nrBlocks; idx--)
+ blocks.set(idx, new Block(blocks.size() - idx, 0, 0));
+ blockReportList = new BlockListAsLongs(blocks,null).getBlockListAsLongs();
+ }
+
+ long[] getBlockReportList() {
+ return blockReportList;
}
public int compareTo(String name) {
@@ -760,6 +767,7 @@
/**
* Send a heartbeat to the name-node and replicate blocks if requested.
*/
+ @SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
// register datanode
DatanodeCommand[] cmds = nameNode.sendHeartbeat(
@@ -889,8 +897,8 @@
nameNode.create(fileName, FsPermission.getDefault(), clientName,
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.OVERWRITE)), true, replication,
BLOCK_SIZE);
- addBlocks(fileName, clientName);
- nameNode.complete(fileName, clientName);
+ Block lastBlock = addBlocks(fileName, clientName);
+ nameNode.complete(fileName, clientName, lastBlock);
}
// prepare block reports
for(int idx=0; idx < nrDatanodes; idx++) {
@@ -898,9 +906,12 @@
}
}
- private void addBlocks(String fileName, String clientName) throws IOException {
+ private Block addBlocks(String fileName, String clientName)
+ throws IOException {
+ Block prevBlock = null;
for(int jdx = 0; jdx < blocksPerFile; jdx++) {
- LocatedBlock loc = nameNode.addBlock(fileName, clientName);
+ LocatedBlock loc = nameNode.addBlock(fileName, clientName, prevBlock);
+ prevBlock = loc.getBlock();
for(DatanodeInfo dnInfo : loc.getLocations()) {
int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
datanodes[dnIdx].addBlock(loc.getBlock());
@@ -910,6 +921,7 @@
new String[] {""});
}
}
+ return prevBlock;
}
/**
@@ -923,8 +935,7 @@
assert daemonId < numThreads : "Wrong daemonId.";
TinyDatanode dn = datanodes[daemonId];
long start = System.currentTimeMillis();
- nameNode.blockReport(dn.dnRegistration,
- BlockListAsLongs.convertToArrayLongs(dn.blocks));
+ nameNode.blockReport(dn.dnRegistration, dn.getBlockReportList());
long end = System.currentTimeMillis();
return end-start;
}
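
Two protocol changes run through this file: addBlock()/complete() now carry the previous/last block so the name-node can commit it, and block reports travel as the packed long[] produced by BlockListAsLongs, which TinyDatanode precomputes in formBlockReport() so the timed blockReport() call covers only the RPC. (Note that an ArrayList created with a capacity argument still has size zero, so the size()-based guard in addBlock() relies on the list being filled to capacity beforehand.) A small self-contained sketch of the encoding step, assuming the two-argument constructor shown above, with null presumably standing for the under-construction replica list:

    import java.util.ArrayList;
    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;

    public class BlockReportEncodeSketch {
      public static void main(String[] args) {
        ArrayList<Block> blocks = new ArrayList<Block>();
        for (int i = 1; i <= 3; i++) {
          blocks.add(new Block(i, 10 * i, 0));     // id, length, generation stamp
        }
        // Encode once up front; a benchmark can then resend the cached array
        // so the timed call measures only the RPC, not the encoding.
        long[] report = new BlockListAsLongs(blocks, null).getBlockListAsLongs();
        System.out.println("encoded " + blocks.size() + " blocks into "
            + report.length + " longs");
      }
    }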
Propchange: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
------------------------------------------------------------------------------
svn:mime-type = text/plain
Propchange: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestAccessTokenWithDFS.java
------------------------------------------------------------------------------
svn:mime-type = text/plain
Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=820487&r1=820486&r2=820487&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Wed Sep 30 22:57:30 2009
@@ -182,7 +182,8 @@
File baseDir = new File(System.getProperty("test.build.data",
"build/test/data"),"dfs/data");
for (int i=0; i<8; i++) {
- File blockFile = new File(baseDir, "data" +(i+1)+ "/current/" + block);
+ File blockFile = new File(baseDir, "data" +(i+1) +
+ MiniDFSCluster.FINALIZED_DIR_NAME + block);
if(blockFile.exists()) {
assertTrue(blockFile.delete());
}
@@ -294,8 +295,8 @@
File baseDir = new File(System.getProperty("test.build.data",
"build/test/data"),"dfs/data");
for (int i=0; i < 6; i++) {
- File blockFile = new File(baseDir, "data" + (i+1) + "/current/" +
- block);
+ File blockFile = new File(baseDir, "data" + (i+1) +
+ MiniDFSCluster.FINALIZED_DIR_NAME + block);
if (blockFile.exists()) {
RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
FileChannel channel = raFile.getChannel();
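
The path fix here (and the matching one in TestOverReplicatedBlocks below) reflects the new on-disk layout from the append work: finalized block files now live under a finalized subdirectory of each storage directory, so tests build paths with MiniDFSCluster.FINALIZED_DIR_NAME instead of hard-coding "/current/". A hedged sketch of the path construction, with an illustrative block file name and the constant assumed to carry its own path separators, as the concatenation above implies:

    import java.io.File;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class FinalizedPathSketch {
      public static void main(String[] args) {
        File baseDir = new File(
            System.getProperty("test.build.data", "build/test/data"), "dfs/data");
        // "blk_12345" is an illustrative block file name
        File blockFile = new File(baseDir,
            "data1" + MiniDFSCluster.FINALIZED_DIR_NAME + "blk_12345");
        System.out.println(blockFile);
      }
    }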
Propchange: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
------------------------------------------------------------------------------
svn:mime-type = text/plain
Modified: hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java?rev=820487&r1=820486&r2=820487&view=diff
==============================================================================
--- hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java (original)
+++ hadoop/hdfs/branches/branch-0.21/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java Wed Sep 30 22:57:30 2009
@@ -56,7 +56,8 @@
DataNodeProperties dnProps = cluster.stopDataNode(0);
// remove block scanner log to trigger block scanning
File scanLog = new File(System.getProperty("test.build.data"),
- "dfs/data/data1/current/dncp_block_verification.log.curr");
+ "dfs/data/data1" + MiniDFSCluster.FINALIZED_DIR_NAME +
+ "dncp_block_verification.log.curr");
//wait for one minute for deletion to succeed;
for(int i=0; !scanLog.delete(); i++) {
assertTrue("Could not delete log file in one minute", i < 60);
Propchange: hadoop/hdfs/branches/branch-0.21/src/webapps/datanode/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 30 22:57:30 2009
@@ -1,3 +1,4 @@
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/datanode:713112
/hadoop/core/trunk/src/webapps/datanode:776175-784663
+/hadoop/hdfs/branches/HDFS-265/src/webapps/datanode:796829-820463
/hadoop/hdfs/trunk/src/webapps/datanode:818294-818298
Propchange: hadoop/hdfs/branches/branch-0.21/src/webapps/hdfs/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 30 22:57:30 2009
@@ -1,3 +1,4 @@
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/hdfs:713112
/hadoop/core/trunk/src/webapps/hdfs:776175-784663
+/hadoop/hdfs/branches/HDFS-265/src/webapps/hdfs:796829-820463
/hadoop/hdfs/trunk/src/webapps/hdfs:818294-818298
Propchange: hadoop/hdfs/branches/branch-0.21/src/webapps/secondary/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Wed Sep 30 22:57:30 2009
@@ -1,3 +1,4 @@
/hadoop/core/branches/branch-0.19/hdfs/src/webapps/secondary:713112
/hadoop/core/trunk/src/webapps/secondary:776175-784663
+/hadoop/hdfs/branches/HDFS-265/src/webapps/secondary:796829-820463
/hadoop/hdfs/trunk/src/webapps/secondary:818294-818298