Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2008/05/14 08:59:47 UTC
svn commit: r656122 [2/2] - in /hadoop/core/trunk: ./
src/java/org/apache/hadoop/dfs/ src/java/org/apache/hadoop/fs/
src/java/org/apache/hadoop/io/ src/test/org/apache/hadoop/dfs/
src/webapps/datanode/
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNode.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNode.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NameNode.java Tue May 13 23:59:46 2008
@@ -321,7 +321,7 @@
public void abandonBlock(Block b, String src, String holder
) throws IOException {
stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
- +b.getBlockName()+" of file "+src);
+ +b+" of file "+src);
if (!namesystem.abandonBlock(b, src, holder)) {
throw new IOException("Cannot abandon block during write to " + src);
}
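
The hunk above drops getBlockName() in favor of concatenating the Block itself, which invokes its toString(). A minimal sketch of the difference, assuming (per the Block changes in part 1/2 of this commit) that toString() appends the new generation stamp to the bare block name:

// Minimal Block-like sketch; field names are illustrative, not the
// exact Hadoop implementation.
public class BlockSketch {
    private final long blockId;
    private final long generationStamp;

    public BlockSketch(long blockId, long generationStamp) {
        this.blockId = blockId;
        this.generationStamp = generationStamp;
    }

    // Bare name, matching the on-disk block file name.
    public String getBlockName() {
        return "blk_" + blockId;
    }

    // Full identity, assumed to include the generation stamp.
    @Override
    public String toString() {
        return getBlockName() + "_" + generationStamp;
    }

    public static void main(String[] args) {
        BlockSketch b = new BlockSketch(4711, 1010);
        // String concatenation ("... " + b) calls toString() implicitly,
        // so log messages now identify the exact replica generation.
        System.out.println(b.getBlockName());  // blk_4711
        System.out.println(b);                 // blk_4711_1010
    }
}

The same substitution recurs in NamenodeFsck.java and UnderReplicatedBlocks.java below.
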
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/NamenodeFsck.java Tue May 13 23:59:46 2008
@@ -191,7 +191,7 @@
int i = 0;
for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
Block block = lBlk.getBlock();
- String blkName = block.getBlockName();
+ String blkName = block.toString();
DatanodeInfo[] locs = lBlk.getLocations();
res.totalReplicas += locs.length;
short targetFileReplication = file.getReplication();
@@ -208,7 +208,7 @@
if (!showFiles) {
out.print("\n" + path + ": ");
}
- out.println(" Under replicated " + block.getBlockName() +
+ out.println(" Under replicated " + block +
". Target Replicas is " +
targetFileReplication + " but found " +
locs.length + " replica(s).");
@@ -225,14 +225,14 @@
out.print(path + ": ");
}
out.println(" Replica placement policy is violated for " +
- block.getBlockName() +
+ block +
". Block should be additionally replicated on " +
missingRacks + " more rack(s).");
}
report.append(i + ". " + blkName + " len=" + block.getNumBytes());
if (locs.length == 0) {
report.append(" MISSING!");
- res.addMissing(block.getBlockName(), block.getNumBytes());
+ res.addMissing(block.toString(), block.getNumBytes());
missing++;
missize += block.getNumBytes();
} else {
@@ -332,7 +332,7 @@
} catch (Exception e) {
e.printStackTrace();
// something went wrong copying this block...
- LOG.warn(" - could not copy block " + lblock.getBlock().getBlockName() + " to " + target);
+ LOG.warn(" - could not copy block " + lblock.getBlock() + " to " + target);
fos.flush();
fos.close();
fos = null;
@@ -388,7 +388,9 @@
blockReader =
DFSClient.BlockReader.newBlockReader(s, targetAddr.toString() + ":" +
block.getBlockId(),
- block.getBlockId(), 0, -1,
+ block.getBlockId(),
+ block.getGenerationStamp(),
+ 0, -1,
conf.getInt("io.file.buffer.size", 4096));
} catch (IOException ex) {
@@ -405,7 +407,7 @@
}
}
if (blockReader == null) {
- throw new Exception("Could not open data stream for " + lblock.getBlock().getBlockName());
+ throw new Exception("Could not open data stream for " + lblock.getBlock());
}
byte[] buf = new byte[1024];
int cnt = 0;
@@ -427,7 +429,7 @@
try {s.close(); } catch (Exception e1) {}
}
if (!success)
- throw new Exception("Could not copy block data for " + lblock.getBlock().getBlockName());
+ throw new Exception("Could not copy block data for " + lblock.getBlock());
}
/*
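
Besides the message changes, the hunk at @@ -388 threads the block's generation stamp into BlockReader.newBlockReader. A hypothetical stand-in mirroring the argument order of that call, to make the new parameter easy to see; it only echoes the header fields and does not open a real block stream:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class BlockReaderArgsSketch {

    static void newBlockReader(Socket s, String file, long blockId,
                               long genStamp,    // new in this commit
                               long startOffset, // 0 = start of block
                               long length,      // -1 = whole block
                               int bufferSize) throws IOException {
        if (bufferSize <= 0) {
            throw new IOException("bad buffer size: " + bufferSize);
        }
        System.out.println("open " + file + " blk=" + blockId
            + " gs=" + genStamp + " off=" + startOffset + " len=" + length);
    }

    public static void main(String[] args) throws IOException {
        InetSocketAddress targetAddr = new InetSocketAddress("127.0.0.1", 50010);
        try (Socket s = new Socket()) {
            // Mirrors the fsck call site: whole block from offset 0, with
            // the io.file.buffer.size default of 4096.
            newBlockReader(s, targetAddr + ":" + 4711L, 4711L, 1010L, 0, -1, 4096);
        }
    }
}

Generation stamps exist so a current replica can be told apart from a stale one; without the stamp in the read request, the datanode cannot make that distinction.
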
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UnderReplicatedBlocks.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UnderReplicatedBlocks.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UnderReplicatedBlocks.java Tue May 13 23:59:46 2008
@@ -105,7 +105,7 @@
if(priLevel != LEVEL && priorityQueues.get(priLevel).add(block)) {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.add:"
- + block.getBlockName()
+ + block
+ " has only "+curReplicas
+ " replicas and need " + expectedReplicas
+ " replicas so is added to neededReplications"
@@ -132,7 +132,7 @@
&& priorityQueues.get(priLevel).remove(block)) {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.remove: "
- + "Removing block " + block.getBlockName()
+ + "Removing block " + block
+ " from priority queue "+ priLevel);
return true;
} else {
@@ -140,7 +140,7 @@
if(i!=priLevel && priorityQueues.get(i).remove(block)) {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.remove: "
- + "Removing block " + block.getBlockName()
+ + "Removing block " + block
+ " from priority queue "+ i);
return true;
}
@@ -173,7 +173,7 @@
&& priorityQueues.get(curPri).add(block)) {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.update:"
- + block.getBlockName()
+ + block
+ " has only "+curReplicas
+ " replicas and need " + curExpectedReplicas
+ " replicas so is added to neededReplications"
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UpgradeManager.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UpgradeManager.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UpgradeManager.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UpgradeManager.java Tue May 13 23:59:46 2008
@@ -73,6 +73,13 @@
return true;
}
+ boolean isUpgradeCompleted() {
+ if (currentUpgrades == null) {
+ return true;
+ }
+ return false;
+ }
+
abstract FSConstants.NodeType getType();
abstract boolean startUpgrade() throws IOException;
abstract void completeUpgrade() throws IOException;
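
The new isUpgradeCompleted() above reduces to a single boolean test; an equivalent, more idiomatic form (a sketch against a stand-in field, not a change to the commit):

import java.util.SortedSet;
import java.util.TreeSet;

// Minimal sketch: 'currentUpgrades' stands in for the manager's field,
// which is null when no distributed upgrade is in flight.
public class UpgradeManagerSketch {
    private SortedSet<String> currentUpgrades;

    boolean isUpgradeCompleted() {
        return currentUpgrades == null;
    }

    public static void main(String[] args) {
        UpgradeManagerSketch m = new UpgradeManagerSketch();
        System.out.println(m.isUpgradeCompleted()); // true: nothing running
        m.currentUpgrades = new TreeSet<>();
        m.currentUpgrades.add("generation-stamp upgrade");
        System.out.println(m.isUpgradeCompleted()); // false
    }
}
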
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UpgradeObjectCollection.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UpgradeObjectCollection.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UpgradeObjectCollection.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/UpgradeObjectCollection.java Tue May 13 23:59:46 2008
@@ -33,6 +33,8 @@
initialize();
// Registered distributed upgrade objects here
// registerUpgrade(new UpgradeObject());
+ registerUpgrade(new GenerationStampUpgradeNamenode());
+ registerUpgrade(new GenerationStampUpgradeDatanode());
}
static class UOSignature implements Comparable<UOSignature> {
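
The two registerUpgrade calls above activate the generation-stamp distributed upgrade pair introduced in part 1/2 of this commit, replacing the commented-out placeholder. An illustrative registry sketch; the real UpgradeObjectCollection keys its entries by the UOSignature shown in the context line above, and the interface and version value here are hypothetical:

import java.util.SortedSet;
import java.util.TreeSet;

public class UpgradeRegistrySketch {
    interface Upgradeable {   // hypothetical minimal interface
        int getVersion();     // layout version the upgrade targets
    }

    private static final SortedSet<String> upgradeTable = new TreeSet<>();

    static void registerUpgrade(Upgradeable uo) {
        // Key by version plus class name so each upgrade registers once.
        upgradeTable.add(uo.getVersion() + ":" + uo.getClass().getName());
    }

    public static void main(String[] args) {
        // Stand-ins for the GenerationStampUpgrade{Namenode,Datanode} pair
        // registered above; -13 is a placeholder version.
        registerUpgrade(() -> -13);
        registerUpgrade(() -> -13);
        System.out.println(upgradeTable);
    }
}
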
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/fs/FileUtil.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/fs/FileUtil.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/fs/FileUtil.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/fs/FileUtil.java Tue May 13 23:59:46 2008
@@ -615,9 +615,13 @@
return Integer.parseInt(inpMsg);
}
} catch (NumberFormatException e) {
- throw new IOException(StringUtils.stringifyException(e) + inpMsg + errMsg);
+ throw new IOException(StringUtils.stringifyException(e) +
+ inpMsg + errMsg +
+ " on file:" + fileName);
} catch (InterruptedException e) {
- throw new IOException(StringUtils.stringifyException(e) + inpMsg + errMsg);
+ throw new IOException(StringUtils.stringifyException(e) +
+ inpMsg + errMsg +
+ " on file:" + fileName);
} finally {
process.destroy();
if (in != null) in.close();
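
Both catch blocks now name the file whose shell probe failed, turning an opaque parse error into an actionable one. A minimal sketch of the pattern, with e.toString() standing in for StringUtils.stringifyException:

import java.io.IOException;

public class ExceptionContextSketch {
    static int parseExitCode(String inpMsg, String errMsg, String fileName)
            throws IOException {
        try {
            return Integer.parseInt(inpMsg);
        } catch (NumberFormatException e) {
            // Re-wrap with the file being operated on appended.
            throw new IOException(e.toString() + inpMsg + errMsg +
                                  " on file:" + fileName);
        }
    }

    public static void main(String[] args) {
        try {
            parseExitCode("not-a-number", " (stderr empty)", "/tmp/part-00000");
        } catch (IOException e) {
            System.out.println(e.getMessage()); // now names the file
        }
    }
}
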
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/io/ObjectWritable.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/io/ObjectWritable.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/io/ObjectWritable.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/io/ObjectWritable.java Tue May 13 23:59:46 2008
@@ -182,7 +182,7 @@
try {
declaredClass = conf.getClassByName(className);
} catch (ClassNotFoundException e) {
- throw new RuntimeException("readObject can't find class", e);
+ throw new RuntimeException("readObject can't find class " + className, e);
}
}
@@ -225,10 +225,12 @@
instance = Enum.valueOf((Class<? extends Enum>) declaredClass, UTF8.readString(in));
} else { // Writable
Class instanceClass = null;
+ String str = "";
try {
- instanceClass = conf.getClassByName(UTF8.readString(in));
+ str = UTF8.readString(in);
+ instanceClass = conf.getClassByName(str);
} catch (ClassNotFoundException e) {
- throw new RuntimeException("readObject can't find class", e);
+ throw new RuntimeException("readObject can't find class " + str, e);
}
Writable writable = WritableFactories.newInstance(instanceClass, conf);
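
Both ObjectWritable hunks apply the same fix: capture the class name before attempting resolution, so a ClassNotFoundException can report exactly which class the stream referenced. A self-contained sketch, with readUTF standing in for UTF8.readString and Class.forName for conf.getClassByName:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ReadObjectSketch {
    static Class<?> resolve(DataInputStream in) throws IOException {
        String str = "";
        try {
            str = in.readUTF();        // read the name into a local first
            return Class.forName(str);
        } catch (ClassNotFoundException e) {
            // The message can now name the missing class.
            throw new RuntimeException("readObject can't find class " + str, e);
        }
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        new DataOutputStream(bos).writeUTF("org.example.MissingWritable");
        DataInputStream in =
            new DataInputStream(new ByteArrayInputStream(bos.toByteArray()));
        try {
            resolve(in);
        } catch (RuntimeException e) {
            System.out.println(e.getMessage()); // names the missing class
        }
    }
}
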
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/NNThroughputBenchmark.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/NNThroughputBenchmark.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/NNThroughputBenchmark.java Tue May 13 23:59:46 2008
@@ -624,7 +624,7 @@
void formBlockReport() {
// fill remaining slots with blocks that do not exist
for(int idx = blocks.length-1; idx >= nrBlocks; idx--)
- blocks[idx] = new Block(blocks.length - idx, 0);
+ blocks[idx] = new Block(blocks.length - idx, 0, 0);
}
public int compareTo(String name) {
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestBalancer.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestBalancer.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestBalancer.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestBalancer.java Tue May 13 23:59:46 2008
@@ -85,7 +85,7 @@
Block[] blocks = new Block[numOfBlocks];
for(int i=0; i<numOfBlocks; i++) {
Block b = locatedBlocks.get(i).getBlock();
- blocks[i] = new Block(b.getBlockId(), b.getNumBytes());
+ blocks[i] = new Block(b.getBlockId(), b.getNumBytes(), b.getGenerationStamp());
}
return blocks;
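
The test hunks from here on adapt to Block's widened constructor, which now takes the generation stamp as a third argument. A sketch of why the copy in TestBalancer must forward all three fields (the Block class here is illustrative):

public class BlockCopySketch {
    static final class Block {
        final long id, numBytes, genStamp;
        Block(long id, long numBytes, long genStamp) {
            this.id = id; this.numBytes = numBytes; this.genStamp = genStamp;
        }
    }

    public static void main(String[] args) {
        Block b = new Block(4711, 64 * 1024 * 1024, 1010);
        // Mirrors the fixed test: copy all three identity fields. A two-arg
        // copy would silently reset the stamp to a default.
        Block copy = new Block(b.id, b.numBytes, b.genStamp);
        System.out.println(copy.genStamp); // 1010, not 0
    }
}
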
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestBlockReplacement.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestBlockReplacement.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestBlockReplacement.java Tue May 13 23:59:46 2008
@@ -218,6 +218,7 @@
out.writeShort(FSConstants.DATA_TRANSFER_VERSION);
out.writeByte(FSConstants.OP_COPY_BLOCK);
out.writeLong(block.getBlockId());
+ out.writeLong(block.getGenerationStamp());
Text.writeString(out, source.getStorageID());
destination.write(out);
out.flush();
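
The OP_COPY_BLOCK request header now carries the generation stamp immediately after the block id. A sketch of the layout the test writes; the constant values are placeholders for the real FSConstants, and writeUTF stands in for Text.writeString:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class CopyBlockHeaderSketch {
    static final short DATA_TRANSFER_VERSION = 11; // placeholder value
    static final byte OP_COPY_BLOCK = (byte) 84;   // placeholder value

    static byte[] header(long blockId, long genStamp, String storageID)
            throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        out.writeShort(DATA_TRANSFER_VERSION);
        out.writeByte(OP_COPY_BLOCK);
        out.writeLong(blockId);
        out.writeLong(genStamp);   // new field, directly after the block id
        out.writeUTF(storageID);
        out.flush();
        return buf.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        System.out.println(header(4711, 1010, "DS-1").length + " header bytes");
    }
}
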
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDataTransferProtocol.java Tue May 13 23:59:46 2008
@@ -167,6 +167,7 @@
sendOut.writeShort((short)FSConstants.DATA_TRANSFER_VERSION);
sendOut.writeByte((byte)FSConstants.OP_WRITE_BLOCK);
sendOut.writeLong(newBlockId); // block id
+ sendOut.writeLong(0); // generation stamp
sendOut.writeInt(0); // targets in pipeline
sendOut.writeBoolean(false); // recoveryFlag
Text.writeString(sendOut, "cl");// clientID
@@ -184,6 +185,7 @@
sendOut.writeShort((short)FSConstants.DATA_TRANSFER_VERSION);
sendOut.writeByte((byte)FSConstants.OP_WRITE_BLOCK);
sendOut.writeLong(newBlockId);
+ sendOut.writeLong(0); // generation stamp
sendOut.writeInt(0); // targets in pipeline
sendOut.writeBoolean(false); // recoveryFlag
Text.writeString(sendOut, "cl");// clientID
@@ -198,6 +200,7 @@
sendOut.writeShort((short)FSConstants.DATA_TRANSFER_VERSION);
sendOut.writeByte((byte)FSConstants.OP_WRITE_BLOCK);
sendOut.writeLong(++newBlockId);
+ sendOut.writeLong(0); // generation stamp
sendOut.writeInt(0); // targets in pipeline
sendOut.writeBoolean(false); // recoveryFlag
Text.writeString(sendOut, "cl");// clientID
@@ -223,6 +226,7 @@
sendOut.writeShort((short)FSConstants.DATA_TRANSFER_VERSION);
sendOut.writeByte((byte)FSConstants.OP_WRITE_BLOCK);
sendOut.writeLong(++newBlockId);
+ sendOut.writeLong(0); // generation stamp
sendOut.writeInt(0); // targets in pipeline
sendOut.writeBoolean(false); // recoveryFlag
Text.writeString(sendOut, "cl");// clientID
@@ -251,6 +255,7 @@
sendOut.writeByte((byte)FSConstants.OP_READ_BLOCK);
newBlockId = firstBlock.getBlockId()-1;
sendOut.writeLong(newBlockId);
+ sendOut.writeLong(firstBlock.getGenerationStamp());
sendOut.writeLong(0L);
sendOut.writeLong(fileLen);
recvOut.writeShort((short)FSConstants.OP_STATUS_ERROR);
@@ -261,6 +266,7 @@
sendOut.writeShort((short)FSConstants.DATA_TRANSFER_VERSION);
sendOut.writeByte((byte)FSConstants.OP_READ_BLOCK);
sendOut.writeLong(firstBlock.getBlockId());
+ sendOut.writeLong(firstBlock.getGenerationStamp());
sendOut.writeLong(-1L);
sendOut.writeLong(fileLen);
sendRecvData("Negative start-offset for read for block " +
@@ -271,6 +277,7 @@
sendOut.writeShort((short)FSConstants.DATA_TRANSFER_VERSION);
sendOut.writeByte((byte)FSConstants.OP_READ_BLOCK);
sendOut.writeLong(firstBlock.getBlockId());
+ sendOut.writeLong(firstBlock.getGenerationStamp());
sendOut.writeLong(fileLen);
sendOut.writeLong(fileLen);
sendRecvData("Wrong start-offset for reading block " +
@@ -283,6 +290,7 @@
sendOut.writeShort((short)FSConstants.DATA_TRANSFER_VERSION);
sendOut.writeByte((byte)FSConstants.OP_READ_BLOCK);
sendOut.writeLong(firstBlock.getBlockId());
+ sendOut.writeLong(firstBlock.getGenerationStamp());
sendOut.writeLong(0);
sendOut.writeLong(-1-random.nextInt(oneMil));
sendRecvData("Negative length for reading block " +
@@ -295,6 +303,7 @@
sendOut.writeShort((short)FSConstants.DATA_TRANSFER_VERSION);
sendOut.writeByte((byte)FSConstants.OP_READ_BLOCK);
sendOut.writeLong(firstBlock.getBlockId());
+ sendOut.writeLong(firstBlock.getGenerationStamp());
sendOut.writeLong(0);
sendOut.writeLong(fileLen + 1);
sendRecvData("Wrong length for reading block " +
@@ -305,6 +314,7 @@
sendOut.writeShort((short)FSConstants.DATA_TRANSFER_VERSION);
sendOut.writeByte((byte)FSConstants.OP_READ_BLOCK);
sendOut.writeLong(firstBlock.getBlockId());
+ sendOut.writeLong(firstBlock.getGenerationStamp());
sendOut.writeLong(0);
sendOut.writeLong(fileLen);
readFile(fileSys, file, fileLen);
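
Every OP_READ_BLOCK request in the hunks above gains the same field, giving the layout: version, opcode, block id, generation stamp, start offset, length. A sketch of a well-formed read request under that assumption (constants again placeholders):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

public class ReadBlockHeaderSketch {
    static final short DATA_TRANSFER_VERSION = 11; // placeholder
    static final byte OP_READ_BLOCK = (byte) 81;   // placeholder

    static byte[] readRequest(long blockId, long genStamp,
                              long startOffset, long length) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        out.writeShort(DATA_TRANSFER_VERSION);
        out.writeByte(OP_READ_BLOCK);
        out.writeLong(blockId);
        out.writeLong(genStamp);   // new: must match the replica's stamp
        out.writeLong(startOffset);
        out.writeLong(length);
        out.flush();
        return buf.toByteArray();
    }

    public static void main(String[] args) throws IOException {
        // Whole-block read, as in the final hunk: offset 0, length = file length.
        System.out.println(readRequest(4711, 1010, 0, 4096).length + " bytes");
    }
}
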
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDatanodeBlockScanner.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestDatanodeBlockScanner.java Tue May 13 23:59:46 2008
@@ -74,7 +74,7 @@
long lastWarnTime = System.currentTimeMillis();
long verificationTime = 0;
- String block = DFSTestUtil.getFirstBlock(fs, file).toString();
+ String block = DFSTestUtil.getFirstBlock(fs, file).getBlockName();
while (verificationTime <= 0) {
String response = urlGet(url);
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFsck.java Tue May 13 23:59:46 2008
@@ -124,7 +124,7 @@
cluster.getNameNodePort()), conf);
String block = dfsClient.namenode.
getBlockLocations(fileNames[0], 0, Long.MAX_VALUE).
- get(0).getBlock().toString();
+ get(0).getBlock().getBlockName();
File baseDir = new File(System.getProperty("test.build.data"),"dfs/data");
for (int i=0; i<8; i++) {
File blockFile = new File(baseDir, "data" +(i+1)+ "/current/" + block);
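
Note that TestDatanodeBlockScanner and TestFsck move in the opposite direction, from toString() back to getBlockName(): these tests locate the block's data file on disk, and that file is named by the bare block name. A sketch of the distinction, assuming the naming convention from the BlockSketch example earlier:

import java.io.File;

public class BlockFileLookupSketch {
    public static void main(String[] args) {
        long blockId = 4711, genStamp = 1010;
        String name = "blk_" + blockId;              // matches the data file
        String full = name + "_" + genStamp;         // does not
        File baseDir = new File("/tmp/dfs/data/data1/current"); // illustrative
        System.out.println(new File(baseDir, name)); // .../blk_4711
        System.out.println(new File(baseDir, full)); // would never exist
    }
}
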
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestPendingReplication.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestPendingReplication.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestPendingReplication.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestPendingReplication.java Tue May 13 23:59:46 2008
@@ -30,10 +30,10 @@
pendingReplications = new PendingReplicationBlocks(timeout * 1000);
//
- // Add 10 blocks to pendingReplciations.
+ // Add 10 blocks to pendingReplications.
//
for (int i = 0; i < 10; i++) {
- Block block = new Block(i, i);
+ Block block = new Block(i, i, 0);
pendingReplications.add(block, i);
}
assertEquals("Size of pendingReplications ",
@@ -43,7 +43,7 @@
//
// remove one item and reinsert it
//
- Block blk = new Block(8, 8);
+ Block blk = new Block(8, 8, 0);
pendingReplications.remove(blk); // removes one replica
assertEquals("pendingReplications.getNumReplicas ",
7, pendingReplications.getNumReplicas(blk));
@@ -60,7 +60,7 @@
// are sane.
//
for (int i = 0; i < 10; i++) {
- Block block = new Block(i, i);
+ Block block = new Block(i, i, 0);
int numReplicas = pendingReplications.getNumReplicas(block);
assertTrue(numReplicas == i);
}
@@ -79,7 +79,7 @@
}
for (int i = 10; i < 15; i++) {
- Block block = new Block(i, i);
+ Block block = new Block(i, i, 0);
pendingReplications.add(block, i);
}
assertTrue(pendingReplications.size() == 15);
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestReplication.java Tue May 13 23:59:46 2008
@@ -266,7 +266,7 @@
// get first block of the file.
String block = dfsClient.namenode.
getBlockLocations(testFile, 0, Long.MAX_VALUE).
- get(0).getBlock().toString();
+ get(0).getBlock().getBlockName();
cluster.shutdown();
cluster = null;
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestSimulatedFSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestSimulatedFSDataset.java?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestSimulatedFSDataset.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestSimulatedFSDataset.java Tue May 13 23:59:46 2008
@@ -59,7 +59,7 @@
int addSomeBlocks() throws IOException {
int bytesAdded = 0;
for (int i = 1; i <= NUMBLOCKS; ++i) {
- Block b = new Block(i, 0); // we pass expected len as zero; fsdataset should use the size of the actual data written
+ Block b = new Block(i, 0, 0); // we pass expected len as zero; fsdataset should use the size of the actual data written
OutputStream dataOut = fsdataset.writeToBlock(b, false).dataOut;
assertEquals(0, fsdataset.getLength(b));
for (int j=1; j <= blockIdToLen(i); ++j) {
@@ -76,7 +76,7 @@
}
public void testGetMetaData() throws IOException {
- Block b = new Block(1, 5);
+ Block b = new Block(1, 5, 0);
try {
assertFalse(fsdataset.metaFileExists(b));
assertTrue("Expected an IO exception", false);
@@ -84,7 +84,7 @@
// ok - as expected
}
addSomeBlocks(); // Only need to add one but ....
- b = new Block(1, 0);
+ b = new Block(1, 0, 0);
InputStream metaInput = fsdataset.getMetaDataInputStream(b);
DataInputStream metaDataInput = new DataInputStream(metaInput);
short version = metaDataInput.readShort();
@@ -120,7 +120,7 @@
public void testWriteRead() throws IOException {
addSomeBlocks();
for (int i=1; i <= NUMBLOCKS; ++i) {
- Block b = new Block(i, 0);
+ Block b = new Block(i, 0, 0);
assertTrue(fsdataset.isValidBlock(b));
assertEquals(blockIdToLen(i), fsdataset.getLength(b));
checkBlockDataAndSize(b, blockIdToLen(i));
@@ -198,12 +198,12 @@
}
public void testInValidBlocks() throws IOException {
- Block b = new Block(1, 5);
+ Block b = new Block(1, 5, 0);
checkInvalidBlock(b);
// Now check invalid after adding some blocks
addSomeBlocks();
- b = new Block(NUMBLOCKS + 99, 5);
+ b = new Block(NUMBLOCKS + 99, 5, 0);
checkInvalidBlock(b);
}
@@ -211,8 +211,8 @@
public void testInvalidate() throws IOException {
int bytesAdded = addSomeBlocks();
Block[] deleteBlocks = new Block[2];
- deleteBlocks[0] = new Block(1, 0);
- deleteBlocks[1] = new Block(2, 0);
+ deleteBlocks[0] = new Block(1, 0, 0);
+ deleteBlocks[1] = new Block(2, 0, 0);
fsdataset.invalidate(deleteBlocks);
checkInvalidBlock(deleteBlocks[0]);
checkInvalidBlock(deleteBlocks[1]);
@@ -224,7 +224,7 @@
// Now make sure the rest of the blocks are valid
for (int i=3; i <= NUMBLOCKS; ++i) {
- Block b = new Block(i, 0);
+ Block b = new Block(i, 0, 0);
assertTrue(fsdataset.isValidBlock(b));
}
}
Modified: hadoop/core/trunk/src/webapps/datanode/browseBlock.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/webapps/datanode/browseBlock.jsp?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/webapps/datanode/browseBlock.jsp (original)
+++ hadoop/core/trunk/src/webapps/datanode/browseBlock.jsp Tue May 13 23:59:46 2008
@@ -130,7 +130,7 @@
blockId = cur.getBlock().getBlockId();
blockSize = cur.getBlock().getNumBytes();
String blk = "blk_" + Long.toString(blockId);
- out.print("<td>"+cur+":</td>");
+ out.print("<td>"+Long.toString(blockId)+":</td>");
DatanodeInfo[] locs = cur.getLocations();
for(int j=0; j<locs.length; j++) {
String datanodeAddr = locs[j].getName();
@@ -186,6 +186,15 @@
}
blockId = Long.parseLong(blockIdStr);
+ String blockGenStamp = null;
+ long genStamp = 0;
+ blockGenStamp = req.getParameter("genstamp");
+ if (blockGenStamp == null) {
+ out.print("Invalid input (genstamp absent)");
+ return;
+ }
+ genStamp = Long.parseLong(blockGenStamp);
+
String blockSizeStr;
long blockSize = 0;
blockSizeStr = req.getParameter("blockSize");
@@ -231,6 +240,7 @@
long nextStartOffset = 0;
long nextBlockSize = 0;
String nextBlockIdStr = null;
+ String nextGenStamp = null;
String nextHost = req.getServerName();
int nextPort = req.getServerPort();
int nextDatanodePort = datanodePort;
@@ -244,6 +254,7 @@
if (i != blocks.size() - 1) {
LocatedBlock nextBlock = blocks.get(i+1);
nextBlockIdStr = Long.toString(nextBlock.getBlock().getBlockId());
+ nextGenStamp = Long.toString(nextBlock.getBlock().getGenerationStamp());
nextStartOffset = 0;
nextBlockSize = nextBlock.getBlock().getNumBytes();
DatanodeInfo d = jspHelper.bestNode(nextBlock);
@@ -263,6 +274,7 @@
nextBlockIdStr = blockIdStr;
nextStartOffset = startOffset + chunkSizeToView;
nextBlockSize = blockSize;
+ nextGenStamp = blockGenStamp;
}
String nextUrl = null;
if (nextBlockIdStr != null) {
@@ -271,6 +283,7 @@
"/browseBlock.jsp?blockId=" + nextBlockIdStr +
"&blockSize=" + nextBlockSize + "&startOffset=" +
nextStartOffset +
+ "&genstamp=" + nextGenStamp +
"&filename=" + URLEncoder.encode(filename, "UTF-8") +
"&chunkSizeToView=" + chunkSizeToView +
"&datanodePort=" + nextDatanodePort +
@@ -279,6 +292,7 @@
}
//determine data for the prev link
String prevBlockIdStr = null;
+ String prevGenStamp = null;
long prevStartOffset = 0;
long prevBlockSize = 0;
String prevHost = req.getServerName();
@@ -292,6 +306,7 @@
if (i != 0) {
LocatedBlock prevBlock = blocks.get(i-1);
prevBlockIdStr = Long.toString(prevBlock.getBlock().getBlockId());
+ prevGenStamp = Long.toString(prevBlock.getBlock().getGenerationStamp());
prevStartOffset = prevBlock.getBlock().getNumBytes() - chunkSizeToView;
if (prevStartOffset < 0)
prevStartOffset = 0;
@@ -314,6 +329,7 @@
prevStartOffset = startOffset - chunkSizeToView;
if (prevStartOffset < 0) prevStartOffset = 0;
prevBlockSize = blockSize;
+ prevGenStamp = blockGenStamp;
}
String prevUrl = null;
@@ -325,6 +341,7 @@
prevStartOffset +
"&filename=" + URLEncoder.encode(filename, "UTF-8") +
"&chunkSizeToView=" + chunkSizeToView +
+ "&genstamp=" + prevGenStamp +
"&datanodePort=" + prevDatanodePort +
"&namenodeInfoPort=" + namenodeInfoPort;
out.print("<a href=\"" + prevUrl + "\">View Prev chunk</a> ");
@@ -334,7 +351,7 @@
try {
jspHelper.streamBlockInAscii(
new InetSocketAddress(req.getServerName(), datanodePort), blockId,
- blockSize, startOffset, chunkSizeToView, out);
+ genStamp, blockSize, startOffset, chunkSizeToView, out);
} catch (Exception e){
out.print(e);
}
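
browseBlock.jsp now requires a genstamp request parameter and threads it through the next/prev chunk links and into streamBlockInAscii. One caveat: Long.parseLong throws an unchecked NumberFormatException on malformed input, which the page does not catch. A defensive sketch of the parameter handling (the null-check message matches the JSP; the catch clause is a suggested hardening, not part of the commit):

public class GenStampParamSketch {
    static Long parseGenStamp(String blockGenStamp) {
        if (blockGenStamp == null) {
            System.out.println("Invalid input (genstamp absent)");
            return null;
        }
        try {
            return Long.parseLong(blockGenStamp);
        } catch (NumberFormatException e) {
            System.out.println("Invalid input (genstamp not a number)");
            return null;
        }
    }

    public static void main(String[] args) {
        System.out.println(parseGenStamp("1010"));  // 1010
        System.out.println(parseGenStamp(null));    // null, message printed
        System.out.println(parseGenStamp("oops"));  // null, message printed
    }
}
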
Modified: hadoop/core/trunk/src/webapps/datanode/browseDirectory.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/webapps/datanode/browseDirectory.jsp?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/webapps/datanode/browseDirectory.jsp (original)
+++ hadoop/core/trunk/src/webapps/datanode/browseDirectory.jsp Tue May 13 23:59:46 2008
@@ -64,6 +64,7 @@
"/browseBlock.jsp?blockId=" +
firstBlock.getBlock().getBlockId() +
"&blockSize=" + firstBlock.getBlock().getNumBytes() +
+ "&genstamp=" + firstBlock.getBlock().getGenerationStamp() +
"&filename=" + URLEncoder.encode(dir, "UTF-8") +
"&datanodePort=" + datanodePort +
"&namenodeInfoPort=" + namenodeInfoPort;
Modified: hadoop/core/trunk/src/webapps/datanode/tail.jsp
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/webapps/datanode/tail.jsp?rev=656122&r1=656121&r2=656122&view=diff
==============================================================================
--- hadoop/core/trunk/src/webapps/datanode/tail.jsp (original)
+++ hadoop/core/trunk/src/webapps/datanode/tail.jsp Tue May 13 23:59:46 2008
@@ -79,6 +79,7 @@
LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
long blockSize = lastBlk.getBlock().getNumBytes();
long blockId = lastBlk.getBlock().getBlockId();
+ long genStamp = lastBlk.getBlock().getGenerationStamp();
DatanodeInfo chosenNode;
try {
chosenNode = jspHelper.bestNode(lastBlk);
@@ -94,7 +95,7 @@
else startOffset = 0;
out.print("<textarea cols=\"100\" rows=\"25\" wrap=\"virtual\" style=\"width:100%\" READONLY>");
- jspHelper.streamBlockInAscii(addr, blockId, blockSize, startOffset, chunkSizeToView, out);
+ jspHelper.streamBlockInAscii(addr, blockId, genStamp, blockSize, startOffset, chunkSizeToView, out);
out.print("</textarea>");
dfs.close();
}
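
All three JSPs converge on the widened JspHelper.streamBlockInAscii signature, with the generation stamp inserted directly after the block id. A hypothetical mirror of that signature which only echoes its arguments; the real helper opens a block reader against addr and streams chunkSizeToView bytes to out:

import java.io.IOException;
import java.io.PrintWriter;
import java.io.Writer;
import java.net.InetSocketAddress;

public class StreamBlockSignatureSketch {
    static void streamBlockInAscii(InetSocketAddress addr, long blockId,
            long genStamp, long blockSize, long startOffset,
            long chunkSizeToView, Writer out) throws IOException {
        out.write("read blk_" + blockId + "_" + genStamp + " @" + startOffset
            + " +" + chunkSizeToView + " of " + blockSize + " from " + addr + "\n");
    }

    public static void main(String[] args) throws IOException {
        PrintWriter out = new PrintWriter(System.out, true);
        streamBlockInAscii(new InetSocketAddress("127.0.0.1", 50010),
                4711, 1010, 4096, 0, 1024, out);
        out.flush();
    }
}
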