You are viewing a plain text version of this content. The canonical link for it is here.
Posted to hdfs-commits@hadoop.apache.org by su...@apache.org on 2010/10/19 19:43:53 UTC
svn commit: r1024336 - in /hadoop/hdfs/branches/HDFS-1052: ./
src/java/org/apache/hadoop/hdfs/protocol/
src/java/org/apache/hadoop/hdfs/server/namenode/
src/test/hdfs/org/apache/hadoop/hdfs/
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/ src/tes...
Author: suresh
Date: Tue Oct 19 17:43:53 2010
New Revision: 1024336
URL: http://svn.apache.org/viewvc?rev=1024336&view=rev
Log:
HDFS-1449. Fix test failures - ExtendedBlock must return block file name in #getBlockName(). Contributed by Suresh Srinivas.
Modified:
hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
Modified: hadoop/hdfs/branches/HDFS-1052/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/CHANGES.txt?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/CHANGES.txt (original)
+++ hadoop/hdfs/branches/HDFS-1052/CHANGES.txt Tue Oct 19 17:43:53 2010
@@ -309,6 +309,9 @@ Trunk (unreleased changes)
HDFS-1440. Fix TestComputeInvalidateWork failure. (suresh)
+ HDFS-1449. Fix test failures - ExtendedBlock must return
+ block file name in #getBlockName(). (suresh)
+
Release 0.21.0 - Unreleased
INCOMPATIBLE CHANGES
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/protocol/ExtendedBlock.java Tue Oct 19 17:43:53 2010
@@ -21,6 +21,8 @@ import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.DeprecatedUTF8;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableFactories;
@@ -29,6 +31,8 @@ import org.apache.hadoop.io.WritableFact
/**
* Identifies a Block uniquely across the block pools
*/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
public class ExtendedBlock implements Writable {
private String poolId;
private Block block;
@@ -95,8 +99,9 @@ public class ExtendedBlock implements Wr
return poolId;
}
+ /** Returns the block file name for the block */
public String getBlockName() {
- return poolId + ":" + block;
+ return block.getBlockName();
}
public long getNumBytes() {
@@ -155,6 +160,6 @@ public class ExtendedBlock implements Wr
@Override // Object
public String toString() {
- return getBlockName();
+ return poolId + ":" + block;
}
}
Modified: hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Tue Oct 19 17:43:53 2010
@@ -311,7 +311,8 @@ public class NamenodeFsck {
if (isCorrupt) {
corrupt++;
res.corruptBlocks++;
- out.print("\n" + path + ": CORRUPT block " + block.getBlockName()+"\n");
+ out.print("\n" + path + ": CORRUPT blockpool " + block.getPoolId() +
+ " block " + block.getBlockName()+"\n");
}
if (locs.length >= minReplication)
res.numMinReplicatedBlocks++;
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java Tue Oct 19 17:43:53 2010
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.B
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -689,20 +690,23 @@ public class MiniDFSCluster {
/*
* Corrupt a block on all datanode
*/
- void corruptBlockOnDataNodes(String blockName) throws Exception{
+ void corruptBlockOnDataNodes(ExtendedBlock block) throws Exception{
for (int i=0; i < dataNodes.size(); i++)
- corruptBlockOnDataNode(i,blockName);
+ corruptBlockOnDataNode(i, block);
}
/*
* Corrupt a block on a particular datanode
*/
- boolean corruptBlockOnDataNode(int i, String blockName) throws Exception {
+ boolean corruptBlockOnDataNode(int i, ExtendedBlock blk) throws Exception {
Random random = new Random();
boolean corrupted = false;
File dataDir = new File(getBaseDirectory() + "data");
if (i < 0 || i >= dataNodes.size())
return false;
+
+ // TODO:FEDERATION use blockPoolId
+ String blockName = blk.getBlockName();
for (int dn = i*2; dn < i*2+2; dn++) {
File blockFile = new File(dataDir, "data" + (dn+1) + FINALIZED_DIR_NAME +
blockName);
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java Tue Oct 19 17:43:53 2010
@@ -31,6 +31,7 @@ import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.io.IOUtils;
/**
@@ -256,7 +257,7 @@ public class TestCrcCorruption {
DFSTestUtil.createFile(fs, file, fileSize, (short)numDataNodes, 12345L /*seed*/);
DFSTestUtil.waitReplication(fs, file, (short)numDataNodes);
- String block = DFSTestUtil.getFirstBlock(fs, file).getBlockName();
+ ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file);
cluster.corruptBlockOnDataNodes(block);
try {
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Tue Oct 19 17:43:53 2010
@@ -140,10 +140,12 @@ public class TestDatanodeBlockScanner ex
cluster.shutdown();
}
- public static boolean corruptReplica(String blockName, int replica) throws IOException {
+ public static boolean corruptReplica(ExtendedBlock blk, int replica) throws IOException {
+ String blockName = blk.getLocalBlock().getBlockName();
Random random = new Random();
File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
boolean corrupted = false;
+ // TODO:FEDERATION use BlockPoolId
for (int i=replica*2; i<replica*2+2; i++) {
File blockFile = new File(baseDir, "data" + (i+1) +
MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
@@ -177,7 +179,7 @@ public class TestDatanodeBlockScanner ex
fs = cluster.getFileSystem();
Path file1 = new Path("/tmp/testBlockVerification/file1");
DFSTestUtil.createFile(fs, file1, 1024, (short)3, 0);
- String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();
+ ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), conf);
@@ -289,7 +291,6 @@ public class TestDatanodeBlockScanner ex
Path file1 = new Path("/tmp/testBlockCorruptRecovery/file");
DFSTestUtil.createFile(fs, file1, 1024, numReplicas, 0);
ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, file1);
- String block = blk.getBlockName();
dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), conf);
@@ -314,7 +315,7 @@ public class TestDatanodeBlockScanner ex
// Corrupt numCorruptReplicas replicas of block
int[] corruptReplicasDNIDs = new int[numCorruptReplicas];
for (int i=0, j=0; (j != numCorruptReplicas) && (i < numDataNodes); i++) {
- if (corruptReplica(block, i))
+ if (corruptReplica(blk, i))
corruptReplicasDNIDs[j++] = i;
}
@@ -393,7 +394,7 @@ public class TestDatanodeBlockScanner ex
DFSTestUtil.createFile(fs, fileName, 1, REPLICATION_FACTOR, 0);
DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);
- String block = DFSTestUtil.getFirstBlock(fs, fileName).getBlockName();
+ ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
// Truncate replica of block
changeReplicaLength(block, 0, -1);
@@ -420,7 +421,9 @@ public class TestDatanodeBlockScanner ex
/**
* Change the length of a block at datanode dnIndex
*/
- static boolean changeReplicaLength(String blockName, int dnIndex, int lenDelta) throws IOException {
+ static boolean changeReplicaLength(ExtendedBlock blk, int dnIndex,
+ int lenDelta) throws IOException {
+ String blockName = blk.getBlockName();
File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
for (int i=dnIndex*2; i<dnIndex*2+2; i++) {
File blockFile = new File(baseDir, "data" + (i+1) +
@@ -435,8 +438,9 @@ public class TestDatanodeBlockScanner ex
return false;
}
- private static void waitForBlockDeleted(String blockName, int dnIndex)
+ private static void waitForBlockDeleted(ExtendedBlock blk, int dnIndex)
throws IOException, InterruptedException {
+ String blockName = blk.getBlockName();
File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
File blockFile1 = new File(baseDir, "data" + (2*dnIndex+1) +
MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java Tue Oct 19 17:43:53 2010
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configurat
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
/**
* The test makes sure that NameNode detects presence of blocks that do not have
@@ -66,7 +67,7 @@ public class TestMissingBlocksAlert exte
// Corrupt the block
- String block = DFSTestUtil.getFirstBlock(dfs, corruptFile).getLocalBlock().getBlockName();
+ ExtendedBlock block = DFSTestUtil.getFirstBlock(dfs, corruptFile);
assertTrue(TestDatanodeBlockScanner.corruptReplica(block, 0));
// read the file so that the corrupt block is reported to NN
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java Tue Oct 19 17:43:53 2010
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
@@ -163,7 +164,7 @@ public class TestReplication extends Tes
DFSTestUtil.waitReplication(fs, file1, (short)1);
// Corrupt the block belonging to the created file
- String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();
+ ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
cluster.corruptBlockOnDataNodes(block);
// Increase replication factor, this should invoke transfer request
@@ -422,7 +423,7 @@ public class TestReplication extends Tes
DFSTestUtil.createFile(fs, fileName, fileLen, REPLICATION_FACTOR, 0);
DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);
- String block = DFSTestUtil.getFirstBlock(fs, fileName).getBlockName();
+ ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
// Change the length of a replica
for (int i=0; i<cluster.getDataNodes().size(); i++) {
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java Tue Oct 19 17:43:53 2010
@@ -17,7 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.datanode;
-import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Op.REPLACE_BLOCK;
import static org.apache.hadoop.hdfs.protocol.DataTransferProtocol.Status.*;
import java.io.DataInputStream;
@@ -50,7 +49,6 @@ import org.apache.hadoop.hdfs.security.t
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
/**
* This class tests if block replacement request to data nodes work correctly.
@@ -159,11 +157,11 @@ public class TestBlockReplacement extend
// start to replace the block
// case 1: proxySource does not contain the block
LOG.info("Testcase 1: Proxy " + newNode.getName()
- + " does not contain the block " + b.getBlockName() );
+ + " does not contain the block " + b);
assertFalse(replaceBlock(b, source, newNode, proxies.get(0)));
// case 2: destination contains the block
LOG.info("Testcase 2: Destination " + proxies.get(1).getName()
- + " contains the block " + b.getBlockName() );
+ + " contains the block " + b);
assertFalse(replaceBlock(b, source, proxies.get(0), proxies.get(1)));
// case 3: correct case
LOG.info("Testcase 3: Proxy=" + source.getName() + " source=" +
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestCorruptFilesJsp.java Tue Oct 19 17:43:53 2010
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestDatanodeBlockScanner;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.junit.Test;
/** A JUnit test for corrupt_files.jsp */
@@ -80,9 +81,8 @@ public class TestCorruptFilesJsp {
// Now corrupt all the files except for the last one
for (int idx = 0; idx < filepaths.length - 1; idx++) {
- String blockName = DFSTestUtil.getFirstBlock(fs, filepaths[idx])
- .getBlockName();
- TestDatanodeBlockScanner.corruptReplica(blockName, 0);
+ ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, filepaths[idx]);
+ TestDatanodeBlockScanner.corruptReplica(blk, 0);
// read the file so that the corrupt block is reported to NN
FSDataInputStream in = fs.open(filepaths[idx]);
Modified: hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java?rev=1024336&r1=1024335&r2=1024336&view=diff
==============================================================================
--- hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java (original)
+++ hadoop/hdfs/branches/HDFS-1052/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestOverReplicatedBlocks.java Tue Oct 19 17:43:53 2010
@@ -54,8 +54,7 @@ public class TestOverReplicatedBlocks ex
// corrupt the block on datanode 0
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
- TestDatanodeBlockScanner.corruptReplica(block.getLocalBlock()
- .getBlockName(), 0);
+ TestDatanodeBlockScanner.corruptReplica(block, 0);
DataNodeProperties dnProps = cluster.stopDataNode(0);
// remove block scanner log to trigger block scanning
File scanLog = new File(System.getProperty("test.build.data"),