You are viewing a plain-text version of this content; the canonical, fully formatted version is available at the original archive link.
Posted to hdfs-commits@hadoop.apache.org by co...@apache.org on 2009/11/13 20:25:04 UTC
svn commit: r835958 -
/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
Author: cos
Date: Fri Nov 13 19:25:04 2009
New Revision: 835958
URL: http://svn.apache.org/viewvc?rev=835958&view=rev
Log:
HDFS-733. TestBlockReport fails intermittently (cos)
Modified:
hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
Modified: hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
URL: http://svn.apache.org/viewvc/hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java?rev=835958&r1=835957&r2=835958&view=diff
==============================================================================
--- hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java (original)
+++ hadoop/hdfs/trunk/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java Fri Nov 13 19:25:04 2009
@@ -373,14 +373,14 @@
final int bytesChkSum = 1024 * 1000;
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesChkSum);
- conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 3 * bytesChkSum);
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 6 * bytesChkSum);
shutDownCluster();
startUpCluster();
try {
ArrayList<Block> blocks =
- writeFile(METHOD_NAME, 6 * bytesChkSum, filePath);
- Block bl = findBlock(filePath, 6 * bytesChkSum);
+ writeFile(METHOD_NAME, 12 * bytesChkSum, filePath);
+ Block bl = findBlock(filePath, 12 * bytesChkSum);
BlockChecker bc = new BlockChecker(filePath);
bc.start();
@@ -412,16 +412,16 @@
final int bytesChkSum = 1024 * 1000;
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesChkSum);
- conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 3 * bytesChkSum);
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 6 * bytesChkSum);
shutDownCluster();
startUpCluster();
// write file and start second node to be "older" than the original
try {
ArrayList<Block> blocks =
- writeFile(METHOD_NAME, 6 * bytesChkSum, filePath);
+ writeFile(METHOD_NAME, 12 * bytesChkSum, filePath);
- Block bl = findBlock(filePath, 6 * bytesChkSum);
+ Block bl = findBlock(filePath, 12 * bytesChkSum);
BlockChecker bc = new BlockChecker(filePath);
bc.start();
corruptBlockGS(bl);