You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by sz...@apache.org on 2009/03/05 00:49:05 UTC
svn commit: r750237 - in /hadoop/core/trunk: CHANGES.txt
src/test/org/apache/hadoop/hdfs/DataNodeCluster.java
Author: szetszwo
Date: Wed Mar 4 23:49:04 2009
New Revision: 750237
URL: http://svn.apache.org/viewvc?rev=750237&view=rev
Log:
HADOOP-5384. Fix a problem where DataNodeCluster created blocks with generationStamp == 1. (szetszwo)
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/DataNodeCluster.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=750237&r1=750236&r2=750237&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Mar 4 23:49:04 2009
@@ -269,6 +269,9 @@
HADOOP-5383. Avoid building an unused string in NameNode's
verifyReplication(). (Raghu Angadi)
+ HADOOP-5384. Fix a problem that DataNodeCluster creates blocks with
+ generationStamp == 1. (szetszwo)
+
Release 0.20.0 - Unreleased
INCOMPATIBLE CHANGES
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/DataNodeCluster.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/DataNodeCluster.java?rev=750237&r1=750236&r2=750237&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/DataNodeCluster.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/DataNodeCluster.java Wed Mar 4 23:49:04 2009
@@ -24,12 +24,10 @@
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
import org.apache.hadoop.net.DNS;
@@ -196,7 +194,8 @@
long blkid = startingBlockId;
for (int i_dn = 0; i_dn < numDataNodes; ++i_dn) {
for (int i = 0; i < blocks.length; ++i) {
- blocks[i] = new Block(blkid++, blockSize, 1);
+ blocks[i] = new Block(blkid++, blockSize,
+ Block.GRANDFATHER_GENERATION_STAMP);
}
for (int i = 1; i <= replication; ++i) {
// inject blocks for dn_i into dn_i and replica in dn_i's neighbors