You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by dh...@apache.org on 2008/03/15 00:42:21 UTC
svn commit: r637305 - in /hadoop/core/trunk: CHANGES.txt
src/java/org/apache/hadoop/dfs/FSNamesystem.java
src/test/org/apache/hadoop/dfs/TestFileCreation.java
Author: dhruba
Date: Fri Mar 14 16:42:20 2008
New Revision: 637305
URL: http://svn.apache.org/viewvc?rev=637305&view=rev
Log:
HADOOP-3009. TestFileCreation sometimes fails because restarting
minidfscluster sometimes creates datanodes with ports that are
different from their original instances. (dhruba)
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=637305&r1=637304&r2=637305&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Fri Mar 14 16:42:20 2008
@@ -218,6 +218,10 @@
HADOOP-2994. Code cleanup for DFSClient: remove redundant
conversions from string to string. (Dave Brosius via dhruba)
+ HADOOP-3009. TestFileCreation sometimes fails because restarting
+ minidfscluster sometimes creates datanodes with ports that are
+ different from their original instances. (dhruba)
+
Release 0.16.1 - 2008-03-13
INCOMPATIBLE CHANGES
Modified: hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?rev=637305&r1=637304&r2=637305&view=diff
==============================================================================
--- hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ hadoop/core/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Fri Mar 14 16:42:20 2008
@@ -2605,24 +2605,34 @@
+ block.getBlockName() + " on " + node.getName()
+ " size " + block.getNumBytes());
}
-
//
- // if file is being actively written to, then do not check
- // replication-factor here. It will be checked when the file is closed.
+ // If this block does not belong to any file, then we are done.
//
- if (fileINode == null || fileINode.isUnderConstruction()) {
+ if (fileINode == null) {
+ NameNode.stateChangeLog.info("BLOCK* NameSystem.addStoredBlock: "
+ + "addStoredBlock request received for "
+ + block.getBlockName() + " on " + node.getName()
+ + " size " + block.getNumBytes()
+ + " But it does not belong to any file.");
return block;
}
-
+
// filter out containingNodes that are marked for decommission.
NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas()
+ pendingReplications.getNumReplicas(block);
-
+
// check whether safe replication is reached for the block
- // only if it is a part of a files
incrementSafeBlockCount(numCurrentReplica);
+ //
+ // if file is being actively written to, then do not check
+ // replication-factor here. It will be checked when the file is closed.
+ //
+ if (fileINode.isUnderConstruction()) {
+ return block;
+ }
+
// handle underReplication/overReplication
short fileReplication = fileINode.getReplication();
if (numCurrentReplica >= fileReplication) {
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java?rev=637305&r1=637304&r2=637305&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/dfs/TestFileCreation.java Fri Mar 14 16:42:20 2008
@@ -67,6 +67,16 @@
}
//
+ // writes specified bytes to file.
+ //
+ private void writeFile(FSDataOutputStream stm, int size) throws IOException {
+ byte[] buffer = new byte[fileSize];
+ Random rand = new Random(seed);
+ rand.nextBytes(buffer);
+ stm.write(buffer, 0, size);
+ }
+
+ //
// verify that the data written to the full blocks are sane
//
private void checkFile(FileSystem fileSys, Path name, int repl)
@@ -362,7 +372,10 @@
System.out.println("testFileCreationNamenodeRestart: "
+ "Created file filestatus.dat with one "
+ " replicas.");
- writeFile(stm);
+
+ // write two full blocks.
+ writeFile(stm, numBlocks * blockSize);
+ stm.flush();
// create another new file.
//
@@ -410,7 +423,7 @@
file1.toString(), 0, Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up for file " + file1,
- locations.locatedBlockCount() == 1);
+ locations.locatedBlockCount() == 3);
// verify filestatus2.dat
locations = client.namenode.getBlockLocations(