You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by sz...@apache.org on 2008/09/27 18:00:41 UTC
svn commit: r699676 - in /hadoop/core/trunk: CHANGES.txt
src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
src/test/org/apache/hadoop/hdfs/TestFileCreation.java
Author: szetszwo
Date: Sat Sep 27 09:00:40 2008
New Revision: 699676
URL: http://svn.apache.org/viewvc?rev=699676&view=rev
Log:
HADOOP-3614. Fix a bug where the Datanode may use an old GenerationStamp to get the meta file. (szetszwo)
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=699676&r1=699675&r2=699676&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Sat Sep 27 09:00:40 2008
@@ -782,6 +782,9 @@
HADOOP-4116. Balancer should provide better resource management. (hairong)
+ HADOOP-3614. Fix a bug that Datanode may use an old GenerationStamp to get
+ meta file. (szetszwo)
+
Release 0.18.1 - 2008-09-17
IMPROVEMENTS
Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java?rev=699676&r1=699675&r2=699676&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/datanode/FSDataset.java Sat Sep 27 09:00:40 2008
@@ -621,9 +621,9 @@
}
}
- public File findBlockFile(Block b) {
- assert b.getGenerationStamp() == GenerationStamp.WILDCARD_STAMP;
-
+ /** Return the block file for the given ID */
+ public File findBlockFile(long blockId) {
+ final Block b = new Block(blockId);
File blockfile = null;
ActiveFile activefile = ongoingCreates.get(b);
if (activefile != null) {
@@ -643,15 +643,13 @@
/** {@inheritDoc} */
public synchronized Block getStoredBlock(long blkid) throws IOException {
- Block b = new Block(blkid);
- File blockfile = findBlockFile(b);
+ File blockfile = findBlockFile(blkid);
if (blockfile == null) {
return null;
}
File metafile = findMetaFile(blockfile);
- b.setGenerationStamp(parseGenerationStamp(blockfile, metafile));
- b.setNumBytes(blockfile.length());
- return b;
+ return new Block(blkid, blockfile.length(),
+ parseGenerationStamp(blockfile, metafile));
}
public boolean metaFileExists(Block b) throws IOException {
@@ -830,14 +828,13 @@
throw new IOException("Cannot update oldblock (=" + oldblock
+ ") to newblock (=" + newblock + ").");
}
-
- File blockFile = findBlockFile(oldblock);
+ File blockFile = findBlockFile(oldblock.getBlockId());
if (blockFile == null) {
throw new IOException("Block " + oldblock + " does not exist.");
}
interruptOngoingCreates(oldblock);
- File oldMetaFile = getMetaFile(blockFile, oldblock);
+ File oldMetaFile = findMetaFile(blockFile);
long oldgs = parseGenerationStamp(blockFile, oldMetaFile);
//rename meta file to a tmp file
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java?rev=699676&r1=699675&r2=699676&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCreation.java Sat Sep 27 09:00:40 2008
@@ -17,11 +17,20 @@
*/
package org.apache.hadoop.hdfs;
-import java.io.*;
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
import java.net.InetSocketAddress;
+import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants;
@@ -33,8 +42,6 @@
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.io.IOUtils;
-
-import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.log4j.Level;
@@ -690,7 +697,7 @@
DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
FSDataset dataset = (FSDataset)datanode.data;
Block b = dataset.getStoredBlock(locatedblock.getBlock().getBlockId());
- File blockfile = dataset.findBlockFile(b);
+ File blockfile = dataset.findBlockFile(b.getBlockId());
System.out.println("blockfile=" + blockfile);
if (blockfile != null) {
BufferedReader in = new BufferedReader(new FileReader(blockfile));