Posted to common-commits@hadoop.apache.org by cu...@apache.org on 2007/06/04 18:47:24 UTC
svn commit: r544181 - in /lucene/hadoop/trunk: CHANGES.txt src/java/org/apache/hadoop/dfs/DFSClient.java src/java/org/apache/hadoop/dfs/FSNamesystem.java src/test/org/apache/hadoop/dfs/TestPread.java
Author: cutting
Date: Mon Jun 4 09:47:20 2007
New Revision: 544181
URL: http://svn.apache.org/viewvc?view=rev&rev=544181
Log:
HADOOP-1443. Fix a bug opening zero-length files in HDFS. Contributed by Konstantin.
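The change has two halves: DFSClient now treats a positioned read at or past the end of the file as EOF, and FSNamesystem rejects negative offsets and lengths and handles a zero-length file's block list gracefully. A minimal sketch of the scenario this commit addresses, written against the FileSystem API of this era (the class name and path are hypothetical; TestPread below exercises the same sequence inside a test cluster):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical driver; assumes a FileSystem reachable through the default Configuration.
    public class ZeroLengthPreadSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/empty-file");   // hypothetical path
        fs.create(p).close();                   // leave behind a zero-length file

        FSDataInputStream in = fs.open(p);
        byte[] buf = new byte[1];
        in.readFully(0, buf, 0, 0);             // zero-byte positioned read: must succeed
        try {
          in.readFully(0, buf, 0, 1);           // one byte past EOF: must throw IOException
        } catch (IOException expected) {
          // with this fix the client signals EOF cleanly instead of
          // misbehaving on a file that has no readable blocks
        }
        in.close();
        fs.delete(p);
      }
    }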
Modified:
lucene/hadoop/trunk/CHANGES.txt
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java
Modified: lucene/hadoop/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/CHANGES.txt?view=diff&rev=544181&r1=544180&r2=544181
==============================================================================
--- lucene/hadoop/trunk/CHANGES.txt (original)
+++ lucene/hadoop/trunk/CHANGES.txt Mon Jun 4 09:47:20 2007
@@ -519,6 +519,9 @@
to a long, permitting map outputs to exceed 2^31 bytes.
(omalley via cutting)
+133. HADOOP-1443. Fix a bug opening zero-length files in HDFS.
+ (Konstantin Shvachko via cutting)
+
Release 0.12.3 - 2007-04-06
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java?view=diff&rev=544181&r1=544180&r2=544181
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/DFSClient.java Mon Jun 4 09:47:20 2007
@@ -961,7 +961,7 @@
throw new IOException("Stream closed");
}
long filelen = getFileLength();
- if ((position < 0) || (position > filelen)) {
+ if ((position < 0) || (position >= filelen)) {
return -1;
}
int realLen = length;
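This one-character client fix matters precisely for the empty-file case: with filelen == 0, a pread at position 0 previously slipped past the bounds check and went on to ask the namenode for blocks that do not exist. A standalone illustration of the old versus new boundary logic (plain Java, detached from DFSClient):

    long filelen = 0;   // zero-length file
    long position = 0;  // positioned read at offset 0
    boolean eofOld = (position < 0) || (position >  filelen); // false: read proceeds and later fails
    boolean eofNew = (position < 0) || (position >= filelen); // true: return -1 (EOF) immediately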
Modified: lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java?view=diff&rev=544181&r1=544180&r2=544181
==============================================================================
--- lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java (original)
+++ lucene/hadoop/trunk/src/java/org/apache/hadoop/dfs/FSNamesystem.java Mon Jun 4 09:47:20 2007
@@ -432,7 +432,14 @@
synchronized LocatedBlocks getBlockLocations(String clientMachine,
String src,
long offset,
- long length) {
+ long length
+ ) throws IOException {
+ if (offset < 0) {
+ throw new IOException("Negative offset is not supported. File: " + src );
+ }
+ if (length < 0) {
+ throw new IOException("Negative length is not supported. File: " + src );
+ }
return getBlockLocations(clientMachine,
dir.getFileINode(src),
offset, length, Integer.MAX_VALUE);
@@ -442,7 +449,8 @@
FSDirectory.INode inode,
long offset,
long length,
- int nrBlocksToReturn) {
+ int nrBlocksToReturn
+ ) throws IOException {
if(inode == null || inode.isDir()) {
return null;
}
@@ -450,18 +458,24 @@
if (blocks == null) {
return null;
}
+ assert blocks.length > 0 : "Array of blocks is empty.";
List<LocatedBlock> results;
results = new ArrayList<LocatedBlock>(blocks.length);
int curBlk = 0;
long curPos = 0, blkSize = 0;
- for (curBlk = 0; curBlk < blocks.length; curBlk++) {
+ int nrBlocks = (blocks[0].getNumBytes() == 0) ? 0 : blocks.length;
+ for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
blkSize = blocks[curBlk].getNumBytes();
+ assert blkSize > 0 : "Block of size 0";
if (curPos + blkSize > offset) {
break;
}
curPos += blkSize;
}
+
+ if (nrBlocks > 0 && curBlk == nrBlocks) // offset >= end of file
+ return null;
long endOff = offset + length;
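On the namenode side, a zero-length file is represented by a single block of zero bytes, so the scan now treats such a block list as containing no readable blocks (nrBlocks == 0) and falls through to build an empty result, while a non-empty file whose scan runs off the end (offset at or past EOF) returns null. A simplified, self-contained rendering of the revised loop, with long[] standing in for the Block[] and its getNumBytes():

    // Returns the index of the first block covering 'offset', or null if
    // offset >= file length. For a zero-length file (one zero-byte block)
    // nrBlocks == 0, index 0 is returned, and the caller ends up building
    // an empty block list instead of erroring out. Assumes a non-empty
    // array, mirroring the assert in the patched code.
    static Integer firstBlockIndex(long[] blockSizes, long offset) {
      int nrBlocks = (blockSizes[0] == 0) ? 0 : blockSizes.length;
      int curBlk;
      long curPos = 0;
      for (curBlk = 0; curBlk < nrBlocks; curBlk++) {
        long blkSize = blockSizes[curBlk];
        if (curPos + blkSize > offset) break;
        curPos += blkSize;
      }
      if (nrBlocks > 0 && curBlk == nrBlocks)
        return null; // offset >= end of file
      return curBlk;
    }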
Modified: lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java
URL: http://svn.apache.org/viewvc/lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java?view=diff&rev=544181&r1=544180&r2=544181
==============================================================================
--- lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java (original)
+++ lucene/hadoop/trunk/src/test/org/apache/hadoop/dfs/TestPread.java Mon Jun 4 09:47:20 2007
@@ -38,7 +38,25 @@
// create and write a file that contains three blocks of data
DataOutputStream stm = fileSys.create(name, true, 4096, (short)1,
(long)blockSize);
+ // test empty file open and read
+ stm.close();
+ FSDataInputStream in = fileSys.open(name);
byte[] buffer = new byte[(int)(12*blockSize)];
+ in.readFully(0, buffer, 0, 0);
+ IOException res = null;
+ try { // read beyond the end of the file
+ in.readFully(0, buffer, 0, 1);
+ } catch (IOException e) {
+ // should throw an exception
+ res = e;
+ }
+ assertTrue("Error reading beyond file boundary.", res != null);
+ in.close();
+ if (!fileSys.delete(name))
+ assertTrue("Cannot delete file", false);
+
+ // now create the real file
+ stm = fileSys.create(name, true, 4096, (short)1, (long)blockSize);
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
@@ -112,6 +130,17 @@
actual = new byte[8*4096];
stm.readFully(3*blockSize, actual, 0, 8*4096);
checkAndEraseData(actual, 3*blockSize, expected, "Pread Test 8");
+ // read the tail
+ stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize/2);
+ IOException res = null;
+ try { // read beyond the end of the file
+ stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize);
+ } catch (IOException e) {
+ // should throw an exception
+ res = e;
+ }
+ assertTrue("Error reading beyond file boundary.", res != null);
+
stm.close();
}
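The new tail-read assertions rest on simple arithmetic over the 12-block test file:

    long fileLen = 12L * blockSize;               // total length written by the test
    long off = 11L * blockSize + blockSize / 2;   // midpoint of the last block
    // off + blockSize/2 == fileLen : readFully ends exactly at EOF and succeeds
    // off + blockSize   >  fileLen : readFully overshoots EOF by blockSize/2 and throws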