You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ns...@apache.org on 2011/10/11 04:04:11 UTC

svn commit: r1181386 - /hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java

Author: nspiegelberg
Date: Tue Oct 11 02:04:09 2011
New Revision: 1181386

URL: http://svn.apache.org/viewvc?rev=1181386&view=rev
Log:
HBASE-3038 : WALReaderFSDataInputStream.getPos() fails if Filesize > MAX_INT

Summary:
We've been hitting this on dev cluster pretty regularly.  We can't safely read any
file > 2GB with the current API because in.available() returns a primitive int,
which cannot represent lengths above 2GB.

Test Plan:
bin/hbase org.apache.hadoop.hbase.regionserver.wal.HLog --dump /debug/4.log >
badfile.out 2>&1
mvn clean install

DiffCamp Revision: 162776
Reviewed By: kannan
CC: nspiegelberg, kannan, pkhemani
Tasks:
#397343: Understand Split HLog EOF found in dev cluster

Revert Plan:
OK

Modified:
    hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java

Modified: hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java?rev=1181386&r1=1181385&r2=1181386&view=diff
==============================================================================
--- hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java (original)
+++ hbase/branches/0.89/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java Tue Oct 11 02:04:09 2011
@@ -20,10 +20,12 @@
 
 package org.apache.hadoop.hbase.regionserver.wal;
 
+import java.io.FilterInputStream;
 import java.io.IOException;
 import java.lang.Class;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Field;
+import java.lang.reflect.Method;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -79,18 +81,45 @@ public class SequenceFileLogReader imple
         this.length = l;
       }
 
+      // This section can be confusing.  It is specific to how HDFS works.
+      // Let me try to break it down.  This is the problem:
+      //
+      //  1. HDFS DataNodes update the NameNode about a filename's length
+      //     on block boundaries or when a file is closed. Therefore,
+      //     if an RS dies, then the NN's fs.getLength() can be out of date
+      //  2. this.in.available() would work, but it returns int &
+      //     therefore breaks for files > 2GB (happens on big clusters)
+      //  3. DFSInputStream.getFileLength() gets the actual length from the DNs
+      //  4. DFSInputStream is wrapped 2 levels deep : this.in.in
+      //
+      // So, here we adjust getPos() using getFileLength() so the
+      // SequenceFile.Reader constructor (aka: first invocation) comes out
+      // with the correct end of the file:
+      //         this.end = in.getPos() + length;
       @Override
       public long getPos() throws IOException {
         if (this.firstGetPosInvocation) {
           this.firstGetPosInvocation = false;
-          // Tell a lie.  We're doing this just so that this line up in
-          // SequenceFile.Reader constructor comes out with the correct length
-          // on the file:
-          //         this.end = in.getPos() + length;
-          long available = this.in.available();
-          // Length gets added up in the SF.Reader constructor so subtract the
-          // difference.  If available < this.length, then return this.length.
-          return available >= this.length? available - this.length: this.length;
+          long adjust = 0;
+
+          try {
+            Field fIn = FilterInputStream.class.getDeclaredField("in");
+            fIn.setAccessible(true);
+            Object realIn = fIn.get(this.in);
+            Method getFileLength = realIn.getClass().
+              getMethod("getFileLength", new Class<?> []{});
+            getFileLength.setAccessible(true);
+            long realLength = ((Long)getFileLength.
+              invoke(realIn, new Object []{})).longValue();
+            assert(realLength >= this.length);
+            adjust = realLength - this.length;
+          } catch(Exception e) {
+            SequenceFileLogReader.LOG.warn(
+              "Error while trying to get accurate file length.  " +
+              "Truncation / data loss may occur if RegionServers die.", e);
+          }
+
+          return adjust + super.getPos();
         }
         return super.getPos();
       }