Posted to commits@hbase.apache.org by st...@apache.org on 2011/12/19 19:52:34 UTC

svn commit: r1220896 - in /hbase/branches/0.92: pom.xml src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java

Author: stack
Date: Mon Dec 19 18:52:34 2011
New Revision: 1220896

URL: http://svn.apache.org/viewvc?rev=1220896&view=rev
Log:
HBASE-5029 TestDistributedLogSplitting fails on occasion; disabling failing test -- undoing an overcommit (reverted with patch -p0 -R < x.txt)

Modified:
    hbase/branches/0.92/pom.xml
    hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
    hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java

Modified: hbase/branches/0.92/pom.xml
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/pom.xml?rev=1220896&r1=1220895&r2=1220896&view=diff
==============================================================================
--- hbase/branches/0.92/pom.xml (original)
+++ hbase/branches/0.92/pom.xml Mon Dec 19 18:52:34 2011
@@ -811,7 +811,7 @@
     <slf4j.version>1.5.8</slf4j.version><!-- newer version available -->
     <stax-api.version>1.0.1</stax-api.version>
     <thrift.version>0.7.0</thrift.version>
-    <zookeeper.version>3.4.1</zookeeper.version>
+    <zookeeper.version>3.4.1rc0</zookeeper.version>
     <hadoop-snappy.version>0.0.1-SNAPSHOT</hadoop-snappy.version>
 
     <package.prefix>/usr</package.prefix>	           

Modified: hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java?rev=1220896&r1=1220895&r2=1220896&view=diff
==============================================================================
--- hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java (original)
+++ hbase/branches/0.92/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SequenceFileLogReader.java Mon Dec 19 18:52:34 2011
@@ -26,15 +26,13 @@ import java.lang.Class;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.Field;
 import java.lang.reflect.Method;
-import java.util.Arrays;
-
+ 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient.DFSInputStream;
 import org.apache.hadoop.io.SequenceFile;
 
 public class SequenceFileLogReader implements HLog.Reader {
@@ -84,8 +82,8 @@ public class SequenceFileLogReader imple
       // This section can be confusing.  It is specific to how HDFS works.
       // Let me try to break it down.  This is the problem:
       //
-      //  1. HDFS DataNodes update the NameNode about a filename's length
-      //     on block boundaries or when a file is closed. Therefore,
+      //  1. HDFS DataNodes update the NameNode about a filename's length 
+      //     on block boundaries or when a file is closed. Therefore, 
       //     if an RS dies, then the NN's fs.getLength() can be out of date
       //  2. this.in.available() would work, but it returns int &
       //     therefore breaks for files > 2GB (happens on big clusters)
@@ -93,7 +91,7 @@ public class SequenceFileLogReader imple
       //  4. DFSInputStream is wrapped 2 levels deep : this.in.in
       //
       // So, here we adjust getPos() using getFileLength() so the
-      // SequenceFile.Reader constructor (aka: first invocation) comes out
+      // SequenceFile.Reader constructor (aka: first invocation) comes out 
       // with the correct end of the file:
       //         this.end = in.getPos() + length;
       @Override
@@ -106,18 +104,13 @@ public class SequenceFileLogReader imple
             Field fIn = FilterInputStream.class.getDeclaredField("in");
             fIn.setAccessible(true);
             Object realIn = fIn.get(this.in);
-            if (realIn.getClass() == DFSInputStream.class) {
-              Method getFileLength = realIn.getClass().
-                getDeclaredMethod("getFileLength", new Class<?> []{});
-              getFileLength.setAccessible(true);
-              long realLength = ((Long)getFileLength.
-                invoke(realIn, new Object []{})).longValue();
-              assert(realLength >= this.length);
-              adjust = realLength - this.length;
-            } else {
-              LOG.info("Input stream class: " + realIn.getClass().getName() +
-                  ", not adjusting length");
-            }
+            Method getFileLength = realIn.getClass().
+              getMethod("getFileLength", new Class<?> []{});
+            getFileLength.setAccessible(true);
+            long realLength = ((Long)getFileLength.
+              invoke(realIn, new Object []{})).longValue();
+            assert(realLength >= this.length);
+            adjust = realLength - this.length;
           } catch(Exception e) {
             SequenceFileLogReader.LOG.warn(
               "Error while trying to get accurate file length.  " +
@@ -149,7 +142,7 @@ public class SequenceFileLogReader imple
   /**
    * This constructor allows a specific HLogKey implementation to override that
    * which would otherwise be chosen via configuration property.
-   *
+   * 
    * @param keyClass
    */
   public SequenceFileLogReader(Class<? extends HLogKey> keyClass) {
@@ -196,7 +189,7 @@ public class SequenceFileLogReader imple
           throw new IOException(iae);
         }
       }
-
+      
       WALEdit val = new WALEdit();
       e = new HLog.Entry(key, val);
     }
@@ -242,8 +235,8 @@ public class SequenceFileLogReader imple
     } catch(Exception e) { /* reflection fail. keep going */ }
 
     String msg = (this.path == null? "": this.path.toString()) +
-      ", entryStart=" + entryStart + ", pos=" + pos +
-      ((end == Long.MAX_VALUE) ? "" : ", end=" + end) +
+      ", entryStart=" + entryStart + ", pos=" + pos + 
+      ((end == Long.MAX_VALUE) ? "" : ", end=" + end) + 
       ", edit=" + this.edit;
 
     // Enhance via reflection so we don't change the original class type
@@ -253,7 +246,7 @@ public class SequenceFileLogReader imple
         .newInstance(msg)
         .initCause(ioe);
     } catch(Exception e) { /* reflection fail. keep going */ }
-
+    
     return ioe;
   }
 }
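
For context, the SequenceFileLogReader hunk above restores the unconditional reflection path in the reader's getPos() override: instead of first checking that the unwrapped stream is a DFSInputStream, the restored code reaches through the FilterInputStream wrapper and invokes getFileLength() on whatever stream it finds, because fs.getLength() can be stale after a region server dies and in.available() returns an int that overflows for files larger than 2 GB. A minimal standalone sketch of that reflection technique follows; the FileLengthProbe class and its -1 fallback are hypothetical, while the field and method names ("in", getFileLength) come from the diff.

import java.io.FilterInputStream;
import java.lang.reflect.Field;
import java.lang.reflect.Method;

// Hypothetical helper mirroring the reflection trick in SequenceFileLogReader:
// unwrap a FilterInputStream and ask the wrapped stream for its up-to-date length.
public final class FileLengthProbe {

  /**
   * Returns the wrapped stream's getFileLength() result, or -1 if the
   * underlying stream exposes no such method or reflection fails.
   */
  public static long realFileLength(FilterInputStream wrapper) {
    try {
      // FilterInputStream keeps its delegate in the protected field "in".
      Field fIn = FilterInputStream.class.getDeclaredField("in");
      fIn.setAccessible(true);
      Object realIn = fIn.get(wrapper);

      // DFSInputStream#getFileLength() is not on a public interface, so it
      // has to be located and invoked reflectively.
      Method getFileLength = realIn.getClass().getMethod("getFileLength");
      getFileLength.setAccessible(true);
      return ((Long) getFileLength.invoke(realIn)).longValue();
    } catch (Exception e) {
      // Mirrors the catch block in the diff: on any reflection failure the
      // caller falls back to the possibly stale NameNode-reported length.
      return -1L;
    }
  }
}

In the actual reader the returned length is compared with this.length and the difference is added to getPos(), which is what lets the SequenceFile.Reader constructor compute the correct end (this.end = in.getPos() + length) for a WAL that was never cleanly closed.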

Modified: hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java?rev=1220896&r1=1220895&r2=1220896&view=diff
==============================================================================
--- hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java (original)
+++ hbase/branches/0.92/src/test/java/org/apache/hadoop/hbase/master/TestDistributedLogSplitting.java Mon Dec 19 18:52:34 2011
@@ -255,7 +255,6 @@ public class TestDistributedLogSplitting
    * detects that the region server has aborted.
    * @throws Exception
    */
-  @Ignore
   @Test (timeout=300000)
   public void testWorkerAbort() throws Exception {
     LOG.info("testWorkerAbort");
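
The TestDistributedLogSplitting hunk above drops the @Ignore annotation that the overcommit had added, leaving testWorkerAbort an ordinary @Test with a five-minute timeout. For reference, a generic JUnit 4 sketch of the two annotations involved (the class and method names below are illustrative, not taken from the test):

import org.junit.Ignore;
import org.junit.Test;

public class AnnotationSemanticsExample {

  // Runs normally, but is failed by the runner if it does not
  // finish within 300 seconds.
  @Test (timeout=300000)
  public void runsWithTimeout() throws Exception {
    // ... test body ...
  }

  // Skipped entirely and reported as ignored, regardless of the
  // @Test annotation that follows.
  @Ignore
  @Test (timeout=300000)
  public void temporarilyDisabled() throws Exception {
    // ... test body ...
  }
}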