You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by ns...@apache.org on 2012/01/20 03:22:47 UTC

svn commit: r1233728 - /hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java

Author: nspiegelberg
Date: Fri Jan 20 02:22:47 2012
New Revision: 1233728

URL: http://svn.apache.org/viewvc?rev=1233728&view=rev
Log:
[master] Fix the MapReduce NPE for ODS rollup job

Summary:
The detailed output of the problem:
http://odsbase122.snc6.facebook.com.:50060/tasklog?taskid=attempt_201201091147_0003_m_000457_0&all=true
or
https://our.intern.facebook.com/intern/paste/?pid=276796

When one scanner times out, the mapper should restart scanning from the last
successful row.
However, if there is no last successful row (lastRow is null) — meaning the
scanner timed out at the very beginning — then restarting from lastRow causes a
NullPointerException.
The mapper should simply restart from the startRow in this case.

Tagged as [master] since no corresponding change is needed in the Apache branch for this case.

Test Plan: mvn test

Reviewers: kannan, kranganathan

Reviewed By: kranganathan

CC: cgthayer, vinodv, daniellee, ods-storage@lists, hbase-eng@lists,
kranganathan, liyintang

Differential Revision: https://phabricator.fb.com/D387115

Modified:
    hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java

Modified: hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java?rev=1233728&r1=1233727&r2=1233728&view=diff
==============================================================================
--- hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java (original)
+++ hbase/branches/0.89-fb/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java Fri Jan 20 02:22:47 2012
@@ -32,7 +32,6 @@ import org.apache.hadoop.hbase.filter.Fi
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Writables;
-
 import org.apache.hadoop.util.StringUtils;
 
 
@@ -44,7 +43,7 @@ public class TableRecordReaderImpl {
 
   private byte [] startRow;
   private byte [] endRow;
-  private byte [] lastRow;
+  private byte [] lastRow = null;
   private Filter trrRowFilter;
   private ResultScanner scanner;
   private HTable htable;
@@ -172,13 +171,17 @@ public class TableRecordReaderImpl {
    */
   public boolean next(ImmutableBytesWritable key, Result value)
   throws IOException {
-    Result result;
+    Result result = null;
     try {
       result = this.scanner.next();
     } catch (UnknownScannerException e) {
       LOG.debug("recovered from " + StringUtils.stringifyException(e));
-      restart(lastRow);
-      this.scanner.next();    // skip presumed already mapped row
+      if (lastRow == null) {
+        restart(startRow);
+      } else {
+        restart(lastRow);
+        this.scanner.next(); // skip presumed already mapped row
+      }
       result = this.scanner.next();
     }