Posted to commits@hbase.apache.org by jd...@apache.org on 2010/04/05 22:04:37 UTC

svn commit: r930945 - in /hadoop/hbase/trunk: CHANGES.txt core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java

Author: jdcryans
Date: Mon Apr  5 20:04:36 2010
New Revision: 930945

URL: http://svn.apache.org/viewvc?rev=930945&view=rev
Log:
HBASE-2252  Mapping a very big table kills region servers

Modified:
    hadoop/hbase/trunk/CHANGES.txt
    hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
    hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java

Modified: hadoop/hbase/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/CHANGES.txt?rev=930945&r1=930944&r2=930945&view=diff
==============================================================================
--- hadoop/hbase/trunk/CHANGES.txt (original)
+++ hadoop/hbase/trunk/CHANGES.txt Mon Apr  5 20:04:36 2010
@@ -482,6 +482,7 @@ Release 0.21.0 - Unreleased
    HBASE-2402  [stargate] set maxVersions on gets
    HBASE-2087  The wait on compaction because "Too many store files" 
                holds up all flushing
+   HBASE-2252  Mapping a very big table kills region servers
 
   NEW FEATURES
    HBASE-1961  HBase EC2 scripts

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java?rev=930945&r1=930944&r2=930945&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java Mon Apr  5 20:04:36 2010
@@ -62,6 +62,7 @@ public class TableRecordReaderImpl {
         Scan scan = new Scan(firstRow, endRow);
         scan.addColumns(trrInputColumns);
         scan.setFilter(trrRowFilter);
+        scan.setCacheBlocks(false);
         this.scanner = this.htable.getScanner(scan);
       } else {
         LOG.debug("TIFB.restart, firstRow: " +
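
The change above unconditionally disables block caching for the scan driving the
old-API (mapred) record reader. A full-table MapReduce scan touches each block
only once, so letting it populate the region servers' block cache evicts the hot
blocks that serve regular client reads. A minimal client-side sketch of the same
pattern, assuming a hypothetical table "mytable" with a column family "cf" (any
long, one-pass scan benefits from skipping the cache the same way):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FullScanSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "mytable");      // hypothetical table
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("cf"));             // hypothetical family
        // One-pass scan: skip the block cache so it does not evict
        // blocks that are hot for regular client reads.
        scan.setCacheBlocks(false);
        ResultScanner scanner = table.getScanner(scan);
        try {
          for (Result row : scanner) {
            // process each row here
          }
        } finally {
          scanner.close();
        }
      }
    }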

Modified: hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java?rev=930945&r1=930944&r2=930945&view=diff
==============================================================================
--- hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java (original)
+++ hadoop/hbase/trunk/core/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java Mon Apr  5 20:04:36 2010
@@ -127,14 +127,13 @@ implements Configurable {
         if (conf.get(SCAN_MAXVERSIONS) != null) {
           scan.setMaxVersions(Integer.parseInt(conf.get(SCAN_MAXVERSIONS)));
         }
-        
-        if (conf.get(SCAN_CACHEBLOCKS) != null) {
-          scan.setCacheBlocks(Boolean.parseBoolean(conf.get(SCAN_CACHEBLOCKS)));
-        }
 
         if (conf.get(SCAN_CACHEDROWS) != null) {
           scan.setCaching(Integer.parseInt(conf.get(SCAN_CACHEDROWS)));
         }
+
+        // false by default, full table scans generate too much BC churn
+        scan.setCacheBlocks((conf.getBoolean(SCAN_CACHEBLOCKS, false)));
       } catch (Exception e) {
           LOG.error(StringUtils.stringifyException(e));
       }
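
The mapreduce-package change keeps the same behavior but makes it configurable:
when TableInputFormat builds its Scan from the SCAN_* configuration keys, block
caching now defaults to false and is enabled only if the job sets
SCAN_CACHEBLOCKS to true. A minimal job-setup sketch of opting back in, with
hypothetical table and job names (note this applies to the property-based
configuration path shown in the diff; a job that passes its own Scan object,
e.g. via TableMapReduceUtil, would call scan.setCacheBlocks(true) on that Scan
directly):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
    import org.apache.hadoop.mapreduce.Job;

    public class CacheBlocksOptIn {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // TableInputFormat.setConf() builds its Scan from these keys.
        conf.set(TableInputFormat.INPUT_TABLE, "mytable");         // hypothetical
        // Opt back in to block caching; after this commit it defaults to false.
        conf.setBoolean(TableInputFormat.SCAN_CACHEBLOCKS, true);
        Job job = new Job(conf, "scan-mytable");                   // hypothetical
        job.setInputFormatClass(TableInputFormat.class);
        // ... set the mapper, output key/value types, and output format, then:
        job.waitForCompletion(true);
      }
    }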