Posted to commits@hbase.apache.org by jg...@apache.org on 2009/08/13 00:34:36 UTC

svn commit: r803729 - in /hadoop/hbase/branches/0.20: CHANGES.txt src/java/org/apache/hadoop/hbase/client/HTable.java src/java/org/apache/hadoop/hbase/client/Scan.java

Author: jgray
Date: Wed Aug 12 22:34:35 2009
New Revision: 803729

URL: http://svn.apache.org/viewvc?rev=803729&view=rev
Log:
HBASE-1759  Ability to specify scanner caching on a per-scan basis
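
For illustration, the new per-scan caching override would be used from client code roughly as follows (a minimal sketch against the 0.20 client API; the table name "mytable" and column family "info" are hypothetical and not part of this patch):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PerScanCachingExample {
      public static void main(String[] args) throws Exception {
        HBaseConfiguration conf = new HBaseConfiguration();
        HTable table = new HTable(conf, "mytable");    // hypothetical table name
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("info"));         // hypothetical column family
        scan.setCaching(500);                          // fetch 500 rows per next() RPC, for this scan only
        ResultScanner scanner = table.getScanner(scan);
        try {
          for (Result result : scanner) {
            System.out.println(Bytes.toString(result.getRow()));
          }
        } finally {
          scanner.close();                             // always release the server-side scanner
        }
      }
    }

Larger caching values mean fewer round trips per scan at the cost of more client and region server memory, which is why the patch lets callers tune it per scan rather than only per table.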

Modified:
    hadoop/hbase/branches/0.20/CHANGES.txt
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/HTable.java
    hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Scan.java

Modified: hadoop/hbase/branches/0.20/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/CHANGES.txt?rev=803729&r1=803728&r2=803729&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/CHANGES.txt (original)
+++ hadoop/hbase/branches/0.20/CHANGES.txt Wed Aug 12 22:34:35 2009
@@ -550,6 +550,8 @@
    HBASE-1743  [debug tool] Add regionsInTransition list to ClusterStatus
                detailed output
    HBASE-1760  Cleanup TODOs in HTable
+   HBASE-1759  Ability to specify scanner caching on a per-scan basis
+               (Ken Weiner via jgray)
 
   OPTIMIZATIONS
    HBASE-1412  Change values for delete column and column family in KeyValue

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/HTable.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/HTable.java?rev=803729&r1=803728&r2=803729&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/HTable.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/HTable.java Wed Aug 12 22:34:35 2009
@@ -1785,7 +1785,7 @@
     private HRegionInfo currentRegion = null;
     private ScannerCallable callable = null;
     private final LinkedList<Result> cache = new LinkedList<Result>();
-    private final int caching = HTable.this.scannerCaching;
+    private final int caching;
     private long lastNext;
     // Keep lastResult returned successfully in case we have to reset scanner.
     private Result lastResult = null;
@@ -1798,7 +1798,14 @@
       }
       this.scan = scan;
       this.lastNext = System.currentTimeMillis();
-      
+
+      // Use the caching from the Scan.  If not set, use the default cache setting for this table.
+      if (this.scan.getCaching() > 0) {
+        this.caching = this.scan.getCaching();
+      } else {
+        this.caching = HTable.this.scannerCaching;
+      }
+
       // Removed filter validation.  We have a new format now, only one of all
       // the current filters has a validate() method.  We can add it back,
       // need to decide on what we're going to do re: filter redesign.
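
For comparison, the table-wide default that the new field falls back to is still set on the HTable itself. A hedged sketch follows; it assumes HTable#setScannerCaching is available alongside the getScannerCaching referenced in the Scan javadoc below, and that the table-level default is read from the hbase.client.scanner.caching configuration key:

    // Table-wide default: applies to every scan on this HTable instance
    // that does not set its own caching value.
    HTable table = new HTable(new HBaseConfiguration(), "mytable");  // hypothetical table name
    table.setScannerCaching(100);

    // Per-scan override introduced by this patch: a caching value > 0 on the
    // Scan wins; otherwise the table default above is used.
    Scan scan = new Scan();
    scan.setCaching(1000);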

Modified: hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Scan.java
URL: http://svn.apache.org/viewvc/hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Scan.java?rev=803729&r1=803728&r2=803729&view=diff
==============================================================================
--- hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Scan.java (original)
+++ hadoop/hbase/branches/0.20/src/java/org/apache/hadoop/hbase/client/Scan.java Wed Aug 12 22:34:35 2009
@@ -20,6 +20,14 @@
 
 package org.apache.hadoop.hbase.client;
 
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.TreeMap;
+import java.util.TreeSet;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.KeyValue;
@@ -30,14 +38,6 @@
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.TreeMap;
-import java.util.TreeSet;
-
 /**
  * Used to perform Scan operations.
  * <p>
@@ -47,6 +47,9 @@
  * iterate over all rows.
  * <p>
  * To scan everything for each row, instantiate a Scan object.
+ * <p>
+ * To modify scanner caching for just this scan, use {@link #setCaching(int) setCaching}.
+ * <p>
  * To further define the scope of what to get when scanning, perform additional 
  * methods as outlined below.
  * <p>
@@ -71,6 +74,7 @@
   private byte [] startRow = HConstants.EMPTY_START_ROW;
   private byte [] stopRow  = HConstants.EMPTY_END_ROW;
   private int maxVersions = 1;
+  private int caching = -1;
   private Filter filter = null;
   private RowFilterInterface oldFilter = null;
   private TimeRange tr = new TimeRange();
@@ -118,6 +122,7 @@
     startRow = scan.getStartRow();
     stopRow  = scan.getStopRow();
     maxVersions = scan.getMaxVersions();
+    caching = scan.getCaching();
     filter = scan.getFilter(); // clone?
     oldFilter = scan.getOldFilter(); // clone?
     TimeRange ctr = scan.getTimeRange();
@@ -308,7 +313,17 @@
     this.maxVersions = maxVersions;
     return this;
   }
-  
+
+  /**
+   * Set the number of rows for caching that will be passed to scanners.
+   * If not set, the default setting from {@link HTable#getScannerCaching()} will apply.
+   * Higher caching values will enable faster scanners but will use more memory.
+   * @param caching the number of rows for caching
+   */
+  public void setCaching(int caching) {
+    this.caching = caching;
+  }
+
   /**
    * Apply the specified server-side filter when performing the Scan.
    * @param filter filter to run on the server
@@ -397,6 +412,13 @@
   } 
 
   /**
+   * @return caching the number of rows fetched when calling next on a scanner
+   */
+  public int getCaching() {
+    return this.caching;
+  } 
+
+  /**
    * @return TimeRange
    */
   public TimeRange getTimeRange() {
@@ -438,6 +460,8 @@
     sb.append(Bytes.toString(this.stopRow));
     sb.append(", maxVersions=");
     sb.append("" + this.maxVersions);
+    sb.append(", caching=");
+    sb.append("" + this.caching);
     sb.append(", timeRange=");
     sb.append("[" + this.tr.getMin() + "," + this.tr.getMax() + ")");
     sb.append(", families=");
@@ -493,6 +517,7 @@
     this.startRow = Bytes.readByteArray(in);
     this.stopRow = Bytes.readByteArray(in);
     this.maxVersions = in.readInt();
+    this.caching = in.readInt();
     if(in.readBoolean()) {
       this.filter = (Filter)createForName(Bytes.toString(Bytes.readByteArray(in)));
       this.filter.readFields(in);
@@ -524,6 +549,7 @@
     Bytes.writeByteArray(out, this.startRow);
     Bytes.writeByteArray(out, this.stopRow);
     out.writeInt(this.maxVersions);
+    out.writeInt(this.caching);
     if(this.filter == null) {
       out.writeBoolean(false);
     } else {