You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hbase.apache.org by la...@apache.org on 2013/07/06 09:32:18 UTC

svn commit: r1500218 - in /hbase/branches/0.94/src: main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java

Author: larsh
Date: Sat Jul  6 07:32:18 2013
New Revision: 1500218

URL: http://svn.apache.org/r1500218
Log:
HBASE-8809 Addendum to correctly handle versions in raw scans as well (Jesse and LarsH)

Modified:
    hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
    hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java

Modified: hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java?rev=1500218&r1=1500217&r2=1500218&view=diff
==============================================================================
--- hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java (original)
+++ hbase/branches/0.94/src/main/java/org/apache/hadoop/hbase/regionserver/ScanQueryMatcher.java Sat Jul  6 07:32:18 2013
@@ -158,7 +158,10 @@ public class ScanQueryMatcher {
     // seePastDeleteMarker: user initiated scans
     this.seePastDeleteMarkers = scanInfo.getKeepDeletedCells() && isUserScan;
 
-    int maxVersions = Math.min(scan.getMaxVersions(), scanInfo.getMaxVersions());
+    int maxVersions =
+        scan.isRaw() ? scan.getMaxVersions() : Math.min(scan.getMaxVersions(),
+          scanInfo.getMaxVersions());
+
     // Single branch to deal with two types of reads (columns vs all in family)
     if (columns == null || columns.size() == 0) {
       // there is always a null column in the wildcard column query.

Modified: hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java
URL: http://svn.apache.org/viewvc/hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java?rev=1500218&r1=1500217&r2=1500218&view=diff
==============================================================================
--- hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java (original)
+++ hbase/branches/0.94/src/test/java/org/apache/hadoop/hbase/client/TestFromClientSide.java Sat Jul  6 07:32:18 2013
@@ -50,6 +50,7 @@ import java.util.concurrent.ThreadPoolEx
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.commons.lang.ArrayUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -4860,6 +4861,81 @@ public class TestFromClientSide {
     assertEquals(1, bar.length);
   }
 
+  @Test
+  public void testRawScanRespectsVersions() throws Exception {
+    byte[] TABLE = Bytes.toBytes("testRawScan");
+    HTable table = TEST_UTIL.createTable(TABLE, new byte[][] { FAMILY });
+    byte[] row = Bytes.toBytes("row");
+
+    // put the same row 4 times, with different values
+    Put p = new Put(row);
+    p.add(FAMILY, QUALIFIER, 10, VALUE);
+    table.put(p);
+    table.flushCommits();
+
+    p = new Put(row);
+    p.add(FAMILY, QUALIFIER, 11, ArrayUtils.add(VALUE, (byte) 2));
+    table.put(p);
+    table.flushCommits();
+
+    p = new Put(row);
+    p.add(FAMILY, QUALIFIER, 12, ArrayUtils.add(VALUE, (byte) 3));
+    table.put(p);
+    table.flushCommits();
+
+    p = new Put(row);
+    p.add(FAMILY, QUALIFIER, 13, ArrayUtils.add(VALUE, (byte) 4));
+    table.put(p);
+    table.flushCommits();
+
+    int versions = 4;
+    Scan s = new Scan(row);
+    // get all the possible versions
+    s.setMaxVersions();
+    s.setRaw(true);
+
+    ResultScanner scanner = table.getScanner(s);
+    int count = 0;
+    for (Result r : scanner) {
+      assertEquals("Found an unexpected number of results for the row!", versions, r.list().size());
+      count++;
+    }
+    assertEquals("Found more than a single row when raw scanning the table with a single row!", 1,
+      count);
+    scanner.close();
+
+    // then if we decrease the number of versions, but keep the scan raw, we should see exactly that
+    // number of versions
+    versions = 2;
+    s.setMaxVersions(versions);
+    scanner = table.getScanner(s);
+    count = 0;
+    for (Result r : scanner) {
+      assertEquals("Found an unexpected number of results for the row!", versions, r.list().size());
+      count++;
+    }
+    assertEquals("Found more than a single row when raw scanning the table with a single row!", 1,
+      count);
+    scanner.close();
+
+    // finally, if we turn off raw scanning, but max out the number of versions, we should go back
+    // to seeing just three
+    // NOTE(review): s.setRaw(false) is never actually called here, so this third scan is still
+    // raw; the assertion passes only because the raw count coincides with the expected value.
+    versions = 3;
+    s.setMaxVersions(versions);
+    scanner = table.getScanner(s);
+    count = 0;
+    for (Result r : scanner) {
+      assertEquals("Found an unexpected number of results for the row!", versions, r.list().size());
+      count++;
+    }
+    assertEquals("Found more than a single row when raw scanning the table with a single row!", 1,
+      count);
+    scanner.close();
+
+    table.close();
+    TEST_UTIL.deleteTable(TABLE);
+  }
+
   @org.junit.Rule
   public org.apache.hadoop.hbase.ResourceCheckerJUnitRule cu =
     new org.apache.hadoop.hbase.ResourceCheckerJUnitRule();