Posted to commits@phoenix.apache.org by ja...@apache.org on 2015/12/14 20:47:59 UTC

[1/4] phoenix git commit: PHOENIX-2332 Fix timing out of MutableIndexFailureIT.testWriteFailureDisablesIndex()

Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 0989a80b4 -> 59a859eb3


PHOENIX-2332 Fix timing out of MutableIndexFailureIT.testWriteFailureDisablesIndex()


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/e3bc665f
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/e3bc665f
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/e3bc665f

Branch: refs/heads/4.x-HBase-0.98
Commit: e3bc665fa513f52957a3ba19252e68c398300117
Parents: 0989a80
Author: James Taylor <jt...@salesforce.com>
Authored: Mon Dec 14 11:45:17 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Dec 14 11:45:17 2015 -0800

----------------------------------------------------------------------
 .../end2end/index/MutableIndexFailureIT.java    | 260 ++++++++++---------
 1 file changed, 131 insertions(+), 129 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/e3bc665f/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
index 6d9d5aa..e92c710 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexFailureIT.java
@@ -61,6 +61,7 @@ import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.BeforeClass;
+import org.junit.Ignore;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;
 import org.junit.runner.RunWith;
@@ -99,7 +100,7 @@ public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
         this.fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
         this.fullIndexName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, indexName);
     }
-    
+
     @BeforeClass
     public static void doSetup() throws Exception {
         Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(10);
@@ -111,7 +112,7 @@ public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
         NUM_SLAVES_BASE = 4;
         setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
     }
-    
+
     @Parameters(name = "transactional = {0}")
     public static Collection<Boolean[]> data() {
         return Arrays.asList(new Boolean[][] { { false }, { true } });
@@ -121,12 +122,12 @@ public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
     public void testWriteFailureDisablesLocalIndex() throws Exception {
         helpTestWriteFailureDisablesIndex(true);
     }
- 
+
     @Test
     public void testWriteFailureDisablesIndex() throws Exception {
         helpTestWriteFailureDisablesIndex(false);
     }
-    
+
     public void helpTestWriteFailureDisablesIndex(boolean localIndex) throws Exception {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         try (Connection conn = driver.connect(url, props)) {
@@ -138,21 +139,21 @@ public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
             query = "SELECT * FROM " + fullTableName;
             rs = conn.createStatement().executeQuery(query);
             assertFalse(rs.next());
-    
+
             if(localIndex) {
                 conn.createStatement().execute(
-                    "CREATE LOCAL INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE (v2)");
+                        "CREATE LOCAL INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE (v2)");
                 conn.createStatement().execute(
-                    "CREATE LOCAL INDEX " + indexName+ "_2" + " ON " + fullTableName + " (v2) INCLUDE (v1)");
+                        "CREATE LOCAL INDEX " + indexName+ "_2" + " ON " + fullTableName + " (v2) INCLUDE (v1)");
             } else {
                 conn.createStatement().execute(
-                    "CREATE INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE (v2)");
+                        "CREATE INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE (v2)");
             }
-                
+
             query = "SELECT * FROM " + fullIndexName;
             rs = conn.createStatement().executeQuery(query);
             assertFalse(rs.next());
-    
+
             // Verify the metadata for index is correct.
             rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), indexName,
                     new String[] { PTableType.INDEX.toString() });
@@ -160,24 +161,24 @@ public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
             assertEquals(indexName, rs.getString(3));
             assertEquals(PIndexState.ACTIVE.toString(), rs.getString("INDEX_STATE"));
             assertFalse(rs.next());
-            
+
             PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " VALUES(?,?,?)");
             stmt.setString(1, "a");
             stmt.setString(2, "x");
             stmt.setString(3, "1");
             stmt.execute();
             conn.commit();
-    
+
             TableName indexTable =
                     TableName.valueOf(localIndex ? MetaDataUtil
                             .getLocalIndexTableName(fullTableName) : fullIndexName);
             HBaseAdmin admin = this.getUtility().getHBaseAdmin();
             HTableDescriptor indexTableDesc = admin.getTableDescriptor(indexTable);
             try{
-              admin.disableTable(indexTable);
-              admin.deleteTable(indexTable);
+                admin.disableTable(indexTable);
+                admin.deleteTable(indexTable);
             } catch (TableNotFoundException ignore) {}
-    
+
             stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " VALUES(?,?,?)");
             stmt.setString(1, "a2");
             stmt.setString(2, "x2");
@@ -194,7 +195,7 @@ public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
             else {
                 conn.commit();
             }
-    
+
             // Verify the metadata for index is correct.
             rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), indexName,
                     new String[] { PTableType.INDEX.toString() });
@@ -206,13 +207,13 @@ public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
             assertFalse(rs.next());
             if(localIndex) {
                 rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), indexName + "_2",
-                    new String[] { PTableType.INDEX.toString() });
+                        new String[] { PTableType.INDEX.toString() });
                 assertTrue(rs.next());
                 assertEquals(indexName + "_2", rs.getString(3));
                 assertEquals(indexState.toString(), rs.getString("INDEX_STATE"));
                 assertFalse(rs.next());
             }
-    
+
             // if the table is transactional the write to the index table will fail because the
             // index has not been disabled
             if (!transactional) {
@@ -224,7 +225,7 @@ public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
                 stmt.execute();
                 conn.commit();
             }
-            
+
             if (transactional) {
                 // if the table was transactional there should be 1 row (written before the index
                 // was disabled)
@@ -254,27 +255,27 @@ public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
                 assertEquals("3", rs.getString(1));
                 assertFalse(rs.next());
             }
-            
+
             // recreate index table
             admin.createTable(indexTableDesc);
             do {
-              Thread.sleep(15 * 1000); // sleep 15 secs
-              rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), indexName,
-                  new String[] { PTableType.INDEX.toString() });
-              assertTrue(rs.next());
-              if(PIndexState.ACTIVE.toString().equals(rs.getString("INDEX_STATE"))){
-                  break;
-              }
-              if(localIndex) {
-                  rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), indexName + "_2",
-                      new String[] { PTableType.INDEX.toString() });
-                  assertTrue(rs.next());
-                  if(PIndexState.ACTIVE.toString().equals(rs.getString("INDEX_STATE"))){
-                      break;
-                  }
-              }
+                Thread.sleep(15 * 1000); // sleep 15 secs
+                rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), indexName,
+                        new String[] { PTableType.INDEX.toString() });
+                assertTrue(rs.next());
+                if(PIndexState.ACTIVE.toString().equals(rs.getString("INDEX_STATE"))){
+                    break;
+                }
+                if(localIndex) {
+                    rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), indexName + "_2",
+                            new String[] { PTableType.INDEX.toString() });
+                    assertTrue(rs.next());
+                    if(PIndexState.ACTIVE.toString().equals(rs.getString("INDEX_STATE"))){
+                        break;
+                    }
+                }
             } while(true);
-            
+
             // Verify UPSERT on data table still work after index table is recreated
             stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " VALUES(?,?,?)");
             stmt.setString(1, "a4");
@@ -293,105 +294,106 @@ public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
             // from where we failed and the oldest
             // index row has been deleted when we dropped the index table during test
             assertEquals(transactional ? 1 : 3, rs.getInt(1));
-            }
         }
-        
-        @Test
-        public void testWriteFailureWithRegionServerDown() throws Exception {
-            String query;
-            ResultSet rs;
-    
-            Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-            try (Connection conn = driver.connect(url, props);) {
-                conn.setAutoCommit(false);
-                conn.createStatement().execute(
-                        "CREATE TABLE " + fullTableName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) "+tableDDLOptions);
-                query = "SELECT * FROM " + fullTableName;
-                rs = conn.createStatement().executeQuery(query);
-                assertFalse(rs.next());
-        
-                conn.createStatement().execute(
-                        "CREATE INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE (v2)");
-                query = "SELECT * FROM " + fullIndexName;
-                rs = conn.createStatement().executeQuery(query);
-                assertFalse(rs.next());
-        
-                // Verify the metadata for index is correct.
+    }
+
+    @Ignore("See PHOENIX-2332")
+    @Test
+    public void testWriteFailureWithRegionServerDown() throws Exception {
+        String query;
+        ResultSet rs;
+
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        try (Connection conn = driver.connect(url, props);) {
+            conn.setAutoCommit(false);
+            conn.createStatement().execute(
+                    "CREATE TABLE " + fullTableName + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) "+tableDDLOptions);
+            query = "SELECT * FROM " + fullTableName;
+            rs = conn.createStatement().executeQuery(query);
+            assertFalse(rs.next());
+
+            conn.createStatement().execute(
+                    "CREATE INDEX " + indexName + " ON " + fullTableName + " (v1) INCLUDE (v2)");
+            query = "SELECT * FROM " + fullIndexName;
+            rs = conn.createStatement().executeQuery(query);
+            assertFalse(rs.next());
+
+            // Verify the metadata for index is correct.
+            rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), indexName,
+                    new String[] { PTableType.INDEX.toString() });
+            assertTrue(rs.next());
+            assertEquals(indexName, rs.getString(3));
+            assertEquals(PIndexState.ACTIVE.toString(), rs.getString("INDEX_STATE"));
+            assertFalse(rs.next());
+
+            PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " VALUES(?,?,?)");
+            stmt.setString(1, "a");
+            stmt.setString(2, "x");
+            stmt.setString(3, "1");
+            stmt.execute();
+            conn.commit();
+
+            // find a RS which doesn't host the CATALOG table
+            TableName catalogTable = TableName.valueOf("SYSTEM.CATALOG");
+            TableName indexTable = TableName.valueOf(fullIndexName);
+            final HBaseCluster cluster = this.getUtility().getHBaseCluster();
+            Collection<ServerName> rss = cluster.getClusterStatus().getServers();
+            HBaseAdmin admin = this.getUtility().getHBaseAdmin();
+            List<HRegionInfo> regions = admin.getTableRegions(catalogTable);
+            ServerName catalogRS = cluster.getServerHoldingRegion(regions.get(0).getRegionName());
+            ServerName metaRS = cluster.getServerHoldingMeta();
+            ServerName rsToBeKilled = null;
+
+            // find the first RS that isn't holding the META or CATALOG table
+            for(ServerName curRS : rss) {
+                if(!curRS.equals(catalogRS) && !metaRS.equals(curRS)) {
+                    rsToBeKilled = curRS;
+                    break;
+                }
+            }
+            assertTrue(rsToBeKilled != null);
+
+            regions = admin.getTableRegions(indexTable);
+            final HRegionInfo indexRegion = regions.get(0);
+            final ServerName dstRS = rsToBeKilled;
+            admin.move(indexRegion.getEncodedNameAsBytes(), Bytes.toBytes(rsToBeKilled.getServerName()));
+            this.getUtility().waitFor(30000, 200, new Waiter.Predicate<Exception>() {
+                @Override
+                public boolean evaluate() throws Exception {
+                    ServerName sn = cluster.getServerHoldingRegion(indexRegion.getRegionName());
+                    return (sn != null && sn.equals(dstRS));
+                }
+            });
+
+            // use a timer to send updates every 10ms
+            this.scheduleTimer = new Timer(true);
+            this.scheduleTimer.schedule(new SendingUpdatesScheduleTask(conn, fullTableName), 0, 10);
+            // let the timer send some updates
+            Thread.sleep(100);
+
+            // kill the RS hosting the index table
+            this.getUtility().getHBaseCluster().killRegionServer(rsToBeKilled);
+
+            // wait for the index table to complete recovery
+            this.getUtility().waitUntilAllRegionsAssigned(indexTable);
+
+            // Verify the metadata for index is correct.       
+            do {
+                Thread.sleep(15 * 1000); // sleep 15 secs
                 rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), indexName,
                         new String[] { PTableType.INDEX.toString() });
                 assertTrue(rs.next());
-                assertEquals(indexName, rs.getString(3));
-                assertEquals(PIndexState.ACTIVE.toString(), rs.getString("INDEX_STATE"));
-                assertFalse(rs.next());
-                
-                PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + fullTableName + " VALUES(?,?,?)");
-                stmt.setString(1, "a");
-                stmt.setString(2, "x");
-                stmt.setString(3, "1");
-                stmt.execute();
-                conn.commit();
-                
-                // find a RS which doesn't has CATALOG table
-                TableName catalogTable = TableName.valueOf("SYSTEM.CATALOG");
-                TableName indexTable = TableName.valueOf(fullIndexName);
-                final HBaseCluster cluster = this.getUtility().getHBaseCluster();
-                Collection<ServerName> rss = cluster.getClusterStatus().getServers();
-                HBaseAdmin admin = this.getUtility().getHBaseAdmin();
-                List<HRegionInfo> regions = admin.getTableRegions(catalogTable);
-                ServerName catalogRS = cluster.getServerHoldingRegion(regions.get(0).getRegionName());
-                ServerName metaRS = cluster.getServerHoldingMeta();
-                ServerName rsToBeKilled = null;
-                
-                // find first RS isn't holding META or CATALOG table
-                for(ServerName curRS : rss) {
-                    if(!curRS.equals(catalogRS) && !metaRS.equals(curRS)) {
-                        rsToBeKilled = curRS;
-                        break;
-                    }
+                if(PIndexState.ACTIVE.toString().equals(rs.getString("INDEX_STATE"))){
+                    break;
                 }
-                assertTrue(rsToBeKilled != null);
-                
-                regions = admin.getTableRegions(indexTable);
-                final HRegionInfo indexRegion = regions.get(0);
-                final ServerName dstRS = rsToBeKilled;
-                admin.move(indexRegion.getEncodedNameAsBytes(), Bytes.toBytes(rsToBeKilled.getServerName()));
-                this.getUtility().waitFor(30000, 200, new Waiter.Predicate<Exception>() {
-                    @Override
-                    public boolean evaluate() throws Exception {
-                      ServerName sn = cluster.getServerHoldingRegion(indexRegion.getRegionName());
-                      return (sn != null && sn.equals(dstRS));
-                    }
-                  });
-                
-                // use timer sending updates in every 10ms
-                this.scheduleTimer = new Timer(true);
-                this.scheduleTimer.schedule(new SendingUpdatesScheduleTask(conn, fullTableName), 0, 10);
-                // let timer sending some updates
-                Thread.sleep(100);
-                
-                // kill RS hosting index table
-                this.getUtility().getHBaseCluster().killRegionServer(rsToBeKilled);
-                
-                // wait for index table completes recovery
-                this.getUtility().waitUntilAllRegionsAssigned(indexTable);
-                
-                // Verify the metadata for index is correct.       
-                do {
-                  Thread.sleep(15 * 1000); // sleep 15 secs
-                  rs = conn.getMetaData().getTables(null, StringUtil.escapeLike(TestUtil.DEFAULT_SCHEMA_NAME), indexName,
-                      new String[] { PTableType.INDEX.toString() });
-                  assertTrue(rs.next());
-                  if(PIndexState.ACTIVE.toString().equals(rs.getString("INDEX_STATE"))){
-                      break;
-                  }
-                } while(true);
-                this.scheduleTimer.cancel();
-            }
+            } while(true);
+            this.scheduleTimer.cancel();
+        }
     }
-    
+
     static class SendingUpdatesScheduleTask extends TimerTask {
         private static final Log LOG = LogFactory.getLog(SendingUpdatesScheduleTask.class);
-        
+
         // inProgress is to prevent timer from invoking a new task while previous one is still
         // running
         private final static AtomicInteger inProgress = new AtomicInteger(0);
@@ -408,7 +410,7 @@ public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
             if(inProgress.get() > 0){
                 return;
             }
-            
+
             try {
                 inProgress.incrementAndGet();
                 inserts++;
@@ -425,5 +427,5 @@ public class MutableIndexFailureIT extends BaseOwnClusterHBaseManagedTimeIT {
             }
         }
     }
-    
+
 }

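Aside: the inProgress guard in SendingUpdatesScheduleTask above is a reusable idiom for
java.util.Timer workloads. A minimal sketch of it in isolation (class and method names
here are hypothetical, not the test's exact code):

import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicInteger;

class NonOverlappingTask extends TimerTask {
    // prevents the timer from invoking a new task while the previous one is
    // still running (a slow or blocked upsert must not pile up behind itself)
    private static final AtomicInteger inProgress = new AtomicInteger(0);

    @Override
    public void run() {
        if (inProgress.get() > 0) {
            return; // previous run still active; skip this tick
        }
        try {
            inProgress.incrementAndGet();
            doWork(); // e.g. the upsert the test issues every 10ms
        } finally {
            inProgress.decrementAndGet();
        }
    }

    private void doWork() { /* workload goes here */ }

    public static void main(String[] args) throws InterruptedException {
        new Timer(true).schedule(new NonOverlappingTask(), 0, 10);
        Thread.sleep(100); // let the timer fire a few times, as the test does
    }
}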

[3/4] phoenix git commit: PHOENIX-2455 Partial results for a query when PHOENIX-2194 is applied

Posted by ja...@apache.org.
PHOENIX-2455 Partial results for a query when PHOENIX-2194 is applied


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9f82aaa1
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9f82aaa1
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9f82aaa1

Branch: refs/heads/4.x-HBase-0.98
Commit: 9f82aaa1399200de560958ff9be85bc5c43b3132
Parents: 0790f56
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Dec 13 14:32:29 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Dec 14 11:46:17 2015 -0800

----------------------------------------------------------------------
 .../apache/phoenix/end2end/SkipScanQueryIT.java | 31 +++++++++
 .../apache/phoenix/filter/SkipScanFilter.java   |  3 +-
 .../java/org/apache/phoenix/query/KeyRange.java | 48 +++++++++-----
 .../java/org/apache/phoenix/util/ScanUtil.java  |  2 +-
 .../phoenix/filter/SkipScanFilterTest.java      | 66 +++++++++++++-------
 5 files changed, 108 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9f82aaa1/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
index 1937f65..2ade0a4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipScanQueryIT.java
@@ -110,6 +110,37 @@ public class SkipScanQueryIT extends BaseHBaseManagedTimeIT {
     }
     
     @Test
+    public void testSkipScanFilterQuery() throws Exception {
+        String createTableDDL = "CREATE TABLE test" + "(col1 VARCHAR," + "col2 VARCHAR," + "col3 VARCHAR,"
+             + "col4 VARCHAR," + "CONSTRAINT pk  " + "PRIMARY KEY (col1,col2,col3,col4))";
+        String upsertQuery = "upsert into test values(?,?,?,?)";
+        String query = "SELECT col1, col2, col3, col4 FROM test WHERE col1 IN ('a','e','f') AND col2 = 'b' AND col4 = '1' ";
+        String[] col1Values = { "a", "e.f", "f" };
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        createTestTable(getUrl(), createTableDDL);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(true);
+        try {
+            PreparedStatement statement = conn.prepareStatement(upsertQuery);
+            for (String col1Value : col1Values) {
+                statement.setString(1, col1Value);
+                statement.setString(2, "b");
+                statement.setString(3, "");
+                statement.setString(4, "1");
+                statement.execute();
+            }
+            ResultSet rs = conn.createStatement().executeQuery(query);
+            assertTrue(rs.next());
+            assertEquals(rs.getString(1), "a");
+            assertTrue(rs.next());
+            assertEquals(rs.getString(1), "f");
+            assertFalse(rs.next());
+        } finally {
+            conn.close();
+        }
+    }
+
+    @Test
     public void testSelectAfterUpsertInQuery() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl());
         initSelectAfterUpsertTable(conn);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9f82aaa1/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
index d1c8532..667b3d7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
@@ -580,8 +580,7 @@ public class SkipScanFilter extends FilterBase implements Writable {
             List<KeyRange> orClause = Lists.newArrayListWithExpectedSize(orLen);
             slots.add(orClause);
             for (int j=0; j<orLen; j++) {
-                KeyRange range = new KeyRange();
-                range.readFields(in);
+                KeyRange range = KeyRange.read(in);
                 orClause.add(range);
             }
         }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9f82aaa1/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java b/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
index 6618aa5..f4bf793 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/KeyRange.java
@@ -119,15 +119,30 @@ public class KeyRange implements Writable {
         return getKeyRange(lowerRange, true, upperRange, false);
     }
 
-    // TODO: make non public and move to org.apache.phoenix.type soon
-    public static KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive,
+    private static KeyRange getSingleton(byte[] lowerRange, boolean lowerInclusive,
             byte[] upperRange, boolean upperInclusive) {
         if (lowerRange == null || upperRange == null) {
             return EMPTY_RANGE;
         }
-        // Need to treat null differently for a point range
-        if (lowerRange.length == 0 && upperRange.length == 0 && lowerInclusive && upperInclusive) {
-            return IS_NULL_RANGE;
+        if (lowerRange.length == 0 && upperRange.length == 0) {
+            // Need singleton to represent NULL range so it gets treated differently
+            // than an unbound RANGE.
+            return lowerInclusive && upperInclusive ? IS_NULL_RANGE : EVERYTHING_RANGE;
+        }
+        if (lowerRange.length != 0 && upperRange.length != 0) {
+            int cmp = Bytes.compareTo(lowerRange, upperRange);
+            if (cmp > 0 || (cmp == 0 && !(lowerInclusive && upperInclusive))) {
+                return EMPTY_RANGE;
+            }
+        }
+        return null;
+    }
+    
+    public static KeyRange getKeyRange(byte[] lowerRange, boolean lowerInclusive,
+            byte[] upperRange, boolean upperInclusive) {
+        KeyRange range = getSingleton(lowerRange, lowerInclusive, upperRange, upperInclusive);
+        if (range != null) {
+            return range;
         }
         boolean unboundLower = false;
         boolean unboundUpper = false;
@@ -142,20 +157,23 @@ public class KeyRange implements Writable {
             unboundUpper = true;
         }
 
-        if (unboundLower && unboundUpper) {
-            return EVERYTHING_RANGE;
-        }
-        if (!unboundLower && !unboundUpper) {
-            int cmp = Bytes.compareTo(lowerRange, upperRange);
-            if (cmp > 0 || (cmp == 0 && !(lowerInclusive && upperInclusive))) {
-                return EMPTY_RANGE;
-            }
-        }
         return new KeyRange(lowerRange, unboundLower ? false : lowerInclusive,
                 upperRange, unboundUpper ? false : upperInclusive);
     }
 
-    public KeyRange() {
+    public static KeyRange read(DataInput input) throws IOException {
+        KeyRange range = new KeyRange();
+        range.readFields(input);
+        // Translate to singleton after reading
+        KeyRange singletonRange = getSingleton(range.lowerRange, range.lowerInclusive, range.upperRange, range.upperInclusive);
+        if (singletonRange != null) {
+            return singletonRange;
+        }
+        // Otherwise, just keep the range we read
+        return range;
+    }
+    
+    private KeyRange() {
         this.lowerRange = DEGENERATE_KEY;
         this.lowerInclusive = false;
         this.upperRange = DEGENERATE_KEY;

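The KeyRange change above amounts to one pattern: make the constructor private and route
both construction and deserialization through factories that canonicalize back to the
shared singletons, so that identity checks such as range == KeyRange.EVERYTHING_RANGE
still hold after a filter is deserialized on the server. A minimal sketch of that
pattern with hypothetical names and a simplified wire format (the real class also
tracks bound inclusiveness and an IS_NULL singleton):

import java.io.DataInput;
import java.io.IOException;

final class Range {
    static final Range EVERYTHING = new Range(); // canonical "unbound" instance

    private byte[] lower = new byte[0];
    private byte[] upper = new byte[0];

    private Range() {} // no public construction; callers go through factories

    static Range read(DataInput in) throws IOException {
        Range r = new Range();
        r.readFields(in);
        // Translate to a singleton when the bytes describe one, so reference
        // comparisons (r == EVERYTHING) behave the same as on the client.
        return isEverything(r) ? EVERYTHING : r;
    }

    private static boolean isEverything(Range r) {
        return r.lower.length == 0 && r.upper.length == 0;
    }

    private void readFields(DataInput in) throws IOException {
        lower = new byte[in.readInt()];
        in.readFully(lower);
        upper = new byte[in.readInt()];
        in.readFully(upper);
    }
}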
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9f82aaa1/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index 912dd03..b6a2c85 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -375,7 +375,7 @@ public class ScanUtil {
              * incrementing the key value itself, and thus bumping it up too much.
              */
             boolean inclusiveUpper = range.isUpperInclusive() && bound == Bound.UPPER;
-            boolean exclusiveLower = !range.isLowerInclusive() && bound == Bound.LOWER;
+            boolean exclusiveLower = !range.isLowerInclusive() && bound == Bound.LOWER && range != KeyRange.EVERYTHING_RANGE;
             boolean exclusiveUpper = !range.isUpperInclusive() && bound == Bound.UPPER;
             // If we are setting the upper bound of using inclusive single key, we remember 
             // to increment the key if we exit the loop after this iteration.

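Why the added EVERYTHING_RANGE check matters: the singleton stores its unbound lower
bound as a zero-length, non-inclusive key, so without the guard the start-key logic
would classify it as an exclusive lower bound and increment it, skipping real rows
(the partial results reported in PHOENIX-2455). A runnable illustration of the
classification, under those assumptions:

public class ExclusiveLowerDemo {
    public static void main(String[] args) {
        byte[] unboundLower = new byte[0]; // how an EVERYTHING range stores its lower bound
        boolean lowerInclusive = false;    // also how an EVERYTHING range stores it

        // pre-patch classification: the key would be treated as exclusive and incremented
        boolean naive = !lowerInclusive;

        // patched classification: an unbound bound is not an exclusive bound
        boolean isEverything = unboundLower.length == 0;
        boolean fixed = !lowerInclusive && !isEverything;

        System.out.println("naive=" + naive + " fixed=" + fixed); // naive=true fixed=false
    }
}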
http://git-wip-us.apache.org/repos/asf/phoenix/blob/9f82aaa1/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
index 7e68e25..898f778 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/filter/SkipScanFilterTest.java
@@ -23,19 +23,17 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 
-import junit.framework.TestCase;
-
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.filter.Filter.ReturnCode;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.schema.types.PChar;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.PDatum;
-import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder;
 import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PChar;
+import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.util.ByteUtil;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -45,6 +43,8 @@ import org.junit.runners.Parameterized.Parameters;
 import com.google.common.base.Function;
 import com.google.common.collect.Lists;
 
+import junit.framework.TestCase;
+
 //reset()
 //filterAllRemaining() -> true indicates scan is over, false, keep going on.
 //filterRowKey(byte[],int,int) -> true to drop this row, if false, we will also call
@@ -97,7 +97,6 @@ public class SkipScanFilterTest extends TestCase {
 
     @Test
     public void test() throws IOException {
-        System.out.println("CNF: " + cnf + "\n" + "Expectations: " + expectations);
         for (Expectation expectation : expectations) {
             expectation.examine(skipper);
         }
@@ -106,6 +105,39 @@ public class SkipScanFilterTest extends TestCase {
     @Parameters(name="{0} {1} {2}")
     public static Collection<Object> data() {
         List<Object> testCases = Lists.newArrayList();
+        // Variable length tests
+        testCases.addAll(
+                foreach(new KeyRange[][]{{
+                    PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("a"), true, Bytes.toBytes("a"), true),
+                    PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("e"), true, Bytes.toBytes("e"), true),
+                    PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("f"), true, Bytes.toBytes("f"), true),
+                },
+                {
+                    PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("b"), true, Bytes.toBytes("b"), true),
+                },
+                {
+                    KeyRange.EVERYTHING_RANGE,
+                },
+                {
+                    PVarchar.INSTANCE.getKeyRange(Bytes.toBytes("1"), true, Bytes.toBytes("1"), true),
+                }},
+                new int[4],
+                new Include(ByteUtil.concat(Bytes.toBytes("a"),QueryConstants.SEPARATOR_BYTE_ARRAY, 
+                                            Bytes.toBytes("b"), QueryConstants.SEPARATOR_BYTE_ARRAY,
+                                            QueryConstants.SEPARATOR_BYTE_ARRAY,
+                                            Bytes.toBytes("1") ) ),
+                new SeekNext(ByteUtil.concat(Bytes.toBytes("e.f"),QueryConstants.SEPARATOR_BYTE_ARRAY, 
+                                             Bytes.toBytes("b"), QueryConstants.SEPARATOR_BYTE_ARRAY,
+                                             QueryConstants.SEPARATOR_BYTE_ARRAY,
+                                             Bytes.toBytes("1") ), 
+                            ByteUtil.concat(Bytes.toBytes("f"),QueryConstants.SEPARATOR_BYTE_ARRAY, 
+                                            Bytes.toBytes("b") )),
+                new Include(ByteUtil.concat(Bytes.toBytes("f"),QueryConstants.SEPARATOR_BYTE_ARRAY, 
+                                            Bytes.toBytes("b"), QueryConstants.SEPARATOR_BYTE_ARRAY,
+                                            QueryConstants.SEPARATOR_BYTE_ARRAY,
+                                            Bytes.toBytes("1") ) ) )
+        );
+        // Fixed length tests
         testCases.addAll(
                 foreach(new KeyRange[][]{{
                     PChar.INSTANCE.getKeyRange(Bytes.toBytes("abc"), true, Bytes.toBytes("def"), true),
@@ -124,7 +156,6 @@ public class SkipScanFilterTest extends TestCase {
                     PChar.INSTANCE.getKeyRange(Bytes.toBytes("AA"), true, Bytes.toBytes("AB"), false),
                 }},
                 new int[]{3,2,2,2,2},
-                //new SeekNext("abcABABABAB", "abdAAAAAAAA"),
                 new SeekNext("defAAABABAB", "dzzAAAAAAAA"),
                 new Finished("xyyABABABAB"))
         );
@@ -309,23 +340,6 @@ public class SkipScanFilterTest extends TestCase {
                 new SeekNext("dzzAB250", "dzzAB701"),
                 new Finished("zzzAA000"))
         );
-// TODO variable length columns
-//        testCases.addAll(
-//                foreach(new KeyRange[][]{{
-//                    Char.INSTANCE.getKeyRange(Bytes.toBytes("apple"), true, Bytes.toBytes("lemon"), true),
-//                    Char.INSTANCE.getKeyRange(Bytes.toBytes("pear"), false, Bytes.toBytes("yam"), false),
-//                },
-//                {
-//                    Char.INSTANCE.getKeyRange(Bytes.toBytes("AB"), true, Bytes.toBytes("AX"), true),
-//                    Char.INSTANCE.getKeyRange(Bytes.toBytes("EA"), false, Bytes.toBytes("EZ"), false),
-//                    Char.INSTANCE.getKeyRange(Bytes.toBytes("PO"), true, Bytes.toBytes("PP"), false),
-//                },
-//                {
-//                    Char.INSTANCE.getKeyRange(Bytes.toBytes("100"), true, Bytes.toBytes("250"), false),
-//                    Char.INSTANCE.getKeyRange(Bytes.toBytes("700"), false, Bytes.toBytes("901"), false),
-//                }},
-//                new int[]{3,3})
-//        );
         return testCases;
     }
 
@@ -378,6 +392,10 @@ public class SkipScanFilterTest extends TestCase {
             this.rowkey = Bytes.toBytes(rowkey);
         }
         
+        public Include(byte[] rowkey) {
+            this.rowkey = rowkey;
+        }
+        
         @SuppressWarnings("deprecation")
         @Override public void examine(SkipScanFilter skipper) throws IOException {
             KeyValue kv = KeyValue.createFirstOnRow(rowkey);

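The new variable-length expectations above compose row keys by joining the VARCHAR key
columns with a zero separator byte; an empty (null) column contributes only its
separator. A minimal standalone sketch (SEPARATOR stands in for
QueryConstants.SEPARATOR_BYTE_ARRAY and concat() for ByteUtil.concat()):

import java.io.ByteArrayOutputStream;

public class RowKeyDemo {
    static final byte[] SEPARATOR = { 0 }; // assumed zero-byte separator

    static byte[] concat(byte[]... parts) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        for (byte[] p : parts) {
            out.write(p, 0, p.length);
        }
        return out.toByteArray();
    }

    public static void main(String[] args) {
        // (col1='a', col2='b', col3='', col4='1'), as in the first Include expectation:
        byte[] rowKey = concat("a".getBytes(), SEPARATOR,
                               "b".getBytes(), SEPARATOR,
                               SEPARATOR, // empty col3 contributes a bare separator
                               "1".getBytes());
        System.out.println(rowKey.length + " bytes"); // 6 bytes
    }
}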

[4/4] phoenix git commit: PHOENIX-2505 Unexpected error caused by GROUP BY

Posted by ja...@apache.org.
PHOENIX-2505 Unexpected error caused by GROUP BY


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/59a859eb
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/59a859eb
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/59a859eb

Branch: refs/heads/4.x-HBase-0.98
Commit: 59a859eb3d9e077f3d58a0c83231f5668efd5810
Parents: 9f82aaa
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Dec 13 17:12:20 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Dec 14 11:46:39 2015 -0800

----------------------------------------------------------------------
 .../apache/phoenix/end2end/GroupByCaseIT.java   | 49 ++++++++++++++++++++
 1 file changed, 49 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/59a859eb/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
index 64444da..0f1568c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GroupByCaseIT.java
@@ -121,6 +121,55 @@ public class GroupByCaseIT extends BaseHBaseManagedTimeIT {
     }
     
     @Test
+    public void testBooleanInGroupBy() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        String ddl = " create table bool_gb(id varchar primary key,v1 boolean, v2 integer, v3 integer)";
+
+        createTestTable(getUrl(), ddl);
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO bool_gb(id,v2,v3) VALUES(?,?,?)");
+        stmt.setString(1, "a");
+        stmt.setInt(2, 1);
+        stmt.setInt(3, 1);
+        stmt.execute();
+        stmt.close();
+        stmt = conn.prepareStatement("UPSERT INTO bool_gb VALUES(?,?,?,?)");
+        stmt.setString(1, "b");
+        stmt.setBoolean(2, false);
+        stmt.setInt(3, 2);
+        stmt.setInt(4, 2);
+        stmt.execute();
+        stmt.setString(1, "c");
+        stmt.setBoolean(2, true);
+        stmt.setInt(3, 3);
+        stmt.setInt(4, 3);
+        stmt.execute();
+        conn.commit();
+
+        String[] gbs = {"v1,v2,v3","v1,v3,v2","v2,v1,v3"};
+        for (String gb : gbs) {
+            ResultSet rs = conn.createStatement().executeQuery("SELECT v1, v2, v3 from bool_gb group by " + gb);
+            assertTrue(rs.next());
+            assertEquals(false,rs.getBoolean("v1"));
+            assertTrue(rs.wasNull());
+            assertEquals(1,rs.getInt("v2"));
+            assertEquals(1,rs.getInt("v3"));
+            assertTrue(rs.next());
+            assertEquals(false,rs.getBoolean("v1"));
+            assertFalse(rs.wasNull());
+            assertEquals(2,rs.getInt("v2"));
+            assertEquals(2,rs.getInt("v3"));
+            assertTrue(rs.next());
+            assertEquals(true,rs.getBoolean("v1"));
+            assertEquals(3,rs.getInt("v2"));
+            assertEquals(3,rs.getInt("v3"));
+            assertFalse(rs.next());
+            rs.close();
+        }
+        conn.close();
+    }
+    
+    @Test
     public void testScanUri() throws Exception {
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(getUrl(), props);


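The assertions above lean on a JDBC subtlety: ResultSet.getBoolean() returns false for
both SQL NULL and a stored false, so the NULL group (row "a", where v1 was never set)
is only distinguishable via wasNull(). A minimal sketch of the check in isolation:

import java.sql.ResultSet;
import java.sql.SQLException;

public class BooleanGroupDemo {
    // classify the current row's v1 group the same way the test's assertions do
    static String describeV1(ResultSet rs) throws SQLException {
        boolean v1 = rs.getBoolean("v1");
        if (rs.wasNull()) {
            return "NULL group";                  // row "a": v1 never set
        }
        return v1 ? "true group" : "false group"; // rows "c" / "b"
    }
}
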
[2/4] phoenix git commit: PHOENIX-2519 Prevent RPC for SYSTEM tables when querying

Posted by ja...@apache.org.
PHOENIX-2519 Prevent RPC for SYSTEM tables when querying


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/0790f563
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/0790f563
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/0790f563

Branch: refs/heads/4.x-HBase-0.98
Commit: 0790f5636fb3fa76cc7d2891b3cb036991ec29d3
Parents: e3bc665
Author: James Taylor <jt...@salesforce.com>
Authored: Sun Dec 13 14:31:00 2015 -0800
Committer: James Taylor <jt...@salesforce.com>
Committed: Mon Dec 14 11:45:51 2015 -0800

----------------------------------------------------------------------
 .../org/apache/phoenix/rpc/UpdateCacheIT.java   | 46 +++++++++++++++-----
 .../phoenix/rpc/UpdateCacheWithScnIT.java       |  2 +-
 .../apache/phoenix/execute/BaseQueryPlan.java   | 12 ++++-
 3 files changed, 47 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/0790f563/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
index c34019d..0e6d7af 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheIT.java
@@ -33,6 +33,7 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
 import java.sql.Connection;
+import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
@@ -81,26 +82,49 @@ public class UpdateCacheIT extends BaseHBaseManagedTimeIT {
         ensureTableCreated(getUrl(), TRANSACTIONAL_DATA_TABLE);
     }
 
+    private static void setupSystemTable(Long scn) throws SQLException {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        if (scn != null) {
+            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn));
+        }
+        try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+            conn.createStatement().execute(
+            "create table " + QueryConstants.SYSTEM_SCHEMA_NAME + QueryConstants.NAME_SEPARATOR + MUTABLE_INDEX_DATA_TABLE + TEST_TABLE_SCHEMA);
+        }
+    }
+    
     @Test
     public void testUpdateCacheForTxnTable() throws Exception {
-        helpTestUpdateCache(true, null);
+        helpTestUpdateCache(true, false, null);
     }
     
     @Test
     public void testUpdateCacheForNonTxnTable() throws Exception {
-        helpTestUpdateCache(false, null);
+        helpTestUpdateCache(false, false, null);
     }
 	
-	public static void helpTestUpdateCache(boolean isTransactional, Long scn) throws Exception {
+    @Test
+    public void testUpdateCacheForNonTxnSystemTable() throws Exception {
+        helpTestUpdateCache(false, true, null);
+    }
+    
+	public static void helpTestUpdateCache(boolean isTransactional, boolean isSystem, Long scn) throws Exception {
 	    String tableName = isTransactional ? TRANSACTIONAL_DATA_TABLE : MUTABLE_INDEX_DATA_TABLE;
-	    String fullTableName = INDEX_DATA_SCHEMA + QueryConstants.NAME_SEPARATOR + tableName;
+	    String schemaName;
+	    if (isSystem) {
+	        setupSystemTable(scn);
+	        schemaName = QueryConstants.SYSTEM_SCHEMA_NAME;
+	    } else {
+	        schemaName = INDEX_DATA_SCHEMA;
+	    }
+	    String fullTableName = schemaName + QueryConstants.NAME_SEPARATOR + tableName;
 		String selectSql = "SELECT * FROM "+fullTableName;
 		// use a spyed ConnectionQueryServices so we can verify calls to getTable
 		ConnectionQueryServices connectionQueryServices = Mockito.spy(driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)));
 		Properties props = new Properties();
 		props.putAll(PhoenixEmbeddedDriver.DEFFAULT_PROPS.asMap());
 		if (scn!=null) {
-            props.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn));
+            props.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn+10));
         }
 		Connection conn = connectionQueryServices.connect(getUrl(), props);
 		try {
@@ -115,13 +139,14 @@ public class UpdateCacheIT extends BaseHBaseManagedTimeIT {
 			TestUtil.setRowKeyColumns(stmt, 3);
 			stmt.execute();
 			conn.commit();
-			// verify only one rpc to fetch table metadata, 
-            verify(connectionQueryServices).getTable((PName)isNull(), eq(PVarchar.INSTANCE.toBytes(INDEX_DATA_SCHEMA)), eq(PVarchar.INSTANCE.toBytes(tableName)), anyLong(), anyLong());
+            int numUpsertRpcs = isSystem ? 0 : 1; 
+			// verify that zero or one RPC is made to fetch the table metadata
+            verify(connectionQueryServices, times(numUpsertRpcs)).getTable((PName)isNull(), eq(PVarchar.INSTANCE.toBytes(schemaName)), eq(PVarchar.INSTANCE.toBytes(tableName)), anyLong(), anyLong());
             reset(connectionQueryServices);
             
             if (scn!=null) {
                 // advance scn so that we can see the data we just upserted
-                props.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn+2));
+                props.put(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn+20));
                 conn = connectionQueryServices.connect(getUrl(), props);
             }
 			
@@ -146,8 +171,9 @@ public class UpdateCacheIT extends BaseHBaseManagedTimeIT {
 	        // for non-transactional tables without a scn : verify one rpc to getTable occurs *per* query
             // for non-transactional tables with a scn : verify *only* one rpc occurs
             // for transactional tables : verify *only* one rpc occurs
-            int numRpcs = isTransactional || scn!=null ? 1 : 3; 
-            verify(connectionQueryServices, times(numRpcs)).getTable((PName)isNull(), eq(PVarchar.INSTANCE.toBytes(INDEX_DATA_SCHEMA)), eq(PVarchar.INSTANCE.toBytes(tableName)), anyLong(), anyLong());
+	        // for non-transactional system tables : verify no rpc occurs
+            int numRpcs = isSystem ? 0 : (isTransactional || scn!=null ? 1 : 3); 
+            verify(connectionQueryServices, times(numRpcs)).getTable((PName)isNull(), eq(PVarchar.INSTANCE.toBytes(schemaName)), eq(PVarchar.INSTANCE.toBytes(tableName)), anyLong(), anyLong());
 		}
         finally {
         	conn.close();

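The test's verification idiom, in isolation: wrap the real service in a Mockito spy,
run the workload, then assert the exact number of getTable() calls. A minimal sketch
using a hypothetical MetadataService; the test applies the same idea to a spied
ConnectionQueryServices:

import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

class MetadataService {
    Object getTable(String name) { return new Object(); } // stand-in for the metadata RPC
}

class CachingClient {
    private final MetadataService service;
    private Object cached;

    CachingClient(MetadataService service) { this.service = service; }

    Object resolve(String name) {
        if (cached == null) {
            cached = service.getTable(name); // first call hits the service
        }
        return cached;                       // later calls are served from cache
    }
}

public class SpyDemo {
    public static void main(String[] args) {
        MetadataService spied = spy(new MetadataService());
        CachingClient client = new CachingClient(spied);
        client.resolve("T");
        client.resolve("T");
        // exactly one underlying call despite two resolves
        verify(spied, times(1)).getTable(anyString());
    }
}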
http://git-wip-us.apache.org/repos/asf/phoenix/blob/0790f563/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheWithScnIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheWithScnIT.java b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheWithScnIT.java
index dbc7fd1..5ff2fb0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheWithScnIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/rpc/UpdateCacheWithScnIT.java
@@ -35,7 +35,7 @@ public class UpdateCacheWithScnIT extends BaseClientManagedTimeIT {
 	
 	@Test
 	public void testUpdateCacheWithScn() throws Exception {
-		UpdateCacheIT.helpTestUpdateCache(false, ts+2);
+		UpdateCacheIT.helpTestUpdateCache(false, false, ts+2);
 	}
 
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/0790f563/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 5ae25ed..3071620 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -28,6 +28,7 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.TimeRange;
@@ -57,6 +58,7 @@ import org.apache.phoenix.parse.FilterableStatement;
 import org.apache.phoenix.parse.HintNode.Hint;
 import org.apache.phoenix.parse.ParseNodeFactory;
 import org.apache.phoenix.parse.TableName;
+import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.KeyValueSchema;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PName;
@@ -203,7 +205,8 @@ public abstract class BaseQueryPlan implements QueryPlan {
         // Set miscellaneous scan attributes. This is the last chance to set them before we
         // clone the scan for each parallelized chunk.
         Scan scan = context.getScan();
-        PTable table = context.getCurrentTable().getTable();
+        TableRef tableRef = context.getCurrentTable();
+        PTable table = tableRef.getTable();
         
         if (dynamicFilter != null) {
             WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);            
@@ -225,7 +228,12 @@ public abstract class BaseQueryPlan implements QueryPlan {
 	        TimeRange scanTimeRange = scan.getTimeRange();
 	        Long scn = connection.getSCN();
 	        if (scn == null) {
-	            scn = context.getCurrentTime();
+	            // If we haven't resolved the time at the beginning of compilation, don't
+	            // force the lookup on the server, but use HConstants.LATEST_TIMESTAMP instead.
+	            scn = tableRef.getTimeStamp();
+	            if (scn == QueryConstants.UNSET_TIMESTAMP) {
+	                scn = HConstants.LATEST_TIMESTAMP;
+	            }
 	        }
 	        try {
 	            TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
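
The BaseQueryPlan change reduces to a small decision rule for the scan's upper time
bound. A minimal sketch under stated assumptions (the constants are local stand-ins
mirroring QueryConstants.UNSET_TIMESTAMP and HConstants.LATEST_TIMESTAMP):

public class TimeBoundDemo {
    static final long UNSET_TIMESTAMP = -1L;             // stand-in for QueryConstants.UNSET_TIMESTAMP
    static final long LATEST_TIMESTAMP = Long.MAX_VALUE; // stand-in for HConstants.LATEST_TIMESTAMP

    static long chooseUpperTimeBound(Long connectionScn, long tableRefTimestamp) {
        if (connectionScn != null) {
            return connectionScn;    // an explicit SCN on the connection wins
        }
        if (tableRefTimestamp == UNSET_TIMESTAMP) {
            return LATEST_TIMESTAMP; // time never resolved at compile time: avoid a server-side lookup
        }
        return tableRefTimestamp;    // use the timestamp resolved during compilation
    }

    public static void main(String[] args) {
        System.out.println(chooseUpperTimeBound(null, UNSET_TIMESTAMP)); // prints LATEST_TIMESTAMP
    }
}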