Posted to commits@phoenix.apache.org by ra...@apache.org on 2014/10/01 08:54:48 UTC

git commit: Phoenix-1263 Only cache guideposts on physical PTable

Repository: phoenix
Updated Branches:
  refs/heads/3.0 1d22fdc98 -> ff47a9594


Phoenix-1263 Only cache guideposts on physical PTable


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/ff47a959
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/ff47a959
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/ff47a959

Branch: refs/heads/3.0
Commit: ff47a9594ea82f552f13d59bb58ed0158ca15d9f
Parents: 1d22fdc
Author: Ramkrishna <ra...@intel.com>
Authored: Wed Oct 1 12:24:10 2014 +0530
Committer: Ramkrishna <ra...@intel.com>
Committed: Wed Oct 1 12:24:10 2014 +0530

----------------------------------------------------------------------
 ...efaultParallelIteratorsRegionSplitterIT.java |  2 +-
 .../phoenix/end2end/GuidePostsLifeCycleIT.java  |  4 +-
 .../org/apache/phoenix/end2end/KeyOnlyIT.java   |  2 +-
 .../phoenix/end2end/MultiCfQueryExecIT.java     |  2 +-
 ...ipRangeParallelIteratorRegionSplitterIT.java |  2 +-
 .../end2end/TenantSpecificTablesDMLIT.java      | 49 +++++++++++++++++++-
 .../DefaultParallelIteratorRegionSplitter.java  | 12 ++---
 .../ParallelIteratorRegionSplitterFactory.java  |  4 +-
 .../phoenix/iterate/ParallelIterators.java      | 25 +++++++++-
 ...SkipRangeParallelIteratorRegionSplitter.java | 10 ++--
 .../apache/phoenix/schema/MetaDataClient.java   | 25 +++++++---
 11 files changed, 107 insertions(+), 30 deletions(-)
----------------------------------------------------------------------
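
The change threads the resolved PTable through the parallel-iterator region
splitters instead of a TableRef, so guideposts are always looked up on (and
cached against) the physical table rather than a view or tenant-specific
table. A minimal sketch of the call-site change, as it recurs throughout the
tests below (context, tableRef, and hint as in those tests):

    // Old: the splitter received a TableRef and unwrapped the table itself.
    DefaultParallelIteratorRegionSplitter.getInstance(
            context, tableRef, HintNode.EMPTY_HINT_NODE);

    // New: callers pass the PTable directly, typically tableRef.getTable().
    DefaultParallelIteratorRegionSplitter.getInstance(
            context, tableRef.getTable(), HintNode.EMPTY_HINT_NODE);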


http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff47a959/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
index a6ec835..e7a1044 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
@@ -79,7 +79,7 @@ public class DefaultParallelIteratorsRegionSplitterIT extends BaseParallelIterat
         final List<HRegionLocation> regions =  pconn.getQueryServices().getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes());
         PhoenixStatement statement = new PhoenixStatement(pconn);
         StatementContext context = new StatementContext(statement, null, scan, new SequenceManager(statement));
-        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef, HintNode.EMPTY_HINT_NODE) {
+        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef.getTable(), HintNode.EMPTY_HINT_NODE) {
             @Override
             protected List<HRegionLocation> getAllRegions() throws SQLException {
                 return DefaultParallelIteratorRegionSplitter.filterRegions(regions, scan.getStartRow(), scan.getStopRow());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff47a959/phoenix-core/src/it/java/org/apache/phoenix/end2end/GuidePostsLifeCycleIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GuidePostsLifeCycleIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GuidePostsLifeCycleIT.java
index 3cef492..ba9f961 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GuidePostsLifeCycleIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GuidePostsLifeCycleIT.java
@@ -89,7 +89,7 @@ public class GuidePostsLifeCycleIT extends BaseHBaseManagedTimeIT {
                 tableRef.getTable().getPhysicalName().getBytes());
         PhoenixStatement statement = new PhoenixStatement(pconn);
         StatementContext context = new StatementContext(statement, null, scan, new SequenceManager(statement));
-        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef,
+        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef.getTable(),
                 HintNode.EMPTY_HINT_NODE) {
             @Override
             protected List<HRegionLocation> getAllRegions() throws SQLException {
@@ -158,7 +158,7 @@ public class GuidePostsLifeCycleIT extends BaseHBaseManagedTimeIT {
         conn.commit();
         conn.close();
     }
-    
+
     protected static TableRef getTableRef(Connection conn) throws SQLException {
         PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
         TableRef table = new TableRef(null, pconn.getMetaDataCache().getTable(

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff47a959/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
index f713fff..ed081d9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
@@ -200,7 +200,7 @@ public class KeyOnlyIT extends BaseClientManagedTimeIT {
                 tableRef.getTable().getPhysicalName().getBytes());
         PhoenixStatement statement = new PhoenixStatement(pconn);
         StatementContext context = new StatementContext(statement, null, scan, new SequenceManager(statement));
-        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef,
+        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef.getTable(),
                 HintNode.EMPTY_HINT_NODE) {
             @Override
             protected List<HRegionLocation> getAllRegions() throws SQLException {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff47a959/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
index f01d985..fbd1cf6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
@@ -298,7 +298,7 @@ public class MultiCfQueryExecIT extends BaseClientManagedTimeIT {
                 tableRef.getTable().getPhysicalName().getBytes());
         PhoenixStatement statement = new PhoenixStatement(pconn);
         StatementContext context = new StatementContext(statement, null, scan, new SequenceManager(statement));
-        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef,
+        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef.getTable(),
                 HintNode.EMPTY_HINT_NODE) {
             @Override
             protected List<HRegionLocation> getAllRegions() throws SQLException {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff47a959/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java
index 31e3a3b..28bc011 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java
@@ -348,7 +348,7 @@ public class SkipRangeParallelIteratorRegionSplitterIT extends BaseClientManaged
         PhoenixStatement statement = new PhoenixStatement(connection);
         StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
         context.setScanRanges(scanRanges);
-        SkipRangeParallelIteratorRegionSplitter splitter = SkipRangeParallelIteratorRegionSplitter.getInstance(context, tableRef, HintNode.EMPTY_HINT_NODE);
+        SkipRangeParallelIteratorRegionSplitter splitter = SkipRangeParallelIteratorRegionSplitter.getInstance(context, tableRef.getTable(), HintNode.EMPTY_HINT_NODE);
         List<KeyRange> keyRanges = splitter.getSplits();
         Collections.sort(keyRanges, new Comparator<KeyRange>() {
             @Override

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff47a959/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
index 593a0e4..8f9e553 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
@@ -28,9 +28,24 @@ import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.SequenceManager;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.iterate.DefaultParallelIteratorRegionSplitter;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.parse.HintNode;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
@@ -138,6 +153,9 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             assertTrue("Expected 1 row in result set", rs.next());
             assertEquals(2, rs.getInt(3));
             assertEquals("Viva Las Vegas", rs.getString(4));
+            conn1 = nextConnection(getUrl());
+            List<KeyRange> splits = getSplits(conn1, new Scan());
+            assertEquals(splits.size(), 5);
         }
         finally {
             conn1.close();
@@ -266,7 +284,6 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('" + TENANT_ID + "', 1, 'Billy Gibbons')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('" + TENANT_ID + "', 2, 'Billy Gibbons')");
             conn.close();
-            
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
             conn.setAutoCommit(true);
             int count = conn.createStatement().executeUpdate("delete from " + TENANT_TABLE_NAME_NO_TENANT_TYPE_ID);
@@ -491,4 +508,34 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
         assertFalse(rs.next());
         conn.close();
     }
+    private static List<KeyRange> getSplits(Connection conn, final Scan scan) throws SQLException {
+        TableRef tableRef = getTableRef(conn);
+        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+        final List<HRegionLocation> regions = pconn.getQueryServices().getAllTableRegions(
+                tableRef.getTable().getPhysicalName().getBytes());
+        PhoenixStatement statement = new PhoenixStatement(pconn);
+        StatementContext context = new StatementContext(statement, null, scan, new SequenceManager(statement));
+        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef.getTable(),
+                HintNode.EMPTY_HINT_NODE) {
+            @Override
+            protected List<HRegionLocation> getAllRegions() throws SQLException {
+                return DefaultParallelIteratorRegionSplitter.filterRegions(regions, scan.getStartRow(),
+                        scan.getStopRow());
+            }
+        };
+        List<KeyRange> keyRanges = splitter.getSplits();
+        Collections.sort(keyRanges, new Comparator<KeyRange>() {
+            @Override
+            public int compare(KeyRange o1, KeyRange o2) {
+                return Bytes.compareTo(o1.getLowerRange(), o2.getLowerRange());
+            }
+        });
+        return keyRanges;
+    }
+    protected static TableRef getTableRef(Connection conn) throws SQLException {
+        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+        TableRef table = new TableRef(null, pconn.getMetaDataCache().getTable(
+                new PTableKey(pconn.getTenantId(), PARENT_TABLE_NAME)), System.currentTimeMillis(), false);
+        return table;
+    }
 }
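
The new getSplits helper drives DefaultParallelIteratorRegionSplitter exactly
as the runtime does and sorts the resulting key ranges by lower bound so the
assertion is deterministic. A typical use, mirroring the test above:

    // Splits are computed against the physical table backing the tenant view.
    List<KeyRange> splits = getSplits(conn, new Scan());
    assertEquals(5, splits.size());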

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff47a959/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelIteratorRegionSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelIteratorRegionSplitter.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelIteratorRegionSplitter.java
index bff2a8b..b8135d1 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelIteratorRegionSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/DefaultParallelIteratorRegionSplitter.java
@@ -31,7 +31,6 @@ import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
 import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
@@ -54,16 +53,16 @@ public class DefaultParallelIteratorRegionSplitter implements ParallelIteratorRe
 
     protected final long guidePostsDepth;
     protected final StatementContext context;
-    protected final TableRef tableRef;
+    protected final PTable table;
 
     private static final Logger logger = LoggerFactory.getLogger(DefaultParallelIteratorRegionSplitter.class);
-    public static DefaultParallelIteratorRegionSplitter getInstance(StatementContext context, TableRef table, HintNode hintNode) {
+    public static DefaultParallelIteratorRegionSplitter getInstance(StatementContext context, PTable table, HintNode hintNode) {
         return new DefaultParallelIteratorRegionSplitter(context, table, hintNode);
     }
 
-    protected DefaultParallelIteratorRegionSplitter(StatementContext context, TableRef table, HintNode hintNode) {
+    protected DefaultParallelIteratorRegionSplitter(StatementContext context, PTable table, HintNode hintNode) {
         this.context = context;
-        this.tableRef = table;
+        this.table = table;
         ReadOnlyProps props = context.getConnection().getQueryServices().getProps();
         this.guidePostsDepth = props.getLong(QueryServices.HISTOGRAM_BYTE_DEPTH_ATTRIB,
                 QueryServicesOptions.DEFAULT_HISTOGRAM_BYTE_DEPTH);
@@ -73,7 +72,7 @@ public class DefaultParallelIteratorRegionSplitter implements ParallelIteratorRe
     protected List<HRegionLocation> getAllRegions() throws SQLException {
         Scan scan = context.getScan();
         List<HRegionLocation> allTableRegions = context.getConnection().getQueryServices()
-                .getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes());
+                .getAllTableRegions(table.getPhysicalName().getBytes());
         // If we're not salting, then we've already intersected the minMaxRange with the scan range
         // so there's nothing to do here.
         return filterRegions(allTableRegions, scan.getStartRow(), scan.getStopRow());
@@ -107,7 +106,6 @@ public class DefaultParallelIteratorRegionSplitter implements ParallelIteratorRe
 
     protected List<KeyRange> genKeyRanges(List<HRegionLocation> regions) {
         if (regions.isEmpty()) { return Collections.emptyList(); }
-        PTable table = tableRef.getTable();
         Scan scan = context.getScan();
         byte[] defaultCF = SchemaUtil.getEmptyColumnFamily(table);
         List<byte[]> gps = Lists.newArrayList();
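
getAllRegions() above now resolves the region list from table.getPhysicalName()
directly and narrows it to the scan boundaries. A standalone sketch of what
that boundary filtering amounts to (hypothetical helper, not Phoenix's exact
filterRegions implementation):

    // Keep only regions overlapping [startRow, stopRow); an empty byte[]
    // boundary means "unbounded" on that side, as in HBase scans.
    static List<HRegionLocation> overlapping(List<HRegionLocation> regions,
            byte[] startRow, byte[] stopRow) {
        List<HRegionLocation> kept = new ArrayList<HRegionLocation>();
        for (HRegionLocation r : regions) {
            byte[] regionStart = r.getRegionInfo().getStartKey();
            byte[] regionEnd = r.getRegionInfo().getEndKey();
            boolean startsBeforeStop = stopRow.length == 0
                    || Bytes.compareTo(regionStart, stopRow) < 0;
            boolean endsAfterStart = startRow.length == 0 || regionEnd.length == 0
                    || Bytes.compareTo(regionEnd, startRow) > 0;
            if (startsBeforeStop && endsAfterStart) {
                kept.add(r);
            }
        }
        return kept;
    }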

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff47a959/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitterFactory.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitterFactory.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitterFactory.java
index 82f56f1..0448e46 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitterFactory.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIteratorRegionSplitterFactory.java
@@ -21,7 +21,7 @@ import java.sql.SQLException;
 
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.parse.HintNode;
-import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.schema.PTable;
 
 
 /**
@@ -29,7 +29,7 @@ import org.apache.phoenix.schema.TableRef;
  */
 public class ParallelIteratorRegionSplitterFactory {
 
-    public static ParallelIteratorRegionSplitter getSplitter(StatementContext context, TableRef table, HintNode hintNode) throws SQLException {
+    public static ParallelIteratorRegionSplitter getSplitter(StatementContext context, PTable table, HintNode hintNode) throws SQLException {
         if (context.getScanRanges().useSkipScanFilter()) {
             return SkipRangeParallelIteratorRegionSplitter.getInstance(context, table, hintNode);
         }
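
The factory keeps its dispatch unchanged: a skip-scan plan gets the SkipRange
splitter, everything else the default one; only the parameter type moves from
TableRef to PTable. Typical use (context, tableRef, and hint as in the tests
above):

    ParallelIteratorRegionSplitter splitter = ParallelIteratorRegionSplitterFactory
            .getSplitter(context, tableRef.getTable(), HintNode.EMPTY_HINT_NODE);
    List<KeyRange> splits = splitter.getSplits();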

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff47a959/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
index 59814cc..edab575 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ParallelIterators.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
 import org.apache.phoenix.compile.RowProjector;
 import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
 import org.apache.phoenix.filter.ColumnProjectionFilter;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.job.JobManager.JobCallable;
@@ -53,9 +54,11 @@ import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.PColumnFamily;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.ViewType;
+import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;
 import org.apache.phoenix.schema.TableRef;
@@ -101,7 +104,25 @@ public class ParallelIterators extends ExplainTable implements ResultIterators {
             RowProjector projector, GroupBy groupBy, Integer limit, ParallelIteratorFactory iteratorFactory)
             throws SQLException {
         super(context, tableRef, groupBy);
-        this.splits = getSplits(context, tableRef, statement.getHint());
+        MetaDataClient client = new MetaDataClient(context.getConnection());
+        PTable physicalTable = tableRef.getTable();
+        String physicalName = tableRef.getTable().getPhysicalName().getString();
+        if ((physicalTable.getViewIndexId() == null) && (!physicalName.equals(physicalTable.getName().getString()))) { // tableRef is not for the physical table
+            String physicalSchemaName = SchemaUtil.getSchemaNameFromFullName(physicalName);
+            String physicalTableName = SchemaUtil.getTableNameFromFullName(physicalName);
+            // TODO: this will be an extra RPC to ensure we have the latest guideposts, but is almost always
+            // unnecessary. We should instead track when the last update cache was done for this
+            // physical table and not do it again until some interval has passed (it's ok to use stale stats).
+            MetaDataMutationResult result = client.updateCache(null, /* use global tenant id to get physical table */
+                    physicalSchemaName, physicalTableName);
+            physicalTable = result.getTable();
+            if(physicalTable == null) {
+                client = new MetaDataClient(context.getConnection());
+                physicalTable = client.getConnection().getMetaDataCache()
+                        .getTable(new PTableKey(null, physicalTableName));
+            }
+        }
+        this.splits = getSplits(context, physicalTable, statement.getHint());
         this.iteratorFactory = iteratorFactory;
         Scan scan = context.getScan();
         PTable table = tableRef.getTable();
@@ -227,7 +248,7 @@ public class ParallelIterators extends ExplainTable implements ResultIterators {
      * @return the key ranges that should be scanned in parallel
      */
     // exposed for tests
-    public static List<KeyRange> getSplits(StatementContext context, TableRef table, HintNode hintNode) throws SQLException {
+    public static List<KeyRange> getSplits(StatementContext context, PTable table, HintNode hintNode) throws SQLException {
         return ParallelIteratorRegionSplitterFactory.getSplitter(context, table, hintNode).getSplits();
     }
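
The constructor change above is the heart of the fix: when the statement's
tableRef denotes a view or tenant-specific table (its physical name differs
from its own name and it is not a view index), the splits, and with them the
guideposts, are computed from the physical PTable, fetched via updateCache
with a null tenant id. The resolution step in isolation (same names as the
diff; error handling elided):

    PTable physicalTable = tableRef.getTable();
    String physicalName = physicalTable.getPhysicalName().getString();
    if (physicalTable.getViewIndexId() == null
            && !physicalName.equals(physicalTable.getName().getString())) {
        MetaDataClient client = new MetaDataClient(context.getConnection());
        // null tenant id => resolve the global (physical) table, not the view
        MetaDataMutationResult result = client.updateCache(null,
                SchemaUtil.getSchemaNameFromFullName(physicalName),
                SchemaUtil.getTableNameFromFullName(physicalName));
        physicalTable = result.getTable();
    }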
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff47a959/phoenix-core/src/main/java/org/apache/phoenix/iterate/SkipRangeParallelIteratorRegionSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SkipRangeParallelIteratorRegionSplitter.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SkipRangeParallelIteratorRegionSplitter.java
index 3c3f933..81f5af6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/SkipRangeParallelIteratorRegionSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/SkipRangeParallelIteratorRegionSplitter.java
@@ -25,8 +25,8 @@ import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.compile.StatementContext;
 import org.apache.phoenix.parse.HintNode;
 import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.SaltingUtil;
-import org.apache.phoenix.schema.TableRef;
 
 import com.google.common.base.Predicate;
 import com.google.common.collect.Iterables;
@@ -38,17 +38,17 @@ import com.google.common.collect.Lists;
  */
 public class SkipRangeParallelIteratorRegionSplitter extends DefaultParallelIteratorRegionSplitter {
 
-    public static SkipRangeParallelIteratorRegionSplitter getInstance(StatementContext context, TableRef table, HintNode hintNode) {
+    public static SkipRangeParallelIteratorRegionSplitter getInstance(StatementContext context, PTable table, HintNode hintNode) {
         return new SkipRangeParallelIteratorRegionSplitter(context, table, hintNode);
     }
 
-    protected SkipRangeParallelIteratorRegionSplitter(StatementContext context, TableRef table, HintNode hintNode) {
+    protected SkipRangeParallelIteratorRegionSplitter(StatementContext context, PTable table, HintNode hintNode) {
         super(context, table, hintNode);
     }
 
     @Override
     protected List<HRegionLocation> getAllRegions() throws SQLException {
-        List<HRegionLocation> allTableRegions = context.getConnection().getQueryServices().getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes());
+        List<HRegionLocation> allTableRegions = context.getConnection().getQueryServices().getAllTableRegions(table.getPhysicalName().getBytes());
         return filterRegions(allTableRegions, context.getScanRanges());
     }
 
@@ -66,7 +66,7 @@ public class SkipRangeParallelIteratorRegionSplitter extends DefaultParallelIter
                         KeyRange minMaxRange = context.getMinMaxRange();
                         if (minMaxRange != null) {
                             KeyRange range = KeyRange.getKeyRange(region.getRegionInfo().getStartKey(), region.getRegionInfo().getEndKey());
-                            if (tableRef.getTable().getBucketNum() != null) {
+                            if (table.getBucketNum() != null) {
                                 // Add salt byte, as minMaxRange won't have it
                                 minMaxRange = SaltingUtil.addSaltByte(region.getRegionInfo().getStartKey(), minMaxRange);
                             }
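
The salt handling exists because salted tables prefix every row key with a
single hash byte, while the statement's minMaxRange is expressed over unsalted
keys; the region's start key supplies the salt byte to prepend before
intersecting. Conceptually (hypothetical helper, not SaltingUtil's exact
code):

    // Prepend the region's salt byte to an unsalted key so it can be
    // intersected with salted region boundaries.
    static byte[] saltKey(byte[] regionStartKey, byte[] unsaltedKey) {
        byte[] salted = new byte[unsaltedKey.length + 1];
        salted[0] = regionStartKey.length == 0 ? 0 : regionStartKey[0];
        System.arraycopy(unsaltedKey, 0, salted, 1, unsaltedKey.length);
        return salted;
    }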

http://git-wip-us.apache.org/repos/asf/phoenix/blob/ff47a959/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index ccfbdd9..76f1b2a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -273,7 +273,16 @@ public class MetaDataClient {
         MetaDataMutationResult result = updateCache(schemaName, tableName, true);
         return result.getMutationTime();
     }
-    
+
+    private MetaDataMutationResult updateCache(String schemaName, String tableName, boolean alwaysHitServer)
+            throws SQLException {
+        return updateCache(connection.getTenantId(), schemaName, tableName, alwaysHitServer);
+    }
+
+    public MetaDataMutationResult updateCache(PName tenantId, String schemaName, String tableName) throws SQLException {
+        return updateCache(tenantId, schemaName, tableName, false);
+    }
+        
     /**
      * Update the cache with the latest as of the connection scn.
      * @param schemaName
@@ -285,11 +294,11 @@ public class MetaDataClient {
         return updateCache(schemaName, tableName, false);
     }
     
-    private MetaDataMutationResult updateCache(String schemaName, String tableName, boolean alwaysHitServer) throws SQLException {
+    private MetaDataMutationResult updateCache(PName tenantId, String schemaName, String tableName, boolean alwaysHitServer) throws SQLException {
         Long scn = connection.getSCN();
         boolean systemTable = SYSTEM_CATALOG_SCHEMA.equals(schemaName);
         // System tables must always have a null tenantId
-        PName tenantId = systemTable ? null : connection.getTenantId();
+        tenantId = systemTable ? null : tenantId;
         long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
         PTable table = null;
         String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
@@ -474,8 +483,9 @@ public class MetaDataClient {
         Long scn = connection.getSCN();
         // Always invalidate the cache
         long clientTS = connection.getSCN() == null ? HConstants.LATEST_TIMESTAMP : scn;
-        connection.getQueryServices().clearCacheForTable(tenantIdBytes, table.getSchemaName().getBytes(),
-                table.getTableName().getBytes(), clientTS);
+        connection.getQueryServices().clearCacheForTable(tenantIdBytes,
+                Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(physicalName.getString())),
+                Bytes.toBytes(SchemaUtil.getTableNameFromFullName(physicalName.getString())), clientTS);
         String query = "SELECT CURRENT_DATE(),"+ LAST_STATS_UPDATE_TIME + " FROM " + SYSTEM_CATALOG_SCHEMA
                 + "." + SYSTEM_STATS_TABLE + " WHERE " + PHYSICAL_NAME + "='" + physicalName.getString() + "' AND " + COLUMN_FAMILY
                 + " IS NULL AND " + REGION_NAME + " IS NULL";
@@ -502,8 +512,9 @@ public class MetaDataClient {
             // A single Cell will be returned with the count(*) - we decode that here
             long rowCount = PDataType.LONG.getCodec().decodeLong(tempPtr, SortOrder.getDefault());
             // We need to update the stats table
-            connection.getQueryServices().clearCacheForTable(tenantIdBytes, table.getSchemaName().getBytes(),
-                    table.getTableName().getBytes(), clientTS);
+            connection.getQueryServices().clearCacheForTable(tenantIdBytes,
+                    Bytes.toBytes(SchemaUtil.getSchemaNameFromFullName(physicalName.getString())),
+                    Bytes.toBytes(SchemaUtil.getTableNameFromFullName(physicalName.getString())), clientTS);
             return new  MutationState(0, connection, rowCount);
         } else {
             return new MutationState(0, connection);
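
The new public overload lets callers resolve a table under an explicit tenant
id; passing null selects the global (physical) table, and system tables are
still forced to a null tenant id inside the private four-argument variant.
Typical use, matching the iterator change above (schema and table names here
are placeholders):

    MetaDataClient client = new MetaDataClient(connection);
    MetaDataMutationResult result = client.updateCache(null, "MY_SCHEMA", "MY_TABLE");
    PTable physicalTable = result.getTable();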