Posted to commits@phoenix.apache.org by ja...@apache.org on 2014/09/20 01:43:32 UTC

[5/5] git commit: PHOENIX-180 Use stats to guide query parallelization (Ramkrishna S Vasudevan)

PHOENIX-180 Use stats to guide query parallelization (Ramkrishna S Vasudevan)

Conflicts:
	phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/5cdc938e
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/5cdc938e
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/5cdc938e

Branch: refs/heads/4.0
Commit: 5cdc938e8f6ffc7db629f39951270e89dd4873b1
Parents: a18862d
Author: James Taylor <jt...@salesforce.com>
Authored: Fri Sep 19 16:38:28 2014 -0700
Committer: James Taylor <jt...@salesforce.com>
Committed: Fri Sep 19 16:48:14 2014 -0700

----------------------------------------------------------------------
 .../org/apache/phoenix/end2end/ArrayIT.java     |   12 +-
 .../end2end/BaseClientManagedTimeIT.java        |    2 +-
 .../BaseParallelIteratorsRegionSplitterIT.java  |    9 +-
 .../org/apache/phoenix/end2end/BaseQueryIT.java |    8 +-
 .../org/apache/phoenix/end2end/BaseViewIT.java  |    2 -
 ...efaultParallelIteratorsRegionSplitterIT.java |   55 +-
 .../phoenix/end2end/GuidePostsLifeCycleIT.java  |  148 ++
 .../org/apache/phoenix/end2end/HashJoinIT.java  |    2 -
 .../org/apache/phoenix/end2end/KeyOnlyIT.java   |   61 +-
 .../phoenix/end2end/MultiCfQueryExecIT.java     |   78 +-
 .../end2end/QueryDatabaseMetaDataIT.java        |    4 +
 .../org/apache/phoenix/end2end/QueryIT.java     |   39 +-
 .../apache/phoenix/end2end/ReverseScanIT.java   |    2 -
 ...ipRangeParallelIteratorRegionSplitterIT.java |   32 +-
 .../phoenix/end2end/StatsCollectorIT.java       |  231 ++++
 .../apache/phoenix/end2end/StatsManagerIT.java  |    2 +-
 .../end2end/TenantSpecificTablesDDLIT.java      |    4 +
 .../end2end/TenantSpecificTablesDMLIT.java      |   23 +-
 .../end2end/TenantSpecificViewIndexIT.java      |    2 +-
 .../phoenix/end2end/index/MutableIndexIT.java   |    1 -
 .../phoenix/end2end/index/SaltedIndexIT.java    |   17 +-
 .../salted/SaltedTableUpsertSelectIT.java       |   21 +-
 phoenix-core/src/main/antlr3/PhoenixSQL.g       |   10 +-
 .../org/apache/phoenix/cache/GlobalCache.java   |    7 +-
 .../coprocessor/MetaDataEndpointImpl.java       |  132 +-
 .../coprocessor/generated/MetaDataProtos.java   | 1223 ++++++++++++++++-
 .../generated/StatCollectorProtos.java          | 1269 ++++++++++++++++++
 .../DefaultParallelIteratorRegionSplitter.java  |  198 +--
 ...ocalIndexParallelIteratorRegionSplitter.java |    6 +-
 .../iterate/ParallelIteratorRegionSplitter.java |    2 -
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   16 +
 .../apache/phoenix/jdbc/PhoenixStatement.java   |   49 +
 .../apache/phoenix/parse/ParseNodeFactory.java  |    4 +
 .../parse/UpdateStatisticsStatement.java        |   26 +
 .../phoenix/query/ConnectionQueryServices.java  |    6 +-
 .../query/ConnectionQueryServicesImpl.java      |  164 ++-
 .../query/ConnectionlessQueryServicesImpl.java  |   43 +-
 .../query/DelegateConnectionQueryServices.java  |   16 +-
 .../apache/phoenix/query/QueryConstants.java    |   28 +-
 .../org/apache/phoenix/query/QueryServices.java |    8 +-
 .../phoenix/query/QueryServicesOptions.java     |   34 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  225 +++-
 .../apache/phoenix/schema/PColumnFamily.java    |    3 +
 .../phoenix/schema/PColumnFamilyImpl.java       |   25 +-
 .../java/org/apache/phoenix/schema/PTable.java  |    7 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |  118 +-
 .../apache/phoenix/schema/stat/PTableStats.java |   27 +-
 .../phoenix/schema/stat/PTableStatsImpl.java    |   37 +-
 .../schema/stat/StatisticsCollector.java        |  447 ++++++
 .../phoenix/schema/stat/StatisticsScanner.java  |  117 ++
 .../phoenix/schema/stat/StatisticsTable.java    |  168 +++
 .../phoenix/schema/stat/StatisticsTracker.java  |   62 +
 .../phoenix/schema/stat/StatisticsUtils.java    |   80 ++
 .../java/org/apache/phoenix/query/BaseTest.java |   17 +-
 .../phoenix/query/QueryServicesTestImpl.java    |    6 +-
 phoenix-protocol/src/main/MetaDataService.proto |   15 +-
 phoenix-protocol/src/main/PTable.proto          |    2 +-
 .../src/main/StatisticsCollect.proto            |   20 +
 pom.xml                                         |    2 +-
 59 files changed, 4829 insertions(+), 545 deletions(-)
----------------------------------------------------------------------
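
In short: this patch replaces the old concurrency-driven splitting (the
MAX_QUERY_CONCURRENCY_ATTRIB, TARGET_QUERY_CONCURRENCY_ATTRIB, and
MAX_INTRA_REGION_PARALLELIZATION_ATTRIB settings are removed throughout the
tests) with splits derived from guide posts that a new ANALYZE statement
collects into a system stats table. A minimal sketch of the client-side flow,
assuming a Phoenix cluster on localhost and an existing table MY_TABLE (both
illustrative, not part of the patch):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class AnalyzeSketch {
        public static void main(String[] args) throws Exception {
            Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
            // Collect stats; guide posts land in the new system stats table
            // and are consulted when parallelizing subsequent scans.
            conn.createStatement().execute("ANALYZE MY_TABLE");
            ResultSet rs = conn.createStatement().executeQuery(
                    "SELECT COUNT(*) FROM MY_TABLE");
            while (rs.next()) {
                System.out.println(rs.getLong(1));
            }
            conn.close();
        }
    }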


http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
index df0a76f..7863f5b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArrayIT.java
@@ -143,10 +143,11 @@ public class ArrayIT extends BaseClientManagedTimeIT {
 		String query = "SELECT a_double_array, /* comment ok? */ b_string, a_float FROM table_with_array WHERE ?=organization_id and ?=a_float";
 		Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
 		props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
-				Long.toString(ts + 2)); // Execute at timestamp 2
+		        Long.toString(ts + 2)); // Execute at timestamp 2
 		Connection conn = DriverManager.getConnection(getUrl(), props);
+        analyzeTable(conn, TABLE_WITH_ARRAY);
 		try {
-			PreparedStatement statement = conn.prepareStatement(query);
+		    PreparedStatement statement = conn.prepareStatement(query);
 			statement.setString(1, tenantId);
 			statement.setFloat(2, 0.01f);
 			ResultSet rs = statement.executeQuery();
@@ -169,6 +170,12 @@ public class ArrayIT extends BaseClientManagedTimeIT {
 		}
 	}
 
+    private void analyzeTable(Connection conn, String tableWithArray) throws SQLException {
+        String analyse = "ANALYZE  "+tableWithArray;
+		PreparedStatement statement = conn.prepareStatement(analyse);
+        statement.execute();
+    }
+
 	@Test
 	public void testScanWithArrayInWhereClause() throws Exception {
 		long ts = nextTimestamp();
@@ -181,6 +188,7 @@ public class ArrayIT extends BaseClientManagedTimeIT {
 		props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB,
 				Long.toString(ts + 2)); // Execute at timestamp 2
 		Connection conn = DriverManager.getConnection(getUrl(), props);
+		analyzeTable(conn, TABLE_WITH_ARRAY);
 		try {
 			PreparedStatement statement = conn.prepareStatement(query);
 			statement.setString(1, tenantId);
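
The analyzeTable helper added above works, though its SQL string carries a
stray double space and the indentation mixes tabs and spaces. An equivalent,
tidied form for reference (a sketch, not part of the patch):

    import java.sql.Connection;
    import java.sql.SQLException;

    class AnalyzeHelper {
        static void analyzeTable(Connection conn, String tableName) throws SQLException {
            // ANALYZE re-collects guide posts so splits reflect current data.
            conn.createStatement().execute("ANALYZE " + tableName);
        }
    }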

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
index 79cb64b..8b47e11 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseClientManagedTimeIT.java
@@ -52,7 +52,7 @@ import org.junit.experimental.categories.Category;
 public abstract class BaseClientManagedTimeIT extends BaseTest {
     private static String url;
     protected static PhoenixTestDriver driver;
-    private static final Configuration config = HBaseConfiguration.create(); 
+    protected static final Configuration config = HBaseConfiguration.create(); 
     private static boolean clusterInitialized = false;
     
     protected final static String getUrl() {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseParallelIteratorsRegionSplitterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseParallelIteratorsRegionSplitterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseParallelIteratorsRegionSplitterIT.java
index d2d3c7e..cfaa8ee 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseParallelIteratorsRegionSplitterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseParallelIteratorsRegionSplitterIT.java
@@ -28,7 +28,6 @@ import java.util.Map;
 import java.util.Properties;
 
 import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.PhoenixRuntime;
@@ -58,12 +57,7 @@ public class BaseParallelIteratorsRegionSplitterIT extends BaseClientManagedTime
     @BeforeClass
     @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
     public static void doSetup() throws Exception {
-        int targetQueryConcurrency = 3;
-        int maxQueryConcurrency = 5;
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        props.put(QueryServices.MAX_QUERY_CONCURRENCY_ATTRIB, Integer.toString(maxQueryConcurrency));
-        props.put(QueryServices.TARGET_QUERY_CONCURRENCY_ATTRIB, Integer.toString(targetQueryConcurrency));
-        props.put(QueryServices.MAX_INTRA_REGION_PARALLELIZATION_ATTRIB, Integer.toString(Integer.MAX_VALUE));
         // Must update config before starting server
         setUpTestDriver(getUrl(), new ReadOnlyProps(props.entrySet().iterator()));
     }
@@ -88,7 +82,8 @@ public class BaseParallelIteratorsRegionSplitterIT extends BaseClientManagedTime
 
     protected static TableRef getTableRef(Connection conn, long ts) throws SQLException {
         PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
-        TableRef table = new TableRef(null,pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), STABLE_NAME)),ts, false);
+        TableRef table = new TableRef(null, pconn.getMetaDataCache().getTable(
+                new PTableKey(pconn.getTenantId(), STABLE_NAME)), ts, false);
         return table;
     }
     

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
index 4263dd2..9a66499 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseQueryIT.java
@@ -70,16 +70,12 @@ public abstract class BaseQueryIT extends BaseClientManagedTimeIT {
     @BeforeClass
     @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
     public static void doSetup() throws Exception {
-        int targetQueryConcurrency = 2;
-        int maxQueryConcurrency = 3;
         Map<String,String> props = Maps.newHashMapWithExpectedSize(5);
-        props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(100));
-        props.put(QueryServices.MAX_QUERY_CONCURRENCY_ATTRIB, Integer.toString(maxQueryConcurrency));
-        props.put(QueryServices.TARGET_QUERY_CONCURRENCY_ATTRIB, Integer.toString(targetQueryConcurrency));
+        props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(5000));
         props.put(IndexWriterUtils.HTABLE_THREAD_KEY, Integer.toString(100));
         // Make a small batch size to test multiple calls to reserve sequences
         props.put(QueryServices.SEQUENCE_CACHE_SIZE_ATTRIB, Long.toString(BATCH_SIZE));
-        
+        props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_CONF_KEY, Integer.toString(20));
         // Must update config before starting server
         setUpTestDriver(getUrl(), new ReadOnlyProps(props.entrySet().iterator()));
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
index 5d8df0f..1cc9207 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
@@ -27,7 +27,6 @@ import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.util.Map;
 
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
@@ -43,7 +42,6 @@ public class BaseViewIT extends BaseHBaseManagedTimeIT {
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
         // Don't split intra region so we can more easily know that the n-way parallelization is for the explain plan
-        props.put(QueryServices.MAX_INTRA_REGION_PARALLELIZATION_ATTRIB, Integer.toString(1));
         // Must update config before starting server
         setUpTestDriver(getUrl(), new ReadOnlyProps(props.entrySet().iterator()));
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
index cd8f1fb..dd1dc8b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/DefaultParallelIteratorsRegionSplitterIT.java
@@ -22,6 +22,7 @@ import static org.junit.Assert.assertEquals;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
+import java.sql.PreparedStatement;
 import java.sql.SQLException;
 import java.util.Collections;
 import java.util.Comparator;
@@ -38,7 +39,6 @@ import org.apache.phoenix.iterate.DefaultParallelIteratorRegionSplitter;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.parse.HintNode;
-import org.apache.phoenix.query.ConnectionQueryServices;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.TableRef;
@@ -85,64 +85,61 @@ public class DefaultParallelIteratorsRegionSplitterIT extends BaseParallelIterat
     public void testGetSplits() throws Exception {
         long ts = nextTimestamp();
         initTableValues(ts);
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + ts;
+        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + ts + 2;
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);
-
+        PreparedStatement stmt = conn.prepareStatement("ANALYZE STABLE");
+        stmt.execute();
         Scan scan = new Scan();
         
         // number of regions > target query concurrency
         scan.setStartRow(K1);
         scan.setStopRow(K12);
         List<KeyRange> keyRanges = getSplits(conn, ts, scan);
-        assertEquals("Unexpected number of splits: " + keyRanges, 5, keyRanges.size());
-        assertEquals(newKeyRange(KeyRange.UNBOUND, K3), keyRanges.get(0));
-        assertEquals(newKeyRange(K3, K4), keyRanges.get(1));
-        assertEquals(newKeyRange(K4, K9), keyRanges.get(2));
-        assertEquals(newKeyRange(K9, K11), keyRanges.get(3));
-        assertEquals(newKeyRange(K11, KeyRange.UNBOUND), keyRanges.get(4));
+        assertEquals("Unexpected number of splits: " + keyRanges, 7, keyRanges.size());
+        assertEquals(newKeyRange(KeyRange.UNBOUND, KMIN), keyRanges.get(0));
+        assertEquals(newKeyRange(KMIN, K3), keyRanges.get(1));
+        assertEquals(newKeyRange(K3, K4), keyRanges.get(2));
+        assertEquals(newKeyRange(K4, K9), keyRanges.get(3));
+        assertEquals(newKeyRange(K9, K11), keyRanges.get(4));
+        assertEquals(newKeyRange(K11, KMAX), keyRanges.get(5));
+        assertEquals(newKeyRange(KMAX,  KeyRange.UNBOUND), keyRanges.get(6));
         
-        // (number of regions / 2) > target query concurrency
         scan.setStartRow(K3);
         scan.setStopRow(K6);
         keyRanges = getSplits(conn, ts, scan);
-        assertEquals("Unexpected number of splits: " + keyRanges, 3, keyRanges.size());
+        assertEquals("Unexpected number of splits: " + keyRanges, 2, keyRanges.size());
         // note that we get a single split from R2 due to small key space
         assertEquals(newKeyRange(K3, K4), keyRanges.get(0));
-        assertEquals(newKeyRange(K4, K6), keyRanges.get(1));
-        assertEquals(newKeyRange(K6, K9), keyRanges.get(2));
+        assertEquals(newKeyRange(K4, K9), keyRanges.get(1));
         
-        // (number of regions / 2) <= target query concurrency
         scan.setStartRow(K5);
         scan.setStopRow(K6);
         keyRanges = getSplits(conn, ts, scan);
-        assertEquals("Unexpected number of splits: " + keyRanges, 3, keyRanges.size());
-        assertEquals(newKeyRange(K4, K5), keyRanges.get(0));
-        assertEquals(newKeyRange(K5, K6), keyRanges.get(1));
-        assertEquals(newKeyRange(K6, K9), keyRanges.get(2));
+        assertEquals("Unexpected number of splits: " + keyRanges, 1, keyRanges.size());
+        assertEquals(newKeyRange(K4, K9), keyRanges.get(0));
         conn.close();
     }
 
     @Test
-    public void testGetLowerUnboundSplits() throws Exception {
+    public void testGetLowerUnboundSplits() throws Throwable {
         long ts = nextTimestamp();
         initTableValues(ts);
-        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + ts;
+        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB;
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);
-
+        PreparedStatement stmt = conn.prepareStatement("ANALYZE STABLE");
+        stmt.execute();
+        // The query would use all the split points here
+        conn.createStatement().executeQuery("SELECT * FROM STABLE");
+        conn.close();
         Scan scan = new Scan();
-        
-        ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
-        TableRef table = getTableRef(conn,ts);
-        services.getStatsManager().updateStats(table);
         scan.setStartRow(HConstants.EMPTY_START_ROW);
         scan.setStopRow(K1);
         List<KeyRange> keyRanges = getSplits(conn, ts, scan);
-        assertEquals("Unexpected number of splits: " + keyRanges, 3, keyRanges.size());
-        assertEquals(newKeyRange(KeyRange.UNBOUND, new byte[] {'7'}), keyRanges.get(0));
-        assertEquals(newKeyRange(new byte[] {'7'}, new byte[] {'M'}), keyRanges.get(1));
-        assertEquals(newKeyRange(new byte[] {'M'}, K3), keyRanges.get(2));
+        assertEquals("Unexpected number of splits: " + keyRanges, 2, keyRanges.size());
+        assertEquals(newKeyRange(KeyRange.UNBOUND, KMIN), keyRanges.get(0));
+        assertEquals(newKeyRange(KMIN, K3), keyRanges.get(1));
     }
 
     private static KeyRange newKeyRange(byte[] lowerRange, byte[] upperRange) {
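
The revised split counts above come from guide posts rather than the removed
concurrency settings, so they depend on the configured guide-post byte depth.
A sketch of building test-driver properties with a small depth so that even
tiny tables produce several guide posts (the property key comes from the
BaseQueryIT change above; the value is illustrative):

    import java.util.Map;

    import org.apache.phoenix.query.QueryServices;
    import org.apache.phoenix.util.ReadOnlyProps;

    import com.google.common.collect.Maps;

    class StatsTestProps {
        static ReadOnlyProps lowGuidePostDepth() {
            Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
            // Emit a guide post roughly every 20 bytes of table data.
            props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_CONF_KEY, Integer.toString(20));
            return new ReadOnlyProps(props.entrySet().iterator());
        }
    }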

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/GuidePostsLifeCycleIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/GuidePostsLifeCycleIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GuidePostsLifeCycleIT.java
new file mode 100644
index 0000000..7645040
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/GuidePostsLifeCycleIT.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.STABLE_NAME;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+import java.util.Properties;
+
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.SequenceManager;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.iterate.DefaultParallelIteratorRegionSplitter;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.parse.HintNode;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.TableRef;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+@Category(HBaseManagedTimeTest.class)
+public class GuidePostsLifeCycleIT extends BaseHBaseManagedTimeIT {
+
+    protected static final byte[] KMIN  = new byte[] {'!'};
+    protected static final byte[] KMIN2  = new byte[] {'.'};
+    protected static final byte[] K1  = new byte[] {'a'};
+    protected static final byte[] K3  = new byte[] {'c'};
+    protected static final byte[] K4  = new byte[] {'d'};
+    protected static final byte[] K5  = new byte[] {'e'};
+    protected static final byte[] K6  = new byte[] {'f'};
+    protected static final byte[] K9  = new byte[] {'i'};
+    protected static final byte[] K11 = new byte[] {'k'};
+    protected static final byte[] K12 = new byte[] {'l'};
+    protected static final byte[] KMAX  = new byte[] {'~'};
+    protected static final byte[] KMAX2  = new byte[] {'z'};
+    protected static final byte[] KR = new byte[] { 'r' };
+    protected static final byte[] KP = new byte[] { 'p' };
+
+    private static List<KeyRange> getSplits(Connection conn, final Scan scan) throws SQLException {
+        TableRef tableRef = getTableRef(conn);
+        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+        final List<HRegionLocation> regions = pconn.getQueryServices().getAllTableRegions(
+                tableRef.getTable().getPhysicalName().getBytes());
+        PhoenixStatement statement = new PhoenixStatement(pconn);
+        StatementContext context = new StatementContext(statement, null, scan, new SequenceManager(statement));
+        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef,
+                HintNode.EMPTY_HINT_NODE) {
+            @Override
+            protected List<HRegionLocation> getAllRegions() throws SQLException {
+                return DefaultParallelIteratorRegionSplitter.filterRegions(regions, scan.getStartRow(),
+                        scan.getStopRow());
+            }
+        };
+        List<KeyRange> keyRanges = splitter.getSplits();
+        Collections.sort(keyRanges, new Comparator<KeyRange>() {
+            @Override
+            public int compare(KeyRange o1, KeyRange o2) {
+                return Bytes.compareTo(o1.getLowerRange(), o2.getLowerRange());
+            }
+        });
+        return keyRanges;
+    }
+
+    // This test ensures that as we keep adding new records the splits gets updated
+    @Test
+    public void testGuidePostsLifeCycle() throws Exception {
+        byte[][] splits = new byte[][] { K3, K9, KR };
+        ensureTableCreated(getUrl(), STABLE_NAME, splits);
+        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(url, props);
+        PreparedStatement stmt = conn.prepareStatement("ANALYZE STABLE");
+        stmt.execute();
+        Scan scan = new Scan();
+        List<KeyRange> keyRanges = getSplits(conn, scan);
+        assertEquals(4, keyRanges.size());
+        upsert(new byte[][] { KMIN, K4, K11 });
+        stmt = conn.prepareStatement("ANALYZE STABLE");
+        stmt.execute();
+        keyRanges = getSplits(conn, scan);
+        assertEquals(7, keyRanges.size());
+        upsert(new byte[][] { KMIN2, K5, K12 });
+        stmt = conn.prepareStatement("ANALYZE STABLE");
+        stmt.execute();
+        keyRanges = getSplits(conn, scan);
+        assertEquals(10, keyRanges.size());
+        upsert(new byte[][] { K1, K6, KP });
+        stmt = conn.prepareStatement("ANALYZE STABLE");
+        stmt.execute();
+        keyRanges = getSplits(conn, scan);
+        assertEquals(13, keyRanges.size());
+        conn.close();
+    }
+
+    protected void upsert( byte[][] val) throws Exception {
+        String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(url, props);
+        PreparedStatement stmt = conn.prepareStatement("upsert into " + STABLE_NAME + " VALUES (?, ?)");
+        stmt.setString(1, new String(val[0]));
+        stmt.setInt(2, 1);
+        stmt.execute();
+        stmt.setString(1, new String(val[1]));
+        stmt.setInt(2, 2);
+        stmt.execute();
+        stmt.setString(1, new String(val[2]));
+        stmt.setInt(2, 3);
+        stmt.execute();
+        conn.commit();
+        conn.close();
+    }
+    
+    protected static TableRef getTableRef(Connection conn) throws SQLException {
+        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+        TableRef table = new TableRef(null, pconn.getMetaDataCache().getTable(
+                new PTableKey(pconn.getTenantId(), STABLE_NAME)), System.currentTimeMillis(), false);
+        return table;
+    }
+}
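
The new test above checks that guide posts track data growth: each round of
upserts followed by ANALYZE yields more splits (4, then 7, 10, 13). The same
upsert-then-analyze loop, condensed into a sketch (connection URL and keys
illustrative; STABLE and its two-column schema come from the test):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;

    class GuidePostRefresh {
        static void addRowsAndReanalyze(String url, String[] keys) throws Exception {
            Connection conn = DriverManager.getConnection(url);
            PreparedStatement stmt =
                    conn.prepareStatement("UPSERT INTO STABLE VALUES (?, ?)");
            for (int i = 0; i < keys.length; i++) {
                stmt.setString(1, keys[i]);
                stmt.setInt(2, i);
                stmt.execute();
            }
            conn.commit();
            // Re-collect stats so the new rows contribute guide posts.
            conn.createStatement().execute("ANALYZE STABLE");
            conn.close();
        }
    }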

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
index 05f2837..1d2ca4c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/HashJoinIT.java
@@ -82,8 +82,6 @@ public class HashJoinIT extends BaseHBaseManagedTimeIT {
     @Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        // Don't split intra region so we can more easily know that the n-way parallelization is for the explain plan
-        props.put(QueryServices.MAX_INTRA_REGION_PARALLELIZATION_ATTRIB, Integer.toString(1));
         // Forces server cache to be used
         props.put(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(2));
         // Must update config before starting server

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
index 813f4b8..4b0d07f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/KeyOnlyIT.java
@@ -23,12 +23,29 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.SequenceManager;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.iterate.DefaultParallelIteratorRegionSplitter;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.parse.HintNode;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
@@ -45,6 +62,7 @@ public class KeyOnlyIT extends BaseClientManagedTimeIT {
         
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+5));
         Connection conn5 = DriverManager.getConnection(getUrl(), props);
+        analyzeTable(conn5, KEYONLY_NAME);
         String query = "SELECT i1, i2 FROM KEYONLY";
         PreparedStatement statement = conn5.prepareStatement(query);
         ResultSet rs = statement.executeQuery();
@@ -55,6 +73,9 @@ public class KeyOnlyIT extends BaseClientManagedTimeIT {
         assertEquals(3, rs.getInt(1));
         assertEquals(4, rs.getInt(2));
         assertFalse(rs.next());
+        Scan scan = new Scan();
+        List<KeyRange> splits = getSplits(conn5, ts, scan);
+        assertEquals(3, splits.size());
         conn5.close();
         
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+6));
@@ -76,6 +97,7 @@ public class KeyOnlyIT extends BaseClientManagedTimeIT {
         
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+8));
         Connection conn8 = DriverManager.getConnection(getUrl(), props);
+        analyzeTable(conn8, KEYONLY_NAME);
         query = "SELECT i1 FROM KEYONLY";
         statement = conn8.prepareStatement(query);
         rs = statement.executeQuery();
@@ -113,6 +135,7 @@ public class KeyOnlyIT extends BaseClientManagedTimeIT {
         
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts+5));
         Connection conn5 = DriverManager.getConnection(getUrl(), props);
+        analyzeTable(conn5, KEYONLY_NAME);
         String query = "SELECT i1 FROM KEYONLY WHERE i1 < 2 or i1 = 3";
         PreparedStatement statement = conn5.prepareStatement(query);
         ResultSet rs = statement.executeQuery();
@@ -142,5 +165,41 @@ public class KeyOnlyIT extends BaseClientManagedTimeIT {
         conn.commit();
         conn.close();
     }
-        
+
+    private void analyzeTable(Connection conn, String tableName) throws IOException, SQLException {
+        String query = "ANALYZE " + tableName;
+        conn.createStatement().execute(query);
+    }
+    
+    private static TableRef getTableRef(Connection conn, long ts) throws SQLException {
+        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+        TableRef table = new TableRef(null, pconn.getMetaDataCache().getTable(
+                new PTableKey(pconn.getTenantId(), KEYONLY_NAME)), ts, false);
+        return table;
+    }
+
+    private static List<KeyRange> getSplits(Connection conn, long ts, final Scan scan) throws SQLException {
+        TableRef tableRef = getTableRef(conn, ts);
+        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+        final List<HRegionLocation> regions = pconn.getQueryServices().getAllTableRegions(
+                tableRef.getTable().getPhysicalName().getBytes());
+        PhoenixStatement statement = new PhoenixStatement(pconn);
+        StatementContext context = new StatementContext(statement, null, scan, new SequenceManager(statement));
+        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef,
+                HintNode.EMPTY_HINT_NODE) {
+            @Override
+            protected List<HRegionLocation> getAllRegions() throws SQLException {
+                return DefaultParallelIteratorRegionSplitter.filterRegions(regions, scan.getStartRow(),
+                        scan.getStopRow());
+            }
+        };
+        List<KeyRange> keyRanges = splitter.getSplits();
+        Collections.sort(keyRanges, new Comparator<KeyRange>() {
+            @Override
+            public int compare(KeyRange o1, KeyRange o2) {
+                return Bytes.compareTo(o1.getLowerRange(), o2.getLowerRange());
+            }
+        });
+        return keyRanges;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
index b5e0ef4..ebf03d0 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/MultiCfQueryExecIT.java
@@ -22,13 +22,30 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
+import java.io.IOException;
 import java.math.BigDecimal;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.HRegionLocation;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.SequenceManager;
+import org.apache.phoenix.compile.StatementContext;
+import org.apache.phoenix.iterate.DefaultParallelIteratorRegionSplitter;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.parse.HintNode;
+import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
@@ -74,7 +91,12 @@ public class MultiCfQueryExecIT extends BaseClientManagedTimeIT {
         stmt.setLong(7, 22222);
         stmt.execute();
     }
-    
+
+    private void analyzeTable(Connection conn, String tableName) throws IOException, SQLException {
+        String query = "ANALYZE " + tableName;
+        conn.createStatement().execute(query);
+    }
+
     @Test
     public void testConstantCount() throws Exception {
         long ts = nextTimestamp();
@@ -84,6 +106,7 @@ public class MultiCfQueryExecIT extends BaseClientManagedTimeIT {
         Connection conn = DriverManager.getConnection(url, props);
         try {
             initTableValues(ts);
+            analyzeTable(conn, "MULTI_CF");
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -103,6 +126,7 @@ public class MultiCfQueryExecIT extends BaseClientManagedTimeIT {
         Connection conn = DriverManager.getConnection(url, props);
         try {
             initTableValues(ts);
+            analyzeTable(conn, "MULTI_CF");
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -123,6 +147,7 @@ public class MultiCfQueryExecIT extends BaseClientManagedTimeIT {
         Connection conn = DriverManager.getConnection(url, props);
         try {
             initTableValues(ts);
+            analyzeTable(conn, "MULTI_CF");
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -143,12 +168,24 @@ public class MultiCfQueryExecIT extends BaseClientManagedTimeIT {
         Connection conn = DriverManager.getConnection(url, props);
         try {
             initTableValues(ts);
+            analyzeTable(conn, "MULTI_CF");
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
             assertEquals(2222, rs.getLong(1));
             assertEquals(22222, rs.getLong(2));
             assertFalse(rs.next());
+            Scan scan = new Scan();
+            // See if F has splits in it
+            scan.addFamily(Bytes.toBytes("E"));
+            List<KeyRange> splits = getSplits(conn, ts, scan);
+            assertEquals(3, splits.size());
+            scan = new Scan();
+            // See if G has splits in it
+            scan.addFamily(Bytes.toBytes("G"));
+            splits = getSplits(conn, ts, scan);
+            // We get splits from different CF
+            assertEquals(3, splits.size());
         } finally {
             conn.close();
         }
@@ -163,6 +200,7 @@ public class MultiCfQueryExecIT extends BaseClientManagedTimeIT {
         Connection conn = DriverManager.getConnection(url, props);
         try {
             initTableValues(ts);
+            analyzeTable(conn, "MULTI_CF");
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -178,11 +216,11 @@ public class MultiCfQueryExecIT extends BaseClientManagedTimeIT {
     public void testDefaultCFToDisambiguate() throws Exception {
         long ts = nextTimestamp();
         initTableValues(ts);
-        
         String ddl = "ALTER TABLE multi_cf ADD response_time BIGINT";
         String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 3);
         Connection conn = DriverManager.getConnection(url);
         conn.createStatement().execute(ddl);
+        analyzeTable(conn, "MULTI_CF");
         conn.close();
        
         String dml = "upsert into " +
@@ -195,7 +233,7 @@ public class MultiCfQueryExecIT extends BaseClientManagedTimeIT {
         conn.createStatement().execute(dml);
         conn.commit();
         conn.close();
-
+        analyzeTable(conn, "MULTI_CF");
         String query = "SELECT ID,RESPONSE_TIME from multi_cf where RESPONSE_TIME = 333";
         url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 5); // Run query at timestamp 5
         conn = DriverManager.getConnection(url);
@@ -220,6 +258,7 @@ public class MultiCfQueryExecIT extends BaseClientManagedTimeIT {
         Connection conn = DriverManager.getConnection(url, props);
         try {
             initTableValues(ts);
+            analyzeTable(conn, "MULTI_CF");
             PreparedStatement statement = conn.prepareStatement(query);
             ResultSet rs = statement.executeQuery();
             assertTrue(rs.next());
@@ -230,5 +269,36 @@ public class MultiCfQueryExecIT extends BaseClientManagedTimeIT {
             conn.close();
         }
     }
-    
+
+    private static TableRef getTableRef(Connection conn, long ts) throws SQLException {
+        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+        TableRef table = new TableRef(null, pconn.getMetaDataCache().getTable(
+                new PTableKey(pconn.getTenantId(), "MULTI_CF")), ts, false);
+        return table;
+    }
+
+    private static List<KeyRange> getSplits(Connection conn, long ts, final Scan scan) throws SQLException {
+        TableRef tableRef = getTableRef(conn, ts);
+        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+        final List<HRegionLocation> regions = pconn.getQueryServices().getAllTableRegions(
+                tableRef.getTable().getPhysicalName().getBytes());
+        PhoenixStatement statement = new PhoenixStatement(pconn);
+        StatementContext context = new StatementContext(statement, null, scan, new SequenceManager(statement));
+        DefaultParallelIteratorRegionSplitter splitter = new DefaultParallelIteratorRegionSplitter(context, tableRef,
+                HintNode.EMPTY_HINT_NODE) {
+            @Override
+            protected List<HRegionLocation> getAllRegions() throws SQLException {
+                return DefaultParallelIteratorRegionSplitter.filterRegions(regions, scan.getStartRow(),
+                        scan.getStopRow());
+            }
+        };
+        List<KeyRange> keyRanges = splitter.getSplits();
+        Collections.sort(keyRanges, new Comparator<KeyRange>() {
+            @Override
+            public int compare(KeyRange o1, KeyRange o2) {
+                return Bytes.compareTo(o1.getLowerRange(), o2.getLowerRange());
+            }
+        });
+        return keyRanges;
+    }
 }
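
The MULTI_CF assertions above show that stats are kept per column family: a
Scan restricted to one family still resolves to that family's guide-post
splits. Restricting the scan is plain HBase client API (a sketch; the family
name is from the test):

    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    class FamilyScanSketch {
        static Scan scanOnlyFamilyE() {
            Scan scan = new Scan();
            // Limit the scan to one family; the splitter then uses the
            // guide posts collected for that family.
            scan.addFamily(Bytes.toBytes("E"));
            return scan;
        }
    }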

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
index b9cd477..f243562 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryDatabaseMetaDataIT.java
@@ -113,6 +113,10 @@ public class QueryDatabaseMetaDataIT extends BaseClientManagedTimeIT {
         assertEquals(rs.getString("TABLE_NAME"),TYPE_SEQUENCE);
         assertEquals(PTableType.SYSTEM.toString(), rs.getString("TABLE_TYPE"));
         assertTrue(rs.next());
+        assertEquals(rs.getString("TABLE_SCHEM"),SYSTEM_CATALOG_SCHEMA);
+        assertEquals(rs.getString("TABLE_NAME"),PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE);
+        assertEquals(PTableType.SYSTEM.toString(), rs.getString("TABLE_TYPE"));
+        assertTrue(rs.next());
         assertEquals(rs.getString("TABLE_SCHEM"),null);
         assertEquals(rs.getString("TABLE_NAME"),ATABLE_NAME);
         assertEquals(PTableType.TABLE.toString(), rs.getString("TABLE_TYPE"));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
index c9ec25e..674009b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/QueryIT.java
@@ -38,17 +38,18 @@ import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.Date;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.sql.Time;
 import java.sql.Timestamp;
 import java.util.Arrays;
 import java.util.Properties;
 
-import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -59,7 +60,6 @@ import org.apache.phoenix.schema.ConstraintViolationException;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.SequenceNotFoundException;
 import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.junit.Test;
@@ -102,6 +102,11 @@ public class QueryIT extends BaseQueryIT {
         stmt.setInt(3, -10);
         stmt.execute();
         upsertConn.close();
+        url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + (ts + 6);
+        props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        upsertConn = DriverManager.getConnection(url, props);
+        analyzeTable(upsertConn, "ATABLE");
+        upsertConn.close();
 
         String query = "SELECT entity_id FROM aTable WHERE organization_id=? and a_integer >= ?";
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 2)); // Execute at timestamp 2
@@ -210,6 +215,9 @@ public class QueryIT extends BaseQueryIT {
         stmt.setString(2, ROW5);
         stmt.setString(3, value);
         stmt.execute(); // should commit too
+        Connection conn1 = DriverManager.getConnection(getUrl(), props);
+        analyzeTable(conn1, "ATABLE");
+        conn1.close();
         upsertConn.close();
         
         String query = "SELECT a_string, b_string FROM aTable WHERE organization_id=? and a_integer = 5";
@@ -254,6 +262,9 @@ public class QueryIT extends BaseQueryIT {
         stmt.setString(2, ROW4);
         stmt.setInt(3, 5);
         stmt.execute(); // should commit too
+        Connection conn1 = DriverManager.getConnection(getUrl(), props);
+        analyzeTable(conn1, "ATABLE");
+        conn1.close();
         upsertConn.close();
 
         // Override value again, but should be ignored since it's past the SCN
@@ -389,7 +400,9 @@ public class QueryIT extends BaseQueryIT {
         byte[] ts1 = PDataType.TIMESTAMP.toBytes(tsValue1);
         stmt.setTimestamp(3, tsValue1);
         stmt.execute();
-
+        Connection conn1 = DriverManager.getConnection(getUrl(), props);
+        analyzeTable(conn1, "ATABLE");
+        conn1.close();
         updateStmt = 
             "upsert into " +
             "ATABLE(" +
@@ -408,7 +421,10 @@ public class QueryIT extends BaseQueryIT {
         stmt.setTime(4, new Time(tsValue2.getTime()));
         stmt.execute();
         upsertConn.close();
-        
+        conn1 = DriverManager.getConnection(getUrl(), props);
+        analyzeTable(conn1, "ATABLE");
+        conn1.close();
+        analyzeTable(upsertConn, "ATABLE");
         assertTrue(compare(CompareOp.GREATER, new ImmutableBytesWritable(ts2), new ImmutableBytesWritable(ts1)));
         assertFalse(compare(CompareOp.GREATER, new ImmutableBytesWritable(ts1), new ImmutableBytesWritable(ts1)));
 
@@ -734,7 +750,7 @@ public class QueryIT extends BaseQueryIT {
             assertTrue(rs.next());
             assertEquals(A_VALUE, rs.getString(1));
             assertEquals(E_VALUE, rs.getString(2));
-           assertEquals(1, rs.getLong(3));
+            assertEquals(1, rs.getLong(3));
             assertFalse(rs.next());
             
             byte[] tableName = Bytes.toBytes(ATABLE_NAME);
@@ -801,6 +817,10 @@ public class QueryIT extends BaseQueryIT {
         Connection conn = DriverManager.getConnection(getUrl(), props);
         conn.setAutoCommit(true);
         conn.createStatement().execute("UPSERT INTO atable(organization_id,entity_id,a_integer) VALUES('" + getOrganizationId() + "','" + ROW3 + "',NULL)");
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 6));
+        Connection conn1 = DriverManager.getConnection(getUrl(), props);
+        analyzeTable(conn1, "ATABLE");
+        conn1.close();
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
         conn = DriverManager.getConnection(getUrl(), props);
         try {
@@ -816,6 +836,10 @@ public class QueryIT extends BaseQueryIT {
         conn = DriverManager.getConnection(getUrl(), props);
         conn.setAutoCommit(true);
         conn.createStatement().execute("UPSERT INTO atable(organization_id,entity_id,a_integer) SELECT organization_id, entity_id, null FROM atable");
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 6));
+        conn1 = DriverManager.getConnection(getUrl(), props);
+        analyzeTable(conn1, "ATABLE");
+        conn1.close();
         props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 9));
         conn = DriverManager.getConnection(getUrl(), props);
         try {
@@ -829,4 +853,9 @@ public class QueryIT extends BaseQueryIT {
             conn.close();
         }
     }
+
+    private void analyzeTable(Connection conn, String tableName) throws IOException, SQLException {
+        String query = "ANALYZE " + tableName;
+        conn.createStatement().execute(query);
+    }
 }
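
QueryIT runs with client-managed timestamps, so each ANALYZE above is issued
on a fresh connection whose SCN is later than the upserts it should observe.
That idiom as a sketch (URL, table, and timestamp values illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.util.Properties;

    import org.apache.phoenix.util.PhoenixRuntime;

    class ScnAnalyzeSketch {
        static void analyzeAt(String url, String table, long scn) throws Exception {
            Properties props = new Properties();
            // Pin this connection to a timestamp later than the writes,
            // so stats collection observes them.
            props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(scn));
            Connection conn = DriverManager.getConnection(url, props);
            try {
                conn.createStatement().execute("ANALYZE " + table);
            } finally {
                conn.close();
            }
        }
    }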

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
index e87d290..805ec3b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ReverseScanIT.java
@@ -38,7 +38,6 @@ import org.apache.hadoop.hbase.TableNotFoundException;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.TestUtil;
@@ -54,7 +53,6 @@ public class ReverseScanIT extends BaseClientManagedTimeIT {
     @Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
-        props.put(QueryServices.MAX_INTRA_REGION_PARALLELIZATION_ATTRIB, Integer.toString(1));
         setUpTestDriver(getUrl(), new ReadOnlyProps(props.entrySet().iterator()));
         // Ensures our split points will be used
         // TODO: do deletePriorTables before test?

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java
index 93abfb9..d32441b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/SkipRangeParallelIteratorRegionSplitterIT.java
@@ -45,7 +45,6 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.parse.HintNode;
 import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.ColumnRef;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PDatum;
@@ -81,12 +80,8 @@ public class SkipRangeParallelIteratorRegionSplitterIT extends BaseClientManaged
     private static final String DDL = "CREATE TABLE " + TABLE_NAME + " (id char(3) NOT NULL PRIMARY KEY, \"value\" integer)";
     private static final byte[] Ka1A = Bytes.toBytes("a1A");
     private static final byte[] Ka1B = Bytes.toBytes("a1B");
-    private static final byte[] Ka1C = Bytes.toBytes("a1C");
-    private static final byte[] Ka1D = Bytes.toBytes("a1D");
     private static final byte[] Ka1E = Bytes.toBytes("a1E");
-    private static final byte[] Ka1F = Bytes.toBytes("a1F");
     private static final byte[] Ka1G = Bytes.toBytes("a1G");
-    private static final byte[] Ka1H = Bytes.toBytes("a1H");
     private static final byte[] Ka1I = Bytes.toBytes("a1I");
     private static final byte[] Ka2A = Bytes.toBytes("a2A");
 
@@ -109,11 +104,13 @@ public class SkipRangeParallelIteratorRegionSplitterIT extends BaseClientManaged
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);
         PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
-        TableRef tableRef = new TableRef(null,pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), TABLE_NAME)),ts, false);
-        List<HRegionLocation> regions = pconn.getQueryServices().getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes());
         
-        conn.close();
         initTableValues();
+        PreparedStatement stmt = conn.prepareStatement("ANALYZE "+TABLE_NAME);
+        stmt.execute();
+        conn.close();
+        TableRef tableRef = new TableRef(null,pconn.getMetaDataCache().getTable(new PTableKey(pconn.getTenantId(), TABLE_NAME)),ts, false);
+        List<HRegionLocation> regions = pconn.getQueryServices().getAllTableRegions(tableRef.getTable().getPhysicalName().getBytes());
         List<KeyRange> ranges = getSplits(tableRef, scan, regions, scanRanges);
         assertEquals("Unexpected number of splits: " + ranges.size(), expectedSplits.size(), ranges.size());
         for (int i=0; i<expectedSplits.size(); i++) {
@@ -188,9 +185,7 @@ public class SkipRangeParallelIteratorRegionSplitterIT extends BaseClientManaged
                         }},
                     new int[] {1,1,1},
                     new KeyRange[] {
-                        getKeyRange(Ka1B, true, Ka1C, false),
-                        getKeyRange(Ka1C, true, Ka1D, false),
-                        getKeyRange(Ka1D, true, Ka1E, false),
+                        getKeyRange(Ka1B, true, Ka1E, false)
                 }));
         // Scan range spans third, split into 3 due to concurrency config.
         testCases.addAll(
@@ -204,9 +199,7 @@ public class SkipRangeParallelIteratorRegionSplitterIT extends BaseClientManaged
                         }},
                     new int[] {1,1,1},
                     new KeyRange[] {
-                        getKeyRange(Ka1B, true, Ka1C, false),
-                        getKeyRange(Ka1C, true, Ka1D, false),
-                        getKeyRange(Ka1D, true, Ka1E, false),
+                        getKeyRange(Ka1B, true, Ka1E, false),
                 }));
         // Scan range spans 2 ranges, split into 4 due to concurrency config.
         testCases.addAll(
@@ -220,10 +213,8 @@ public class SkipRangeParallelIteratorRegionSplitterIT extends BaseClientManaged
                         }},
                     new int[] {1,1,1},
                     new KeyRange[] {
-                        getKeyRange(Ka1E, true, Ka1F, false),
-                        getKeyRange(Ka1F, true, Ka1G, false),
-                        getKeyRange(Ka1G, true, Ka1H, false),
-                        getKeyRange(Ka1H, true, Ka1I, false),
+                        getKeyRange(Ka1E, true, Ka1G, false),
+                        getKeyRange(Ka1G, true, Ka1I, false),
                 }));
         // Scan range spans more than 3 range, no split.
         testCases.addAll(
@@ -326,12 +317,7 @@ public class SkipRangeParallelIteratorRegionSplitterIT extends BaseClientManaged
     @BeforeClass
     @Shadower(classBeingShadowed = BaseClientManagedTimeIT.class)
     public static void doSetup() throws Exception {
-        int targetQueryConcurrency = 3;
-        int maxQueryConcurrency = 5;
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        props.put(QueryServices.MAX_QUERY_CONCURRENCY_ATTRIB, Integer.toString(maxQueryConcurrency));
-        props.put(QueryServices.TARGET_QUERY_CONCURRENCY_ATTRIB, Integer.toString(targetQueryConcurrency));
-        props.put(QueryServices.MAX_INTRA_REGION_PARALLELIZATION_ATTRIB, Integer.toString(Integer.MAX_VALUE));
         // Must update config before starting server
         setUpTestDriver(getUrl(), new ReadOnlyProps(props.entrySet().iterator()));
     }

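Aside on the splitter change above: the test no longer seeds target/max query concurrency; it runs ANALYZE before resolving the table, so getSplits() works from collected guideposts. A minimal sketch of that sequence, assuming a reachable Phoenix instance (the JDBC URL and table name below are illustrative):

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class AnalyzeThenQuery {
        public static void main(String[] args) throws Exception {
            // Illustrative URL; any cluster with this commit applied works.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                // Writes guideposts for the table; subsequent scans are
                // parallelized along those guideposts instead of the removed
                // target/max concurrency settings.
                conn.createStatement().execute("ANALYZE MY_TABLE");
            }
        }
    }
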
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
new file mode 100644
index 0000000..407c128
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -0,0 +1,231 @@
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL;
+import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
+import static org.apache.phoenix.util.PhoenixRuntime.JDBC_PROTOCOL_TERMINATOR;
+import static org.apache.phoenix.util.PhoenixRuntime.PHOENIX_TEST_DRIVER_URL_PARAM;
+import static org.apache.phoenix.util.TestUtil.LOCALHOST;
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.sql.Array;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.HBaseTestingUtility;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import com.google.common.collect.Maps;
+
+@Category(HBaseManagedTimeTest.class)
+public class StatsCollectorIT extends BaseHBaseManagedTimeIT {
+    private static String url;
+    private static HBaseTestingUtility util;
+    private static int frequency = 4000;
+
+    @BeforeClass
+    public static void doSetup() throws Exception {
+        Configuration conf = HBaseConfiguration.create();
+        setUpConfigForMiniCluster(conf);
+        conf.setInt("hbase.client.retries.number", 2);
+        conf.setInt("hbase.client.pause", 5000);
+        conf.setLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB, 0);
+        util = new HBaseTestingUtility(conf);
+        util.startMiniCluster();
+        String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
+        url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
+                + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
+        int histogramDepth = 60;
+        Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
+        props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_CONF_KEY, Integer.toString(histogramDepth));
+        props.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Integer.toString(frequency));
+        driver = initAndRegisterDriver(url, new ReadOnlyProps(props.entrySet().iterator()));
+    }
+
+    @Test
+    public void testUpdateStatsForTheTable() throws Throwable {
+        Connection conn;
+        PreparedStatement stmt;
+        ResultSet rs;
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        // props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
+        conn = DriverManager.getConnection(url, props);
+        conn.createStatement().execute(
+                "CREATE TABLE t ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
+                        + " CONSTRAINT pk PRIMARY KEY (k, b_string_array DESC)) \n");
+        String[] s;
+        Array array;
+        conn = upsertValues(props, "t");
+        // Call the update statistics statement here. If a major compaction has already run, this will be a no-op.
+        stmt = conn.prepareStatement("ANALYZE T");
+        stmt.execute();
+        stmt = upsertStmt(conn, "t");
+        stmt.setString(1, "z");
+        s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
+        array = conn.createArrayOf("VARCHAR", s);
+        stmt.setArray(2, array);
+        s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
+        array = conn.createArrayOf("VARCHAR", s);
+        stmt.setArray(3, array);
+        stmt.execute();
+        conn.close();
+        conn = DriverManager.getConnection(getUrl(), props);
+        // This ANALYZE should be a no-op: stats were collected within the configured update frequency window
+        stmt = conn.prepareStatement("ANALYZE T");
+        stmt.execute();
+        rs = conn.createStatement().executeQuery("SELECT k FROM T");
+        assertTrue(rs.next());
+        conn.close();
+    }
+
+    @Test
+    public void testUpdateStatsWithMultipleTables() throws Throwable {
+        Connection conn;
+        PreparedStatement stmt;
+        ResultSet rs;
+        long ts = nextTimestamp();
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        // props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
+        conn = DriverManager.getConnection(url, props);
+        conn.createStatement().execute(
+                "CREATE TABLE x ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
+                        + " CONSTRAINT pk PRIMARY KEY (k, b_string_array DESC)) \n");
+        conn.createStatement().execute(
+                "CREATE TABLE z ( k VARCHAR, a_string_array VARCHAR(100) ARRAY[4], b_string_array VARCHAR(100) ARRAY[4] \n"
+                        + " CONSTRAINT pk PRIMARY KEY (k, b_string_array DESC)) \n");
+        String[] s;
+        Array array;
+        conn = upsertValues(props, "x");
+        conn = upsertValues(props, "z");
+        // Call the update statistics statement for each table
+        stmt = conn.prepareStatement("ANALYZE X");
+        stmt.execute();
+        stmt = conn.prepareStatement("ANALYZE Z");
+        stmt.execute();
+        stmt = upsertStmt(conn, "x");
+        stmt.setString(1, "z");
+        s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
+        array = conn.createArrayOf("VARCHAR", s);
+        stmt.setArray(2, array);
+        s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
+        array = conn.createArrayOf("VARCHAR", s);
+        stmt.setArray(3, array);
+        stmt.execute();
+        stmt = upsertStmt(conn, "z");
+        stmt.setString(1, "z");
+        s = new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" };
+        array = conn.createArrayOf("VARCHAR", s);
+        stmt.setArray(2, array);
+        s = new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" };
+        array = conn.createArrayOf("VARCHAR", s);
+        stmt.setArray(3, array);
+        stmt.execute();
+        conn.close();
+        conn = DriverManager.getConnection(getUrl(), props);
+        // This ANALYZE should be a no-op: stats were collected within the configured update frequency window
+        stmt = conn.prepareStatement("ANALYZE Z");
+        stmt.execute();
+        rs = conn.createStatement().executeQuery("SELECT k FROM Z");
+        assertTrue(rs.next());
+        conn.close();
+    }
+
+    private Connection upsertValues(Properties props, String tableName) throws SQLException, IOException,
+            InterruptedException {
+        Connection conn = DriverManager.getConnection(url, props);
+        // Upsert one row at a time, committing and flushing after each write so
+        // that several HFiles exist by the time stats are collected.
+        upsertRow(conn, tableName, "a",
+                new String[] { "abc", "def", "ghi", "jkll", null, null, "xxx" },
+                new String[] { "abc", "def", "ghi", "jkll", null, null, null, "xxx" });
+        for (String key : new String[] { "b", "c", "d", "b", "e" }) {
+            upsertRow(conn, tableName, key,
+                    new String[] { "xyz", "def", "ghi", "jkll", null, null, "xxx" },
+                    new String[] { "zya", "def", "ghi", "jkll", null, null, null, "xxx" });
+        }
+        return conn;
+    }
+
+    private void upsertRow(Connection conn, String tableName, String key, String[] first, String[] second)
+            throws SQLException, IOException, InterruptedException {
+        PreparedStatement stmt = upsertStmt(conn, tableName);
+        stmt.setString(1, key);
+        stmt.setArray(2, conn.createArrayOf("VARCHAR", first));
+        stmt.setArray(3, conn.createArrayOf("VARCHAR", second));
+        stmt.execute();
+        conn.commit();
+        flush(tableName);
+    }
+
+    private void flush(String tableName) throws IOException, InterruptedException {
+        util.getHBaseAdmin().flush(tableName.toUpperCase());
+    }
+
+    private PreparedStatement upsertStmt(Connection conn, String tableName) throws SQLException {
+        return conn.prepareStatement("UPSERT INTO " + tableName + " VALUES(?,?,?)");
+    }
+
+}
\ No newline at end of file

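For readers skimming StatsCollectorIT: the two properties it sets are the knobs that drive guidepost granularity and refresh cadence. A hedged sketch of wiring them up, using only names that appear in the diff (the values are illustrative):

    import java.util.Map;
    import org.apache.phoenix.query.QueryServices;
    import org.apache.phoenix.util.ReadOnlyProps;
    import com.google.common.collect.Maps;

    public class StatsTestProps {
        public static ReadOnlyProps statsProps() {
            Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
            // Smaller byte depth means more guideposts per region, hence finer splits.
            props.put(QueryServices.HISTOGRAM_BYTE_DEPTH_CONF_KEY, Integer.toString(60));
            // Minimum interval between stats collections; a second ANALYZE
            // inside this window is expected to be a no-op.
            props.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Integer.toString(4000));
            return new ReadOnlyProps(props.entrySet().iterator());
        }
    }
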
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsManagerIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsManagerIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsManagerIT.java
index f19f776..b13379b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsManagerIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsManagerIT.java
@@ -121,7 +121,7 @@ public class StatsManagerIT extends BaseParallelIteratorsRegionSplitterIT {
         String url = getUrl() + ";" + PhoenixRuntime.CURRENT_SCN_ATTRIB + "=" + ts;
         Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
         Connection conn = DriverManager.getConnection(url, props);
-        TableRef table = getTableRef(conn,ts);
+        TableRef table = getTableRef(conn, ts);
 
         int updateFreq = 5;
         int maxAge = 10;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
index 591efe1..655fe44 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDDLIT.java
@@ -474,6 +474,8 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
             assertTrue(rs.next());
             assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, TYPE_SEQUENCE, SYSTEM);
             assertTrue(rs.next());
+            assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE, SYSTEM);
+            assertTrue(rs.next());
             assertTableMetaData(rs, null, PARENT_TABLE_NAME, TABLE);
             assertTrue(rs.next());
             assertTableMetaData(rs, null, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, TABLE);
@@ -538,6 +540,8 @@ public class TenantSpecificTablesDDLIT extends BaseTenantSpecificTablesIT {
             assertTrue(rs.next());
             assertTableMetaData(rs, PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.TYPE_SEQUENCE, PTableType.SYSTEM);
             assertTrue(rs.next());
+            assertTableMetaData(rs, SYSTEM_CATALOG_SCHEMA, PhoenixDatabaseMetaData.SYSTEM_STATS_TABLE, PTableType.SYSTEM);
+            assertTrue(rs.next());
             assertTableMetaData(rs, null, PARENT_TABLE_NAME, PTableType.TABLE);
             assertTrue(rs.next());
             assertTableMetaData(rs, null, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID, PTableType.TABLE);

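The assertions above encode that the new stats catalog table sorts between SYSTEM.SEQUENCE and user tables in metadata listings. A quick way to eyeball its contents after an ANALYZE, assuming the table is exposed as SYSTEM.STATS (the diff only shows the PhoenixDatabaseMetaData constant, so the table name and column layout here are assumptions):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class DumpStats {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.createStatement().execute("ANALYZE MY_TABLE"); // illustrative table
                ResultSet rs = conn.createStatement().executeQuery("SELECT * FROM SYSTEM.STATS");
                while (rs.next()) {
                    System.out.println(rs.getString(1)); // first column of each guidepost row
                }
            }
        }
    }
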
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
index 107ca34..dba4264 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificTablesDMLIT.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
@@ -47,6 +48,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col) values (2, 'Viva Las Vegas')");
             conn.commit();
             conn.close();
+            analyzeTable(conn, TENANT_TABLE_NAME);
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
             ResultSet rs = conn.createStatement().executeQuery("select tenant_col from " + TENANT_TABLE_NAME + " where id = 1");
@@ -70,11 +72,11 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn1.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('you','" + TENANT_TYPE_ID +"',2,'Viva Las Vegas')");
             conn1.commit();
             
-            
+            analyzeTable(conn1, TENANT_TABLE_NAME);
             conn2.setAutoCommit(true);
             conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('them','" + TENANT_TYPE_ID + "',1,'Long Hair')");
             conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " values ('us','" + TENANT_TYPE_ID + "',2,'Black Hat')");
-
+            analyzeTable(conn2, TENANT_TABLE_NAME);
             conn2.close();            
             conn1.close();
             
@@ -96,6 +98,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
             conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " select * from " + TENANT_TABLE_NAME );
             conn2.commit();
+            analyzeTable(conn2, TENANT_TABLE_NAME);
             conn2.close();
             
             conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
@@ -112,6 +115,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
             conn2.setAutoCommit(true);
             conn2.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " select 'all', tenant_type_id, id, 'Big ' || tenant_col from " + TENANT_TABLE_NAME );
+            analyzeTable(conn2, TENANT_TABLE_NAME);
             conn2.close();
 
             conn2 = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL2);
@@ -159,6 +163,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col) values (1, 'Cheap Sunglasses')");
             conn.createStatement().executeUpdate("upsert into " + TENANT_TABLE_NAME + " (id, tenant_col) values (2, 'Viva Las Vegas')");
             conn.commit();
+            analyzeTable(conn, TENANT_TABLE_NAME);
             conn.close();
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
@@ -185,6 +190,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('AC/DC', 'abc', 1, 'Bon Scott')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', '" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', 'def', 1, 'Billy Gibbons')");
+            analyzeTable(conn, PARENT_TABLE_NAME);
             conn.close();
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
@@ -216,6 +222,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('AC/DC', 'abc', 1, 'Bon Scott')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', '" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', 'def', 1, 'Billy Gibbons')");
+            analyzeTable(conn, PARENT_TABLE_NAME);
             conn.close();
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
@@ -253,6 +260,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('AC/DC', 1, 'Bon Scott')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('" + TENANT_ID + "', 1, 'Billy Gibbons')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('" + TENANT_ID + "', 2, 'Billy Gibbons')");
+            analyzeTable(conn, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID);
             conn.close();
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
@@ -289,7 +297,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('AC/DC', 'abc', 1, 'Bon Scott')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', '" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', 'def', 1, 'Billy Gibbons')");
-            
+            analyzeTable(conn, PARENT_TABLE_NAME);
             conn.close();
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
@@ -320,7 +328,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('AC/DC', 1, 'Bon Scott')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('" + TENANT_ID + "', 1, 'Billy Gibbons')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME_NO_TENANT_TYPE_ID + " (tenant_id, id, user) values ('" + TENANT_ID + "', 2, 'Billy Gibbons')");
-            
+            analyzeTable(conn, PARENT_TABLE_NAME_NO_TENANT_TYPE_ID);
             conn.close();
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
@@ -350,6 +358,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('AC/DC', 'aaa', 1, 'Bon Scott')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', '" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', 'def', 2, 'Billy Gibbons')");
+            analyzeTable(conn, PARENT_TABLE_NAME);
             conn.close();
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
@@ -384,6 +393,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('AC/DC', 'aaa', 1, 'Bon Scott')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', '" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_id, tenant_type_id, id, user) values ('" + TENANT_ID + "', 'def', 2, 'Billy Gibbons')");
+            analyzeTable(conn, PARENT_TABLE_NAME);
             conn.close();
             
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
@@ -432,6 +442,7 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
             conn.setAutoCommit(true);
             conn.createStatement().executeUpdate("upsert into " + PARENT_TABLE_NAME + " (tenant_type_id, id, user) values ('" + TENANT_TYPE_ID + "', 1, 'Billy Gibbons')");
+            analyzeTable(conn, PARENT_TABLE_NAME);
             conn.close();
 
             conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);
@@ -460,6 +471,10 @@ public class TenantSpecificTablesDMLIT extends BaseTenantSpecificTablesIT {
         }
     }
     
+    private void analyzeTable(Connection conn, String tableName) throws IOException, SQLException {
+        conn.createStatement().execute("ANALYZE " + tableName);
+    }
+
     @Test
     public void testUpsertValuesUsingViewWithNoWhereClause() throws Exception {
         Connection conn = nextConnection(PHOENIX_JDBC_TENANT_SPECIFIC_URL);

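Every insertion in this file is the same three-step pattern: upsert, commit, then analyze, so that later SELECTs plan against fresh guideposts. Shown once as a self-contained sketch (the table name and values are illustrative; analyzeTable mirrors the helper added above):

    import java.sql.Connection;
    import java.sql.SQLException;

    public class UpsertThenAnalyze {
        // Mirrors the analyzeTable helper added in this file.
        static void analyzeTable(Connection conn, String tableName) throws SQLException {
            conn.createStatement().execute("ANALYZE " + tableName);
        }

        static void writeAndAnalyze(Connection conn) throws SQLException {
            conn.createStatement().executeUpdate("UPSERT INTO MY_TABLE VALUES (1, 'v')");
            conn.commit();
            analyzeTable(conn, "MY_TABLE"); // guideposts are fresh before the next query plans
        }
    }
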
http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
index 5452d38..20104a4 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/TenantSpecificViewIndexIT.java
@@ -122,7 +122,7 @@ public class TenantSpecificViewIndexIT extends BaseTenantSpecificViewIndexIT {
         if(localIndex){
             assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _LOCAL_IDX_MT_BASE ['a',-32768,'f']\nCLIENT MERGE SORT",QueryUtil.getExplainPlan(rs));
         } else {
-            assertEquals("CLIENT PARALLEL 4-WAY RANGE SCAN OVER _IDX_MT_BASE ['a',-32768,'f']",QueryUtil.getExplainPlan(rs));
+            assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_MT_BASE ['a',-32768,'f']",QueryUtil.getExplainPlan(rs));
         }
         
         try {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
index 8aaebba..cdd44f1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/MutableIndexIT.java
@@ -58,7 +58,6 @@ public class MutableIndexIT extends BaseMutableIndexIT {
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
         // Don't split intra region so we can more easily know that the n-way parallelization is for the explain plan
-        props.put(QueryServices.MAX_INTRA_REGION_PARALLELIZATION_ATTRIB, Integer.toString(1));
         // Forces server cache to be used
         props.put(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(2));
         props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));

http://git-wip-us.apache.org/repos/asf/phoenix/blob/5cdc938e/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java
index b269cd1..8eae5f9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/SaltedIndexIT.java
@@ -53,8 +53,6 @@ public class SaltedIndexIT extends BaseIndexIT {
     @Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
     public static void doSetup() throws Exception {
         Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
-        // Don't split intra region so we can more easily know that the n-way parallelization is for the explain plan
-        props.put(QueryServices.MAX_INTRA_REGION_PARALLELIZATION_ATTRIB, Integer.toString(1));
         // Forces server cache to be used
         props.put(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(2));
         // Drop the HBase table metadata for this test
@@ -123,7 +121,9 @@ public class SaltedIndexIT extends BaseIndexIT {
         stmt.setString(2, "y");
         stmt.execute();
         conn.commit();
-        
+        stmt = conn.prepareStatement("ANALYZE " + DATA_TABLE_FULL_NAME);
+        stmt.execute();
+
         query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
         rs = conn.createStatement().executeQuery(query);
         assertTrue(rs.next());
@@ -177,10 +177,10 @@ public class SaltedIndexIT extends BaseIndexIT {
         assertFalse(rs.next());
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         expectedPlan = tableSaltBuckets == null ? 
-                "CLIENT PARALLEL 1-WAY POINT LOOKUP ON 1 KEY OVER " + DATA_TABLE_FULL_NAME + "\n" +
+                "CLIENT PARALLEL 3-WAY POINT LOOKUP ON 1 KEY OVER " + DATA_TABLE_FULL_NAME + "\n" +
                 "    SERVER SORTED BY [V]\n" + 
                 "CLIENT MERGE SORT" :
-                    "CLIENT PARALLEL 1-WAY POINT LOOKUP ON 1 KEY OVER " + DATA_TABLE_FULL_NAME + "\n" + 
+                    "CLIENT PARALLEL 2-WAY POINT LOOKUP ON 1 KEY OVER " + DATA_TABLE_FULL_NAME + "\n" + 
                     "    SERVER SORTED BY [V]\n" + 
                     "CLIENT MERGE SORT";
         assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
@@ -199,15 +199,16 @@ public class SaltedIndexIT extends BaseIndexIT {
         assertFalse(rs.next());
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         expectedPlan = tableSaltBuckets == null ? 
-             "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + DATA_TABLE_FULL_NAME + "\n" +
+             "CLIENT PARALLEL 3-WAY FULL SCAN OVER " + DATA_TABLE_FULL_NAME + "\n" +
              "    SERVER FILTER BY V >= 'x'\n" + 
              "    SERVER 2 ROW LIMIT\n" + 
              "CLIENT 2 ROW LIMIT" :
-                 "CLIENT PARALLEL 3-WAY FULL SCAN OVER " + DATA_TABLE_FULL_NAME + "\n" +
+                 "CLIENT PARALLEL 4-WAY FULL SCAN OVER " + DATA_TABLE_FULL_NAME + "\n" +
                  "    SERVER FILTER BY V >= 'x'\n" + 
                  "    SERVER 2 ROW LIMIT\n" + 
                  "CLIENT MERGE SORT\n" + 
                  "CLIENT 2 ROW LIMIT";
-        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
+        String explainPlan = QueryUtil.getExplainPlan(rs);
+        assertEquals(expectedPlan, explainPlan);
     }
 }
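
The expected-plan churn in this file is the user-visible effect of the feature: the N in "CLIENT PARALLEL N-WAY" now comes from guideposts written by ANALYZE rather than from concurrency settings. A sketch of checking a plan the way these tests do (the query is illustrative; QueryUtil.getExplainPlan is the helper the tests already use):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import org.apache.phoenix.util.QueryUtil;

    public class ShowPlan {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM MY_TABLE");
                // The leading "CLIENT PARALLEL N-WAY" reflects guidepost-driven splits.
                System.out.println(QueryUtil.getExplainPlan(rs));
            }
        }
    }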