Posted to commits@phoenix.apache.org by st...@apache.org on 2020/12/18 10:24:17 UTC

[phoenix] branch 4.x updated: PHOENIX-6269 : Extend ExplainPlan attributes based comparison to some tests

This is an automated email from the ASF dual-hosted git repository.

stoty pushed a commit to branch 4.x
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x by this push:
     new 0d16fcf  PHOENIX-6269 : Extend ExplainPlan attributes based comparison to some tests
0d16fcf is described below

commit 0d16fcf1c62ee414374781e15368a6a9f2499e21
Author: Viraj Jasani <vj...@apache.org>
AuthorDate: Thu Dec 17 17:16:46 2020 +0530

    PHOENIX-6269 : Extend ExplainPlan attributes based comparison to some tests
---
 .../apache/phoenix/end2end/BaseAggregateIT.java    | 86 +++++++++++++++-------
 .../org/apache/phoenix/end2end/BaseOrderByIT.java  | 26 +++++--
 .../end2end/BaseTenantSpecificViewIndexIT.java     | 86 ++++++++++++++++------
 .../org/apache/phoenix/end2end/BaseViewIT.java     | 85 +++++++++++++++------
 .../CountDistinctApproximateHyperLogLogIT.java     | 30 +++++---
 .../apache/phoenix/end2end/IndexExtendedIT.java    | 64 +++++++++++-----
 .../end2end/IndexToolForPartialBuildIT.java        | 50 +++++++------
 7 files changed, 298 insertions(+), 129 deletions(-)
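
In short, the patch replaces full-string comparison of EXPLAIN output with assertions on individual plan attributes. A minimal sketch of the new pattern, as applied across the tests below (the connection, query, table name, and expected values here are hypothetical; the API calls are the ones this change introduces into the tests):

    import org.apache.phoenix.compile.ExplainPlan;
    import org.apache.phoenix.compile.ExplainPlanAttributes;
    import org.apache.phoenix.jdbc.PhoenixPreparedStatement;

    // Compile the query and obtain its explain plan as structured attributes
    // rather than parsing the textual EXPLAIN output.
    ExplainPlan plan = conn.prepareStatement("SELECT * FROM MY_TABLE WHERE K1 = 'a'")
        .unwrap(PhoenixPreparedStatement.class)
        .optimizeQuery()
        .getExplainPlan();
    ExplainPlanAttributes attrs = plan.getPlanStepsAsAttributes();
    // Each assertion checks one attribute of the plan instead of matching
    // the entire multi-line plan string.
    assertEquals("PARALLEL 1-WAY", attrs.getIteratorTypeAndScanSize());
    assertEquals("RANGE SCAN ", attrs.getExplainScanType());
    assertEquals("MY_TABLE", attrs.getTableName());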

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseAggregateIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseAggregateIT.java
index 5b466df..84a5d9e 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseAggregateIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseAggregateIT.java
@@ -21,7 +21,6 @@ import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 import static org.apache.phoenix.util.TestUtil.assertResultSet;
 
 import java.io.IOException;
@@ -35,20 +34,16 @@ import java.util.List;
 import java.util.Properties;
 
 import com.google.common.collect.Lists;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.phoenix.compile.QueryPlan;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
-import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.compile.ExplainPlan;
+import org.apache.phoenix.compile.ExplainPlanAttributes;
+import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.schema.AmbiguousColumnException;
 import org.apache.phoenix.schema.types.PChar;
 import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryBuilder;
-import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Test;
 
@@ -375,12 +370,22 @@ public abstract class BaseAggregateIT extends ParallelStatsDisabledIT {
         assertEquals("abc", rs.getString(2));
         assertFalse(rs.next());
         
-        String expectedPhoenixPlan = "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName
-                + " ['000001111122222','333334444455555',0,*] - ['000001111122222','333334444455555',0,1]\n" +
-                "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [MATCH_STATUS, EXTERNAL_DATASOURCE_KEY]\n" +
-                "CLIENT FILTER BY COUNT(1) > 1";
-        validateQueryPlan(conn, queryBuilder, expectedPhoenixPlan, null);
+        ExplainPlan plan = conn.prepareStatement(queryBuilder.build())
+            .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+            .getExplainPlan();
+        ExplainPlanAttributes explainPlanAttributes =
+            plan.getPlanStepsAsAttributes();
+        assertEquals("PARALLEL 1-WAY",
+            explainPlanAttributes.getIteratorTypeAndScanSize());
+        assertEquals("RANGE SCAN ", explainPlanAttributes.getExplainScanType());
+        assertEquals(tableName, explainPlanAttributes.getTableName());
+        assertEquals(" ['000001111122222','333334444455555',0,*] - ['000001111122222','333334444455555',0,1]",
+            explainPlanAttributes.getKeyRanges());
+        assertEquals("SERVER FILTER BY FIRST KEY ONLY",
+            explainPlanAttributes.getServerWhereFilter());
+        assertEquals("SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [MATCH_STATUS, EXTERNAL_DATASOURCE_KEY]",
+            explainPlanAttributes.getServerAggregate());
+        assertEquals("COUNT(1) > 1", explainPlanAttributes.getClientFilterBy());
     }
     
     @Test
@@ -420,10 +425,20 @@ public abstract class BaseAggregateIT extends ParallelStatsDisabledIT {
         assertEquals("a", rs.getString(1));
         assertEquals(4, rs.getLong(2));
         assertFalse(rs.next());
-        String expectedPhoenixPlan = "CLIENT PARALLEL 1-WAY REVERSE FULL SCAN OVER " + tableName + "\n" +
-                "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [K1]";
-        validateQueryPlan(conn, queryBuilder, expectedPhoenixPlan, null);
+        ExplainPlan plan = conn.prepareStatement(queryBuilder.build())
+            .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+            .getExplainPlan();
+        ExplainPlanAttributes explainPlanAttributes =
+            plan.getPlanStepsAsAttributes();
+        assertEquals("PARALLEL 1-WAY",
+            explainPlanAttributes.getIteratorTypeAndScanSize());
+        assertEquals("REVERSE", explainPlanAttributes.getClientSortedBy());
+        assertEquals("FULL SCAN ", explainPlanAttributes.getExplainScanType());
+        assertEquals(tableName, explainPlanAttributes.getTableName());
+        assertEquals("SERVER FILTER BY FIRST KEY ONLY",
+            explainPlanAttributes.getServerWhereFilter());
+        assertEquals("SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [K1]",
+            explainPlanAttributes.getServerAggregate());
     }
     
     @Test
@@ -471,10 +486,20 @@ public abstract class BaseAggregateIT extends ParallelStatsDisabledIT {
         assertEquals("a", rs.getString(1));
         assertEquals(10, rs.getLong(2));
         assertFalse(rs.next());
-        String expectedPhoenixPlan = "CLIENT PARALLEL 1-WAY REVERSE FULL SCAN OVER " + tableName + "\n" +
-                "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [K1]";
-        validateQueryPlan(conn, queryBuilder, expectedPhoenixPlan, null);
+        ExplainPlan plan = conn.prepareStatement(queryBuilder.build())
+            .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+            .getExplainPlan();
+        ExplainPlanAttributes explainPlanAttributes =
+            plan.getPlanStepsAsAttributes();
+        assertEquals("PARALLEL 1-WAY",
+            explainPlanAttributes.getIteratorTypeAndScanSize());
+        assertEquals("REVERSE", explainPlanAttributes.getClientSortedBy());
+        assertEquals("FULL SCAN ", explainPlanAttributes.getExplainScanType());
+        assertEquals(tableName, explainPlanAttributes.getTableName());
+        assertEquals("SERVER FILTER BY FIRST KEY ONLY",
+            explainPlanAttributes.getServerWhereFilter());
+        assertEquals("SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [K1]",
+            explainPlanAttributes.getServerAggregate());
     }
 
     @Test
@@ -529,10 +554,19 @@ public abstract class BaseAggregateIT extends ParallelStatsDisabledIT {
         assertEquals("n", rs.getString(1));
         assertEquals(2, rs.getDouble(2), 1e-6);
         assertFalse(rs.next());
-        String expectedPhoenixPlan = "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + tableName + "\n" +
-                "    SERVER FILTER BY FIRST KEY ONLY\n" +
-                "    SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [K1]";
-        validateQueryPlan(conn, queryBuilder, expectedPhoenixPlan, null);
+        ExplainPlan plan = conn.prepareStatement(queryBuilder.build())
+            .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+            .getExplainPlan();
+        ExplainPlanAttributes explainPlanAttributes =
+            plan.getPlanStepsAsAttributes();
+        assertEquals("PARALLEL 1-WAY",
+            explainPlanAttributes.getIteratorTypeAndScanSize());
+        assertEquals("FULL SCAN ", explainPlanAttributes.getExplainScanType());
+        assertEquals(tableName, explainPlanAttributes.getTableName());
+        assertEquals("SERVER FILTER BY FIRST KEY ONLY",
+            explainPlanAttributes.getServerWhereFilter());
+        assertEquals("SERVER AGGREGATE INTO ORDERED DISTINCT ROWS BY [K1]",
+            explainPlanAttributes.getServerAggregate());
         TestUtil.analyzeTable(conn, tableName);
         List<KeyRange> splits = TestUtil.getAllSplits(conn, tableName);
         assertEquals(nGuidePosts, splits.size());
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOrderByIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOrderByIT.java
index 2791d0f..860a190 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOrderByIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseOrderByIT.java
@@ -30,6 +30,7 @@ import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.apache.phoenix.util.TestUtil.assertResultSet;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 import java.sql.Connection;
@@ -40,6 +41,9 @@ import java.sql.SQLException;
 import java.util.Properties;
 
 import com.google.common.collect.Lists;
+import org.apache.phoenix.compile.ExplainPlan;
+import org.apache.phoenix.compile.ExplainPlanAttributes;
+import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryBuilder;
 import org.junit.Test;
@@ -286,11 +290,23 @@ public abstract class BaseOrderByIT extends ParallelStatsDisabledIT {
         .setWhereClause("K2 = 'ABC'");
 
         // verify that the phoenix query plan doesn't contain an order by
-        String expectedPhoenixPlan = "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + tableName +"\n" +
-                "    SERVER FILTER BY K2 = 'ABC'\n" +
-                "    SERVER AGGREGATE INTO DISTINCT ROWS BY [K2, VAL1, VAL2]\n" +
-                "CLIENT MERGE SORT";
-        validateQueryPlan(conn, queryBuilder, expectedPhoenixPlan, null);
+        ExplainPlan plan = conn.prepareStatement(queryBuilder.build())
+            .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+            .getExplainPlan();
+        ExplainPlanAttributes explainPlanAttributes =
+            plan.getPlanStepsAsAttributes();
+        assertEquals("PARALLEL 1-WAY",
+            explainPlanAttributes.getIteratorTypeAndScanSize());
+        assertEquals("FULL SCAN ", explainPlanAttributes.getExplainScanType());
+        assertEquals(tableName, explainPlanAttributes.getTableName());
+        assertEquals("SERVER FILTER BY K2 = 'ABC'",
+            explainPlanAttributes.getServerWhereFilter());
+        assertEquals("SERVER AGGREGATE INTO DISTINCT ROWS BY [K2, VAL1, VAL2]",
+            explainPlanAttributes.getServerAggregate());
+        assertEquals("CLIENT MERGE SORT",
+            explainPlanAttributes.getClientSortAlgo());
+        assertNull(explainPlanAttributes.getClientSortedBy());
+        assertNull(explainPlanAttributes.getServerSortedBy());
 
         ResultSet rs = executeQuery(conn, queryBuilder);
         assertTrue(rs.next());
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
index 9860624..71b8eb7 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseTenantSpecificViewIndexIT.java
@@ -30,8 +30,10 @@ import java.util.Properties;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.compile.ExplainPlan;
+import org.apache.phoenix.compile.ExplainPlanAttributes;
+import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.util.PhoenixRuntime;
-import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
 import com.google.common.collect.Lists;
@@ -137,26 +139,51 @@ public class BaseTenantSpecificViewIndexIT extends SplitSystemCatalogIT {
         }
         conn.createStatement().execute("UPSERT INTO " + viewName + "(k2,v1,v2) VALUES (-1, 'blah', 'superblah')"); // sanity check that we can upsert after index is there
         conn.commit();
-        ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT k1, k2, v2 FROM " + viewName + " WHERE v2='" + valuePrefix + "v2-1'");
-        if(localIndex){
-            assertEquals(saltBuckets == null ? 
-                    "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + " [" + (1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix + "v2-1']\n"
-                            + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                            + "CLIENT MERGE SORT" :
-                    "CLIENT PARALLEL 3-WAY RANGE SCAN OVER " + tableName + " [" + (1L + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix + "v2-1']\n"
-                            + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                            + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
-        } else {
-            String expected = saltBuckets == null ? 
-                    "CLIENT PARALLEL 1-WAY RANGE SCAN OVER _IDX_" + tableName + " [" + (Short.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix + "v2-1']\n"
-                            + "    SERVER FILTER BY FIRST KEY ONLY" :
-                    "CLIENT PARALLEL 3-WAY RANGE SCAN OVER _IDX_" + tableName + " [0," + (Short.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix +
-                        "v2-1'] - ["+(saltBuckets.intValue()-1)+"," + (Short.MIN_VALUE + expectedIndexIdOffset) + ",'" + tenantId + "','" + valuePrefix + "v2-1']\n"
+        ExplainPlan plan = conn.prepareStatement("SELECT k1, k2, v2 FROM "
+                + viewName + " WHERE v2='" + valuePrefix + "v2-1'")
+            .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+            .getExplainPlan();
+        ExplainPlanAttributes explainPlanAttributes =
+            plan.getPlanStepsAsAttributes();
+        final String iteratorTypeAndScanSize;
+        final String clientSortAlgo;
+        final String expectedTableName;
+        final String keyRanges;
 
-                  + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                  + "CLIENT MERGE SORT";
-            assertEquals(expected, QueryUtil.getExplainPlan(rs));
+        if (localIndex) {
+            if (saltBuckets == null) {
+                iteratorTypeAndScanSize = "PARALLEL 1-WAY";
+            } else {
+                iteratorTypeAndScanSize = "PARALLEL 3-WAY";
+            }
+            clientSortAlgo = "CLIENT MERGE SORT";
+            expectedTableName = tableName;
+            keyRanges = " [" + (1L + expectedIndexIdOffset) + ",'" + tenantId
+                + "','" + valuePrefix + "v2-1']";
+        } else {
+            if (saltBuckets == null) {
+                iteratorTypeAndScanSize = "PARALLEL 1-WAY";
+                clientSortAlgo = null;
+                keyRanges = " [" + (Short.MIN_VALUE + expectedIndexIdOffset)
+                    + ",'" + tenantId + "','" + valuePrefix + "v2-1']";
+            } else {
+                iteratorTypeAndScanSize = "PARALLEL 3-WAY";
+                clientSortAlgo = "CLIENT MERGE SORT";
+                keyRanges = " [0," + (Short.MIN_VALUE + expectedIndexIdOffset)
+                    + ",'" + tenantId + "','" + valuePrefix + "v2-1'] - ["
+                    + (saltBuckets - 1) + "," + (Short.MIN_VALUE + expectedIndexIdOffset)
+                    + ",'" + tenantId + "','" + valuePrefix + "v2-1']";
+            }
+            expectedTableName = "_IDX_" + tableName;
         }
+        assertEquals(iteratorTypeAndScanSize,
+            explainPlanAttributes.getIteratorTypeAndScanSize());
+        assertEquals("SERVER FILTER BY FIRST KEY ONLY",
+            explainPlanAttributes.getServerWhereFilter());
+        assertEquals("RANGE SCAN ", explainPlanAttributes.getExplainScanType());
+        assertEquals(clientSortAlgo, explainPlanAttributes.getClientSortAlgo());
+        assertEquals(expectedTableName, explainPlanAttributes.getTableName());
+        assertEquals(keyRanges, explainPlanAttributes.getKeyRanges());
     }
 
     private void createAndVerifyIndexNonStringTenantId(Connection conn, String viewName, String tableName, String tenantId, String valuePrefix) throws SQLException {
@@ -164,11 +191,22 @@ public class BaseTenantSpecificViewIndexIT extends SplitSystemCatalogIT {
         conn.createStatement().execute("CREATE LOCAL INDEX " + indexName + " ON " + viewName + "(v2)");
         conn.createStatement().execute("UPSERT INTO " + viewName + "(k2,v1,v2) VALUES (-1, 'blah', 'superblah')"); // sanity check that we can upsert after index is there
         conn.commit();
-        ResultSet rs = conn.createStatement().executeQuery("EXPLAIN SELECT k1, k2, v2 FROM " + viewName + " WHERE v2='" + valuePrefix + "v2-1'");
-        assertEquals(
-                "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + tableName + " [1," + tenantId + ",'" + valuePrefix + "v2-1']\n"
-                        + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                        + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
+        ExplainPlan plan = conn.prepareStatement("SELECT k1, k2, v2 FROM "
+                + viewName + " WHERE v2='" + valuePrefix + "v2-1'")
+            .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+            .getExplainPlan();
+        ExplainPlanAttributes explainPlanAttributes =
+            plan.getPlanStepsAsAttributes();
+        assertEquals("PARALLEL 1-WAY",
+            explainPlanAttributes.getIteratorTypeAndScanSize());
+        assertEquals("SERVER FILTER BY FIRST KEY ONLY",
+            explainPlanAttributes.getServerWhereFilter());
+        assertEquals("RANGE SCAN ", explainPlanAttributes.getExplainScanType());
+        assertEquals(tableName, explainPlanAttributes.getTableName());
+        assertEquals(" [1," + tenantId + ",'" + valuePrefix + "v2-1']",
+            explainPlanAttributes.getKeyRanges());
+        assertEquals("CLIENT MERGE SORT",
+            explainPlanAttributes.getClientSortAlgo());
     }
     
     private Connection createTenantConnection(String tenantId) throws SQLException {
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
index a9b1af2..9e9025f 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/BaseViewIT.java
@@ -39,11 +39,13 @@ import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.compile.ExplainPlan;
+import org.apache.phoenix.compile.ExplainPlanAttributes;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.util.MetaDataUtil;
-import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
@@ -188,19 +190,43 @@ public abstract class BaseViewIT extends ParallelStatsEnabledIT {
         assertTrue(BigDecimal.valueOf(51.0).compareTo(rs.getBigDecimal(3))==0);
         assertEquals("bar", rs.getString(4));
         assertFalse(rs.next());
-        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-        String queryPlan = QueryUtil.getExplainPlan(rs);
+
+        ExplainPlan plan = conn.prepareStatement(query)
+            .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+            .getExplainPlan();
+        ExplainPlanAttributes explainPlanAttributes =
+            plan.getPlanStepsAsAttributes();
+        String iteratorTypeAndScanSize;
+        String clientSortAlgo;
+        String expectedTableName;
+        String keyRanges;
+        String serverFilterBy;
+
         if (localIndex) {
-            assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : saltBuckets)  +"-WAY RANGE SCAN OVER " + fullTableName +" [1,51]\n"
-                    + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                    + "CLIENT MERGE SORT",
-                queryPlan);
+            iteratorTypeAndScanSize = "PARALLEL "
+                + (saltBuckets == null ? 1 : saltBuckets) + "-WAY";
+            expectedTableName = fullTableName;
+            keyRanges = "[1,51]";
+            clientSortAlgo = "CLIENT MERGE SORT";
+            serverFilterBy = "SERVER FILTER BY FIRST KEY ONLY";
         } else {
-            assertEquals(saltBuckets == null
-                    ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName +" [" + Short.MIN_VALUE + ",51]"
-                            : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [0," + Short.MIN_VALUE + ",51] - ["+(saltBuckets.intValue()-1)+"," + Short.MIN_VALUE + ",51]\nCLIENT MERGE SORT",
-                            queryPlan);
+            iteratorTypeAndScanSize = saltBuckets == null ? "PARALLEL 1-WAY"
+                : "PARALLEL " + saltBuckets + "-WAY";
+            expectedTableName = viewIndexPhysicalName;
+            keyRanges = saltBuckets == null ? " [" + Short.MIN_VALUE + ",51]"
+                : " [0," + Short.MIN_VALUE + ",51] - [" + (saltBuckets - 1)
+                    + "," + Short.MIN_VALUE + ",51]";
+            clientSortAlgo = saltBuckets == null ? null : "CLIENT MERGE SORT";
+            serverFilterBy = null;
         }
+        assertEquals(iteratorTypeAndScanSize,
+            explainPlanAttributes.getIteratorTypeAndScanSize());
+        assertEquals("RANGE SCAN ", explainPlanAttributes.getExplainScanType());
+        assertEquals(expectedTableName, explainPlanAttributes.getTableName());
+        assertEquals(clientSortAlgo, explainPlanAttributes.getClientSortAlgo());
+        assertEquals(keyRanges, explainPlanAttributes.getKeyRanges());
+        assertEquals(serverFilterBy,
+            explainPlanAttributes.getServerWhereFilter());
 
         String viewIndexName2 = "I_" + generateUniqueName();
         if (localIndex) {
@@ -228,23 +254,38 @@ public abstract class BaseViewIT extends ParallelStatsEnabledIT {
         assertEquals(120, rs.getInt(2));
         assertEquals("foo", rs.getString(3));
         assertFalse(rs.next());
-        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+
+        plan = conn.prepareStatement(query)
+            .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+            .getExplainPlan();
+        explainPlanAttributes =
+            plan.getPlanStepsAsAttributes();
+
         String physicalTableName;
         if (localIndex) {
             physicalTableName = fullTableName;
-            assertEquals("CLIENT PARALLEL "+ (saltBuckets == null ? 1 : saltBuckets)  +"-WAY RANGE SCAN OVER " + fullTableName +" [" + (2) + ",'foo']\n"
-                    + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                    + "CLIENT MERGE SORT",QueryUtil.getExplainPlan(rs));
+            iteratorTypeAndScanSize = "PARALLEL "
+                + (saltBuckets == null ? 1 : saltBuckets) + "-WAY";
+            keyRanges = " [" + (2) + ",'foo']";
+            clientSortAlgo = "CLIENT MERGE SORT";
         } else {
             physicalTableName = viewIndexPhysicalName;
-            assertEquals(saltBuckets == null
-                    ? "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + viewIndexPhysicalName +" [" + (Short.MIN_VALUE+1) + ",'foo']\n"
-                            + "    SERVER FILTER BY FIRST KEY ONLY"
-                            : "CLIENT PARALLEL " + saltBuckets + "-WAY RANGE SCAN OVER " + viewIndexPhysicalName + " [0," + (Short.MIN_VALUE+1) + ",'foo'] - ["+(saltBuckets.intValue()-1)+"," + (Short.MIN_VALUE+1) + ",'foo']\n"
-                                    + "    SERVER FILTER BY FIRST KEY ONLY\n"
-                                    + "CLIENT MERGE SORT",
-                            QueryUtil.getExplainPlan(rs));
+            iteratorTypeAndScanSize = saltBuckets == null ? "PARALLEL 1-WAY"
+                : "PARALLEL " + saltBuckets + "-WAY";
+            keyRanges = saltBuckets == null ? " [" + (Short.MIN_VALUE + 1) + ",'foo']"
+                : " [0," + (Short.MIN_VALUE + 1) + ",'foo'] - [" + (saltBuckets - 1)
+                    + "," + (Short.MIN_VALUE + 1) + ",'foo']";
+            clientSortAlgo = saltBuckets == null ? null : "CLIENT MERGE SORT";
         }
+        assertEquals(physicalTableName, explainPlanAttributes.getTableName());
+        assertEquals(iteratorTypeAndScanSize,
+            explainPlanAttributes.getIteratorTypeAndScanSize());
+        assertEquals("RANGE SCAN ", explainPlanAttributes.getExplainScanType());
+        assertEquals(keyRanges, explainPlanAttributes.getKeyRanges());
+        assertEquals("SERVER FILTER BY FIRST KEY ONLY",
+            explainPlanAttributes.getServerWhereFilter());
+        assertEquals(clientSortAlgo, explainPlanAttributes.getClientSortAlgo());
+
         conn.close();
         return new Pair<>(physicalTableName,scan);
     }
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctApproximateHyperLogLogIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctApproximateHyperLogLogIT.java
index 3de1509..440470d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctApproximateHyperLogLogIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CountDistinctApproximateHyperLogLogIT.java
@@ -17,9 +17,11 @@
  */
 package org.apache.phoenix.end2end;
 
+import org.apache.phoenix.compile.ExplainPlan;
+import org.apache.phoenix.compile.ExplainPlanAttributes;
+import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -116,18 +118,24 @@ public class CountDistinctApproximateHyperLogLogIT extends ParallelStatsDisabled
 	}
 
 	@Test
-	public void testDistinctCountPlanExlain() throws Exception {
+	public void testDistinctCountPlanExplain() throws Exception {
 		Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-		String query = "explain SELECT APPROX_COUNT_DISTINCT(i1||i2) FROM " + tableName;
-		
-		try(Connection conn = DriverManager.getConnection(getUrl(), props);
-			PreparedStatement statement = conn.prepareStatement(query);) {
+		String query = "SELECT APPROX_COUNT_DISTINCT(i1||i2) FROM " + tableName;
+		try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
 			prepareTableWithValues(conn, 100);
-			ResultSet rs = statement.executeQuery();
-
-			assertEquals("CLIENT PARALLEL 1-WAY FULL SCAN OVER " + tableName + "\n"
-					+ "    SERVER FILTER BY FIRST KEY ONLY\n" + "    SERVER AGGREGATE INTO SINGLE ROW",
-					QueryUtil.getExplainPlan(rs));
+			ExplainPlan plan = conn.prepareStatement(query)
+				.unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+				.getExplainPlan();
+			ExplainPlanAttributes explainPlanAttributes =
+				plan.getPlanStepsAsAttributes();
+			assertEquals(tableName, explainPlanAttributes.getTableName());
+			assertEquals("PARALLEL 1-WAY",
+				explainPlanAttributes.getIteratorTypeAndScanSize());
+			assertEquals("FULL SCAN ", explainPlanAttributes.getExplainScanType());
+			assertEquals("SERVER FILTER BY FIRST KEY ONLY",
+				explainPlanAttributes.getServerWhereFilter());
+			assertEquals("SERVER AGGREGATE INTO SINGLE ROW",
+				explainPlanAttributes.getServerAggregate());
 		}
 	}
 
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
index 254dc39..8821fd6 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexExtendedIT.java
@@ -37,16 +37,17 @@ import java.util.Properties;
 import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.regionserver.ScanInfoUtil;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.ExplainPlan;
+import org.apache.phoenix.compile.ExplainPlanAttributes;
 import org.apache.phoenix.coprocessor.IndexRebuildRegionScanner;
-import org.apache.phoenix.hbase.index.IndexRegionObserver;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.mapreduce.index.IndexTool;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.junit.BeforeClass;
@@ -152,14 +153,23 @@ public class IndexExtendedIT extends BaseTest {
             
             //verify rows are fetched from data table.
             String selectSql = String.format("SELECT ID FROM %s WHERE UPPER(NAME, 'en_US') ='UNAME2'",dataTableFullName);
-            ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + selectSql);
-            String actualExplainPlan = QueryUtil.getExplainPlan(rs);
-            
+
+            ExplainPlan plan = conn.prepareStatement(selectSql)
+                .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+                .getExplainPlan();
+            ExplainPlanAttributes explainPlanAttributes =
+                plan.getPlanStepsAsAttributes();
+            assertEquals("PARALLEL 1-WAY",
+                explainPlanAttributes.getIteratorTypeAndScanSize());
+            assertEquals("FULL SCAN ",
+                explainPlanAttributes.getExplainScanType());
             //assert we are pulling from data table.
-            assertEquals(String.format("CLIENT PARALLEL 1-WAY FULL SCAN OVER %s\n" +
-                    "    SERVER FILTER BY UPPER(NAME, 'en_US') = 'UNAME2'",dataTableFullName),actualExplainPlan);
-            
-            rs = stmt1.executeQuery(selectSql);
+            assertEquals(dataTableFullName,
+                explainPlanAttributes.getTableName());
+            assertEquals("SERVER FILTER BY UPPER(NAME, 'en_US') = 'UNAME2'",
+                explainPlanAttributes.getServerWhereFilter());
+
+            ResultSet rs = stmt1.executeQuery(selectSql);
             assertTrue(rs.next());
             assertEquals(2, rs.getInt(1));
             assertFalse(rs.next());
@@ -167,11 +177,17 @@ public class IndexExtendedIT extends BaseTest {
             //run the index MR job.
             IndexToolIT.runIndexTool(true, useSnapshot, schemaName, dataTableName, indexTableName);
             
+            plan = conn.prepareStatement(selectSql)
+                .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+                .getExplainPlan();
+            explainPlanAttributes =
+                plan.getPlanStepsAsAttributes();
             //assert we are pulling from index table.
-            rs = conn.createStatement().executeQuery("EXPLAIN " + selectSql);
-            actualExplainPlan = QueryUtil.getExplainPlan(rs);
-            IndexToolIT.assertExplainPlan(localIndex, actualExplainPlan, dataTableFullName, indexTableFullName);
-            
+            String expectedTableName = localIndex ? dataTableFullName
+                : indexTableFullName;
+            assertEquals(expectedTableName,
+                explainPlanAttributes.getTableName());
+
             rs = stmt.executeQuery(selectSql);
             assertTrue(rs.next());
             assertEquals(2, rs.getInt(1));
@@ -229,12 +245,22 @@ public class IndexExtendedIT extends BaseTest {
             // validate that delete markers were issued correctly and only ('a', '1', 'value1') was
             // deleted
             String query = "SELECT pk3 from " + dataTableFullName + " ORDER BY pk3";
-            ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-            String expectedPlan =
-                    "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + indexTableFullName + "\n"
-                            + "    SERVER FILTER BY FIRST KEY ONLY";
-            assertEquals("Wrong plan ", expectedPlan, QueryUtil.getExplainPlan(rs));
-            rs = conn.createStatement().executeQuery(query);
+
+            ExplainPlan plan = conn.prepareStatement(query)
+                .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+                .getExplainPlan();
+            ExplainPlanAttributes explainPlanAttributes =
+                plan.getPlanStepsAsAttributes();
+            assertEquals("PARALLEL 1-WAY",
+                explainPlanAttributes.getIteratorTypeAndScanSize());
+            assertEquals("FULL SCAN ",
+                explainPlanAttributes.getExplainScanType());
+            assertEquals(indexTableFullName,
+                explainPlanAttributes.getTableName());
+            assertEquals("SERVER FILTER BY FIRST KEY ONLY",
+                explainPlanAttributes.getServerWhereFilter());
+
+            ResultSet rs = conn.createStatement().executeQuery(query);
             assertTrue(rs.next());
             assertEquals("2", rs.getString(1));
             assertTrue(rs.next());
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
index 3027554..f7b42e1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/IndexToolForPartialBuildIT.java
@@ -41,7 +41,10 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.client.Table;
+import org.apache.phoenix.compile.ExplainPlan;
+import org.apache.phoenix.compile.ExplainPlanAttributes;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.mapreduce.index.IndexTool;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
@@ -50,7 +53,6 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
@@ -157,13 +159,22 @@ public class IndexToolForPartialBuildIT extends BaseOwnClusterIT {
             conn.commit();
 
             String selectSql = String.format("SELECT LPAD(UPPER(NAME),11,'x')||'_xyz',ID FROM %s", fullTableName);
-            rs = conn.createStatement().executeQuery("EXPLAIN " + selectSql);
-            String actualExplainPlan = QueryUtil.getExplainPlan(rs);
 
+            ExplainPlan plan = conn.prepareStatement(selectSql)
+                .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+                .getExplainPlan();
+            ExplainPlanAttributes explainPlanAttributes =
+                plan.getPlanStepsAsAttributes();
+            assertEquals("PARALLEL 1-WAY",
+                explainPlanAttributes.getIteratorTypeAndScanSize());
+            assertEquals("FULL SCAN ",
+                explainPlanAttributes.getExplainScanType());
             // assert we are pulling from data table.
-			assertExplainPlan(actualExplainPlan, schemaName, dataTableName, null, isNamespaceEnabled);
+            assertEquals(SchemaUtil.getPhysicalHBaseTableName(schemaName,
+                dataTableName, isNamespaceEnabled).toString(),
+                explainPlanAttributes.getTableName());
 
-			rs = stmt1.executeQuery(selectSql);
+            rs = stmt1.executeQuery(selectSql);
             for (int i = 1; i <= 7; i++) {
                 assertTrue(rs.next());
                 assertEquals("xxUNAME" + i*1000 + "_xyz", rs.getString(1));
@@ -195,10 +206,19 @@ public class IndexToolForPartialBuildIT extends BaseOwnClusterIT {
             upsertRow(stmt1, 9000);
             conn.commit();
 
+            plan = conn.prepareStatement(selectSql)
+                .unwrap(PhoenixPreparedStatement.class).optimizeQuery()
+                .getExplainPlan();
+            explainPlanAttributes =
+                plan.getPlanStepsAsAttributes();
+            assertEquals("PARALLEL 1-WAY",
+                explainPlanAttributes.getIteratorTypeAndScanSize());
+            assertEquals("FULL SCAN ",
+                explainPlanAttributes.getExplainScanType());
             // assert we are pulling from index table.
-            rs = conn.createStatement().executeQuery("EXPLAIN " + selectSql);
-            actualExplainPlan = QueryUtil.getExplainPlan(rs);
-            assertExplainPlan(actualExplainPlan, schemaName, dataTableName, indxTable, isNamespaceEnabled);
+            assertEquals(SchemaUtil.getPhysicalHBaseTableName(schemaName,
+                indxTable, isNamespaceEnabled).toString(),
+                explainPlanAttributes.getTableName());
 
             rs = stmt.executeQuery(selectSql);
 
@@ -211,20 +231,6 @@ public class IndexToolForPartialBuildIT extends BaseOwnClusterIT {
             conn.close();
         }
     }
-    
-	public static void assertExplainPlan(final String actualExplainPlan, String schemaName, String dataTable,
-			String indxTable, boolean isNamespaceMapped) {
-
-		String expectedExplainPlan = "";
-		if (indxTable != null) {
-		    expectedExplainPlan = String.format("CLIENT PARALLEL 1-WAY FULL SCAN OVER %s",
-		            SchemaUtil.getPhysicalHBaseTableName(schemaName, indxTable, isNamespaceMapped));
-		} else {
-			expectedExplainPlan = String.format("CLIENT PARALLEL 1-WAY FULL SCAN OVER %s",
-			        SchemaUtil.getPhysicalHBaseTableName(schemaName, dataTable, isNamespaceMapped));
-		}
-		assertTrue(actualExplainPlan.contains(expectedExplainPlan));
-	}
 
     public String[] getArgValues(String schemaName, String dataTable, String indexName) {
         final List<String> args = Lists.newArrayList();