Posted to commits@phoenix.apache.org by ja...@apache.org on 2014/02/05 07:43:38 UTC

git commit: Backport optimizer fix for point lookups and order by, add auto install of newly named coprocessors

Updated Branches:
  refs/heads/2.2.3 f7e023c7e -> 846c37ffe


Backport optimizer fix for point lookups and order by, add auto install of newly named coprocessors


Project: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/commit/846c37ff
Tree: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/tree/846c37ff
Diff: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/diff/846c37ff

Branch: refs/heads/2.2.3
Commit: 846c37ffe3fc05d8f9f3ca5556abfcb529db4100
Parents: f7e023c
Author: James Taylor <ja...@apache.org>
Authored: Tue Feb 4 22:43:30 2014 -0800
Committer: James Taylor <ja...@apache.org>
Committed: Tue Feb 4 22:43:30 2014 -0800

----------------------------------------------------------------------
 .../org/apache/phoenix/compile/ScanRanges.java  | 20 +++++-
 .../apache/phoenix/optimize/QueryOptimizer.java | 44 +++++++++----
 .../query/ConnectionQueryServicesImpl.java      | 69 ++++++++++++++++++--
 .../org/apache/phoenix/util/SchemaUtil.java     |  1 +
 .../phoenix/compile/QueryOptimizerTest.java     |  9 ++-
 .../end2end/index/ImmutableIndexTest.java       | 21 +++---
 .../end2end/index/MutableSaltedIndexTest.java   | 19 +++---
 7 files changed, 136 insertions(+), 47 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/846c37ff/src/main/java/org/apache/phoenix/compile/ScanRanges.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/compile/ScanRanges.java b/src/main/java/org/apache/phoenix/compile/ScanRanges.java
index 44f4473..98d110e 100644
--- a/src/main/java/org/apache/phoenix/compile/ScanRanges.java
+++ b/src/main/java/org/apache/phoenix/compile/ScanRanges.java
@@ -24,14 +24,14 @@ import java.util.List;
 
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
 import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.util.ScanUtil;
 
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Lists;
+
 
 public class ScanRanges {
     private static final List<List<KeyRange>> EVERYTHING_RANGES = Collections.<List<KeyRange>>emptyList();
@@ -92,6 +92,20 @@ public class ScanRanges {
         return this == NOTHING;
     }
     
+    public boolean isPointLookup() {
+        if (schema == null || forceRangeScan || ranges.size() < schema.getMaxFields()) {
+            return false;
+        }
+        for (List<KeyRange> orRanges : ranges) {
+            for (KeyRange keyRange : orRanges) {
+                if (!keyRange.isSingleKey()) {
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
     /**
      * Use SkipScanFilter under two circumstances:
      * 1) If we have multiple ranges for a given key slot (use of IN)

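For readers skimming the hunk above: isPointLookup() returns true only when every
row-key slot is constrained, and every OR'd range in every slot pins a single key.
A minimal standalone sketch of the same predicate, using a hypothetical SimpleRange
in place of Phoenix's KeyRange (all names below are illustrative, not Phoenix API):

    import java.util.Arrays;
    import java.util.List;

    // Illustrative stand-in for Phoenix's KeyRange: a range pins exactly one
    // key when both bounds are the same inclusive value.
    class SimpleRange {
        final byte[] lower;
        final byte[] upper;
        final boolean lowerInclusive;
        final boolean upperInclusive;

        SimpleRange(byte[] lower, boolean lowerInclusive, byte[] upper, boolean upperInclusive) {
            this.lower = lower;
            this.lowerInclusive = lowerInclusive;
            this.upper = upper;
            this.upperInclusive = upperInclusive;
        }

        boolean isSingleKey() {
            return lowerInclusive && upperInclusive && Arrays.equals(lower, upper);
        }
    }

    class PointLookupRule {
        // Same shape as ScanRanges.isPointLookup() above: there must be one
        // list of OR'd ranges per row-key slot (covering every schema field),
        // and every range in every slot must pin a single key. An open-ended
        // range (e.g. k > 10) or an unconstrained trailing slot disqualifies
        // the scan.
        static boolean isPointLookup(List<List<SimpleRange>> ranges, int maxRowKeyFields, boolean forceRangeScan) {
            if (forceRangeScan || ranges.size() < maxRowKeyFields) {
                return false;
            }
            for (List<SimpleRange> orRanges : ranges) {
                for (SimpleRange range : orRanges) {
                    if (!range.isSingleKey()) {
                        return false;
                    }
                }
            }
            return true;
        }
    }

Under this rule, a query like WHERE pk1 = 'a' AND pk2 IN (1, 2) still qualifies as
a point lookup, since each IN value pins a single key, while WHERE pk1 = 'a' alone
does not when the row key has a second column.
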
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/846c37ff/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index d4c29dd..16149ef 100644
--- a/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -5,7 +5,6 @@ import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
 
-import com.google.common.collect.Lists;
 import org.apache.phoenix.compile.ColumnProjector;
 import org.apache.phoenix.compile.IndexStatementRewriter;
 import org.apache.phoenix.compile.QueryCompiler;
@@ -26,6 +25,8 @@ import org.apache.phoenix.schema.PIndexState;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableType;
 
+import com.google.common.collect.Lists;
+
 public class QueryOptimizer {
     private static final ParseNodeFactory FACTORY = new ParseNodeFactory();
 
@@ -185,22 +186,41 @@ public class QueryOptimizer {
             return firstPlan;
         }
         
+        /**
+         * If we have one or more plans that are just point lookups (i.e. fully
+         * qualified row keys), then favor those first.
+         */
         List<QueryPlan> candidates = Lists.newArrayListWithExpectedSize(plans.size());
-        if (firstPlan.getLimit() == null) {
-            candidates.addAll(plans);
-        } else {
-            for (QueryPlan plan : plans) {
-                // If ORDER BY optimized out (or not present at all)
-                if (plan.getOrderBy().getOrderByExpressions().isEmpty()) {
-                    candidates.add(plan);
-                }
+        for (QueryPlan plan : plans) {
+            if (plan.getContext().getScanRanges().isPointLookup()) {
+                candidates.add(plan);
             }
-            if (candidates.isEmpty()) {
-                candidates.addAll(plans);
+        }
+        /**
+         * If we have one or more plans that remove the ORDER BY, choose from among
+         * those, as the sort is typically the most expensive operation. Once we have
+         * stats, if there's a limit on the query, we might choose a different plan.
+         * For example, if the limit were a very large number and the combination of
+         * other filters applied to the row key were estimated to select fewer rows,
+         * we'd choose that plan instead.
+         */
+        List<QueryPlan> stillCandidates = plans;
+        List<QueryPlan> bestCandidates = candidates;
+        if (!candidates.isEmpty()) {
+            stillCandidates = candidates;
+            bestCandidates = Lists.<QueryPlan>newArrayListWithExpectedSize(candidates.size());
+        }
+        for (QueryPlan plan : stillCandidates) {
+            // If ORDER BY optimized out (or not present at all)
+            if (plan.getOrderBy().getOrderByExpressions().isEmpty()) {
+                bestCandidates.add(plan);
             }
         }
+        if (bestCandidates.isEmpty()) {
+            bestCandidates.addAll(stillCandidates);
+        }
         final int comparisonOfDataVersusIndexTable = select.getHint().hasHint(Hint.USE_DATA_OVER_INDEX_TABLE) ? -1 : 1;
-        Collections.sort(candidates, new Comparator<QueryPlan>() {
+        Collections.sort(bestCandidates, new Comparator<QueryPlan>() {
 
             @Override
             public int compare(QueryPlan plan1, QueryPlan plan2) {

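The hunk above replaces a single limit-based filter with a two-tier preference. A
condensed sketch of that cascade, using a hypothetical Plan interface in place of
QueryPlan (names are illustrative, not Phoenix API):

    import java.util.ArrayList;
    import java.util.List;

    // Hypothetical stand-in for Phoenix's QueryPlan; method names are illustrative.
    interface Plan {
        boolean isPointLookup();        // ScanRanges.isPointLookup() above
        boolean orderByOptimizedOut();  // getOrderBy().getOrderByExpressions().isEmpty()
    }

    class PlanChooser {
        // Two-tier preference with a fallback at each tier: narrow to
        // point-lookup plans if any exist, then within that set to plans whose
        // ORDER BY was optimized out. A tier that matches nothing keeps the
        // prior candidates, so the result is never empty.
        static List<Plan> bestCandidates(List<Plan> plans) {
            List<Plan> pointLookups = new ArrayList<Plan>();
            for (Plan p : plans) {
                if (p.isPointLookup()) {
                    pointLookups.add(p);
                }
            }
            List<Plan> tier1 = pointLookups.isEmpty() ? plans : pointLookups;

            List<Plan> noSort = new ArrayList<Plan>();
            for (Plan p : tier1) {
                if (p.orderByOptimizedOut()) {
                    noSort.add(p);
                }
            }
            return noSort.isEmpty() ? tier1 : noSort;
        }
    }

The surviving candidates are then sorted, with the USE_DATA_OVER_INDEX_TABLE hint
flipping the data-table-versus-index-table comparison shown at the end of the hunk.
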
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/846c37ff/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 304faed..f23aff1 100644
--- a/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -65,12 +65,6 @@ import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
-import com.google.common.collect.Sets;
 import org.apache.hbase.index.Indexer;
 import org.apache.hbase.index.covered.CoveredColumnsIndexBuilder;
 import org.apache.phoenix.compile.MutationPlan;
@@ -112,6 +106,12 @@ import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Sets;
 
 public class ConnectionQueryServicesImpl extends DelegateQueryServices implements ConnectionQueryServices {
     private static final Logger logger = LoggerFactory.getLogger(ConnectionQueryServicesImpl.class);
@@ -452,6 +452,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
       hcd.setKeepDeletedCells(true);
     }
     
+    private static final String OLD_PACKAGE = "com.salesforce.";
+    private static final String NEW_PACKAGE = "org.apache.";
+    
     private HTableDescriptor generateTableDescriptor(byte[] tableName, HTableDescriptor existingDesc, PTableType tableType, Map<String,Object> tableProps, List<Pair<byte[],Map<String,Object>>> families, byte[][] splits) throws SQLException {
         HTableDescriptor descriptor = (existingDesc != null) ? new HTableDescriptor(existingDesc) : new HTableDescriptor(tableName);
         for (Entry<String,Object> entry : tableProps.entrySet()) {
@@ -485,18 +488,23 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
         // The phoenix jar must be available on HBase classpath
         try {
             if (!descriptor.hasCoprocessor(ScanRegionObserver.class.getName())) {
+                descriptor.removeCoprocessor(ScanRegionObserver.class.getName().replace(NEW_PACKAGE, OLD_PACKAGE));
                 descriptor.addCoprocessor(ScanRegionObserver.class.getName(), null, 1, null);
             }
             if (!descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName())) {
+                descriptor.removeCoprocessor(UngroupedAggregateRegionObserver.class.getName().replace(NEW_PACKAGE, OLD_PACKAGE));
                 descriptor.addCoprocessor(UngroupedAggregateRegionObserver.class.getName(), null, 1, null);
             }
             if (!descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName())) {
+                descriptor.removeCoprocessor(GroupedAggregateRegionObserver.class.getName().replace(NEW_PACKAGE, OLD_PACKAGE));
                 descriptor.addCoprocessor(GroupedAggregateRegionObserver.class.getName(), null, 1, null);
             }
             if (!descriptor.hasCoprocessor(HashJoiningRegionObserver.class.getName())) {
+                descriptor.removeCoprocessor(HashJoiningRegionObserver.class.getName().replace(NEW_PACKAGE, OLD_PACKAGE));
                 descriptor.addCoprocessor(HashJoiningRegionObserver.class.getName(), null, 1, null);
             }
             if (!descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName())) {
+                descriptor.removeCoprocessor(ServerCachingEndpointImpl.class.getName().replace(NEW_PACKAGE, OLD_PACKAGE));
                 descriptor.addCoprocessor(ServerCachingEndpointImpl.class.getName(), null, 1, null);
             }
             // TODO: better encapsulation for this
@@ -514,10 +522,13 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             if (SchemaUtil.isMetaTable(tableName)) {
                 descriptor.setValue(SchemaUtil.UPGRADE_TO_2_0, Boolean.TRUE.toString());
                 descriptor.setValue(SchemaUtil.UPGRADE_TO_2_1, Boolean.TRUE.toString());
+                descriptor.setValue(SchemaUtil.UPGRADE_TO_2_2, Boolean.TRUE.toString());
                 if (!descriptor.hasCoprocessor(MetaDataEndpointImpl.class.getName())) {
+                    descriptor.removeCoprocessor(MetaDataEndpointImpl.class.getName().replace(NEW_PACKAGE, OLD_PACKAGE));
                     descriptor.addCoprocessor(MetaDataEndpointImpl.class.getName(), null, 1, null);
                 }
                 if (!descriptor.hasCoprocessor(MetaDataRegionObserver.class.getName())) {
+                    descriptor.removeCoprocessor(MetaDataRegionObserver.class.getName().replace(NEW_PACKAGE, OLD_PACKAGE));
                     descriptor.addCoprocessor(MetaDataRegionObserver.class.getName(), null, 2, null);
                 }
             }
@@ -655,6 +666,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 boolean updateTo2_0 = false;
                 boolean updateTo1_2 = false;
                 boolean updateTo2_1 = false;
+                boolean updateTo2_2 = false;
                 if (isMetaTable) {
                     /*
                      *  FIXME: remove this once everyone has been upgraded to v 0.94.4+
@@ -670,6 +682,7 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     
                     updateTo2_0 = existingDesc.getValue(SchemaUtil.UPGRADE_TO_2_0) == null;
                     updateTo2_1 = existingDesc.getValue(SchemaUtil.UPGRADE_TO_2_1) == null;
+                    updateTo2_2 = existingDesc.getValue(SchemaUtil.UPGRADE_TO_2_2) == null;
                 }
                 
                 // We'll do this alter at the end of the upgrade
@@ -695,6 +708,9 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 if (updateTo2_1 && !updateTo2_0) {
                     upgradeTablesFrom2_0to2_1(admin, newDesc);
                 }
+                if (updateTo2_2) {
+                    upgradeTo2_2(admin);
+                }
                 return false;
             }
 
@@ -778,6 +794,47 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
     }
 
     /**
+     * Walk through all existing tables and install new coprocessors
+     * @param admin
+     * @throws IOException
+     * @throws SQLException
+     */
+    private void upgradeTo2_2(HBaseAdmin admin) throws IOException, SQLException {
+        if (logger.isInfoEnabled()) {
+            logger.info("Upgrading tables from Phoenix 2.x to Apache Phoenix 2.2.3");
+        }
+        /* Use a regular HBase scan instead of a Phoenix query, because the jar on
+         * the server may not be compatible (we don't know yet) and this is our one
+         * chance to do the conversion automatically.
+         */
+        Scan scan = new Scan();
+        scan.addColumn(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES);
+        scan.addColumn(TABLE_FAMILY_BYTES, DATA_TABLE_NAME_BYTES);
+        SingleColumnValueFilter filter = new SingleColumnValueFilter(TABLE_FAMILY_BYTES, TABLE_TYPE_BYTES, CompareOp.GREATER_OR_EQUAL, PDataType.CHAR.toBytes("a"));
+        filter.setFilterIfMissing(true);
+        // Add filter so that we only get the table row and not the column rows
+        scan.setFilter(filter);
+        HTableInterface table = HBaseFactoryProvider.getHTableFactory().getTable(TYPE_TABLE_NAME_BYTES, connection, getExecutor());
+        ResultScanner scanner = table.getScanner(scan);
+        Result result = null;
+        while ((result = scanner.next()) != null) {
+            byte[] rowKey = result.getRow();
+            byte[][] rowKeyMetaData = new byte[2][];
+            getVarChars(rowKey, rowKeyMetaData);
+            byte[] schemaBytes = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
+            byte[] tableBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
+            byte[] tableName = SchemaUtil.getTableNameAsBytes(schemaBytes, tableBytes);
+            if (!SchemaUtil.isMetaTable(tableName)) {
+                HTableDescriptor existingDesc = admin.getTableDescriptor(tableName);
+                HTableDescriptor newDesc = generateTableDescriptor(tableName, existingDesc, PTableType.VIEW, Collections.<String,Object>emptyMap(), Collections.<Pair<byte[],Map<String,Object>>>emptyList(), null);
+                admin.disableTable(tableName);
+                admin.modifyTable(tableName, newDesc);
+                admin.enableTable(tableName);
+            }
+        }
+    }
+        
+    /**
      * FIXME: Temporary code to convert tables from 2.0 to 2.1 by:
      * 1) adding the new coprocessors for mutable secondary indexing
      * 2) add a ":" prefix to any index column names that are for data table pk columns

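Each coprocessor block in the hunks above repeats the same rename-and-reinstall
idiom: if the coprocessor isn't registered under its new org.apache name, drop any
stale com.salesforce registration and add the new one. A hedged sketch of that
idiom factored into a helper (the helper itself is hypothetical; the commit inlines
the pattern once per coprocessor):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HTableDescriptor;

    class CoprocessorUpgrade {
        private static final String OLD_PACKAGE = "com.salesforce.";
        private static final String NEW_PACKAGE = "org.apache.";

        // Drop the pre-rename (com.salesforce.*) registration, if any, and
        // install the coprocessor under its new org.apache.* name at the given
        // priority (1 for most observers above, 2 for MetaDataRegionObserver).
        static void replaceCoprocessor(HTableDescriptor descriptor, Class<?> coprocessor, int priority)
                throws IOException {
            String newName = coprocessor.getName();
            if (!descriptor.hasCoprocessor(newName)) {
                descriptor.removeCoprocessor(newName.replace(NEW_PACKAGE, OLD_PACKAGE));
                descriptor.addCoprocessor(newName, null, priority, null);
            }
        }
    }

As used here, HTableDescriptor.removeCoprocessor appears safe to call
unconditionally, doing nothing when the old class name isn't present; upgradeTo2_2
then pushes the regenerated descriptor to every non-meta table via the
disable/modifyTable/enable cycle shown above.
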
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/846c37ff/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index 0e3127c..483e482 100644
--- a/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -522,6 +522,7 @@ public class SchemaUtil {
     public static final String UPGRADE_TO_2_0 = "UpgradeTo20";
     public static final Integer SYSTEM_TABLE_NULLABLE_VAR_LENGTH_COLUMNS = 3;
     public static final String UPGRADE_TO_2_1 = "UpgradeTo21";
+    public static final String UPGRADE_TO_2_2 = "UpgradeTo22";
 
     public static boolean isUpgradeTo2Necessary(ConnectionQueryServices connServices) throws SQLException {
         HTableInterface htable = connServices.getTable(PhoenixDatabaseMetaData.TYPE_TABLE_NAME_BYTES);

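The new UpgradeTo22 key follows the same once-per-cluster marker protocol as
UpgradeTo20 and UpgradeTo21, as wired up in the ConnectionQueryServicesImpl hunks
above. A minimal sketch of that handshake, with a hypothetical UpgradeMarker helper
(not Phoenix API):

    import org.apache.hadoop.hbase.HTableDescriptor;

    class UpgradeMarker {
        static final String UPGRADE_TO_2_2 = "UpgradeTo22";

        // The migration runs once per cluster: absence of the marker on the
        // existing Phoenix metadata table descriptor means the 2.2 upgrade
        // hasn't happened yet.
        static boolean upgradeNeeded(HTableDescriptor existingMetaDesc) {
            return existingMetaDesc.getValue(UPGRADE_TO_2_2) == null;
        }

        // The regenerated descriptor carries the marker so that subsequent
        // connections skip the migration.
        static void markUpgraded(HTableDescriptor newMetaDesc) {
            newMetaDesc.setValue(UPGRADE_TO_2_2, Boolean.TRUE.toString());
        }
    }

In the hunks above, upgradeTo2_2(admin) runs only when the marker is missing, and
generateTableDescriptor sets it on the meta table so the walk happens exactly once.
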
http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/846c37ff/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java b/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java
index 6988597..660420c 100644
--- a/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java
+++ b/src/test/java/org/apache/phoenix/compile/QueryOptimizerTest.java
@@ -6,12 +6,11 @@ import static org.junit.Assert.assertFalse;
 import java.sql.Connection;
 import java.sql.DriverManager;
 
-import org.junit.Test;
-
 import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
 import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
 
 public class QueryOptimizerTest extends BaseConnectionlessQueryTest {
     
@@ -138,7 +137,7 @@ public class QueryOptimizerTest extends BaseConnectionlessQueryTest {
         conn.createStatement().execute("CREATE INDEX idx ON t(v1)");
         PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
         QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k = 30 ORDER BY v1 LIMIT 5");
-        assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString());
+        assertEquals("T", plan.getTableRef().getTable().getTableName().getString());
     }
     
     @Test
@@ -148,7 +147,7 @@ public class QueryOptimizerTest extends BaseConnectionlessQueryTest {
         conn.createStatement().execute("CREATE INDEX idx ON t(v1)");
         PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
         QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k = 30 ORDER BY v1, k DESC LIMIT 5");
-        assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString());
+        assertEquals("T", plan.getTableRef().getTable().getTableName().getString());
     }
     
     @Test
@@ -168,7 +167,7 @@ public class QueryOptimizerTest extends BaseConnectionlessQueryTest {
         conn.createStatement().execute("CREATE INDEX idx ON t(v1, k)");
         PhoenixStatement stmt = conn.createStatement().unwrap(PhoenixStatement.class);
         QueryPlan plan = stmt.optimizeQuery("SELECT k FROM t WHERE k = 30 ORDER BY v1, k LIMIT 5");
-        assertEquals("IDX", plan.getTableRef().getTable().getTableName().getString());
+        assertEquals("T", plan.getTableRef().getTable().getTableName().getString());
     }
     
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/846c37ff/src/test/java/org/apache/phoenix/end2end/index/ImmutableIndexTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/phoenix/end2end/index/ImmutableIndexTest.java b/src/test/java/org/apache/phoenix/end2end/index/ImmutableIndexTest.java
index ff87b40..e87ba89 100644
--- a/src/test/java/org/apache/phoenix/end2end/index/ImmutableIndexTest.java
+++ b/src/test/java/org/apache/phoenix/end2end/index/ImmutableIndexTest.java
@@ -36,10 +36,6 @@ import java.sql.SQLException;
 import java.util.Map;
 import java.util.Properties;
 
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.Maps;
 import org.apache.phoenix.end2end.BaseHBaseManagedTimeTest;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
@@ -47,6 +43,10 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
 
 
 public class ImmutableIndexTest extends BaseHBaseManagedTimeTest{
@@ -228,13 +228,12 @@ public class ImmutableIndexTest extends BaseHBaseManagedTimeTest{
         // Turns into an ORDER BY, which could be bad if lots of data is
         // being returned. Without stats we don't know. The alternative
         // would be a full table scan.
-        expectedPlan = indexSaltBuckets == null ? 
-            ("CLIENT PARALLEL 1-WAY RANGE SCAN OVER I [*] - [~'x']\n" + 
-             "    SERVER TOP -1 ROWS SORTED BY [K]\n" + 
-             "CLIENT MERGE SORT") :
-            ("CLIENT PARALLEL 4-WAY SKIP SCAN ON 4 RANGES OVER I [0,*] - [3,~'x']\n" + 
-             "    SERVER TOP -1 ROWS SORTED BY [K]\n" + 
-             "CLIENT MERGE SORT");
+        expectedPlan = tableSaltBuckets == null ? 
+                ("CLIENT PARALLEL 1-WAY FULL SCAN OVER T\n" + 
+                        "    SERVER FILTER BY V >= 'x'") :
+            ("CLIENT PARALLEL 3-WAY FULL SCAN OVER T\n" + 
+                    "    SERVER FILTER BY V >= 'x'\n" + 
+                    "CLIENT MERGE SORT");
         assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
         
         // Will use data table now, since there's a LIMIT clause and

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/846c37ff/src/test/java/org/apache/phoenix/end2end/index/MutableSaltedIndexTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/phoenix/end2end/index/MutableSaltedIndexTest.java b/src/test/java/org/apache/phoenix/end2end/index/MutableSaltedIndexTest.java
index 5c5ffd6..cb28d53 100644
--- a/src/test/java/org/apache/phoenix/end2end/index/MutableSaltedIndexTest.java
+++ b/src/test/java/org/apache/phoenix/end2end/index/MutableSaltedIndexTest.java
@@ -31,13 +31,13 @@ import java.sql.ResultSet;
 import java.util.Map;
 import java.util.Properties;
 
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
 
 
 public class MutableSaltedIndexTest extends BaseMutableIndexTest{
@@ -152,13 +152,12 @@ public class MutableSaltedIndexTest extends BaseMutableIndexTest{
         // Turns into an ORDER BY, which could be bad if lots of data is
         // being returned. Without stats we don't know. The alternative
         // would be a full table scan.
-        expectedPlan = indexSaltBuckets == null ? 
-            ("CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + INDEX_TABLE_FULL_NAME + " [*] - [~'x']\n" + 
-             "    SERVER TOP -1 ROWS SORTED BY [K]\n" + 
-             "CLIENT MERGE SORT") :
-            ("CLIENT PARALLEL 4-WAY SKIP SCAN ON 4 RANGES OVER " + INDEX_TABLE_FULL_NAME + " [0,*] - [3,~'x']\n" + 
-             "    SERVER TOP -1 ROWS SORTED BY [K]\n" + 
-             "CLIENT MERGE SORT");
+        expectedPlan = tableSaltBuckets == null ? 
+            ("CLIENT PARALLEL 1-WAY FULL SCAN OVER T\n" + 
+                    "    SERVER FILTER BY V >= 'x'") :
+            ("CLIENT PARALLEL 3-WAY FULL SCAN OVER T\n" + 
+                    "    SERVER FILTER BY V >= 'x'\n" + 
+                    "CLIENT MERGE SORT");
         assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
         
         // Will use data table now, since there's a LIMIT clause and