You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@phoenix.apache.org by ja...@apache.org on 2014/02/04 23:09:52 UTC

git commit: Better abstraction for ScanRanges, always flatten skip scan if point lookup, reflect point lookup in explain plan, fix tests

Updated Branches:
  refs/heads/master bc21d8e41 -> 1a6811bf7


Better abstraction for ScanRanges, always flatten skip scan if point lookup, reflect point lookup in explain plan, fix tests


Project: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/commit/1a6811bf
Tree: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/tree/1a6811bf
Diff: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/diff/1a6811bf

Branch: refs/heads/master
Commit: 1a6811bf7ac434d32cf577fc1239f5eddbbe2db4
Parents: bc21d8e
Author: James Taylor <ja...@apache.org>
Authored: Tue Feb 4 14:09:45 2014 -0800
Committer: James Taylor <ja...@apache.org>
Committed: Tue Feb 4 14:09:45 2014 -0800

----------------------------------------------------------------------
 .../apache/phoenix/compile/DeleteCompiler.java  |   9 +-
 .../org/apache/phoenix/compile/ScanRanges.java  |  54 +++--
 .../apache/phoenix/compile/WhereOptimizer.java  |  53 ++---
 .../apache/phoenix/iterate/ExplainTable.java    |  35 ++--
 .../apache/phoenix/optimize/QueryOptimizer.java |   1 +
 .../apache/phoenix/schema/MetaDataClient.java   |  10 +-
 .../org/apache/phoenix/schema/SaltingUtil.java  |  36 +---
 .../org/apache/phoenix/util/SchemaUtil.java     |   5 +-
 .../phoenix/compile/WhereClauseCompileTest.java |  57 +++---
 .../compile/WhereClauseOptimizerTest.java       |   8 +-
 .../apache/phoenix/end2end/QueryPlanTest.java   |   4 +-
 .../phoenix/end2end/index/BaseIndexTest.java    |  31 +++
 .../end2end/index/BaseMutableIndexTest.java     |  10 +-
 .../end2end/index/ImmutableIndexTest.java       | 149 --------------
 .../phoenix/end2end/index/MutableIndexTest.java |   8 +-
 .../end2end/index/MutableSaltedIndexTest.java   | 183 -----------------
 .../phoenix/end2end/index/SaltedIndexTest.java  | 203 +++++++++++++++++++
 17 files changed, 374 insertions(+), 482 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
index b71a836..e22013b 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/DeleteCompiler.java
@@ -55,6 +55,7 @@ import org.apache.phoenix.parse.ParseNode;
 import org.apache.phoenix.parse.ParseNodeFactory;
 import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
@@ -232,10 +233,12 @@ public class DeleteCompiler {
 
                 @Override
                 public MutationState execute() {
-                    List<byte[]> keys = context.getScanRanges().getPointKeys(table.getBucketNum());
+                    // We have a point lookup, so we know we have a simple set of fully qualified
+                    // keys for our ranges
+                    List<KeyRange> keys = context.getScanRanges().getRanges().get(0);
                     Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutation = Maps.newHashMapWithExpectedSize(keys.size());
-                    for (byte[] key : keys) {
-                        mutation.put(new ImmutableBytesPtr(key), PRow.DELETE_MARKER);
+                    for (KeyRange key : keys) {
+                        mutation.put(new ImmutableBytesPtr(key.getLowerRange()), PRow.DELETE_MARKER);
                     }
                     return new MutationState(tableRef, mutation, 0, maxSize, connection);
                 }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
index 73edaf8..d4dcbbd 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ScanRanges.java
@@ -40,28 +40,46 @@ import com.google.common.collect.Lists;
 public class ScanRanges {
     private static final List<List<KeyRange>> EVERYTHING_RANGES = Collections.<List<KeyRange>>emptyList();
     private static final List<List<KeyRange>> NOTHING_RANGES = Collections.<List<KeyRange>>singletonList(Collections.<KeyRange>singletonList(KeyRange.EMPTY_RANGE));
-    public static final ScanRanges EVERYTHING = new ScanRanges(EVERYTHING_RANGES,null,false);
-    public static final ScanRanges NOTHING = new ScanRanges(NOTHING_RANGES,null,false);
+    public static final ScanRanges EVERYTHING = new ScanRanges(EVERYTHING_RANGES,null,false, false);
+    public static final ScanRanges NOTHING = new ScanRanges(NOTHING_RANGES,null,false, false);
 
     public static ScanRanges create(List<List<KeyRange>> ranges, RowKeySchema schema) {
-        return create(ranges, schema, false);
+        return create(ranges, schema, false, null);
     }
     
-    public static ScanRanges create(List<List<KeyRange>> ranges, RowKeySchema schema, boolean forceRangeScan) {
-        if (ranges.isEmpty()) {
+    public static ScanRanges create(List<List<KeyRange>> ranges, RowKeySchema schema, boolean forceRangeScan, Integer nBuckets) {
+        int offset = nBuckets == null ? 0 : 1;
+        if (ranges.size() == offset) {
             return EVERYTHING;
-        } else if (ranges.size() == 1 && ranges.get(0).size() == 1 && ranges.get(0).get(0) == KeyRange.EMPTY_RANGE) {
+        } else if (ranges.size() == 1 + offset && ranges.get(offset).size() == 1 && ranges.get(offset).get(0) == KeyRange.EMPTY_RANGE) {
             return NOTHING;
         }
-        return new ScanRanges(ranges, schema, forceRangeScan);
+        boolean isPointLookup = !forceRangeScan && ScanRanges.isPointLookup(schema, ranges);
+        if (isPointLookup) {
+            List<byte[]> keys = ScanRanges.getPointKeys(ranges, schema, nBuckets);
+            List<KeyRange> keyRanges = Lists.newArrayListWithExpectedSize(keys.size());
+            for (byte[] key : keys) {
+                keyRanges.add(KeyRange.getKeyRange(key));
+            }
+            ranges = Collections.singletonList(keyRanges);
+            schema = SchemaUtil.VAR_BINARY_SCHEMA;
+        } else if (nBuckets != null) {
+            List<List<KeyRange>> saltedRanges = Lists.newArrayListWithExpectedSize(ranges.size());
+            saltedRanges.add(SaltingUtil.generateAllSaltingRanges(nBuckets));
+            saltedRanges.addAll(ranges.subList(1, ranges.size()));
+            ranges = saltedRanges;
+        }
+        return new ScanRanges(ranges, schema, forceRangeScan, isPointLookup);
     }
 
     private SkipScanFilter filter;
     private final List<List<KeyRange>> ranges;
     private final RowKeySchema schema;
     private final boolean forceRangeScan;
+    private final boolean isPointLookup;
 
-    private ScanRanges (List<List<KeyRange>> ranges, RowKeySchema schema, boolean forceRangeScan) {
+    private ScanRanges (List<List<KeyRange>> ranges, RowKeySchema schema, boolean forceRangeScan, boolean isPointLookup) {
+        this.isPointLookup = isPointLookup;
         List<List<KeyRange>> sortedRanges = Lists.newArrayListWithExpectedSize(ranges.size());
         for (int i = 0; i < ranges.size(); i++) {
             List<KeyRange> sorted = Lists.newArrayList(ranges.get(i));
@@ -106,6 +124,9 @@ public class ScanRanges {
         if (forceRangeScan) {
             return false;
         }
+        if (isPointLookup) {
+            return getPointLookupCount() > 1;
+        }
         boolean hasRangeKey = false, useSkipScan = false;
         for (List<KeyRange> orRanges : ranges) {
             useSkipScan |= orRanges.size() > 1 | hasRangeKey;
@@ -119,7 +140,7 @@ public class ScanRanges {
         return false;
     }
 
-    public static boolean isPointLookup(RowKeySchema schema, List<List<KeyRange>> ranges) {
+    private static boolean isPointLookup(RowKeySchema schema, List<List<KeyRange>> ranges) {
         if (ranges.size() < schema.getMaxFields()) {
             return false;
         }
@@ -142,14 +163,7 @@ public class ScanRanges {
         return idx >= 0;
     }
 
-    /**
-     * @return true if this represents a set of complete keys
-     */
-    public List<byte[]> getPointKeys(Integer bucketNum) {
-        return getPointKeys(this.getRanges(), this.getSchema(), bucketNum);
-    }
-    
-    public static List<byte[]> getPointKeys(List<List<KeyRange>> ranges, RowKeySchema schema, Integer bucketNum) {
+    private static List<byte[]> getPointKeys(List<List<KeyRange>> ranges, RowKeySchema schema, Integer bucketNum) {
         if (ranges == null || ranges.isEmpty()) {
             return Collections.emptyList();
         }
@@ -179,7 +193,11 @@ public class ScanRanges {
      * @return true if this represents a set of complete keys
      */
     public boolean isPointLookup() {
-        return schema != null && isPointLookup(schema, ranges);
+        return isPointLookup;
+    }
+    
+    public int getPointLookupCount() {
+        return isPointLookup ? ranges.get(0).size() : 0;
     }
 
     public void setScanStartStopRow(Scan scan) {

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
index bc868b1..5072130 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
@@ -26,13 +26,11 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
 
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.expression.AndExpression;
 import org.apache.phoenix.expression.BaseTerminalExpression;
 import org.apache.phoenix.expression.CoerceExpression;
@@ -135,18 +133,28 @@ public class WhereOptimizer {
             extractNodes = new HashSet<Expression>(table.getPKColumns().size());
         }
 
+        int pkPos = -1;
+        Integer nBuckets = table.getBucketNum();
         // We're fully qualified if all columns except the salt column are specified
-        int fullyQualifiedColumnCount = table.getPKColumns().size() - (table.getBucketNum() == null ? 0 : 1);
-        int pkPos = table.getBucketNum() == null ? -1 : 0;
-        LinkedList<List<KeyRange>> cnf = new LinkedList<List<KeyRange>>();
+        int fullyQualifiedColumnCount = table.getPKColumns().size() - (nBuckets == null ? 0 : 1);
         RowKeySchema schema = table.getRowKeySchema();
+        List<List<KeyRange>> cnf = Lists.newArrayListWithExpectedSize(schema.getMaxFields());
         boolean forcedSkipScan = statement.getHint().hasHint(Hint.SKIP_SCAN);
         boolean forcedRangeScan = statement.getHint().hasHint(Hint.RANGE_SCAN);
         boolean hasUnboundedRange = false;
         boolean hasAnyRange = false;
         
         Iterator<KeyExpressionVisitor.KeySlot> iterator = keySlots.iterator();
-        // add tenant data isolation for tenant-specific tables
+        // Add placeholder for salt byte ranges
+        if (nBuckets != null) {
+            cnf.add(SALT_PLACEHOLDER);
+            // Increment the pkPos, as the salt column is in the row schema
+            // Do not increment the iterator, though, as there will never be
+            // an expression in the keySlots for the salt column
+            pkPos++;
+        }
+        
+        // Add tenant data isolation for tenant-specific tables
         if (tenantId != null && table.isMultiTenant()) {
             KeyRange tenantIdKeyRange = KeyRange.getKeyRange(tenantId.getBytes());
             cnf.add(singletonList(tenantIdKeyRange));
@@ -171,14 +179,17 @@ public class WhereOptimizer {
             }
+            // We support (a,b) IN ((1,2),(3,4)), so in this case we switch to a flattened schema
             if (fullyQualifiedColumnCount > 1 && slot.getPKSpan() == fullyQualifiedColumnCount && slot.getKeyRanges().size() > 1) {
-                schema = SchemaUtil.VAR_BINARY_SCHEMA;
+                schema = nBuckets == null ? SchemaUtil.VAR_BINARY_SCHEMA : SaltingUtil.VAR_BINARY_SALTED_SCHEMA;
             }
             KeyPart keyPart = slot.getKeyPart();
             pkPos = slot.getPKPosition();
             List<KeyRange> keyRanges = slot.getKeyRanges();
             cnf.add(keyRanges);
             for (KeyRange range : keyRanges) {
-                hasUnboundedRange |= range.isUnbound();
+                if (range.isUnbound()) {
+                    hasUnboundedRange = true;
+                    break;
+                }
             }
             
             // Will be null in cases for which only part of the expression was factored out here
@@ -200,32 +211,8 @@ public class WhereOptimizer {
             }
             hasAnyRange |= keyRanges.size() > 1 || (keyRanges.size() == 1 && !keyRanges.get(0).isSingleKey());
         }
-        List<List<KeyRange>> ranges = cnf;
-        if (table.getBucketNum() != null) {
-            if (!cnf.isEmpty()) {
-                // If we have all single keys, we can optimize by adding the salt byte up front
-                if (schema == SchemaUtil.VAR_BINARY_SCHEMA) {
-                    ranges = SaltingUtil.setSaltByte(ranges, table.getBucketNum());
-                } else {
-                    List<KeyRange> saltRanges = SALT_PLACEHOLDER;
-                    cnf.addFirst(saltRanges);
-                    if (ScanRanges.isPointLookup(schema, cnf)) {
-                        List<byte[]> keys = ScanRanges.getPointKeys(cnf, schema, table.getBucketNum());
-                        Collections.sort(keys, Bytes.BYTES_COMPARATOR);
-                        List<KeyRange> keyRanges = Lists.newArrayListWithExpectedSize(keys.size());
-                        for (byte[] key : keys) {
-                            keyRanges.add(KeyRange.getKeyRange(key));
-                        }
-                        ranges = Collections.singletonList(keyRanges);
-                        schema = SchemaUtil.VAR_BINARY_SCHEMA;
-                    } else {
-                        cnf.set(0, SaltingUtil.generateAllSaltingRanges(table.getBucketNum()));
-                    }
-                }
-            }
-        }
         context.setScanRanges(
-                ScanRanges.create(ranges, schema, statement.getHint().hasHint(Hint.RANGE_SCAN)),
+                ScanRanges.create(cnf, schema, statement.getHint().hasHint(Hint.RANGE_SCAN), nBuckets),
                 keySlots.getMinMaxRange());
         if (whereClause == null) {
             return null;

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
index f32b377..3eba7da 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/iterate/ExplainTable.java
@@ -32,8 +32,6 @@ import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter;
 import org.apache.hadoop.hbase.filter.PageFilter;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
-
-import com.google.common.collect.Iterators;
 import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
 import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.compile.StatementContext;
@@ -45,6 +43,8 @@ import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.util.StringUtil;
 
+import com.google.common.collect.Iterators;
+
 
 public abstract class ExplainTable {
     private static final List<KeyRange> EVERYTHING = Collections.singletonList(KeyRange.EVERYTHING_RANGE);
@@ -65,19 +65,24 @@ public abstract class ExplainTable {
     private boolean explainSkipScan(StringBuilder buf) {
         ScanRanges scanRanges = context.getScanRanges();
         if (scanRanges.useSkipScanFilter()) {
-            buf.append("SKIP SCAN ");
-            int count = 1;
-            boolean hasRanges = false;
-            for (List<KeyRange> ranges : scanRanges.getRanges()) {
-                count *= ranges.size();
-                for (KeyRange range : ranges) {
-                    hasRanges |= !range.isSingleKey();
+            int keyCount = scanRanges.getPointLookupCount();
+            if (keyCount == 0) {
+                buf.append("SKIP SCAN ");
+                int count = 1;
+                boolean hasRanges = false;
+                for (List<KeyRange> ranges : scanRanges.getRanges()) {
+                    count *= ranges.size();
+                    for (KeyRange range : ranges) {
+                        hasRanges |= !range.isSingleKey();
+                    }
                 }
+                buf.append("ON ");
+                buf.append(count);
+                buf.append(hasRanges ? " RANGE" : " KEY");
+                buf.append(count > 1 ? "S " : " ");
+            } else {
+                buf.append("POINT LOOKUP ON " + keyCount + " KEY" + (keyCount > 1 ? "S " : " "));
             }
-            buf.append("ON ");
-            buf.append(count);
-            buf.append(hasRanges ? " RANGE" : " KEY");
-            buf.append(count > 1 ? "S " : " ");
             return true;
         } else {
             buf.append("RANGE SCAN ");
@@ -95,7 +100,9 @@ public abstract class ExplainTable {
             hasSkipScanFilter = explainSkipScan(buf);
         }
         buf.append("OVER " + tableRef.getTable().getName().getString());
-        appendKeyRanges(buf);
+        if (!scanRanges.isPointLookup()) {
+            appendKeyRanges(buf);
+        }
         planSteps.add(buf.toString());
         
         Scan scan = context.getScan();

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
index 7dc6b04..2aa45d7 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/optimize/QueryOptimizer.java
@@ -45,6 +45,7 @@ public class QueryOptimizer {
     public QueryPlan optimize(SelectStatement select, PhoenixStatement statement, List<? extends PDatum> targetColumns, ParallelIteratorFactory parallelIteratorFactory) throws SQLException {
         QueryCompiler compiler = new QueryCompiler(statement, targetColumns, parallelIteratorFactory);
         QueryPlan dataPlan = compiler.compile(select);
+        // TODO: consider not even compiling index plans if we have a point lookup
         if (!useIndexes || select.getFrom().size() > 1) {
             return dataPlan;
         }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 9240186..86fd4ab 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1221,6 +1221,10 @@ public class MetaDataClient {
             TableName tableNameNode = statement.getTable().getName();
             String schemaName = tableNameNode.getSchemaName();
             String tableName = tableNameNode.getTableName();
+            // Outside of retry loop, as we're removing the property and wouldn't find it the second time
+            Boolean isImmutableRowsProp = (Boolean)statement.getProps().remove(PTable.IS_IMMUTABLE_ROWS_PROP_NAME);
+            Boolean multiTenantProp = (Boolean)statement.getProps().remove(PhoenixDatabaseMetaData.MULTI_TENANT);
+            boolean disableWAL = Boolean.TRUE.equals(statement.getProps().remove(DISABLE_WAL));
             
             boolean retried = false;
             while (true) {
@@ -1247,17 +1251,14 @@ public class MetaDataClient {
                 }
                           
                 boolean isImmutableRows = table.isImmutableRows();
-                Boolean isImmutableRowsProp = (Boolean)statement.getProps().remove(PTable.IS_IMMUTABLE_ROWS_PROP_NAME);
                 if (isImmutableRowsProp != null) {
                     isImmutableRows = isImmutableRowsProp;
                 }
                 boolean multiTenant = table.isMultiTenant();
-                Boolean multiTenantProp = (Boolean) statement.getProps().remove(PhoenixDatabaseMetaData.MULTI_TENANT);
                 if (multiTenantProp != null) {
                     multiTenant = Boolean.TRUE.equals(multiTenantProp);
                 }
                 
-                boolean disableWAL = Boolean.TRUE.equals(statement.getProps().remove(DISABLE_WAL));
                 if (statement.getProps().get(PhoenixDatabaseMetaData.SALT_BUCKETS) != null) {
                     throw new SQLExceptionInfo.Builder(SQLExceptionCode.SALT_ONLY_ON_CREATE_TABLE)
                     .setTableName(table.getName().getString()).build().buildException();
@@ -1313,7 +1314,8 @@ public class MetaDataClient {
                         connection.rollback();
                     }
                 } else {
-                 // Only support setting IMMUTABLE_ROWS=true and DISABLE_WAL=true on ALTER TABLE SET command
+                    // Only support setting IMMUTABLE_ROWS=true and DISABLE_WAL=true on ALTER TABLE SET command
+                    // TODO: support setting HBase table properties too
                     if (!statement.getProps().isEmpty()) {
                         throw new SQLExceptionInfo.Builder(SQLExceptionCode.SET_UNSUPPORTED_PROP_ON_ALTER_TABLE)
                         .setTableName(table.getName().getString()).build().buildException();

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
index 53a6ac0..0e8dced 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
@@ -19,12 +19,12 @@
  */
 package org.apache.phoenix.schema;
 
-import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.query.KeyRange;
+import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder;
+import org.apache.phoenix.util.SchemaUtil;
 
 import com.google.common.collect.Lists;
 
@@ -39,6 +39,9 @@ public class SaltingUtil {
     public static final String SALTED_ROW_KEY_NAME = "_SALTED_KEY";
     public static final PColumnImpl SALTING_COLUMN = new PColumnImpl(
             PNameFactory.newName(SALTING_COLUMN_NAME), null, PDataType.BINARY, 1, 0, false, 0, null, 0);
+    public static final RowKeySchema VAR_BINARY_SALTED_SCHEMA = new RowKeySchemaBuilder(1)
+        .addField(SALTING_COLUMN, false, null)
+        .addField(SchemaUtil.VAR_BINARY_DATUM, false, null).build();
 
     public static List<KeyRange> generateAllSaltingRanges(int bucketNum) {
         List<KeyRange> allRanges = Lists.newArrayListWithExpectedSize(bucketNum);
@@ -70,12 +73,12 @@ public class SaltingUtil {
 
     // Generate the bucket byte given a byte array and the number of buckets.
     public static byte getSaltingByte(byte[] value, int offset, int length, int bucketNum) {
-        int hash = hashCode(value, offset, length);
+        int hash = calculateHashCode(value, offset, length);
         byte bucketByte = (byte) ((Math.abs(hash) % bucketNum));
         return bucketByte;
     }
 
-    private static int hashCode(byte a[], int offset, int length) {
+    private static int calculateHashCode(byte a[], int offset, int length) {
         if (a == null)
             return 0;
         int result = 1;
@@ -85,31 +88,6 @@ public class SaltingUtil {
         return result;
     }
 
-    public static List<List<KeyRange>> setSaltByte(List<List<KeyRange>> ranges, int bucketNum) {
-        if (ranges == null || ranges.isEmpty()) {
-            return ScanRanges.NOTHING.getRanges();
-        }
-        for (int i = 1; i < ranges.size(); i++) {
-            List<KeyRange> range = ranges.get(i);
-            if (range != null && !range.isEmpty()) {
-                throw new IllegalStateException();
-            }
-        }
-        List<KeyRange> newRanges = Lists.newArrayListWithExpectedSize(ranges.size());
-        for (KeyRange range : ranges.get(0)) {
-            if (!range.isSingleKey()) {
-                throw new IllegalStateException();
-            }
-            byte[] key = range.getLowerRange();
-            byte saltByte = SaltingUtil.getSaltingByte(key, 0, key.length, bucketNum);
-            byte[] saltedKey = new byte[key.length + 1];
-            System.arraycopy(key, 0, saltedKey, 1, key.length);   
-            saltedKey[0] = saltByte;
-            newRanges.add(KeyRange.getKeyRange(saltedKey, true, saltedKey, true));
-        }
-        return Collections.singletonList(newRanges);
-    }
-    
     public static KeyRange addSaltByte(byte[] startKey, KeyRange minMaxRange) {
         byte saltByte = startKey.length == 0 ? 0 : startKey[0];
         byte[] lowerRange = minMaxRange.getLowerRange();

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index 447c04c..0fa6e60 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -67,7 +67,7 @@ public class SchemaUtil {
     private static final int VAR_LENGTH_ESTIMATE = 10;
     
     public static final DataBlockEncoding DEFAULT_DATA_BLOCK_ENCODING = DataBlockEncoding.FAST_DIFF;
-    public static final RowKeySchema VAR_BINARY_SCHEMA = new RowKeySchemaBuilder(1).addField(new PDatum() {
+    public static final PDatum VAR_BINARY_DATUM = new PDatum() {
     
         @Override
         public boolean isNullable() {
@@ -99,7 +99,8 @@ public class SchemaUtil {
             return null;
         }
         
-    }, false, null).build();
+    };
+    public static final RowKeySchema VAR_BINARY_SCHEMA = new RowKeySchemaBuilder(1).addField(VAR_BINARY_DATUM, false, null).build();
     
     /**
      * May not be instantiated

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereClauseCompileTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereClauseCompileTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereClauseCompileTest.java
index fb9dffe..f2344bb 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereClauseCompileTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereClauseCompileTest.java
@@ -67,6 +67,7 @@ import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.NumberUtil;
+import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.junit.Ignore;
 import org.junit.Test;
@@ -392,8 +393,9 @@ public class WhereClauseCompileTest extends BaseConnectionlessQueryTest {
         bindParams(pstmt, binds);
         QueryPlan plan = pstmt.optimizeQuery();
         Scan scan = plan.getContext().getScan();
-        assertArrayEquals(ByteUtil.concat(Bytes.toBytes(tenantId), StringUtil.padChar(Bytes.toBytes(keyPrefix), 15)),scan.getStartRow());
-        assertArrayEquals(ByteUtil.nextKey(scan.getStartRow()),scan.getStopRow());
+        byte[] expectedStartRow = ByteUtil.concat(Bytes.toBytes(tenantId), StringUtil.padChar(Bytes.toBytes(keyPrefix), 15));
+        assertArrayEquals(expectedStartRow,scan.getStartRow());
+        assertArrayEquals(ByteUtil.concat(expectedStartRow,QueryConstants.SEPARATOR_BYTE_ARRAY),scan.getStopRow());
     }
 
     @Test
@@ -656,18 +658,17 @@ public class WhereClauseCompileTest extends BaseConnectionlessQueryTest {
         byte[] startRow = PDataType.VARCHAR.toBytes(tenantId + entityId1);
         assertArrayEquals(startRow, scan.getStartRow());
         byte[] stopRow = PDataType.VARCHAR.toBytes(tenantId + entityId2);
-        assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow());
+        assertArrayEquals(ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow());
 
         Filter filter = scan.getFilter();
 
         assertEquals(
             new SkipScanFilter(
                 ImmutableList.of(
-                    Arrays.asList(pointRange(tenantId)),
                     Arrays.asList(
-                        pointRange(entityId1),
-                        pointRange(entityId2))),
-                plan.getContext().getResolver().getTables().get(0).getTable().getRowKeySchema()),
+                        pointRange(tenantId,entityId1),
+                        pointRange(tenantId,entityId2))),
+                SchemaUtil.VAR_BINARY_SCHEMA),
             filter);
     }
 
@@ -700,6 +701,7 @@ public class WhereClauseCompileTest extends BaseConnectionlessQueryTest {
                 plan.getContext().getResolver().getTables().get(0).getTable().getRowKeySchema()),
             filter);
     }
+    
     @Test
     public void testInListWithAnd1Filter() throws SQLException {
         String tenantId1 = "000000000000001";
@@ -717,11 +719,10 @@ public class WhereClauseCompileTest extends BaseConnectionlessQueryTest {
             new SkipScanFilter(
                 ImmutableList.of(
                     Arrays.asList(
-                        pointRange(tenantId1),
-                        pointRange(tenantId2),
-                        pointRange(tenantId3)),
-                    Arrays.asList(pointRange(entityId))),
-                plan.getContext().getResolver().getTables().get(0).getTable().getRowKeySchema()),
+                        pointRange(tenantId1, entityId),
+                        pointRange(tenantId2, entityId),
+                        pointRange(tenantId3, entityId))),
+                SchemaUtil.VAR_BINARY_SCHEMA),
             filter);
     }
     @Test
@@ -739,12 +740,16 @@ public class WhereClauseCompileTest extends BaseConnectionlessQueryTest {
         byte[] startRow = ByteUtil.concat(PDataType.VARCHAR.toBytes(tenantId1), PDataType.VARCHAR.toBytes(entityId));
         assertArrayEquals(startRow, scan.getStartRow());
         byte[] stopRow = ByteUtil.concat(PDataType.VARCHAR.toBytes(tenantId3), PDataType.VARCHAR.toBytes(entityId));
-        assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow());
+        assertArrayEquals(ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow());
         // TODO: validate scan ranges
     }
 
-    private static KeyRange pointRange(String id) {
-        return pointRange(Bytes.toBytes(id));
+    private static KeyRange pointRange(String... ids) {
+        byte[] theKey = ByteUtil.EMPTY_BYTE_ARRAY;
+        for (String id : ids) {
+            theKey = ByteUtil.concat(theKey, Bytes.toBytes(id));
+        }
+        return pointRange(theKey);
     }
     private static KeyRange pointRange(byte[] bytes) {
         return KeyRange.POINT.apply(bytes);
@@ -754,11 +759,10 @@ public class WhereClauseCompileTest extends BaseConnectionlessQueryTest {
     public void testInListWithAnd2Filter() throws SQLException {
         String tenantId1 = "000000000000001";
         String tenantId2 = "000000000000002";
-        String tenantId3 = "000000000000003";
         String entityId1 = "00000000000000X";
         String entityId2 = "00000000000000Y";
-        String query = String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id IN ('%s', '%s')",
-                ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId1, entityId2);
+        String query = String.format("select * from %s where organization_id IN ('%s','%s') AND entity_id IN ('%s', '%s')",
+                ATABLE_NAME, tenantId1, tenantId2, entityId1, entityId2);
         PhoenixConnection pconn = DriverManager.getConnection(getUrl(), TEST_PROPERTIES).unwrap(PhoenixConnection.class);
         PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
         QueryPlan plan = pstmt.optimizeQuery();
@@ -767,15 +771,12 @@ public class WhereClauseCompileTest extends BaseConnectionlessQueryTest {
         Filter filter = scan.getFilter();
         assertEquals(
             new SkipScanFilter(
-                ImmutableList.of(
-                    Arrays.asList(
-                        pointRange(tenantId1),
-                        pointRange(tenantId2),
-                        pointRange(tenantId3)),
-                    Arrays.asList(
-                        pointRange(entityId1),
-                        pointRange(entityId2))),
-                plan.getContext().getResolver().getTables().get(0).getTable().getRowKeySchema()),
+                    ImmutableList.<List<KeyRange>>of(ImmutableList.of(
+                        pointRange(tenantId1, entityId1),
+                        pointRange(tenantId1, entityId2),
+                        pointRange(tenantId2, entityId1),
+                        pointRange(tenantId2, entityId2))),
+                SchemaUtil.VAR_BINARY_SCHEMA),
             filter);
     }
 
@@ -814,7 +815,7 @@ public class WhereClauseCompileTest extends BaseConnectionlessQueryTest {
         byte[] startRow = ByteUtil.concat(PDataType.VARCHAR.toBytes(tenantId1),PDataType.VARCHAR.toBytes(entityId1));
         assertArrayEquals(startRow, scan.getStartRow());
         byte[] stopRow = ByteUtil.concat(PDataType.VARCHAR.toBytes(tenantId3),PDataType.VARCHAR.toBytes(entityId2));
-        assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow());
+        assertArrayEquals(ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow());
         // TODO: validate scan ranges
     }
     

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereClauseOptimizerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereClauseOptimizerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereClauseOptimizerTest.java
index 1de13fd..aebe2af 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereClauseOptimizerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/WhereClauseOptimizerTest.java
@@ -392,7 +392,7 @@ public class WhereClauseOptimizerTest extends BaseConnectionlessQueryTest {
         compileStatement(query, scan, binds);
         byte[] startRow = ByteUtil.concat(PDataType.VARCHAR.toBytes(tenantId),PDataType.VARCHAR.toBytes(entityId));
         assertArrayEquals(startRow, scan.getStartRow());
-        assertArrayEquals(ByteUtil.nextKey(startRow), scan.getStopRow());
+        assertArrayEquals(ByteUtil.concat(startRow, QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow());
     }
 
     @Test
@@ -508,7 +508,7 @@ public class WhereClauseOptimizerTest extends BaseConnectionlessQueryTest {
         byte[] startRow = ByteUtil.concat(PDataType.VARCHAR.toBytes(tenantId),PDataType.VARCHAR.toBytes(entityId));
         assertArrayEquals(startRow, scan.getStartRow());
         byte[] stopRow = ByteUtil.concat(PDataType.VARCHAR.toBytes(tenantId),PDataType.VARCHAR.toBytes(entityId));
-        assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow());
+        assertArrayEquals(ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow());
     }
 
     @Test
@@ -554,7 +554,7 @@ public class WhereClauseOptimizerTest extends BaseConnectionlessQueryTest {
         byte[] startRow = ByteUtil.concat(PDataType.VARCHAR.toBytes(tenantId),PDataType.VARCHAR.toBytes(entityId));
         assertArrayEquals(startRow, scan.getStartRow());
         byte[] stopRow = ByteUtil.concat(PDataType.VARCHAR.toBytes(tenantId),PDataType.VARCHAR.toBytes(entityId));
-        assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow());
+        assertArrayEquals(ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow());
     }
 
     @Test
@@ -1209,7 +1209,7 @@ public class WhereClauseOptimizerTest extends BaseConnectionlessQueryTest {
         compileStatement(query, scan, binds, extractedFilters);
         assertTrue(extractedFilters.size() == 3);
         byte[] expectedStartRow = ByteUtil.concat(PDataType.VARCHAR.toBytes(tenantId), PDataType.VARCHAR.toBytes(entityId));
-        byte[] expectedStopRow = ByteUtil.nextKey(ByteUtil.concat(PDataType.VARCHAR.toBytes(tenantId), PDataType.VARCHAR.toBytes(entityId2)));
+        byte[] expectedStopRow = ByteUtil.concat(ByteUtil.concat(PDataType.VARCHAR.toBytes(tenantId), PDataType.VARCHAR.toBytes(entityId2)), QueryConstants.SEPARATOR_BYTE_ARRAY);
         assertArrayEquals(expectedStartRow, scan.getStartRow());
         assertArrayEquals(expectedStopRow, scan.getStopRow());
     }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/test/java/org/apache/phoenix/end2end/QueryPlanTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/end2end/QueryPlanTest.java b/phoenix-core/src/test/java/org/apache/phoenix/end2end/QueryPlanTest.java
index 12c0c81..49d6ac3 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/end2end/QueryPlanTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/end2end/QueryPlanTest.java
@@ -72,7 +72,7 @@ public class QueryPlanTest extends BaseConnectedQueryTest {
                 "    SERVER FILTER BY FIRST KEY ONLY AND (HOST IS NULL AND DATE >= '2013-01-01 00:00:00.000')",
 
                 "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id = '000000000000002' AND x_integer = 2 AND a_integer < 5 ",
-                "CLIENT PARALLEL 1-WAY RANGE SCAN OVER ATABLE ['000000000000001','000000000000002']\n" + 
+                "CLIENT PARALLEL 1-WAY POINT LOOKUP ON 1 KEY OVER ATABLE\n" + 
                 "    SERVER FILTER BY (X_INTEGER = 2 AND A_INTEGER < 5)",
 
                 "SELECT a_string,b_string FROM atable WHERE organization_id = '000000000000001' AND entity_id > '000000000000002' AND entity_id < '000000000000008' AND (organization_id,entity_id) >= ('000000000000001','000000000000005') ",
@@ -180,7 +180,7 @@ public class QueryPlanTest extends BaseConnectedQueryTest {
                 "CLIENT PARALLEL 1-WAY SKIP SCAN ON 2 KEYS OVER ATABLE ['000000000000001'] - ['000000000000005']",
 
                 "SELECT a_string,b_string FROM atable WHERE organization_id IN ('00D000000000001', '00D000000000005') AND entity_id IN('00E00000000000X','00E00000000000Z')",
-                "CLIENT PARALLEL 1-WAY SKIP SCAN ON 4 KEYS OVER ATABLE ['00D000000000001','00E00000000000X'] - ['00D000000000005','00E00000000000Z']",
+                "CLIENT PARALLEL 1-WAY POINT LOOKUP ON 4 KEYS OVER ATABLE",
         };
         for (int i = 0; i < queryPlans.length; i+=2) {
             String query = queryPlans[i];

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/BaseIndexTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/BaseIndexTest.java b/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/BaseIndexTest.java
new file mode 100644
index 0000000..d5530e5
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/BaseIndexTest.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import org.apache.phoenix.end2end.BaseHBaseManagedTimeTest;
+import org.apache.phoenix.util.SchemaUtil;
+
+public class BaseIndexTest extends BaseHBaseManagedTimeTest {
+    public static final String SCHEMA_NAME = "";
+    public static final String DATA_TABLE_NAME = "T";
+    public static final String INDEX_TABLE_NAME = "I";
+    public static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T");
+    public static final String INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "I");
+}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/BaseMutableIndexTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/BaseMutableIndexTest.java b/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/BaseMutableIndexTest.java
index 148ff6b..fe70805 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/BaseMutableIndexTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/BaseMutableIndexTest.java
@@ -29,17 +29,9 @@ import java.sql.PreparedStatement;
 import java.sql.SQLException;
 import java.util.Properties;
 
-import org.apache.phoenix.end2end.BaseHBaseManagedTimeTest;
-import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 
-public class BaseMutableIndexTest extends BaseHBaseManagedTimeTest {
-    public static final String SCHEMA_NAME = "";
-    public static final String DATA_TABLE_NAME = "T";
-    public static final String INDEX_TABLE_NAME = "I";
-    public static final String DATA_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "T");
-    public static final String INDEX_TABLE_FULL_NAME = SchemaUtil.getTableName(SCHEMA_NAME, "I");
-
+public class BaseMutableIndexTest extends BaseIndexTest {
     public BaseMutableIndexTest() {
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/ImmutableIndexTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/ImmutableIndexTest.java b/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/ImmutableIndexTest.java
index 0bf0d63..ee2086d 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/ImmutableIndexTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/ImmutableIndexTest.java
@@ -33,26 +33,17 @@ import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
-import java.util.Map;
 import java.util.Properties;
 
 import org.apache.phoenix.end2end.BaseHBaseManagedTimeTest;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryConstants;
-import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
-import com.google.common.collect.Maps;
-
 
 public class ImmutableIndexTest extends BaseHBaseManagedTimeTest{
-    private static final int TABLE_SPLITS = 3;
-    private static final int INDEX_SPLITS = 4;
-    
     // Populate the test table with data.
     private static void populateTestTable() throws SQLException {
         Properties props = new Properties(TEST_PROPERTIES);
@@ -118,146 +109,6 @@ public class ImmutableIndexTest extends BaseHBaseManagedTimeTest{
         }
     }
     
-    @BeforeClass 
-    public static void doSetup() throws Exception {
-        
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
-        // Drop the HBase table metadata for this test
-        props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
-        // Must update config before starting server
-        startServer(getUrl(), new ReadOnlyProps(props.entrySet().iterator()));
-    }
-    
-    
-    @Test
-    public void testImmutableTableIndexMaintanenceSaltedSalted() throws Exception {
-        testImmutableTableIndexMaintanence(TABLE_SPLITS, INDEX_SPLITS);
-    }
-
-    @Test
-    public void testImmutableTableIndexMaintanenceSalted() throws Exception {
-        testImmutableTableIndexMaintanence(null, INDEX_SPLITS);
-    }
-
-    @Test
-    public void testImmutableTableIndexMaintanenceUnsalted() throws Exception {
-        testImmutableTableIndexMaintanence(null, null);
-    }
-
-    private void testImmutableTableIndexMaintanence(Integer tableSaltBuckets, Integer indexSaltBuckets) throws Exception {
-        String query;
-        ResultSet rs;
-        
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-        conn.createStatement().execute("CREATE TABLE t (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR) immutable_rows=true " +  (tableSaltBuckets == null ? "" : ", SALT_BUCKETS=" + tableSaltBuckets));
-        query = "SELECT * FROM t";
-        rs = conn.createStatement().executeQuery(query);
-        assertFalse(rs.next());
-        
-        conn.createStatement().execute("CREATE INDEX i ON t (v DESC)" + (indexSaltBuckets == null ? "" : " SALT_BUCKETS=" + indexSaltBuckets));
-        query = "SELECT * FROM i";
-        rs = conn.createStatement().executeQuery(query);
-        assertFalse(rs.next());
-
-        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO t VALUES(?,?)");
-        stmt.setString(1,"a");
-        stmt.setString(2, "x");
-        stmt.execute();
-        stmt.setString(1,"b");
-        stmt.setString(2, "y");
-        stmt.execute();
-        conn.commit();
-        
-        query = "SELECT * FROM i";
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("y",rs.getString(1));
-        assertEquals("b",rs.getString(2));
-        assertTrue(rs.next());
-        assertEquals("x",rs.getString(1));
-        assertEquals("a",rs.getString(2));
-        assertFalse(rs.next());
-
-        query = "SELECT k,v FROM t WHERE v = 'y'";
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("b",rs.getString(1));
-        assertEquals("y",rs.getString(2));
-        assertFalse(rs.next());
-        
-        String expectedPlan;
-        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-        expectedPlan = indexSaltBuckets == null ? 
-             "CLIENT PARALLEL 1-WAY RANGE SCAN OVER I [~'y']" : 
-            ("CLIENT PARALLEL 4-WAY SKIP SCAN ON 4 KEYS OVER I [0,~'y'] - [3,~'y']\n" + 
-             "CLIENT MERGE SORT");
-        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
-
-        // Will use index, so rows returned in DESC order.
-        // This is not a bug, though, because we can
-        // return in any order.
-        query = "SELECT k,v FROM t WHERE v >= 'x'";
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("b",rs.getString(1));
-        assertEquals("y",rs.getString(2));
-        assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("x",rs.getString(2));
-        assertFalse(rs.next());
-        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-        expectedPlan = indexSaltBuckets == null ? 
-            "CLIENT PARALLEL 1-WAY RANGE SCAN OVER I [*] - [~'x']" :
-            ("CLIENT PARALLEL 4-WAY SKIP SCAN ON 4 RANGES OVER I [0,*] - [3,~'x']\n" + 
-             "CLIENT MERGE SORT");
-        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
-        
-        // Use data table, since point lookup trumps order by
-        query = "SELECT k,v FROM t WHERE k = 'a' ORDER BY v";
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("x",rs.getString(2));
-        assertFalse(rs.next());
-        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-        expectedPlan = tableSaltBuckets == null ? 
-                ("CLIENT PARALLEL 1-WAY RANGE SCAN OVER T ['a']\n" + 
-                     "    SERVER SORTED BY [V]\n" + 
-                     "CLIENT MERGE SORT") :
-                ("CLIENT PARALLEL 1-WAY RANGE SCAN OVER T [[2,97]]\n" + 
-                        "    SERVER SORTED BY [V]\n" + 
-                        "CLIENT MERGE SORT");
-        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
-        
-        // Will use data table now, since there's an ORDER BY which can
-        // be optimized out for the data table, but not the index table.
-        query = "SELECT k,v FROM t WHERE v >= 'x' ORDER BY k LIMIT 2";
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("x",rs.getString(2));
-        assertTrue(rs.next());
-        assertEquals("b",rs.getString(1));
-        assertEquals("y",rs.getString(2));
-        assertFalse(rs.next());
-        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-        expectedPlan = tableSaltBuckets == null ? 
-             "CLIENT PARALLEL 1-WAY FULL SCAN OVER T\n" + 
-             "    SERVER FILTER BY V >= 'x'\n" + 
-             "    SERVER 2 ROW LIMIT\n" + 
-             "CLIENT 2 ROW LIMIT" :
-             "CLIENT PARALLEL 3-WAY FULL SCAN OVER T\n" + 
-             "    SERVER FILTER BY V >= 'x'\n" + 
-             "    SERVER 2 ROW LIMIT\n" + 
-             "CLIENT MERGE SORT\n" + 
-             "CLIENT 2 ROW LIMIT";
-        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
-        
-        conn.createStatement().execute("DROP TABLE t ");
-    }
-
     @Test
     public void testIndexWithNullableFixedWithCols() throws Exception {
         Properties props = new Properties(TEST_PROPERTIES);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/MutableIndexTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/MutableIndexTest.java b/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/MutableIndexTest.java
index 0486bda..cc09ead 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/MutableIndexTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/MutableIndexTest.java
@@ -34,13 +34,13 @@ import java.util.Map;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import com.google.common.collect.Maps;
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
 
 public class MutableIndexTest extends BaseMutableIndexTest {
     @BeforeClass 
@@ -256,7 +256,7 @@ public class MutableIndexTest extends BaseMutableIndexTest {
         query = "SELECT v1 as foo FROM " + DATA_TABLE_FULL_NAME + " WHERE v2 = '1' ORDER BY foo";
         rs = conn.createStatement().executeQuery("EXPLAIN " + query);
         assertEquals("CLIENT PARALLEL 1-WAY RANGE SCAN OVER " +INDEX_TABLE_FULL_NAME + " [~'1']\n" + 
-                "    SERVER TOP -1 ROWS SORTED BY [V1]\n" + 
+                "    SERVER SORTED BY [V1]\n" + 
                 "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
 
         rs = conn.createStatement().executeQuery(query);

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/MutableSaltedIndexTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/MutableSaltedIndexTest.java b/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/MutableSaltedIndexTest.java
deleted file mode 100644
index f6369f7..0000000
--- a/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/MutableSaltedIndexTest.java
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright 2010 The Apache Software Foundation
- *
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end.index;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.PreparedStatement;
-import java.sql.ResultSet;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import com.google.common.collect.Maps;
-
-
-public class MutableSaltedIndexTest extends BaseMutableIndexTest{
-    private static final int TABLE_SPLITS = 3;
-    private static final int INDEX_SPLITS = 4;
-    
-    @BeforeClass 
-    public static void doSetup() throws Exception {
-        Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
-        // Drop the HBase table metadata for this test
-        props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
-        // Must update config before starting server
-        startServer(getUrl(), new ReadOnlyProps(props.entrySet().iterator()));
-    }
-    
-    @Test
-    public void testMutableTableIndexMaintanenceSaltedSalted() throws Exception {
-        testMutableTableIndexMaintanence(TABLE_SPLITS, INDEX_SPLITS);
-    }
-
-    @Test
-    public void testMutableTableIndexMaintanenceSalted() throws Exception {
-        testMutableTableIndexMaintanence(null, INDEX_SPLITS);
-    }
-
-    @Test
-    public void testMutableTableIndexMaintanenceUnsalted() throws Exception {
-        testMutableTableIndexMaintanence(null, null);
-    }
-
-    private void testMutableTableIndexMaintanence(Integer tableSaltBuckets, Integer indexSaltBuckets) throws Exception {
-        String query;
-        ResultSet rs;
-        
-        Properties props = new Properties(TEST_PROPERTIES);
-        Connection conn = DriverManager.getConnection(getUrl(), props);
-        conn.setAutoCommit(false);
-        conn.createStatement().execute("CREATE TABLE " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR)  " +  (tableSaltBuckets == null ? "" : " SALT_BUCKETS=" + tableSaltBuckets));
-        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertFalse(rs.next());
-        
-        conn.createStatement().execute("CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v DESC)" + (indexSaltBuckets == null ? "" : " SALT_BUCKETS=" + indexSaltBuckets));
-        query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertFalse(rs.next());
-
-        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?)");
-        stmt.setString(1,"a");
-        stmt.setString(2, "x");
-        stmt.execute();
-        stmt.setString(1,"b");
-        stmt.setString(2, "y");
-        stmt.execute();
-        conn.commit();
-        
-        query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("y",rs.getString(1));
-        assertEquals("b",rs.getString(2));
-        assertTrue(rs.next());
-        assertEquals("x",rs.getString(1));
-        assertEquals("a",rs.getString(2));
-        assertFalse(rs.next());
-
-        query = "SELECT k,v FROM " + DATA_TABLE_FULL_NAME + " WHERE v = 'y'";
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("b",rs.getString(1));
-        assertEquals("y",rs.getString(2));
-        assertFalse(rs.next());
-        
-        String expectedPlan;
-        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-        expectedPlan = indexSaltBuckets == null ? 
-             "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + INDEX_TABLE_FULL_NAME + " [~'y']" : 
-            ("CLIENT PARALLEL 4-WAY SKIP SCAN ON 4 KEYS OVER " + INDEX_TABLE_FULL_NAME + " [0,~'y'] - [3,~'y']\n" + 
-             "CLIENT MERGE SORT");
-        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
-
-        // Will use index, so rows returned in DESC order.
-        // This is not a bug, though, because we can
-        // return in any order.
-        query = "SELECT k,v FROM " + DATA_TABLE_FULL_NAME + " WHERE v >= 'x'";
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("b",rs.getString(1));
-        assertEquals("y",rs.getString(2));
-        assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("x",rs.getString(2));
-        assertFalse(rs.next());
-        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-        expectedPlan = indexSaltBuckets == null ? 
-            "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + INDEX_TABLE_FULL_NAME + " [*] - [~'x']" :
-            ("CLIENT PARALLEL 4-WAY SKIP SCAN ON 4 RANGES OVER " + INDEX_TABLE_FULL_NAME + " [0,*] - [3,~'x']\n" + 
-             "CLIENT MERGE SORT");
-        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
-        
-        // Use data table, since point lookup trumps order by
-        query = "SELECT k,v FROM " + DATA_TABLE_FULL_NAME + " WHERE k = 'a' ORDER BY v";
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("x",rs.getString(2));
-        assertFalse(rs.next());
-        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-        expectedPlan = tableSaltBuckets == null ? 
-                "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + DATA_TABLE_FULL_NAME + " ['a']\n" +
-                "    SERVER SORTED BY [V]\n" + 
-                "CLIENT MERGE SORT" :
-                    "CLIENT PARALLEL 1-WAY RANGE SCAN OVER T [[2,97]]\n" + 
-                    "    SERVER SORTED BY [V]\n" + 
-                    "CLIENT MERGE SORT";
-        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
-        
-        // Will use data table now, since there's a LIMIT clause and
-        // we're able to optimize out the ORDER BY, unless the data
-        // table is salted.
-        query = "SELECT k,v FROM " + DATA_TABLE_FULL_NAME + " WHERE v >= 'x' ORDER BY k LIMIT 2";
-        rs = conn.createStatement().executeQuery(query);
-        assertTrue(rs.next());
-        assertEquals("a",rs.getString(1));
-        assertEquals("x",rs.getString(2));
-        assertTrue(rs.next());
-        assertEquals("b",rs.getString(1));
-        assertEquals("y",rs.getString(2));
-        assertFalse(rs.next());
-        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
-        expectedPlan = tableSaltBuckets == null ? 
-             "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + DATA_TABLE_FULL_NAME + "\n" +
-             "    SERVER FILTER BY V >= 'x'\n" + 
-             "    SERVER 2 ROW LIMIT\n" + 
-             "CLIENT 2 ROW LIMIT" :
-                 "CLIENT PARALLEL 3-WAY FULL SCAN OVER " + DATA_TABLE_FULL_NAME + "\n" +
-                 "    SERVER FILTER BY V >= 'x'\n" + 
-                 "    SERVER 2 ROW LIMIT\n" + 
-                 "CLIENT MERGE SORT\n" + 
-                 "CLIENT 2 ROW LIMIT";
-        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
-    }
-}

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/1a6811bf/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/SaltedIndexTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/SaltedIndexTest.java b/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/SaltedIndexTest.java
new file mode 100644
index 0000000..115fe30
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/end2end/index/SaltedIndexTest.java
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2010 The Apache Software Foundation
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.Map;
+import java.util.Properties;
+
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Maps;
+
+
+public class SaltedIndexTest extends BaseIndexTest{
+    private static final int TABLE_SPLITS = 3;
+    private static final int INDEX_SPLITS = 4;
+    
+    @BeforeClass 
+    public static void doSetup() throws Exception {
+        Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
+        // Drop the HBase table metadata for this test
+        props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
+        // Must update config before starting server
+        startServer(getUrl(), new ReadOnlyProps(props.entrySet().iterator()));
+    }
+    
+    private static void makeImmutableAndDeleteData() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl(), TEST_PROPERTIES);
+        try {
+            conn.setAutoCommit(true);
+            conn.createStatement().execute("DELETE FROM " + DATA_TABLE_FULL_NAME);
+            conn.createStatement().execute("ALTER TABLE " + DATA_TABLE_FULL_NAME + " SET IMMUTABLE_ROWS=true");
+            conn.createStatement().executeQuery("SELECT COUNT(*) FROM " + DATA_TABLE_FULL_NAME).next();
+            assertTrue(conn.unwrap(PhoenixConnection.class).getPMetaData().getTable(DATA_TABLE_FULL_NAME).isImmutableRows());
+        } finally {
+            conn.close();
+        }
+    }
+    
+    @Test
+    public void testMutableTableIndexMaintanenceSaltedSalted() throws Exception {
+        testMutableTableIndexMaintanence(TABLE_SPLITS, INDEX_SPLITS);
+        makeImmutableAndDeleteData();
+        testMutableTableIndexMaintanence(TABLE_SPLITS, INDEX_SPLITS);
+    }
+
+    @Test
+    public void testMutableTableIndexMaintanenceSalted() throws Exception {
+        testMutableTableIndexMaintanence(null, INDEX_SPLITS);
+        makeImmutableAndDeleteData();
+        testMutableTableIndexMaintanence(null, INDEX_SPLITS);
+    }
+
+    @Test
+    public void testMutableTableIndexMaintanenceUnsalted() throws Exception {
+        testMutableTableIndexMaintanence(null, null);
+        makeImmutableAndDeleteData();
+        testMutableTableIndexMaintanence(null, null);
+    }
+
+    private void testMutableTableIndexMaintanence(Integer tableSaltBuckets, Integer indexSaltBuckets) throws Exception {
+        String query;
+        ResultSet rs;
+        
+        Properties props = new Properties(TEST_PROPERTIES);
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(false);
+        conn.createStatement().execute("CREATE TABLE IF NOT EXISTS " + DATA_TABLE_FULL_NAME + " (k VARCHAR NOT NULL PRIMARY KEY, v VARCHAR)  " +  (tableSaltBuckets == null ? "" : " SALT_BUCKETS=" + tableSaltBuckets));
+        query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
+        rs = conn.createStatement().executeQuery(query);
+        assertFalse(rs.next());
+        
+        conn.createStatement().execute("CREATE INDEX IF NOT EXISTS " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v DESC)" + (indexSaltBuckets == null ? "" : " SALT_BUCKETS=" + indexSaltBuckets));
+        query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
+        rs = conn.createStatement().executeQuery(query);
+        assertFalse(rs.next());
+
+        PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?)");
+        stmt.setString(1,"a");
+        stmt.setString(2, "x");
+        stmt.execute();
+        stmt.setString(1,"b");
+        stmt.setString(2, "y");
+        stmt.execute();
+        conn.commit();
+        
+        query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
+        rs = conn.createStatement().executeQuery(query);
+        assertTrue(rs.next());
+        assertEquals("y",rs.getString(1));
+        assertEquals("b",rs.getString(2));
+        assertTrue(rs.next());
+        assertEquals("x",rs.getString(1));
+        assertEquals("a",rs.getString(2));
+        assertFalse(rs.next());
+
+        query = "SELECT k,v FROM " + DATA_TABLE_FULL_NAME + " WHERE v = 'y'";
+        rs = conn.createStatement().executeQuery(query);
+        assertTrue(rs.next());
+        assertEquals("b",rs.getString(1));
+        assertEquals("y",rs.getString(2));
+        assertFalse(rs.next());
+        
+        String expectedPlan;
+        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+        expectedPlan = indexSaltBuckets == null ? 
+             "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + INDEX_TABLE_FULL_NAME + " [~'y']" : 
+            ("CLIENT PARALLEL 4-WAY SKIP SCAN ON 4 KEYS OVER " + INDEX_TABLE_FULL_NAME + " [0,~'y'] - [3,~'y']\n" + 
+             "CLIENT MERGE SORT");
+        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
+
+        // Will use index, so rows returned in DESC order.
+        // This is not a bug, though, because we can
+        // return in any order.
+        query = "SELECT k,v FROM " + DATA_TABLE_FULL_NAME + " WHERE v >= 'x'";
+        rs = conn.createStatement().executeQuery(query);
+        assertTrue(rs.next());
+        assertEquals("b",rs.getString(1));
+        assertEquals("y",rs.getString(2));
+        assertTrue(rs.next());
+        assertEquals("a",rs.getString(1));
+        assertEquals("x",rs.getString(2));
+        assertFalse(rs.next());
+        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+        expectedPlan = indexSaltBuckets == null ? 
+            "CLIENT PARALLEL 1-WAY RANGE SCAN OVER " + INDEX_TABLE_FULL_NAME + " [*] - [~'x']" :
+            ("CLIENT PARALLEL 4-WAY SKIP SCAN ON 4 RANGES OVER " + INDEX_TABLE_FULL_NAME + " [0,*] - [3,~'x']\n" + 
+             "CLIENT MERGE SORT");
+        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
+        
+        // Use data table, since point lookup trumps order by
+        query = "SELECT k,v FROM " + DATA_TABLE_FULL_NAME + " WHERE k = 'a' ORDER BY v";
+        rs = conn.createStatement().executeQuery(query);
+        assertTrue(rs.next());
+        assertEquals("a",rs.getString(1));
+        assertEquals("x",rs.getString(2));
+        assertFalse(rs.next());
+        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+        expectedPlan = tableSaltBuckets == null ? 
+                "CLIENT PARALLEL 1-WAY POINT LOOKUP ON 1 KEY OVER " + DATA_TABLE_FULL_NAME + "\n" +
+                "    SERVER SORTED BY [V]\n" + 
+                "CLIENT MERGE SORT" :
+                    "CLIENT PARALLEL 1-WAY POINT LOOKUP ON 1 KEY OVER " + DATA_TABLE_FULL_NAME + "\n" + 
+                    "    SERVER SORTED BY [V]\n" + 
+                    "CLIENT MERGE SORT";
+        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
+        
+        // Will use data table now, since there's a LIMIT clause and
+        // we're able to optimize out the ORDER BY, unless the data
+        // table is salted.
+        query = "SELECT k,v FROM " + DATA_TABLE_FULL_NAME + " WHERE v >= 'x' ORDER BY k LIMIT 2";
+        rs = conn.createStatement().executeQuery(query);
+        assertTrue(rs.next());
+        assertEquals("a",rs.getString(1));
+        assertEquals("x",rs.getString(2));
+        assertTrue(rs.next());
+        assertEquals("b",rs.getString(1));
+        assertEquals("y",rs.getString(2));
+        assertFalse(rs.next());
+        rs = conn.createStatement().executeQuery("EXPLAIN " + query);
+        expectedPlan = tableSaltBuckets == null ? 
+             "CLIENT PARALLEL 1-WAY FULL SCAN OVER " + DATA_TABLE_FULL_NAME + "\n" +
+             "    SERVER FILTER BY V >= 'x'\n" + 
+             "    SERVER 2 ROW LIMIT\n" + 
+             "CLIENT 2 ROW LIMIT" :
+                 "CLIENT PARALLEL 3-WAY FULL SCAN OVER " + DATA_TABLE_FULL_NAME + "\n" +
+                 "    SERVER FILTER BY V >= 'x'\n" + 
+                 "    SERVER 2 ROW LIMIT\n" + 
+                 "CLIENT MERGE SORT\n" + 
+                 "CLIENT 2 ROW LIMIT";
+        assertEquals(expectedPlan,QueryUtil.getExplainPlan(rs));
+    }
+}