Posted to commits@phoenix.apache.org by ma...@apache.org on 2015/07/20 19:14:59 UTC

[29/50] [abbrv] phoenix git commit: PHOENIX-2067 Sort order incorrect for variable length DESC columns
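
Context for the diff below: Phoenix stores DESC row key columns with their
bytes inverted, and variable-length values in a compound row key are
terminated by a separator byte. Prior to this change the separator was
always SEPARATOR_BYTE (0x00), which sorts incorrectly against inverted
bytes. A minimal worked example, assuming inversion is a bitwise
complement (which is what SortOrder.invert does):

    // DESC encoding inverts each byte: 'a' (0x61) -> 0x9E, 'b' (0x62) -> 0x9D
    // Old row keys, terminated with SEPARATOR_BYTE (0x00):
    //   "a"  -> 9E 00
    //   "ab" -> 9E 9D 00
    // Unsigned byte comparison puts 9E 00 before 9E 9D 00, so "a" sorts
    // ahead of "ab" -- wrong for DESC, where "ab" must come first.
    // With the new DESC_SEPARATOR_BYTE (0xFF):
    //   "a"  -> 9E FF
    //   "ab" -> 9E 9D FF   // 9D < FF, so "ab" now sorts first, as required

The fix threads a separator choice (SchemaUtil.getSeparatorByte) through
key construction and adds a psql.py upgrade path for data written with the
old encoding.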

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestampArray.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestampArray.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestampArray.java
index 1159b5c..3407310 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestampArray.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTimestampArray.java
@@ -17,94 +17,80 @@
  */
 package org.apache.phoenix.schema.types;
 
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.phoenix.schema.SortOrder;
+import java.sql.Timestamp;
 
-import java.sql.*;
+import org.apache.phoenix.schema.SortOrder;
 
 public class PUnsignedTimestampArray extends PArrayDataType<Timestamp[]> {
 
-  public static final PUnsignedTimestampArray INSTANCE = new PUnsignedTimestampArray();
-
-  private PUnsignedTimestampArray() {
-    super("UNSIGNED_TIMESTAMP ARRAY",
-        PDataType.ARRAY_TYPE_BASE + PUnsignedTimestamp.INSTANCE.getSqlType(), PhoenixArray.class,
-        null, 37);
-  }
-
-  @Override
-  public boolean isArrayType() {
-    return true;
-  }
-
-  @Override
-  public boolean isFixedWidth() {
-    return false;
-  }
+    public static final PUnsignedTimestampArray INSTANCE = new PUnsignedTimestampArray();
 
-  @Override
-  public int compareTo(Object lhs, Object rhs, PDataType rhsType) {
-    return compareTo(lhs, rhs);
-  }
+    private PUnsignedTimestampArray() {
+        super("UNSIGNED_TIMESTAMP ARRAY",
+                PDataType.ARRAY_TYPE_BASE + PUnsignedTimestamp.INSTANCE.getSqlType(), PhoenixArray.class,
+                null, 37);
+    }
 
-  @Override
-  public Integer getByteSize() {
-    return null;
-  }
+    @Override
+    public boolean isArrayType() {
+        return true;
+    }
 
-  @Override
-  public byte[] toBytes(Object object) {
-    return toBytes(object, SortOrder.ASC);
-  }
+    @Override
+    public boolean isFixedWidth() {
+        return false;
+    }
 
-  @Override
-  public byte[] toBytes(Object object, SortOrder sortOrder) {
-    return toBytes(object, PUnsignedTimestamp.INSTANCE, sortOrder);
-  }
+    @Override
+    public int compareTo(Object lhs, Object rhs, PDataType rhsType) {
+        return compareTo(lhs, rhs);
+    }
 
-  @Override
-  public Object toObject(byte[] bytes, int offset, int length,
-      PDataType actualType, SortOrder sortOrder, Integer maxLength,
-      Integer scale) {
-    return toObject(bytes, offset, length, PUnsignedTimestamp.INSTANCE, sortOrder,
-        maxLength, scale, PUnsignedTimestamp.INSTANCE);
-  }
+    @Override
+    public Integer getByteSize() {
+        return null;
+    }
 
-  @Override
-  public boolean isCoercibleTo(PDataType targetType) {
-    return isCoercibleTo(targetType, this);
-  }
+    @Override
+    public byte[] toBytes(Object object) {
+        return toBytes(object, SortOrder.ASC);
+    }
 
-  @Override
-  public boolean isCoercibleTo(PDataType targetType, Object value) {
-    if (value == null) {
-      return true;
+    @Override
+    public byte[] toBytes(Object object, SortOrder sortOrder) {
+        return toBytes(object, PUnsignedTimestamp.INSTANCE, sortOrder);
     }
-    PhoenixArray pArr = (PhoenixArray) value;
-    Object[] timeStampArr = (Object[]) pArr.array;
-    for (Object i : timeStampArr) {
-      if (!super.isCoercibleTo(PUnsignedTimestamp.INSTANCE, i)) {
-        return false;
-      }
+
+    @Override
+    public Object toObject(byte[] bytes, int offset, int length,
+            PDataType actualType, SortOrder sortOrder, Integer maxLength,
+            Integer scale) {
+        return toObject(bytes, offset, length, PUnsignedTimestamp.INSTANCE, sortOrder,
+                maxLength, scale, PUnsignedTimestamp.INSTANCE);
     }
-    return true;
-  }
 
-  @Override
-  public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType,
-      Integer maxLength, Integer scale, SortOrder actualModifer, Integer desiredMaxLength,
-      Integer desiredScale, SortOrder desiredModifier) {
-    coerceBytes(ptr, object, actualType, maxLength, scale, desiredMaxLength, desiredScale,
-        this, actualModifer, desiredModifier);
-  }
+    @Override
+    public boolean isCoercibleTo(PDataType targetType) {
+        return isCoercibleTo(targetType, this);
+    }
 
-  @Override
-  public int getResultSetSqlType() {
-    return Types.ARRAY;
-  }
+    @Override
+    public boolean isCoercibleTo(PDataType targetType, Object value) {
+        if (value == null) {
+            return true;
+        }
+        PhoenixArray pArr = (PhoenixArray) value;
+        Object[] timeStampArr = (Object[]) pArr.array;
+        for (Object i : timeStampArr) {
+            if (!super.isCoercibleTo(PUnsignedTimestamp.INSTANCE, i)) {
+                return false;
+            }
+        }
+        return true;
+    }
 
-  @Override
-  public Object getSampleValue(Integer maxLength, Integer arrayLength) {
-    return getSampleValue(PUnsignedTimestamp.INSTANCE, arrayLength, maxLength);
-  }
+    @Override
+    public Object getSampleValue(Integer maxLength, Integer arrayLength) {
+        return getSampleValue(PUnsignedTimestamp.INSTANCE, arrayLength, maxLength);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyintArray.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyintArray.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyintArray.java
index a73f284..6843340 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyintArray.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PUnsignedTinyintArray.java
@@ -17,94 +17,78 @@
  */
 package org.apache.phoenix.schema.types;
 
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.schema.SortOrder;
 
-import java.sql.Types;
-
 public class PUnsignedTinyintArray extends PArrayDataType<byte[]> {
 
-  public static final PUnsignedTinyintArray INSTANCE = new PUnsignedTinyintArray();
-
-  private PUnsignedTinyintArray() {
-    super("UNSIGNED_TINYINT ARRAY",
-        PDataType.ARRAY_TYPE_BASE + PUnsignedTinyint.INSTANCE.getSqlType(), PhoenixArray.class,
-        null, 45);
-  }
-
-  @Override
-  public boolean isArrayType() {
-    return true;
-  }
-
-  @Override
-  public boolean isFixedWidth() {
-    return false;
-  }
+    public static final PUnsignedTinyintArray INSTANCE = new PUnsignedTinyintArray();
 
-  @Override
-  public int compareTo(Object lhs, Object rhs, PDataType rhsType) {
-    return compareTo(lhs, rhs);
-  }
+    private PUnsignedTinyintArray() {
+        super("UNSIGNED_TINYINT ARRAY",
+                PDataType.ARRAY_TYPE_BASE + PUnsignedTinyint.INSTANCE.getSqlType(), PhoenixArray.class,
+                null, 45);
+    }
 
-  @Override
-  public Integer getByteSize() {
-    return null;
-  }
+    @Override
+    public boolean isArrayType() {
+        return true;
+    }
 
-  @Override
-  public byte[] toBytes(Object object) {
-    return toBytes(object, SortOrder.ASC);
-  }
+    @Override
+    public boolean isFixedWidth() {
+        return false;
+    }
 
-  @Override
-  public byte[] toBytes(Object object, SortOrder sortOrder) {
-    return toBytes(object, PUnsignedTinyint.INSTANCE, sortOrder);
-  }
+    @Override
+    public int compareTo(Object lhs, Object rhs, PDataType rhsType) {
+        return compareTo(lhs, rhs);
+    }
 
-  @Override
-  public Object toObject(byte[] bytes, int offset, int length,
-      PDataType actualType, SortOrder sortOrder, Integer maxLength,
-      Integer scale) {
-    return toObject(bytes, offset, length, PUnsignedTinyint.INSTANCE, sortOrder, maxLength,
-        scale, PUnsignedTinyint.INSTANCE);
-  }
+    @Override
+    public Integer getByteSize() {
+        return null;
+    }
 
-  @Override
-  public boolean isCoercibleTo(PDataType targetType) {
-    return isCoercibleTo(targetType, this);
-  }
+    @Override
+    public byte[] toBytes(Object object) {
+        return toBytes(object, SortOrder.ASC);
+    }
 
-  @Override
-  public boolean isCoercibleTo(PDataType targetType, Object value) {
-    if (value == null) {
-      return true;
+    @Override
+    public byte[] toBytes(Object object, SortOrder sortOrder) {
+        return toBytes(object, PUnsignedTinyint.INSTANCE, sortOrder);
     }
-    PhoenixArray pArr = (PhoenixArray) value;
-    Object[] byteArr = (Object[]) pArr.array;
-    for (Object i : byteArr) {
-      if (!super.isCoercibleTo(PUnsignedTinyint.INSTANCE, i)) {
-        return false;
-      }
+
+    @Override
+    public Object toObject(byte[] bytes, int offset, int length,
+            PDataType actualType, SortOrder sortOrder, Integer maxLength,
+            Integer scale) {
+        return toObject(bytes, offset, length, PUnsignedTinyint.INSTANCE, sortOrder, maxLength,
+                scale, PUnsignedTinyint.INSTANCE);
     }
-    return true;
-  }
 
-  @Override
-  public int getResultSetSqlType() {
-    return Types.ARRAY;
-  }
+    @Override
+    public boolean isCoercibleTo(PDataType targetType) {
+        return isCoercibleTo(targetType, this);
+    }
 
-  @Override
-  public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType,
-      Integer maxLength, Integer scale, SortOrder actualModifer, Integer desiredMaxLength,
-      Integer desiredScale, SortOrder desiredModifier) {
-    coerceBytes(ptr, object, actualType, maxLength, scale, desiredMaxLength, desiredScale,
-        this, actualModifer, desiredModifier);
-  }
+    @Override
+    public boolean isCoercibleTo(PDataType targetType, Object value) {
+        if (value == null) {
+            return true;
+        }
+        PhoenixArray pArr = (PhoenixArray) value;
+        Object[] byteArr = (Object[]) pArr.array;
+        for (Object i : byteArr) {
+            if (!super.isCoercibleTo(PUnsignedTinyint.INSTANCE, i)) {
+                return false;
+            }
+        }
+        return true;
+    }
 
-  @Override
-  public Object getSampleValue(Integer maxLength, Integer arrayLength) {
-    return getSampleValue(PUnsignedTinyint.INSTANCE, arrayLength, maxLength);
-  }
+    @Override
+    public Object getSampleValue(Integer maxLength, Integer arrayLength) {
+        return getSampleValue(PUnsignedTinyint.INSTANCE, arrayLength, maxLength);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinaryArray.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinaryArray.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinaryArray.java
index 10448ac..01b8667 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinaryArray.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarbinaryArray.java
@@ -17,93 +17,77 @@
  */
 package org.apache.phoenix.schema.types;
 
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.schema.SortOrder;
 
-import java.sql.Types;
-
 public class PVarbinaryArray extends PArrayDataType<byte[][]> {
 
-  public static final PVarbinaryArray INSTANCE = new PVarbinaryArray();
-
-  private PVarbinaryArray() {
-    super("VARBINARY ARRAY", PDataType.ARRAY_TYPE_BASE + PVarbinary.INSTANCE.getSqlType(),
-        PhoenixArray.class, null, 27);
-  }
-
-  @Override
-  public boolean isArrayType() {
-    return true;
-  }
-
-  @Override
-  public boolean isFixedWidth() {
-    return false;
-  }
+    public static final PVarbinaryArray INSTANCE = new PVarbinaryArray();
 
-  @Override
-  public int compareTo(Object lhs, Object rhs, PDataType rhsType) {
-    return compareTo(lhs, rhs);
-  }
+    private PVarbinaryArray() {
+        super("VARBINARY ARRAY", PDataType.ARRAY_TYPE_BASE + PVarbinary.INSTANCE.getSqlType(),
+                PhoenixArray.class, null, 27);
+    }
 
-  @Override
-  public Integer getByteSize() {
-    return null;
-  }
+    @Override
+    public boolean isArrayType() {
+        return true;
+    }
 
-  @Override
-  public byte[] toBytes(Object object) {
-    return toBytes(object, SortOrder.ASC);
-  }
+    @Override
+    public boolean isFixedWidth() {
+        return false;
+    }
 
-  @Override
-  public byte[] toBytes(Object object, SortOrder sortOrder) {
-    return toBytes(object, PVarbinary.INSTANCE, sortOrder);
-  }
+    @Override
+    public int compareTo(Object lhs, Object rhs, PDataType rhsType) {
+        return compareTo(lhs, rhs);
+    }
 
-  @Override
-  public Object toObject(byte[] bytes, int offset, int length,
-      PDataType actualType, SortOrder sortOrder, Integer maxLength,
-      Integer scale) {
-    return toObject(bytes, offset, length, PVarbinary.INSTANCE, sortOrder, maxLength, scale,
-        PVarbinary.INSTANCE);
-  }
+    @Override
+    public Integer getByteSize() {
+        return null;
+    }
 
-  @Override
-  public boolean isCoercibleTo(PDataType targetType) {
-    return isCoercibleTo(targetType, this);
-  }
+    @Override
+    public byte[] toBytes(Object object) {
+        return toBytes(object, SortOrder.ASC);
+    }
 
-  @Override
-  public boolean isCoercibleTo(PDataType targetType, Object value) {
-    if (value == null) {
-      return true;
+    @Override
+    public byte[] toBytes(Object object, SortOrder sortOrder) {
+        return toBytes(object, PVarbinary.INSTANCE, sortOrder);
     }
-    PhoenixArray pArr = (PhoenixArray) value;
-    Object[] charArr = (Object[]) pArr.array;
-    for (Object i : charArr) {
-      if (!super.isCoercibleTo(PVarbinary.INSTANCE, i)) {
-        return false;
-      }
+
+    @Override
+    public Object toObject(byte[] bytes, int offset, int length,
+            PDataType actualType, SortOrder sortOrder, Integer maxLength,
+            Integer scale) {
+        return toObject(bytes, offset, length, PVarbinary.INSTANCE, sortOrder, maxLength, scale,
+                PVarbinary.INSTANCE);
     }
-    return true;
-  }
 
-  @Override
-  public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType,
-      Integer maxLength, Integer scale, SortOrder actualModifer, Integer desiredMaxLength,
-      Integer desiredScale, SortOrder desiredModifier) {
-    coerceBytes(ptr, object, actualType, maxLength, scale, desiredMaxLength, desiredScale,
-        this, actualModifer, desiredModifier);
-  }
+    @Override
+    public boolean isCoercibleTo(PDataType targetType) {
+        return isCoercibleTo(targetType, this);
+    }
 
-  @Override
-  public int getResultSetSqlType() {
-    return Types.ARRAY;
-  }
+    @Override
+    public boolean isCoercibleTo(PDataType targetType, Object value) {
+        if (value == null) {
+            return true;
+        }
+        PhoenixArray pArr = (PhoenixArray) value;
+        Object[] charArr = (Object[]) pArr.array;
+        for (Object i : charArr) {
+            if (!super.isCoercibleTo(PVarbinary.INSTANCE, i)) {
+                return false;
+            }
+        }
+        return true;
+    }
 
-  @Override
-  public Object getSampleValue(Integer maxLength, Integer arrayLength) {
-    return getSampleValue(PVarbinary.INSTANCE, arrayLength, maxLength);
-  }
+    @Override
+    public Object getSampleValue(Integer maxLength, Integer arrayLength) {
+        return getSampleValue(PVarbinary.INSTANCE, arrayLength, maxLength);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarcharArray.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarcharArray.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarcharArray.java
index 72561b3..6edaf80 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarcharArray.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PVarcharArray.java
@@ -17,93 +17,77 @@
  */
 package org.apache.phoenix.schema.types;
 
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.schema.SortOrder;
 
-import java.sql.Types;
-
 public class PVarcharArray extends PArrayDataType<String> {
 
-  public static final PVarcharArray INSTANCE = new PVarcharArray();
-
-  private PVarcharArray() {
-    super("VARCHAR ARRAY", PDataType.ARRAY_TYPE_BASE + PVarchar.INSTANCE.getSqlType(),
-        PhoenixArray.class, null, 26);
-  }
-
-  @Override
-  public boolean isArrayType() {
-    return true;
-  }
-
-  @Override
-  public boolean isFixedWidth() {
-    return false;
-  }
+    public static final PVarcharArray INSTANCE = new PVarcharArray();
 
-  @Override
-  public int compareTo(Object lhs, Object rhs, PDataType rhsType) {
-    return compareTo(lhs, rhs);
-  }
+    private PVarcharArray() {
+        super("VARCHAR ARRAY", PDataType.ARRAY_TYPE_BASE + PVarchar.INSTANCE.getSqlType(),
+                PhoenixArray.class, null, 26);
+    }
 
-  @Override
-  public Integer getByteSize() {
-    return null;
-  }
+    @Override
+    public boolean isArrayType() {
+        return true;
+    }
 
-  @Override
-  public byte[] toBytes(Object object) {
-    return toBytes(object, SortOrder.ASC);
-  }
+    @Override
+    public boolean isFixedWidth() {
+        return false;
+    }
 
-  @Override
-  public byte[] toBytes(Object object, SortOrder sortOrder) {
-    return toBytes(object, PVarchar.INSTANCE, sortOrder);
-  }
+    @Override
+    public int compareTo(Object lhs, Object rhs, PDataType rhsType) {
+        return compareTo(lhs, rhs);
+    }
 
-  @Override
-  public Object toObject(byte[] bytes, int offset, int length,
-      PDataType actualType, SortOrder sortOrder, Integer maxLength,
-      Integer scale) {
-    return toObject(bytes, offset, length, PVarchar.INSTANCE, sortOrder, maxLength, scale,
-        PVarchar.INSTANCE);
-  }
+    @Override
+    public Integer getByteSize() {
+        return null;
+    }
 
-  @Override
-  public boolean isCoercibleTo(PDataType targetType) {
-    return isCoercibleTo(targetType, this);
-  }
+    @Override
+    public byte[] toBytes(Object object) {
+        return toBytes(object, SortOrder.ASC);
+    }
 
-  @Override
-  public boolean isCoercibleTo(PDataType targetType, Object value) {
-    if (value == null) {
-      return true;
+    @Override
+    public byte[] toBytes(Object object, SortOrder sortOrder) {
+        return toBytes(object, PVarchar.INSTANCE, sortOrder);
     }
-    PhoenixArray pArr = (PhoenixArray) value;
-    Object[] charArr = (Object[]) pArr.array;
-    for (Object i : charArr) {
-      if (!super.isCoercibleTo(PVarchar.INSTANCE, i)) {
-        return false;
-      }
+
+    @Override
+    public Object toObject(byte[] bytes, int offset, int length,
+            PDataType actualType, SortOrder sortOrder, Integer maxLength,
+            Integer scale) {
+        return toObject(bytes, offset, length, PVarchar.INSTANCE, sortOrder, maxLength, scale,
+                PVarchar.INSTANCE);
     }
-    return true;
-  }
 
-  @Override
-  public void coerceBytes(ImmutableBytesWritable ptr, Object object, PDataType actualType,
-      Integer maxLength, Integer scale, SortOrder actualModifer, Integer desiredMaxLength,
-      Integer desiredScale, SortOrder desiredModifier) {
-    coerceBytes(ptr, object, actualType, maxLength, scale, desiredMaxLength, desiredScale,
-        this, actualModifer, desiredModifier);
-  }
+    @Override
+    public boolean isCoercibleTo(PDataType targetType) {
+        return isCoercibleTo(targetType, this);
+    }
 
-  @Override
-  public int getResultSetSqlType() {
-    return Types.ARRAY;
-  }
+    @Override
+    public boolean isCoercibleTo(PDataType targetType, Object value) {
+        if (value == null) {
+            return true;
+        }
+        PhoenixArray pArr = (PhoenixArray) value;
+        Object[] charArr = (Object[]) pArr.array;
+        for (Object i : charArr) {
+            if (!super.isCoercibleTo(PVarchar.INSTANCE, i)) {
+                return false;
+            }
+        }
+        return true;
+    }
 
-  @Override
-  public Object getSampleValue(Integer maxLength, Integer arrayLength) {
-    return getSampleValue(PVarchar.INSTANCE, arrayLength, maxLength);
-  }
+    @Override
+    public Object getSampleValue(Integer maxLength, Integer arrayLength) {
+        return getSampleValue(PVarchar.INSTANCE, arrayLength, maxLength);
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
index 1e3516d..64d064a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
@@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.query.KeyRange;
-import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PDataType;
 
 import com.google.common.base.Preconditions;
 
@@ -285,14 +285,16 @@ public class ByteUtil {
             totalLength += writable.getLength();
         }
         byte[] result = new byte[totalLength];
-        int offset = 0;
+        int totalOffset = 0;
         for (ImmutableBytesWritable array : writables) {
             byte[] bytes = array.get();
+            int offset = array.getOffset();
             if (sortOrder == SortOrder.DESC) {
-                bytes = SortOrder.invert(bytes, array.getOffset(), new byte[array.getLength()], 0, array.getLength());
+                bytes = SortOrder.invert(bytes, offset, new byte[array.getLength()], 0, array.getLength());
+                offset = 0;
             }
-            System.arraycopy(bytes, array.getOffset(), result, offset, array.getLength());
-            offset += array.getLength();
+            System.arraycopy(bytes, offset, result, totalOffset, array.getLength());
+            totalOffset += array.getLength();
         }
         return result;
     }
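
The concat fix above is subtle enough to annotate: SortOrder.invert writes
into a freshly allocated buffer that starts at index 0, while the original
bytes may live at a non-zero offset inside a shared backing array. The old
code inverted the bytes but kept reading at array.getOffset(). The new
loop, with comments:

    for (ImmutableBytesWritable array : writables) {
        byte[] bytes = array.get();
        int offset = array.getOffset();        // offset into the shared backing buffer
        if (sortOrder == SortOrder.DESC) {
            // invert() copies the value into a new zero-based buffer...
            bytes = SortOrder.invert(bytes, offset, new byte[array.getLength()], 0, array.getLength());
            offset = 0;                        // ...so the read offset must be reset
        }
        System.arraycopy(bytes, offset, result, totalOffset, array.getLength());
        totalOffset += array.getLength();
    }

Note also the rename of the write position from offset to totalOffset,
which removes the name collision that made the original bug easy to write.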

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 91fd2cb..fbc15be 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -185,7 +185,7 @@ public class MetaDataUtil {
     }
     
     public static PTableType getTableType(List<Mutation> tableMetaData, KeyValueBuilder builder,
-      ImmutableBytesPtr value) {
+      ImmutableBytesWritable value) {
         if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData),
             PhoenixDatabaseMetaData.TABLE_TYPE_BYTES, builder, value)) {
             return PTableType.fromSerializedValue(value.get()[value.getOffset()]);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
index 92bb1d8..61642bc 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/PhoenixRuntime.java
@@ -67,6 +67,7 @@ import org.apache.phoenix.jdbc.PhoenixResultSet;
 import org.apache.phoenix.monitoring.GlobalClientMetrics;
 import org.apache.phoenix.monitoring.GlobalMetric;
 import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.schema.AmbiguousColumnException;
 import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.KeyValueSchema;
@@ -78,12 +79,14 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
-import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.RowKeyValueAccessor;
 import org.apache.phoenix.schema.TableNotFoundException;
 import org.apache.phoenix.schema.ValueBitSet;
 import org.apache.phoenix.schema.types.PDataType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Joiner;
 import com.google.common.base.Splitter;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
@@ -96,6 +99,8 @@ import com.google.common.collect.Lists;
  * @since 0.1
  */
 public class PhoenixRuntime {
+    private static final Logger logger = LoggerFactory.getLogger(PhoenixRuntime.class);
+
     /**
      * Use this connection property to control HBase timestamps
      * by specifying your own long timestamp value at connection time. All
@@ -191,24 +196,39 @@ public class PhoenixRuntime {
             conn = DriverManager.getConnection(jdbcUrl, props)
                     .unwrap(PhoenixConnection.class);
 
-            for (String inputFile : execCmd.getInputFiles()) {
-                if (inputFile.endsWith(SQL_FILE_EXT)) {
-                    PhoenixRuntime.executeStatements(conn,
-                            new FileReader(inputFile), Collections.emptyList());
-                } else if (inputFile.endsWith(CSV_FILE_EXT)) {
-
-                    String tableName = execCmd.getTableName();
-                    if (tableName == null) {
-                        tableName = SchemaUtil.normalizeIdentifier(
-                                inputFile.substring(inputFile.lastIndexOf(File.separatorChar) + 1,
-                                        inputFile.length() - CSV_FILE_EXT.length()));
+            if (execCmd.isUpgrade()) {
+                if (execCmd.getInputFiles().isEmpty()) {
+                    Set<String> tablesNeedingUpgrade = UpgradeUtil.getPhysicalTablesWithDescVarLengthRowKey(conn);
+                    if (tablesNeedingUpgrade.isEmpty()) {
+                        String msg = "No tables are required to be upgraded due to incorrect row key order for descending, variable length columns (PHOENIX-2067)";
+                        System.out.println(msg);
+                    } else {
+                        String msg = "The following tables require upgrade due to a bug causing the row key to be incorrect for descending columns (PHOENIX-2067):\n" + Joiner.on(' ').join(tablesNeedingUpgrade);
+                        System.out.println("WARNING: " + msg);
+                    }
+                } else {
+                    UpgradeUtil.upgradeDescVarLengthRowKeys(conn, execCmd.getInputFiles());
+                }
+            } else {
+                for (String inputFile : execCmd.getInputFiles()) {
+                    if (inputFile.endsWith(SQL_FILE_EXT)) {
+                        PhoenixRuntime.executeStatements(conn,
+                                new FileReader(inputFile), Collections.emptyList());
+                    } else if (inputFile.endsWith(CSV_FILE_EXT)) {
+    
+                        String tableName = execCmd.getTableName();
+                        if (tableName == null) {
+                            tableName = SchemaUtil.normalizeIdentifier(
+                                    inputFile.substring(inputFile.lastIndexOf(File.separatorChar) + 1,
+                                            inputFile.length() - CSV_FILE_EXT.length()));
+                        }
+                        CSVCommonsLoader csvLoader =
+                                new CSVCommonsLoader(conn, tableName, execCmd.getColumns(),
+                                        execCmd.isStrict(), execCmd.getFieldDelimiter(),
+                                        execCmd.getQuoteCharacter(), execCmd.getEscapeCharacter(),
+                                        execCmd.getArrayElementSeparator());
+                        csvLoader.upsert(inputFile);
                     }
-                    CSVCommonsLoader csvLoader =
-                            new CSVCommonsLoader(conn, tableName, execCmd.getColumns(),
-                                    execCmd.isStrict(), execCmd.getFieldDelimiter(),
-                                    execCmd.getQuoteCharacter(), execCmd.getEscapeCharacter(),
-                                    execCmd.getArrayElementSeparator());
-                    csvLoader.upsert(inputFile);
                 }
             }
         } catch (Throwable t) {
@@ -459,6 +479,7 @@ public class PhoenixRuntime {
         private String arrayElementSeparator;
         private boolean strict;
         private List<String> inputFiles;
+        private boolean isUpgrade;
 
         /**
          * Factory method to build up an {@code ExecutionCommand} based on supplied parameters.
@@ -483,6 +504,11 @@ public class PhoenixRuntime {
                             "character");
             Option arrayValueSeparatorOption = new Option("a", "array-separator", true,
                     "Define the array element separator, defaults to ':'");
+            Option upgradeOption = new Option("u", "upgrade", false, "Upgrades tables specified as arguments " +
+                    "by rewriting them with the correct row key for descending columns. If no arguments are " +
+                    "specified, then tables that need to be upgraded will be displayed. " +
+                    "Note that " + QueryServices.THREAD_TIMEOUT_MS_ATTRIB + " and hbase.regionserver.lease.period " +
+                    "parameters must be set very high to prevent timeouts when upgrading.");
             Options options = new Options();
             options.addOption(tableOption);
             options.addOption(headerOption);
@@ -491,6 +517,7 @@ public class PhoenixRuntime {
             options.addOption(quoteCharacterOption);
             options.addOption(escapeCharacterOption);
             options.addOption(arrayValueSeparatorOption);
+            options.addOption(upgradeOption);
 
             CommandLineParser parser = new PosixParser();
             CommandLine cmdLine = null;
@@ -530,6 +557,10 @@ public class PhoenixRuntime {
             execCmd.arrayElementSeparator = cmdLine.getOptionValue(
                     arrayValueSeparatorOption.getOpt(),
                     CSVCommonsLoader.DEFAULT_ARRAY_ELEMENT_SEPARATOR);
+            
+            if (cmdLine.hasOption(upgradeOption.getOpt())) {
+                execCmd.isUpgrade = true;
+            }
 
 
             List<String> argList = Lists.newArrayList(cmdLine.getArgList());
@@ -539,21 +570,19 @@ public class PhoenixRuntime {
             execCmd.connectionString = argList.remove(0);
             List<String> inputFiles = Lists.newArrayList();
             for (String arg : argList) {
-                if (arg.endsWith(CSV_FILE_EXT) || arg.endsWith(SQL_FILE_EXT)) {
+                if (execCmd.isUpgrade || arg.endsWith(CSV_FILE_EXT) || arg.endsWith(SQL_FILE_EXT)) {
                     inputFiles.add(arg);
                 } else {
                     usageError("Don't know how to interpret argument '" + arg + "'", options);
                 }
             }
 
-            if (inputFiles.isEmpty()) {
+            if (inputFiles.isEmpty() && !execCmd.isUpgrade) {
                 usageError("At least one input file must be supplied", options);
             }
 
             execCmd.inputFiles = inputFiles;
 
-
-
             return execCmd;
         }
 
@@ -620,87 +649,12 @@ public class PhoenixRuntime {
         public boolean isStrict() {
             return strict;
         }
-    }
-    
-    /**
-     * Encode the primary key values from the table as a byte array. The values must
-     * be in the same order as the primary key constraint. If the connection and
-     * table are both tenant-specific, the tenant ID column must not be present in
-     * the values.
-     * @param conn an open connection
-     * @param fullTableName the full table name
-     * @param values the values of the primary key columns ordered in the same order
-     *  as the primary key constraint
-     * @return the encoded byte array
-     * @throws SQLException if the table cannot be found or the incorrect number of
-     *  of values are provided
-     * @see #decodePK(Connection, String, byte[]) to decode the byte[] back to the
-     *  values
-     */
-    @Deprecated
-    public static byte[] encodePK(Connection conn, String fullTableName, Object[] values) throws SQLException {
-        PTable table = getTable(conn, fullTableName);
-        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
-        int offset = (table.getBucketNum() == null ? 0 : 1) + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
-        List<PColumn> pkColumns = table.getPKColumns();
-        if (pkColumns.size() - offset != values.length) {
-            throw new SQLException("Expected " + (pkColumns.size() - offset) + " but got " + values.length);
-        }
-        PDataType type = null;
-        TrustedByteArrayOutputStream output = new TrustedByteArrayOutputStream(table.getRowKeySchema().getEstimatedValueLength());
-        try {
-            for (int i = offset; i < pkColumns.size(); i++) {
-                if (type != null && !type.isFixedWidth()) {
-                    output.write(QueryConstants.SEPARATOR_BYTE);
-                }
-                type = pkColumns.get(i).getDataType();
 
-                //for fixed width data types like CHAR and BINARY, we need to pad values to be of max length.
-                Object paddedObj = type.pad(values[i - offset], pkColumns.get(i).getMaxLength());
-                byte[] value = type.toBytes(paddedObj);
-                output.write(value);
-            }
-            return output.toByteArray();
-        } finally {
-            try {
-                output.close();
-            } catch (IOException e) {
-                throw new RuntimeException(e); // Impossible
-            }
+        public boolean isUpgrade() {
+            return isUpgrade;
         }
     }
-
-    /**
-     * Decode a byte array value back into the Object values of the
-     * primary key constraint. If the connection and table are both
-     * tenant-specific, the tenant ID column is not expected to have
-     * been encoded and will not appear in the returned values.
-     * @param conn an open connection
-     * @param name the full table name
-     * @param encodedValue the value that was encoded with {@link #encodePK(Connection, String, Object[])}
-     * @return the Object values encoded in the byte array value
-     * @throws SQLException
-     */
-    @Deprecated
-    public static Object[] decodePK(Connection conn, String name, byte[] value) throws SQLException {
-        PTable table = getTable(conn, name);
-        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
-        int offset = (table.getBucketNum() == null ? 0 : 1) + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
-        int nValues = table.getPKColumns().size() - offset;
-        RowKeySchema schema = table.getRowKeySchema();
-        Object[] values = new Object[nValues];
-        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
-        schema.iterator(value, ptr);
-        int i = 0;
-        int fieldIdx = offset;
-        while (i < nValues && schema.next(ptr, fieldIdx, value.length) != null) {
-            values[i] = schema.getField(fieldIdx).getDataType().toObject(ptr);
-            i++;
-            fieldIdx++;
-        }
-        return values;
-    }
-
+    
     /**
     * Returns the optimized query plan used by Phoenix for executing the SQL.
      * @param stmt to return the plan for

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
index 3dff972..d63edbb 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -326,13 +326,15 @@ public class ScanUtil {
         // but the index for the field it represents in the schema
         // should be incremented by 1 + value in the current slotSpan index
         // slotSpan stores the number of columns beyond one that the range spans
+        Field field = null;
         int i = slotStartIndex, fieldIndex = ScanUtil.getRowKeyPosition(slotSpan, slotStartIndex);
         for (i = slotStartIndex; i < slotEndIndex; i++) {
             // Build up the key by appending the bound of each key range
             // from the current position of each slot. 
             KeyRange range = slots.get(i).get(position[i]);
             // Use last slot in a multi-span column to determine if fixed width
-            boolean isFixedWidth = schema.getField(fieldIndex + slotSpan[i]).getDataType().isFixedWidth();
+            field = schema.getField(fieldIndex + slotSpan[i]);
+            boolean isFixedWidth = field.getDataType().isFixedWidth();
             fieldIndex += slotSpan[i] + 1;
             /*
              * If the current slot is unbound then stop if:
@@ -369,9 +371,11 @@ public class ScanUtil {
             // key slots would cause the flag to become true.
             lastInclusiveUpperSingleKey = range.isSingleKey() && inclusiveUpper;
             anyInclusiveUpperRangeKey |= !range.isSingleKey() && inclusiveUpper;
+            // A match for IS NULL or IS NOT NULL should not have a DESC_SEPARATOR_BYTE as nulls sort first
+            byte sepByte = SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), bytes.length == 0 || range == KeyRange.IS_NULL_RANGE || range == KeyRange.IS_NOT_NULL_RANGE, field);
             
-            if (!isFixedWidth && ( fieldIndex < schema.getMaxFields() || inclusiveUpper || exclusiveLower)) {
-                key[offset++] = QueryConstants.SEPARATOR_BYTE;
+            if (!isFixedWidth && ( fieldIndex < schema.getMaxFields() || inclusiveUpper || exclusiveLower || sepByte == QueryConstants.DESC_SEPARATOR_BYTE)) {
+                key[offset++] = sepByte;
                 // Set lastInclusiveUpperSingleKey back to false if this is the last pk column
                 // as we don't want to increment the null byte in this case
                 lastInclusiveUpperSingleKey &= i < schema.getMaxFields()-1;
@@ -484,10 +488,11 @@ public class ScanUtil {
         while (schema.next(ptr, pos, maxOffset) != null) {
             pos++;
         }
-        if (!schema.getField(pos-1).getDataType().isFixedWidth()) {
+        Field field = schema.getField(pos-1);
+        if (!field.getDataType().isFixedWidth()) {
             byte[] newLowerRange = new byte[key.length + 1];
             System.arraycopy(key, 0, newLowerRange, 0, key.length);
-            newLowerRange[key.length] = QueryConstants.SEPARATOR_BYTE;
+            newLowerRange[key.length] = SchemaUtil.getSeparatorByte(schema.rowKeyOrderOptimizable(), key.length==0, field);
             key = newLowerRange;
         } else {
             key = Arrays.copyOf(key, key.length);
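
One subtlety in the hunks above: ranges for IS NULL and IS NOT NULL are
passed to getSeparatorByte as null values, so they keep the 0x00 separator
even on DESC columns. That is deliberate: null encodes as a zero-length
value and must still sort first under DESC. Sketch:

    // DESC VARCHAR component:
    //   null -> (empty) 00
    //   "a"  -> 9E FF
    // 00 < 9E, so null sorts first; terminating the empty value with FF
    // instead would wrongly push nulls after every non-null value.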

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index c674140..d01bf39 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
+import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -721,4 +722,26 @@ public class SchemaUtil {
         fullColumnName = fullColumnName.replaceAll(ESCAPE_CHARACTER, "");
        	return fullColumnName.trim();
     }
+    
+    /**
+     * Return the separator byte to use based on:
+     * @param rowKeyOrderOptimizable whether or not the table may optimize descending row keys. If the
+     *  table has no descending row keys, this will be true. Also, if the table has been upgraded (using
+     *  a new -u option for psql.py), then it'll be true
+     * @param isNullValue whether or not the value is null. We use a null byte still if the value is null
+     * regardless of sort order since nulls will always sort first this way.
+     * @param sortOrder whether the value sorts ascending or descending.
+     * @return the byte to use as the separator
+     */
+    public static byte getSeparatorByte(boolean rowKeyOrderOptimizable, boolean isNullValue, SortOrder sortOrder) {
+        return !rowKeyOrderOptimizable || isNullValue || sortOrder == SortOrder.ASC ? QueryConstants.SEPARATOR_BYTE : QueryConstants.DESC_SEPARATOR_BYTE;
+    }
+    
+    public static byte getSeparatorByte(boolean rowKeyOrderOptimizable, boolean isNullValue, Field f) {
+        return getSeparatorByte(rowKeyOrderOptimizable, isNullValue, f.getSortOrder());
+    }
+    
+    public static byte getSeparatorByte(boolean rowKeyOrderOptimizable, boolean isNullValue, Expression e) {
+        return getSeparatorByte(rowKeyOrderOptimizable, isNullValue, e.getSortOrder());
+    }
 }
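
The new SchemaUtil helper reduces to a small decision table (derived
directly from the one-line implementation above, with the conventional
0x00/0xFF values for SEPARATOR_BYTE and DESC_SEPARATOR_BYTE):

    rowKeyOrderOptimizable | isNullValue | sortOrder | separator
    -----------------------+-------------+-----------+-----------------------
    false                  | any         | any       | 0x00 (legacy layout)
    true                   | true        | any       | 0x00 (nulls sort first)
    true                   | false       | ASC       | 0x00
    true                   | false       | DESC      | 0xFF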

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
index 1a1b2c9..f495a7e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/TupleUtil.java
@@ -113,10 +113,9 @@ public class TupleUtil {
                 }
                 for (int i = 1; i < expressions.size(); i++) {
                     if (!expression.getDataType().isFixedWidth()) {
-                        output.write(QueryConstants.SEPARATOR_BYTE);
+                        output.write(SchemaUtil.getSeparatorByte(true, value.getLength()==0, expression));
                     }
                     expression = expressions.get(i);
-                    // TODO: should we track trailing null values and omit the separator bytes?
                     if (expression.evaluate(result, value)) {
                         output.write(value.get(), value.getOffset(), value.getLength());
                     } else if (i < expressions.size()-1 && expression.getDataType().isFixedWidth()) {
@@ -125,6 +124,10 @@ public class TupleUtil {
                         throw new DoNotRetryIOException("Non terminating null value found for fixed width expression (" + expression + ") in row: " + result);
                     }
                 }
+                // Write trailing separator if last expression was variable length and descending
+                if (!expression.getDataType().isFixedWidth() && SchemaUtil.getSeparatorByte(true, value.getLength()==0, expression) == QueryConstants.DESC_SEPARATOR_BYTE) {
+                    output.write(QueryConstants.DESC_SEPARATOR_BYTE);
+                }
                 byte[] outputBytes = output.getBuffer();
                 value.set(outputBytes, 0, output.size());
                 return value;
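
The trailing-separator addition above is asymmetric on purpose: an ASC
variable-length value needs no terminator because a shorter value is a
byte-prefix of a longer one and already sorts first, but under DESC the
prefix relationship points the wrong way:

    // DESC, no terminator:   "a" -> 9E,    "ab" -> 9E 9D    ("a" sorts first: wrong)
    // DESC, 0xFF terminator: "a" -> 9E FF, "ab" -> 9E 9D FF ("ab" sorts first: correct)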

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index c829799..e59ea98 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -35,19 +35,25 @@ import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COU
 import static org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT;
 
 import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
 import java.sql.PreparedStatement;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Properties;
+import java.util.Set;
 
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
@@ -60,6 +66,7 @@ import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.MetaDataEndpointImpl;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
@@ -67,15 +74,24 @@ import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTable.LinkType;
+import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SaltingUtil;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.schema.types.PDecimal;
+import org.apache.phoenix.schema.types.PDecimalArray;
 import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PVarchar;
+import org.apache.phoenix.schema.types.PVarcharArray;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Objects;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 
 public class UpgradeUtil {
     private static final Logger logger = LoggerFactory.getLogger(UpgradeUtil.class);
@@ -808,4 +824,289 @@ public class UpgradeUtil {
         }
     }
 
+    private static String getTableRVC(List<String> tableNames) {
+        StringBuilder query = new StringBuilder("(");
+        for (int i = 0; i < tableNames.size(); i+=3) {
+            String tenantId = tableNames.get(i);
+            String schemaName = tableNames.get(i+1);
+            String tableName = tableNames.get(i+2);
+            query.append('(');
+            query.append(tenantId == null ? "null" : ("'" + tenantId + "'"));
+            query.append(',');
+            query.append(schemaName == null ? "null" : ("'" + schemaName + "'"));
+            query.append(',');
+            query.append("'" + tableName + "'");
+            query.append("),");
+        }
+        // Replace trailing , with ) to end IN expression
+        query.setCharAt(query.length()-1, ')');
+        return query.toString();
+    }
+    
+    private static List<String> addPhysicalTables(PhoenixConnection conn, ResultSet rs, PTableType otherType, Set<String> physicalTables) throws SQLException {
+        List<String> tableNames = Lists.newArrayListWithExpectedSize(1024);
+        while (rs.next()) {
+            tableNames.add(rs.getString(1));
+            tableNames.add(rs.getString(2));
+            tableNames.add(rs.getString(3));
+        }
+        if (tableNames.isEmpty()) {
+            return Collections.emptyList();
+        }
+        
+        List<String> otherTables = Lists.newArrayListWithExpectedSize(tableNames.size());
+        // Find the header rows for tables that have not been upgraded already.
+        // We don't care about views, as the row key cannot be different than the table.
+        // We need this query to find physical tables which won't have a link row.
+        String query = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME,TABLE_TYPE\n" + 
+                "FROM SYSTEM.CATALOG (ROW_KEY_ORDER_OPTIMIZABLE BOOLEAN)\n" + 
+                "WHERE COLUMN_NAME IS NULL\n" + 
+                "AND COLUMN_FAMILY IS NULL\n" + 
+                "AND ROW_KEY_ORDER_OPTIMIZABLE IS NULL\n" +
+                "AND TABLE_TYPE IN ('" + PTableType.TABLE.getSerializedValue() + "','" + otherType.getSerializedValue() + "')\n" +
+                "AND (TENANT_ID, TABLE_SCHEM, TABLE_NAME) IN " + getTableRVC(tableNames);
+        rs = conn.createStatement().executeQuery(query);
+        
+        while (rs.next()) {
+            if (PTableType.TABLE.getSerializedValue().equals(rs.getString(4))) {
+                physicalTables.add(SchemaUtil.getTableName(rs.getString(2), rs.getString(3)));
+            } else {
+                otherTables.add(rs.getString(1));
+                otherTables.add(rs.getString(2));
+                otherTables.add(rs.getString(3));
+            }
+        }
+        return otherTables;
+    }
+    /**
+     * Identify the tables that need to be upgraded due to PHOENIX-2067
+     */
+    public static Set<String> getPhysicalTablesWithDescVarLengthRowKey(PhoenixConnection conn) throws SQLException {
+        // First query finds column rows of tables that need to be upgraded.
+        // We cannot tell if the column is from a table, view, or index however.
+        ResultSet rs = conn.createStatement().executeQuery(
+                "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + 
+                "FROM SYSTEM.CATALOG cat1\n" + 
+                "WHERE COLUMN_NAME IS NOT NULL\n" + 
+                "AND COLUMN_FAMILY IS NULL\n" + 
+                "AND SORT_ORDER = " + SortOrder.DESC.getSystemValue() + "\n" + 
+                "AND DATA_TYPE IN (" + PVarchar.INSTANCE.getSqlType() + "," + PDecimal.INSTANCE.getSqlType() + "," + PVarcharArray.INSTANCE.getSqlType() + "," + PDecimalArray.INSTANCE.getSqlType() + ")\n" +
+                "GROUP BY TENANT_ID,TABLE_SCHEM,TABLE_NAME");
+        Set<String> physicalTables = Sets.newHashSetWithExpectedSize(1024);
+        List<String> remainingTableNames = addPhysicalTables(conn, rs, PTableType.INDEX, physicalTables);
+        if (!remainingTableNames.isEmpty()) {
+            // Find tables/views for index
+            String indexLinkQuery = "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + 
+                    "FROM SYSTEM.CATALOG\n" + 
+                    "WHERE COLUMN_NAME IS NULL\n" + 
+                    "AND (TENANT_ID, TABLE_SCHEM, COLUMN_FAMILY) IN " + getTableRVC(remainingTableNames) + "\n" +
+                    "AND LINK_TYPE = " + LinkType.INDEX_TABLE.getSerializedValue();
+             rs = conn.createStatement().executeQuery(indexLinkQuery);
+             remainingTableNames = addPhysicalTables(conn, rs, PTableType.VIEW, physicalTables);
+             if (!remainingTableNames.isEmpty()) {
+                 // Find physical table name from views, splitting on '.' to get schema name and table name
+                 String physicalLinkQuery = "SELECT null, " + 
+                 " CASE WHEN INSTR(COLUMN_FAMILY,'.') = 0 THEN NULL ELSE SUBSTR(COLUMN_FAMILY,1,INSTR(COLUMN_FAMILY,'.')) END,\n" + 
+                 " CASE WHEN INSTR(COLUMN_FAMILY,'.') = 0 THEN COLUMN_FAMILY ELSE SUBSTR(COLUMN_FAMILY,INSTR(COLUMN_FAMILY,'.')+1) END\n" + 
+                         "FROM SYSTEM.CATALOG\n" + 
+                         "WHERE COLUMN_NAME IS NULL\n" + 
+                         "AND COLUMN_FAMILY IS NOT NULL\n" + 
+                         "AND (TENANT_ID, TABLE_SCHEM, TABLE_NAME) IN " + getTableRVC(remainingTableNames) + "\n" +
+                         "AND LINK_TYPE = " + LinkType.PHYSICAL_TABLE.getSerializedValue();
+                 rs = conn.createStatement().executeQuery(physicalLinkQuery);
+                 // Add any tables (which will all be physical tables) which have not already been upgraded.
+                 addPhysicalTables(conn, rs, PTableType.TABLE, physicalTables);
+             }
+        }
+        return physicalTables;
+    }
+
+    private static void upgradeDescVarLengthRowKeys(PhoenixConnection upgradeConn, PhoenixConnection globalConn, String schemaName, String tableName, boolean isTable) throws SQLException {
+        String escapedTableName = SchemaUtil.getEscapedTableName(schemaName, tableName);
+        String tenantInfo = "";
+        PName tenantId = PName.EMPTY_NAME;
+        if (upgradeConn.getTenantId() != null) {
+            tenantId = upgradeConn.getTenantId();
+            tenantInfo = " for tenant " + tenantId.getString();
+        }
+        String msg = "Starting upgrade of " + escapedTableName + tenantInfo + "...";
+        System.out.println(msg);
+        logger.info(msg);
+        ResultSet rs = upgradeConn.createStatement().executeQuery("SELECT /*+ NO_INDEX */ count(*) FROM " + escapedTableName);
+        rs.next(); // Run query
+        List<String> tableNames = Lists.newArrayListWithExpectedSize(1024);
+        tableNames.add(tenantId == PName.EMPTY_NAME ? null : tenantId.getString());
+        tableNames.add(schemaName);
+        tableNames.add(tableName);
+        // Find views to mark as upgraded
+        if (isTable) {
+            String physicalName = SchemaUtil.getTableName(schemaName, tableName);
+            String query =
+                    "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + 
+                    "FROM SYSTEM.CATALOG\n" + 
+                    "WHERE COLUMN_NAME IS NULL\n" + 
+                    "AND COLUMN_FAMILY = '" + physicalName + "'" +
+                    "AND LINK_TYPE = " + LinkType.PHYSICAL_TABLE.getSerializedValue();
+            rs = globalConn.createStatement().executeQuery(query);
+            while (rs.next()) {
+                tableNames.add(rs.getString(1));
+                tableNames.add(rs.getString(2));
+                tableNames.add(rs.getString(3));
+            }
+        }
+        // Mark the table and views as upgraded now
+        for (int i = 0; i < tableNames.size(); i += 3) {
+            String theTenantId = tableNames.get(i);
+            String theSchemaName = tableNames.get(i+1);
+            String theTableName = tableNames.get(i+2);
+            globalConn.createStatement().execute("UPSERT INTO " + PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME + 
+                " (" + PhoenixDatabaseMetaData.TENANT_ID + "," +
+                       PhoenixDatabaseMetaData.TABLE_SCHEM + "," +
+                       PhoenixDatabaseMetaData.TABLE_NAME + "," +
+                       MetaDataEndpointImpl.ROW_KEY_ORDER_OPTIMIZABLE + " BOOLEAN"
+               + ") VALUES (" +
+                       "'" + (theTenantId == null ? StringUtil.EMPTY_STRING : theTenantId) + "'," +
+                       "'" + (theSchemaName == null ? StringUtil.EMPTY_STRING : theSchemaName) + "'," +
+                       "'" + theTableName + "'," +
+                       "TRUE)");
+        }
+        globalConn.commit();
+        for (int i = 0; i < tableNames.size(); i += 3) {
+            String theTenantId = tableNames.get(i);
+            String theSchemaName = tableNames.get(i+1);
+            String theTableName = tableNames.get(i+2);
+            globalConn.getQueryServices().clearTableFromCache(
+                    theTenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(theTenantId),
+                    theSchemaName == null ? ByteUtil.EMPTY_BYTE_ARRAY : Bytes.toBytes(theSchemaName),
+                    Bytes.toBytes(theTableName), HConstants.LATEST_TIMESTAMP);
+        }
+        msg = "Completed upgrade of " + escapedTableName + tenantInfo;
+        System.out.println(msg);
+        logger.info(msg);
+    }
+    
+    private static boolean isInvalidTableToUpgrade(PTable table) throws SQLException {
+        return (table.getType() != PTableType.TABLE || // Must be a table
+            table.getTenantId() != null || // Must be global
+            !table.getPhysicalName().equals(table.getName())); // Must be the physical table
+    }
+    /**
+     * Upgrade tables and their indexes affected by a bug (PHOENIX-2067) that caused
+     * variable length DESC row key columns to be encoded such that rows no longer sort correctly.
+     */
+    public static void upgradeDescVarLengthRowKeys(PhoenixConnection conn, List<String> tablesToUpgrade) throws SQLException {
+        if (tablesToUpgrade.isEmpty()) {
+            return;
+        }
+        if (conn.getClientInfo(PhoenixRuntime.CURRENT_SCN_ATTRIB) != null) {
+            throw new SQLException("May not specify the CURRENT_SCN property when upgrading");
+        }
+        if (conn.getClientInfo(PhoenixRuntime.TENANT_ID_ATTRIB) != null) {
+            throw new SQLException("May not specify the TENANT_ID_ATTRIB property when upgrading");
+        }
+        List<PTable> tablesNeedingUpgrading = Lists.newArrayListWithExpectedSize(tablesToUpgrade.size());
+        List<String> invalidTables = Lists.newArrayListWithExpectedSize(tablesToUpgrade.size());
+        for (String fullTableName : tablesToUpgrade) {
+            PTable table = PhoenixRuntime.getTable(conn, fullTableName);
+            if (isInvalidTableToUpgrade(table)) {
+                invalidTables.add(fullTableName);
+            } else {
+                tablesNeedingUpgrading.add(table);
+            }
+        }
+        if (!invalidTables.isEmpty()) {
+            StringBuilder buf = new StringBuilder("Only physical tables should be upgraded, as their views and indexes will be updated along with them: ");
+            for (String fullTableName : invalidTables) {
+                buf.append(fullTableName);
+                buf.append(' ');
+            }
+            throw new SQLException(buf.toString());
+        }
+        PhoenixConnection upgradeConn = new PhoenixConnection(conn, true);
+        try {
+            upgradeConn.setAutoCommit(true);
+            for (PTable table : tablesNeedingUpgrading) {
+                boolean wasUpgraded = false;
+                if (!table.rowKeyOrderOptimizable()) {
+                    wasUpgraded = true;
+                    upgradeDescVarLengthRowKeys(upgradeConn, conn, table.getSchemaName().getString(), table.getTableName().getString(), true);
+                }
+                
+                // Upgrade global indexes
+                for (PTable index : table.getIndexes()) {
+                    if (!index.rowKeyOrderOptimizable() && index.getIndexType() != IndexType.LOCAL) {
+                        wasUpgraded = true;
+                        upgradeDescVarLengthRowKeys(upgradeConn, conn, index.getSchemaName().getString(), index.getTableName().getString(), false);
+                    }
+                }
+                
+                String sharedViewIndexName = Bytes.toString(MetaDataUtil.getViewIndexPhysicalName(table.getName().getBytes()));
+                // Upgrade view indexes
+                wasUpgraded |= upgradeSharedIndex(upgradeConn, conn, sharedViewIndexName);
+                String sharedLocalIndexName = Bytes.toString(MetaDataUtil.getLocalIndexPhysicalName(table.getName().getBytes()));
+                // Upgrade shared local indexes
+                wasUpgraded |= upgradeSharedIndex(upgradeConn, conn, sharedLocalIndexName);
+                
+                if (!wasUpgraded) {
+                    System.out.println("Upgrade not required for this table or its indexes: " + table.getName().getString());
+                }
+            }
+        } finally {
+            upgradeConn.close();
+        }
+    }
+    
+    /**
+     * Upgrade shared indexes (i.e. view indexes and local indexes) by querying for
+     * all those associated with the given physical table.
+     * @return true if any upgrades were performed and false otherwise.
+     */
+    private static boolean upgradeSharedIndex(PhoenixConnection upgradeConn, PhoenixConnection globalConn, String physicalName) throws SQLException {
+        String query =
+                "SELECT TENANT_ID,TABLE_SCHEM,TABLE_NAME\n" + 
+                "FROM SYSTEM.CATALOG cat1\n" + 
+                "WHERE COLUMN_NAME IS NULL\n" + 
+                "AND COLUMN_FAMILY = '" + physicalName + "'\n" + 
+                "AND LINK_TYPE = " + LinkType.PHYSICAL_TABLE.getSerializedValue() + "\n" +
+                "ORDER BY TENANT_ID";
+        ResultSet rs = globalConn.createStatement().executeQuery(query);
+        String lastTenantId = null;
+        Connection conn = globalConn;
+        String url = globalConn.getURL();
+        boolean wasUpgraded = false;
+        while (rs.next()) {
+            String fullTableName = SchemaUtil.getTableName(
+                    rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM),
+                    rs.getString(PhoenixDatabaseMetaData.TABLE_NAME));
+            String tenantId = rs.getString(1);
+            if (tenantId != null && !tenantId.equals(lastTenantId)) {
+                if (lastTenantId != null) {
+                    conn.close();
+                }
+                // Open tenant-specific connection when we find a new one
+                Properties props = new Properties(globalConn.getClientInfo());
+                props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
+                conn = DriverManager.getConnection(url, props);
+                lastTenantId = tenantId;
+            }
+            PTable table = PhoenixRuntime.getTable(conn, fullTableName);
+            String tableTenantId = table.getTenantId() == null ? null : table.getTenantId().getString();
+            if (Objects.equal(lastTenantId, tableTenantId) && !table.rowKeyOrderOptimizable()) {
+                upgradeDescVarLengthRowKeys(upgradeConn, globalConn, table.getSchemaName().getString(), table.getTableName().getString(), false);
+                wasUpgraded = true;
+            }
+        }
+        rs.close();
+        if (lastTenantId != null) {
+            conn.close();
+        }
+        return wasUpgraded;
+    }
+
+    public static void addRowKeyOrderOptimizableCell(List<Mutation> tableMetadata, byte[] tableHeaderRowKey, long clientTimeStamp) {
+        Put put = new Put(tableHeaderRowKey, clientTimeStamp);
+        put.addColumn(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                MetaDataEndpointImpl.ROW_KEY_ORDER_OPTIMIZABLE_BYTES, PBoolean.INSTANCE.toBytes(true));
+        tableMetadata.add(put);
+    }
 }

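For context, a minimal sketch of how the upgrade entry point added above might be
driven from client code, assuming the enclosing class is UpgradeUtil; the JDBC URL,
class name, and table name below are hypothetical:

    import java.sql.DriverManager;
    import java.util.Collections;

    import org.apache.phoenix.jdbc.PhoenixConnection;
    import org.apache.phoenix.util.UpgradeUtil;

    public class DescRowKeyUpgradeExample {
        public static void main(String[] args) throws Exception {
            // Per the checks in upgradeDescVarLengthRowKeys, the connection must
            // not set the CURRENT_SCN or TENANT_ID properties. URL is hypothetical.
            try (PhoenixConnection conn = DriverManager
                    .getConnection("jdbc:phoenix:localhost")
                    .unwrap(PhoenixConnection.class)) {
                // Only the physical table is named; its views, global indexes, and
                // shared view/local index tables are found and upgraded with it.
                UpgradeUtil.upgradeDescVarLengthRowKeys(conn,
                        Collections.singletonList("MY_SCHEMA.MY_TABLE")); // hypothetical
            }
        }
    }
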
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
index 6e637b7..0f34582 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/compile/QueryCompilerTest.java
@@ -1558,6 +1558,31 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
     }
     
     @Test 
+    public void testDescVarbinaryNotSupported() throws Exception {
+        Connection conn = DriverManager.getConnection(getUrl());
+        try {
+            conn.createStatement().execute("CREATE TABLE t (k VARBINARY PRIMARY KEY DESC)");
+            fail();
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.DESC_VARBINARY_NOT_SUPPORTED.getErrorCode(), e.getErrorCode());
+        }
+        try {
+            conn.createStatement().execute("CREATE TABLE t (k1 VARCHAR NOT NULL, k2 VARBINARY, CONSTRAINT pk PRIMARY KEY (k1,k2 DESC))");
+            fail();
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.DESC_VARBINARY_NOT_SUPPORTED.getErrorCode(), e.getErrorCode());
+        }
+        try {
+            conn.createStatement().execute("CREATE TABLE t (k1 VARCHAR PRIMARY KEY)");
+            conn.createStatement().execute("ALTER TABLE t ADD k2 VARBINARY PRIMARY KEY DESC");
+            fail();
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.DESC_VARBINARY_NOT_SUPPORTED.getErrorCode(), e.getErrorCode());
+        }
+        conn.close();
+    }
+    
+    @Test 
     public void testDivideByZeroExpressionIndex() throws Exception {
         String ddl = "CREATE TABLE t (k1 INTEGER PRIMARY KEY)";
         Connection conn = DriverManager.getConnection(getUrl());
@@ -1705,9 +1730,10 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
     @Test
     public void testOrderByOrderPreservingFwd() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl());
-        conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 date not null, v varchar, constraint pk primary key(k1,k2,k3))");
+        conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 varchar, v varchar, constraint pk primary key(k1,k2,k3))");
         String[] queries = {
                 "SELECT * FROM T ORDER BY (k1,k2), k3",
+                "SELECT * FROM T ORDER BY k1,k2,k3 NULLS FIRST",
                 "SELECT * FROM T ORDER BY k1,k2,k3",
                 "SELECT * FROM T ORDER BY k1,k2",
                 "SELECT * FROM T ORDER BY k1",
@@ -1727,8 +1753,9 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
     @Test
     public void testOrderByOrderPreservingRev() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl());
-        conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 date not null, v varchar, constraint pk primary key(k1,k2 DESC,k3))");
+        conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 varchar, v varchar, constraint pk primary key(k1,k2 DESC,k3 DESC))");
         String[] queries = {
+                "SELECT * FROM T ORDER BY INVERT(k1),k2,k3 nulls last",
                 "SELECT * FROM T ORDER BY INVERT(k1),k2",
                 "SELECT * FROM T ORDER BY INVERT(k1)",
                  "SELECT * FROM T ORDER BY TRUNC(k1, 'DAY') DESC, CEIL(k2, 'HOUR') DESC",
@@ -1745,8 +1772,10 @@ public class QueryCompilerTest extends BaseConnectionlessQueryTest {
     @Test
     public void testNotOrderByOrderPreserving() throws Exception {
         Connection conn = DriverManager.getConnection(getUrl());
-        conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 date not null, k3 date not null, v varchar, constraint pk primary key(k1,k2,k3))");
+        conn.createStatement().execute("CREATE TABLE t (k1 date not null, k2 varchar, k3 varchar, v varchar, constraint pk primary key(k1,k2,k3 desc))");
         String[] queries = {
+                "SELECT * FROM T ORDER BY k1,k2 NULLS LAST",
+                "SELECT * FROM T ORDER BY k1,k2, k3 NULLS LAST",
                 "SELECT * FROM T ORDER BY k1,k3",
                 "SELECT * FROM T ORDER BY SUBSTR(TO_CHAR(k1),1,4)",
                 "SELECT * FROM T ORDER BY k2",

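The new NULLS FIRST/NULLS LAST cases above follow from how Phoenix encodes nullable
ascending key columns: null is the empty byte sequence, so nulls sort first. Only the
default NULLS FIRST therefore matches row key order, while NULLS LAST forces a
client-side sort. A hedged sketch of checking this interactively (URL, class, and
table names are illustrative), where the second plan should surface a sort step:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;

    public class NullsOrderExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn =
                    DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.createStatement().execute(
                        "CREATE TABLE example (k1 DATE NOT NULL, k2 VARCHAR, " +
                        "CONSTRAINT pk PRIMARY KEY (k1, k2))");
                // Nulls sort first in an ASC key: no sort step expected here.
                printPlan(conn, "SELECT * FROM example ORDER BY k1, k2 NULLS FIRST");
                // NULLS LAST cannot reuse row key order: expect a sort step.
                printPlan(conn, "SELECT * FROM example ORDER BY k1, k2 NULLS LAST");
            }
        }

        private static void printPlan(Connection conn, String sql) throws Exception {
            try (ResultSet rs = conn.createStatement().executeQuery("EXPLAIN " + sql)) {
                while (rs.next()) {
                    System.out.println(rs.getString(1));
                }
            }
        }
    }
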
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java
new file mode 100644
index 0000000..f270d9d
--- /dev/null
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/OrderByTest.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.query;
+
+import static org.junit.Assert.assertEquals;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.phoenix.schema.SortOrder;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.junit.Test;
+
+public class OrderByTest extends BaseConnectionlessQueryTest {
+    @Test
+    public void testSortOrderForSingleDescVarLengthCol() throws SQLException {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t (k VARCHAR PRIMARY KEY DESC)");
+        conn.createStatement().execute("UPSERT INTO t VALUES ('a')");
+        conn.createStatement().execute("UPSERT INTO t VALUES ('ab')");
+
+        Iterator<Pair<byte[],List<KeyValue>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+        List<KeyValue> kvs = dataIterator.next().getSecond();
+        Collections.sort(kvs, KeyValue.COMPARATOR);
+        KeyValue first = kvs.get(0);
+        assertEquals("ab", Bytes.toString(SortOrder.invert(first.getRowArray(), first.getRowOffset(), first.getRowLength()-1)));
+        KeyValue second = kvs.get(1);
+        assertEquals("a", Bytes.toString(SortOrder.invert(second.getRowArray(), second.getRowOffset(), second.getRowLength()-1)));
+    }
+
+    @Test
+    public void testSortOrderForLeadingDescVarLengthColWithNullFollowing() throws SQLException {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t (k1 VARCHAR, k2 VARCHAR, CONSTRAINT pk PRIMARY KEY (k1 DESC,k2))");
+        conn.createStatement().execute("UPSERT INTO t VALUES ('a')");
+        conn.createStatement().execute("UPSERT INTO t VALUES ('ab')");
+
+        Iterator<Pair<byte[],List<KeyValue>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+        List<KeyValue> kvs = dataIterator.next().getSecond();
+        Collections.sort(kvs, KeyValue.COMPARATOR);
+        KeyValue first = kvs.get(0);
+        assertEquals("ab", Bytes.toString(SortOrder.invert(first.getRowArray(), first.getRowOffset(), first.getRowLength()-1)));
+        KeyValue second = kvs.get(1);
+        assertEquals("a", Bytes.toString(SortOrder.invert(second.getRowArray(), second.getRowOffset(), second.getRowLength()-1)));
+    }
+
+    @Test
+    public void testSortOrderForLeadingDescVarLengthColWithNonNullFollowing() throws SQLException {
+        Connection conn = DriverManager.getConnection(getUrl());
+        conn.createStatement().execute("CREATE TABLE t (k1 VARCHAR, k2 VARCHAR NOT NULL, CONSTRAINT pk PRIMARY KEY (k1 DESC,k2))");
+        conn.createStatement().execute("UPSERT INTO t VALUES ('a','x')");
+        conn.createStatement().execute("UPSERT INTO t VALUES ('ab', 'x')");
+
+        Iterator<Pair<byte[],List<KeyValue>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
+        List<KeyValue> kvs = dataIterator.next().getSecond();
+        Collections.sort(kvs, KeyValue.COMPARATOR);
+        KeyValue first = kvs.get(0);
+        assertEquals("ab", Bytes.toString(SortOrder.invert(first.getRowArray(), first.getRowOffset(), 2)));
+        KeyValue second = kvs.get(1);
+        assertEquals("a", Bytes.toString(SortOrder.invert(second.getRowArray(), second.getRowOffset(), 1)));
+    }
+}

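The assertions above hinge on the corrected DESC encoding: column bytes are
bit-inverted and, with this fix, variable length DESC key columns are terminated by
the 0xFF separator, so a longer string sorts ahead of its prefix as descending order
requires. A self-contained sketch of that comparison (class name is illustrative):

    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.phoenix.query.QueryConstants;
    import org.apache.phoenix.schema.SortOrder;

    public class DescEncodingSketch {
        public static void main(String[] args) {
            // Inversion alone would sort 'a' ({0x9E}) before 'ab' ({0x9E, 0x9D}),
            // which is wrong for DESC; the trailing 0xFF separator fixes it.
            byte[] aKey = Bytes.add(SortOrder.invert(Bytes.toBytes("a"), 0, 1),
                    QueryConstants.DESC_SEPARATOR_BYTE_ARRAY);
            byte[] abKey = Bytes.add(SortOrder.invert(Bytes.toBytes("ab"), 0, 2),
                    QueryConstants.DESC_SEPARATOR_BYTE_ARRAY);
            // Negative result: 'ab' now sorts before 'a', as DESC requires.
            System.out.println(Bytes.compareTo(abKey, aKey));
        }
    }
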
http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
index 85f9436..7ab9093 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/schema/types/PDataTypeTest.java
@@ -38,6 +38,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.ConstraintViolationException;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.util.TestUtil;
@@ -1754,4 +1755,15 @@ public class PDataTypeTest {
         assertTrue(PLong.INSTANCE.compareTo(l, i, PInteger.INSTANCE)==0);
         assertTrue(PInteger.INSTANCE.compareTo(i, l, PLong.INSTANCE)==0);
     }
+    
+    @Test
+    public void testSeparatorBytes() {
+        byte biggest = (byte) 0xFF;
+        assertEquals(biggest, QueryConstants.DESC_SEPARATOR_BYTE);
+        byte[] array = new byte[1];
+        for (int i = Byte.MIN_VALUE; i <= Byte.MAX_VALUE; i++) {
+            array[0] = (byte) i;
+            assertTrue(Bytes.compareTo(array, QueryConstants.DESC_SEPARATOR_BYTE_ARRAY) <= 0);
+        }
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/2620a80c/phoenix-protocol/src/main/PTable.proto
----------------------------------------------------------------------
diff --git a/phoenix-protocol/src/main/PTable.proto b/phoenix-protocol/src/main/PTable.proto
index cbbbdd6..a327803 100644
--- a/phoenix-protocol/src/main/PTable.proto
+++ b/phoenix-protocol/src/main/PTable.proto
@@ -84,4 +84,5 @@ message PTable {
   optional int64 statsTimeStamp = 23;
   optional bool storeNulls = 24;
   optional int32 baseColumnCount = 25;
+  optional bool rowKeyOrderOptimizable = 26;
 }