Posted to commits@phoenix.apache.org by ja...@apache.org on 2016/10/28 06:10:01 UTC

[2/3] phoenix git commit: PHOENIX-3396 Valid Multi-byte strings whose total byte size is greater than the max char limit cannot be inserted into VARCHAR fields in the PK

PHOENIX-3396 Valid Multi-byte strings whose total byte size is greater than the max char limit cannot be inserted into VARCHAR fields in the PK
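For context, a minimal standalone sketch (plain JDK code, not Phoenix code) of the length semantics at issue: a VARCHAR(20) limit is a character limit, but a 20-character string of multi-byte code points encodes to far more than 20 UTF-8 bytes, so a check that compares the encoded byte length against the declared limit rejects a legal value. The string below is the same one the new UpsertSelectIT test (further down in this diff) inserts.

    import java.nio.charset.StandardCharsets;

    public class MultiByteVarcharExample {
        public static void main(String[] args) {
            // 20-character value from the new test; every code point is 2-3 bytes in UTF-8.
            String value = "\u6fb4\u7c96\u87e4\u092f\u893b\u9143\u5ca4\u8c66\ud311\u85b0"
                         + "\u9129\u813c\u056a\ub066\u7889\u7889\u7889\u7889\u7889\u7889";
            int maxLength  = 20;                                             // VARCHAR(20)
            int charLength = value.length();                                 // 20
            int byteLength = value.getBytes(StandardCharsets.UTF_8).length;  // well over 20

            // Comparing bytes against the character limit (the pre-fix behavior this
            // commit addresses) rejects the value; comparing characters accepts it.
            System.out.println("chars=" + charLength + " bytes=" + byteLength);
            System.out.println("byte check rejects:      " + (byteLength > maxLength)); // true
            System.out.println("character check rejects: " + (charLength > maxLength)); // false
        }
    }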


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/a54a06cf
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/a54a06cf
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/a54a06cf

Branch: refs/heads/master
Commit: a54a06cf566363054778dc60431553c6384ef34d
Parents: 927c612
Author: James Taylor <ja...@apache.org>
Authored: Thu Oct 27 20:31:42 2016 -0700
Committer: James Taylor <ja...@apache.org>
Committed: Thu Oct 27 23:09:24 2016 -0700

----------------------------------------------------------------------
 .../phoenix/end2end/ArithmeticQueryIT.java      |  11 +-
 .../apache/phoenix/end2end/UpsertSelectIT.java  |  56 +-
 .../apache/phoenix/compile/UpsertCompiler.java  |   8 +-
 .../UngroupedAggregateRegionObserver.java       | 369 +++++-----
 .../exception/DataExceedsCapacityException.java |  14 +-
 .../phoenix/exception/SQLExceptionInfo.java     |   9 +-
 .../function/ArrayConcatFunction.java           |   5 +-
 .../function/ArrayModifierFunction.java         |   8 +-
 .../phoenix/index/PhoenixIndexBuilder.java      |   4 +-
 .../org/apache/phoenix/parse/ColumnDef.java     |   4 +-
 .../org/apache/phoenix/schema/PTableImpl.java   |  47 +-
 .../phoenix/schema/types/PArrayDataType.java    |  11 +-
 .../apache/phoenix/schema/types/PBinary.java    | 340 +++++-----
 .../phoenix/schema/types/PBinaryBase.java       |  17 +
 .../org/apache/phoenix/schema/types/PChar.java  |  15 +-
 .../apache/phoenix/schema/types/PDataType.java  |   5 +-
 .../apache/phoenix/schema/types/PDecimal.java   | 669 ++++++++++---------
 .../apache/phoenix/schema/types/PVarbinary.java | 248 ++++---
 .../apache/phoenix/schema/types/PVarchar.java   | 268 ++++----
 .../org/apache/phoenix/util/SchemaUtil.java     |  11 +-
 .../org/apache/phoenix/schema/MutationTest.java |  54 ++
 21 files changed, 1154 insertions(+), 1019 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
index 5ad356b..c297441 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ArithmeticQueryIT.java
@@ -225,11 +225,16 @@ public class ArithmeticQueryIT extends ParallelStatsDisabledIT {
             assertTrue(rs.next());
             assertEquals(new BigDecimal("100.3"), rs.getBigDecimal(1));
             assertFalse(rs.next());
-            // source and target in same table, values scheme incompatible.
+            // source and target in same table, values scheme incompatible. should throw
             query = "UPSERT INTO " + source + "(pk, col4) SELECT pk, col1 from " + source;
             stmt = conn.prepareStatement(query);
-            stmt.execute();
-            conn.commit();
+            try {
+                stmt.execute();
+                conn.commit();
+                fail();
+            } catch (SQLException e) {
+                assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(), e.getErrorCode());
+            }
             query = "SELECT col4 FROM " + source;
             stmt = conn.prepareStatement(query);
             rs = stmt.executeQuery();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
index 3561274..763f11b 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpsertSelectIT.java
@@ -19,6 +19,7 @@ package org.apache.phoenix.end2end;
 
 import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.apache.phoenix.util.PhoenixRuntime.UPSERT_BATCH_SIZE_ATTRIB;
+import static org.apache.phoenix.util.TestUtil.ATABLE_NAME;
 import static org.apache.phoenix.util.TestUtil.A_VALUE;
 import static org.apache.phoenix.util.TestUtil.B_VALUE;
 import static org.apache.phoenix.util.TestUtil.CUSTOM_ENTITY_DATA_FULL_NAME;
@@ -29,7 +30,6 @@ import static org.apache.phoenix.util.TestUtil.ROW7;
 import static org.apache.phoenix.util.TestUtil.ROW8;
 import static org.apache.phoenix.util.TestUtil.ROW9;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.apache.phoenix.util.TestUtil.ATABLE_NAME;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -1375,6 +1375,60 @@ public class UpsertSelectIT extends BaseClientManagedTimeIT {
         assertEquals("[[128,0,0,54], [128,0,4,0]]", rs.getArray(2).toString());
     }
 
+    @Test
+    public void testUpsertSelectWithMultiByteCharsNoAutoCommit() throws Exception {
+        testUpsertSelectWithMultiByteChars(false);
+    }
+
+    @Test
+    public void testUpsertSelectWithMultiByteCharsAutoCommit() throws Exception {
+        testUpsertSelectWithMultiByteChars(true);
+    }
+
+    private void testUpsertSelectWithMultiByteChars(boolean autoCommit) throws Exception {
+        long ts = nextTimestamp();
+        Properties props = new Properties();
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts));
+        Connection conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(autoCommit);
+        conn.createStatement().execute(
+                "create table t1 (id bigint not null primary key, v varchar(20))");
+        conn.close();
+
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 10));
+        conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(autoCommit);
+        conn.createStatement().execute("upsert into t1 values (1, 'foo')");
+        conn.commit();
+
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 15));
+        conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(autoCommit);
+        conn.createStatement().execute(
+                "upsert into t1(id, v) select id, '\u6fb4\u7c96\u87e4\u092f\u893b\u9143\u5ca4\u8c66\ud311\u85b0\u9129\u813c\u056a\ub066\u7889\u7889\u7889\u7889\u7889\u7889' from t1 WHERE id = 1");
+        conn.commit();
+
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 20));
+        conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(autoCommit);
+        ResultSet rs = conn.createStatement().executeQuery("select * from t1");
+
+        assertTrue(rs.next());
+        assertEquals(1, rs.getLong(1));
+        assertEquals("\u6fb4\u7c96\u87e4\u092f\u893b\u9143\u5ca4\u8c66\ud311\u85b0\u9129\u813c\u056a\ub066\u7889\u7889\u7889\u7889\u7889\u7889", rs.getString(2));
+
+        props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 25));
+        conn = DriverManager.getConnection(getUrl(), props);
+        conn.setAutoCommit(autoCommit);
+        try {
+            conn.createStatement().execute(
+                    "upsert into t1(id, v) select id, '\u6fb4\u7c96\u87e4\u092f\u893b\u9143\u5ca4\u8c66\ud311\u85b0\u9129\u813c\u056a\ub066\u7889\u7889\u7889\u7889\u7889\u7889\u7889' from t1 WHERE id = 1");
+            conn.commit();
+            fail();
+        } catch (SQLException e) {
+            assertEquals(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY.getErrorCode(), e.getErrorCode());
+        }
+    }
 
     @Test
     public void testParallelUpsertSelect() throws Exception {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
index 85517a1..8837445 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UpsertCompiler.java
@@ -203,8 +203,8 @@ public class UpsertCompiler {
                     Integer scale = rsScale == 0 ? null : rsScale;
                     // We are guaranteed that the two column will have compatible types,
                     // as we checked that before.
-                    if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), precision, scale,
-                            column.getMaxLength(), column.getScale())) { throw new SQLExceptionInfo.Builder(
+                    if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(), SortOrder.getDefault(), precision,
+                            scale, column.getMaxLength(), column.getScale())) { throw new SQLExceptionInfo.Builder(
                             SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setColumnName(column.getName().getString())
                             .setMessage("value=" + column.getDataType().toStringLiteral(ptr, null)).build()
                             .buildException(); }
@@ -1001,8 +1001,8 @@ public class UpsertCompiler {
                                         + constantExpression.toString() + " in column " + column);
                         }
                         if (!column.getDataType().isSizeCompatible(ptr, value, constantExpression.getDataType(),
-                                constantExpression.getMaxLength(), constantExpression.getScale(), 
-                                column.getMaxLength(), column.getScale())) { 
+                                constantExpression.getSortOrder(), constantExpression.getMaxLength(), 
+                                constantExpression.getScale(), column.getMaxLength(), column.getScale())) { 
                             throw new SQLExceptionInfo.Builder(
                                 SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setColumnName(column.getName().getString())
                                 .setMessage("value=" + constantExpression.toString()).build().buildException();

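As the call sites in this file show, isSizeCompatible now also takes the source value's SortOrder so the check can interpret DESC-encoded bytes correctly. The sketch below is a hedged usage example, assuming phoenix-core on the classpath; the argument order (ptr, value, srcType, srcSortOrder, srcMaxLength, srcScale, targetMaxLength, targetScale) is inferred from the call sites in this diff rather than from the method's documentation.

    import java.nio.charset.StandardCharsets;

    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.phoenix.schema.SortOrder;
    import org.apache.phoenix.schema.types.PVarchar;

    public class IsSizeCompatibleSketch {
        public static void main(String[] args) {
            // 3 characters, 9 UTF-8 bytes.
            byte[] bytes = "\u7889\u7889\u7889".getBytes(StandardCharsets.UTF_8);
            ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes);

            // With the fix, a 3-character multi-byte value should fit a VARCHAR(3)
            // target even though it is 9 bytes long.
            boolean fits = PVarchar.INSTANCE.isSizeCompatible(
                    ptr, null, PVarchar.INSTANCE, SortOrder.getDefault(),
                    null, null, 3, null);

            System.out.println("fits VARCHAR(3): " + fits); // expected: true
        }
    }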
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index 10d21d3..9ee0054 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -86,7 +86,6 @@ import org.apache.phoenix.index.PhoenixIndexCodec;
 import org.apache.phoenix.join.HashJoinInfo;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServicesOptions;
-import org.apache.phoenix.schema.ConstraintViolationException;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PRow;
 import org.apache.phoenix.schema.PTable;
@@ -109,7 +108,6 @@ import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.ScanUtil;
-import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TimeKeeper;
@@ -403,205 +401,199 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                     if (!results.isEmpty()) {
                         rowCount++;
                         result.setKeyValues(results);
-                        try {
-                            if (isDescRowKeyOrderUpgrade) {
-                                Arrays.fill(values, null);
-                                Cell firstKV = results.get(0);
-                                RowKeySchema schema = projectedTable.getRowKeySchema();
-                                int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
-                                for (int i = 0; i < schema.getFieldCount(); i++) {
-                                    Boolean hasValue = schema.next(ptr, i, maxOffset);
-                                    if (hasValue == null) {
-                                        break;
+                        if (isDescRowKeyOrderUpgrade) {
+                            Arrays.fill(values, null);
+                            Cell firstKV = results.get(0);
+                            RowKeySchema schema = projectedTable.getRowKeySchema();
+                            int maxOffset = schema.iterator(firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), ptr);
+                            for (int i = 0; i < schema.getFieldCount(); i++) {
+                                Boolean hasValue = schema.next(ptr, i, maxOffset);
+                                if (hasValue == null) {
+                                    break;
+                                }
+                                Field field = schema.getField(i);
+                                if (field.getSortOrder() == SortOrder.DESC) {
+                                    // Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
+                                    if (field.getDataType().isArrayType()) {
+                                        field.getDataType().coerceBytes(ptr, null, field.getDataType(),
+                                            field.getMaxLength(), field.getScale(), field.getSortOrder(), 
+                                            field.getMaxLength(), field.getScale(), field.getSortOrder(), true); // force to use correct separator byte
                                     }
-                                    Field field = schema.getField(i);
-                                    if (field.getSortOrder() == SortOrder.DESC) {
-                                        // Special case for re-writing DESC ARRAY, as the actual byte value needs to change in this case
-                                        if (field.getDataType().isArrayType()) {
-                                            field.getDataType().coerceBytes(ptr, null, field.getDataType(),
-                                                field.getMaxLength(), field.getScale(), field.getSortOrder(), 
-                                                field.getMaxLength(), field.getScale(), field.getSortOrder(), true); // force to use correct separator byte
-                                        }
-                                        // Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
-                                        else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
-                                            int len = ptr.getLength();
-                                            while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
-                                                len--;
-                                            }
-                                            ptr.set(ptr.get(), ptr.getOffset(), len);
-                                            // Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
-                                        } else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
-                                            byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
-                                            ptr.set(invertedBytes);
-                                        }
-                                    } else if (field.getDataType() == PBinary.INSTANCE) {
-                                        // Remove trailing space characters so that the setValues call below will replace them
-                                        // with the correct zero byte character. Note this is somewhat dangerous as these
-                                        // could be legit, but I don't know what the alternative is.
+                                    // Special case for re-writing DESC CHAR or DESC BINARY, to force the re-writing of trailing space characters
+                                    else if (field.getDataType() == PChar.INSTANCE || field.getDataType() == PBinary.INSTANCE) {
                                         int len = ptr.getLength();
                                         while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
                                             len--;
                                         }
-                                        ptr.set(ptr.get(), ptr.getOffset(), len);                                        
+                                        ptr.set(ptr.get(), ptr.getOffset(), len);
+                                        // Special case for re-writing DESC FLOAT and DOUBLE, as they're not inverted like they should be (PHOENIX-2171)
+                                    } else if (field.getDataType() == PFloat.INSTANCE || field.getDataType() == PDouble.INSTANCE) {
+                                        byte[] invertedBytes = SortOrder.invert(ptr.get(), ptr.getOffset(), ptr.getLength());
+                                        ptr.set(invertedBytes);
                                     }
-                                    values[i] = ptr.copyBytes();
-                                }
-                                writeToTable.newKey(ptr, values);
-                                if (Bytes.compareTo(
-                                    firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), 
-                                    ptr.get(),ptr.getOffset() + offset,ptr.getLength()) == 0) {
-                                    continue;
-                                }
-                                byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
-                                if (offset > 0) { // for local indexes (prepend region start key)
-                                    byte[] newRowWithOffset = new byte[offset + newRow.length];
-                                    System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);;
-                                    System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
-                                    newRow = newRowWithOffset;
+                                } else if (field.getDataType() == PBinary.INSTANCE) {
+                                    // Remove trailing space characters so that the setValues call below will replace them
+                                    // with the correct zero byte character. Note this is somewhat dangerous as these
+                                    // could be legit, but I don't know what the alternative is.
+                                    int len = ptr.getLength();
+                                    while (len > 0 && ptr.get()[ptr.getOffset() + len - 1] == StringUtil.SPACE_UTF8) {
+                                        len--;
+                                    }
+                                    ptr.set(ptr.get(), ptr.getOffset(), len);                                        
                                 }
-                                byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
-                                for (Cell cell : results) {
-                                    // Copy existing cell but with new row key
-                                    Cell newCell = new KeyValue(newRow, 0, newRow.length,
+                                values[i] = ptr.copyBytes();
+                            }
+                            writeToTable.newKey(ptr, values);
+                            if (Bytes.compareTo(
+                                firstKV.getRowArray(), firstKV.getRowOffset() + offset, firstKV.getRowLength(), 
+                                ptr.get(),ptr.getOffset() + offset,ptr.getLength()) == 0) {
+                                continue;
+                            }
+                            byte[] newRow = ByteUtil.copyKeyBytesIfNecessary(ptr);
+                            if (offset > 0) { // for local indexes (prepend region start key)
+                                byte[] newRowWithOffset = new byte[offset + newRow.length];
+                                System.arraycopy(firstKV.getRowArray(), firstKV.getRowOffset(), newRowWithOffset, 0, offset);;
+                                System.arraycopy(newRow, 0, newRowWithOffset, offset, newRow.length);
+                                newRow = newRowWithOffset;
+                            }
+                            byte[] oldRow = Bytes.copy(firstKV.getRowArray(), firstKV.getRowOffset(), firstKV.getRowLength());
+                            for (Cell cell : results) {
+                                // Copy existing cell but with new row key
+                                Cell newCell = new KeyValue(newRow, 0, newRow.length,
+                                    cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
+                                    cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
+                                    cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
+                                    cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
+                                switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
+                                case Put:
+                                    // If Put, point delete old Put
+                                    Delete del = new Delete(oldRow);
+                                    del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
                                         cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
-                                        cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength(),
-                                        cell.getTimestamp(), KeyValue.Type.codeToType(cell.getTypeByte()),
-                                        cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
-                                    switch (KeyValue.Type.codeToType(cell.getTypeByte())) {
-                                    case Put:
-                                        // If Put, point delete old Put
-                                        Delete del = new Delete(oldRow);
-                                        del.addDeleteMarker(new KeyValue(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(),
-                                            cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength(),
-                                            cell.getQualifierArray(), cell.getQualifierOffset(),
-                                            cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.Delete,
-                                            ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
-                                        mutations.add(del);
+                                        cell.getQualifierArray(), cell.getQualifierOffset(),
+                                        cell.getQualifierLength(), cell.getTimestamp(), KeyValue.Type.Delete,
+                                        ByteUtil.EMPTY_BYTE_ARRAY, 0, 0));
+                                    mutations.add(del);
 
-                                        Put put = new Put(newRow);
-                                        put.add(newCell);
-                                        mutations.add(put);
-                                        break;
-                                    case Delete:
-                                    case DeleteColumn:
-                                    case DeleteFamily:
-                                    case DeleteFamilyVersion:
-                                        Delete delete = new Delete(newRow);
-                                        delete.addDeleteMarker(newCell);
-                                        mutations.add(delete);
-                                        break;
-                                    }
+                                    Put put = new Put(newRow);
+                                    put.add(newCell);
+                                    mutations.add(put);
+                                    break;
+                                case Delete:
+                                case DeleteColumn:
+                                case DeleteFamily:
+                                case DeleteFamilyVersion:
+                                    Delete delete = new Delete(newRow);
+                                    delete.addDeleteMarker(newCell);
+                                    mutations.add(delete);
+                                    break;
                                 }
-                            } else if (buildLocalIndex) {
-                                for (IndexMaintainer maintainer : indexMaintainers) {
-                                    if (!results.isEmpty()) {
-                                        result.getKey(ptr);
-                                        ValueGetter valueGetter =
-                                                maintainer.createGetterFromKeyValues(
-                                                    ImmutableBytesPtr.copyBytesIfNecessary(ptr),
-                                                    results);
-                                        Put put = maintainer.buildUpdateMutation(kvBuilder,
-                                            valueGetter, ptr, results.get(0).getTimestamp(),
-                                            env.getRegion().getRegionInfo().getStartKey(),
-                                            env.getRegion().getRegionInfo().getEndKey());
-                                        indexMutations.add(put);
-                                    }
+                            }
+                        } else if (buildLocalIndex) {
+                            for (IndexMaintainer maintainer : indexMaintainers) {
+                                if (!results.isEmpty()) {
+                                    result.getKey(ptr);
+                                    ValueGetter valueGetter =
+                                            maintainer.createGetterFromKeyValues(
+                                                ImmutableBytesPtr.copyBytesIfNecessary(ptr),
+                                                results);
+                                    Put put = maintainer.buildUpdateMutation(kvBuilder,
+                                        valueGetter, ptr, results.get(0).getTimestamp(),
+                                        env.getRegion().getRegionInfo().getStartKey(),
+                                        env.getRegion().getRegionInfo().getEndKey());
+                                    indexMutations.add(put);
                                 }
-                                result.setKeyValues(results);
-                            } else if (isDelete) {
-                                // FIXME: the version of the Delete constructor without the lock
-                                // args was introduced in 0.94.4, thus if we try to use it here
-                                // we can no longer use the 0.94.2 version of the client.
-                                Cell firstKV = results.get(0);
-                                Delete delete = new Delete(firstKV.getRowArray(),
-                                    firstKV.getRowOffset(), firstKV.getRowLength(),ts);
-                                mutations.add(delete);
-                                // force tephra to ignore this deletes
-                                delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
-                            } else if (isUpsert) {
-                                Arrays.fill(values, null);
-                                int i = 0;
-                                List<PColumn> projectedColumns = projectedTable.getColumns();
-                                for (; i < projectedTable.getPKColumns().size(); i++) {
-                                    Expression expression = selectExpressions.get(i);
-                                    if (expression.evaluate(result, ptr)) {
-                                        values[i] = ptr.copyBytes();
-                                        // If SortOrder from expression in SELECT doesn't match the
-                                        // column being projected into then invert the bits.
-                                        if (expression.getSortOrder() !=
-                                                projectedColumns.get(i).getSortOrder()) {
-                                            SortOrder.invert(values[i], 0, values[i], 0,
-                                                values[i].length);
-                                        }
+                            }
+                            result.setKeyValues(results);
+                        } else if (isDelete) {
+                            // FIXME: the version of the Delete constructor without the lock
+                            // args was introduced in 0.94.4, thus if we try to use it here
+                            // we can no longer use the 0.94.2 version of the client.
+                            Cell firstKV = results.get(0);
+                            Delete delete = new Delete(firstKV.getRowArray(),
+                                firstKV.getRowOffset(), firstKV.getRowLength(),ts);
+                            mutations.add(delete);
+                            // force tephra to ignore this deletes
+                            delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
+                        } else if (isUpsert) {
+                            Arrays.fill(values, null);
+                            int i = 0;
+                            List<PColumn> projectedColumns = projectedTable.getColumns();
+                            for (; i < projectedTable.getPKColumns().size(); i++) {
+                                Expression expression = selectExpressions.get(i);
+                                if (expression.evaluate(result, ptr)) {
+                                    values[i] = ptr.copyBytes();
+                                    // If SortOrder from expression in SELECT doesn't match the
+                                    // column being projected into then invert the bits.
+                                    if (expression.getSortOrder() !=
+                                            projectedColumns.get(i).getSortOrder()) {
+                                        SortOrder.invert(values[i], 0, values[i], 0,
+                                            values[i].length);
                                     }
                                 }
-                                projectedTable.newKey(ptr, values);
-                                PRow row = projectedTable.newRow(kvBuilder, ts, ptr, false);
-                                for (; i < projectedColumns.size(); i++) {
-                                    Expression expression = selectExpressions.get(i);
-                                    if (expression.evaluate(result, ptr)) {
-                                        PColumn column = projectedColumns.get(i);
-                                        Object value = expression.getDataType()
-                                                .toObject(ptr, column.getSortOrder());
-                                        // We are guaranteed that the two column will have the
-                                        // same type.
-                                        if (!column.getDataType().isSizeCompatible(ptr, value,
-                                            column.getDataType(), expression.getMaxLength(),
-                                            expression.getScale(), column.getMaxLength(),
-                                            column.getScale())) {
-                                            throw new DataExceedsCapacityException(
-                                                column.getDataType(), column.getMaxLength(),
-                                                column.getScale());
-                                        }
-                                        column.getDataType().coerceBytes(ptr, value,
-                                            expression.getDataType(), expression.getMaxLength(),
-                                            expression.getScale(), expression.getSortOrder(), 
-                                            column.getMaxLength(), column.getScale(),
-                                            column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
-                                        byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
-                                        row.setValue(column, bytes);
+                            }
+                            projectedTable.newKey(ptr, values);
+                            PRow row = projectedTable.newRow(kvBuilder, ts, ptr, false);
+                            for (; i < projectedColumns.size(); i++) {
+                                Expression expression = selectExpressions.get(i);
+                                if (expression.evaluate(result, ptr)) {
+                                    PColumn column = projectedColumns.get(i);
+                                    if (!column.getDataType().isSizeCompatible(ptr, null,
+                                        expression.getDataType(), expression.getSortOrder(),
+                                        expression.getMaxLength(), expression.getScale(),
+                                        column.getMaxLength(), column.getScale())) {
+                                        throw new DataExceedsCapacityException(
+                                            column.getDataType(), column.getMaxLength(),
+                                            column.getScale(), column.getName().getString(), ptr);
                                     }
-                                }
-                                for (Mutation mutation : row.toRowMutations()) {
-                                    mutations.add(mutation);
-                                }
-                                for (i = 0; i < selectExpressions.size(); i++) {
-                                    selectExpressions.get(i).reset();
-                                }
-                            } else if (deleteCF != null && deleteCQ != null) {
-                                // No need to search for delete column, since we project only it
-                                // if no empty key value is being set
-                                if (emptyCF == null ||
-                                        result.getValue(deleteCF, deleteCQ) != null) {
-                                    Delete delete = new Delete(results.get(0).getRowArray(),
-                                        results.get(0).getRowOffset(),
-                                        results.get(0).getRowLength());
-                                    delete.deleteColumns(deleteCF,  deleteCQ, ts);
-                                    // force tephra to ignore this deletes
-                                    delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
-                                    mutations.add(delete);
+                                    column.getDataType().coerceBytes(ptr, null,
+                                        expression.getDataType(), expression.getMaxLength(),
+                                        expression.getScale(), expression.getSortOrder(), 
+                                        column.getMaxLength(), column.getScale(),
+                                        column.getSortOrder(), projectedTable.rowKeyOrderOptimizable());
+                                    byte[] bytes = ByteUtil.copyKeyBytesIfNecessary(ptr);
+                                    row.setValue(column, bytes);
                                 }
                             }
-                            if (emptyCF != null) {
-                                /*
-                                 * If we've specified an emptyCF, then we need to insert an empty
-                                 * key value "retroactively" for any key value that is visible at
-                                 * the timestamp that the DDL was issued. Key values that are not
-                                 * visible at this timestamp will not ever be projected up to
-                                 * scans past this timestamp, so don't need to be considered.
-                                 * We insert one empty key value per row per timestamp.
-                                 */
-                                Set<Long> timeStamps =
-                                        Sets.newHashSetWithExpectedSize(results.size());
-                                for (Cell kv : results) {
-                                    long kvts = kv.getTimestamp();
-                                    if (!timeStamps.contains(kvts)) {
-                                        Put put = new Put(kv.getRowArray(), kv.getRowOffset(),
-                                            kv.getRowLength());
-                                        put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
-                                            ByteUtil.EMPTY_BYTE_ARRAY);
-                                        mutations.add(put);
-                                    }
+                            for (Mutation mutation : row.toRowMutations()) {
+                                mutations.add(mutation);
+                            }
+                            for (i = 0; i < selectExpressions.size(); i++) {
+                                selectExpressions.get(i).reset();
+                            }
+                        } else if (deleteCF != null && deleteCQ != null) {
+                            // No need to search for delete column, since we project only it
+                            // if no empty key value is being set
+                            if (emptyCF == null ||
+                                    result.getValue(deleteCF, deleteCQ) != null) {
+                                Delete delete = new Delete(results.get(0).getRowArray(),
+                                    results.get(0).getRowOffset(),
+                                    results.get(0).getRowLength());
+                                delete.deleteColumns(deleteCF,  deleteCQ, ts);
+                                // force tephra to ignore this deletes
+                                delete.setAttribute(TxConstants.TX_ROLLBACK_ATTRIBUTE_KEY, new byte[0]);
+                                mutations.add(delete);
+                            }
+                        }
+                        if (emptyCF != null) {
+                            /*
+                             * If we've specified an emptyCF, then we need to insert an empty
+                             * key value "retroactively" for any key value that is visible at
+                             * the timestamp that the DDL was issued. Key values that are not
+                             * visible at this timestamp will not ever be projected up to
+                             * scans past this timestamp, so don't need to be considered.
+                             * We insert one empty key value per row per timestamp.
+                             */
+                            Set<Long> timeStamps =
+                                    Sets.newHashSetWithExpectedSize(results.size());
+                            for (Cell kv : results) {
+                                long kvts = kv.getTimestamp();
+                                if (!timeStamps.contains(kvts)) {
+                                    Put put = new Put(kv.getRowArray(), kv.getRowOffset(),
+                                        kv.getRowLength());
+                                    put.add(emptyCF, QueryConstants.EMPTY_COLUMN_BYTES, kvts,
+                                        ByteUtil.EMPTY_BYTE_ARRAY);
+                                    mutations.add(put);
                                 }
                             }
                             // Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
@@ -617,13 +609,6 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
                                 commitBatch(region, indexMutations, null, blockingMemStoreSize, null, txState);
                                 indexMutations.clear();
                             }
-                        } catch (ConstraintViolationException e) {
-                            // Log and ignore in count
-                            logger.error(LogUtil.addCustomAnnotations("Failed to create row in " +
-                                    region.getRegionInfo().getRegionNameAsString() + " with values " +
-                                    SchemaUtil.toString(values),
-                                    ScanUtil.getCustomAnnotations(scan)), e);
-                            continue;
                         }
                         aggregators.aggregate(rowAggregators, result);
                         hasAny = true;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/exception/DataExceedsCapacityException.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/DataExceedsCapacityException.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/DataExceedsCapacityException.java
index 0ee81a0..a12c8a0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/DataExceedsCapacityException.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/DataExceedsCapacityException.java
@@ -17,8 +17,10 @@
  */
 package org.apache.phoenix.exception;
 
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.schema.IllegalDataException;
 import org.apache.phoenix.schema.types.PDataType;
+import org.apache.phoenix.util.SchemaUtil;
 
 
 public class DataExceedsCapacityException extends IllegalDataException {
@@ -29,12 +31,16 @@ public class DataExceedsCapacityException extends IllegalDataException {
                 SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setMessage(message).build().buildException());
     }
     
+    public DataExceedsCapacityException(PDataType type, Integer precision, Integer scale, String columnName, ImmutableBytesWritable value) {
+        super(new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
+            .setMessage((columnName == null ? "" : columnName + " ") + getTypeDisplayString(type, precision, scale, value))
+            .build().buildException());
+    }
     public DataExceedsCapacityException(PDataType type, Integer precision, Integer scale) {
-        super(new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setMessage(getTypeDisplayString(type, precision, scale))
-                .build().buildException());
+        this(type, precision, scale, null, null);
     }
 
-    private static String getTypeDisplayString(PDataType type, Integer precision, Integer scale) {
-        return type.toString() + "(" + precision + (scale == null ? "" : ("," + scale + ")"));
+    private static String getTypeDisplayString(PDataType type, Integer precision, Integer scale, ImmutableBytesWritable value) {
+        return type.toString() + "(" + precision + (scale == null ? "" : ("," + scale)) + ")" + (value == null || value.getLength() == 0 ? "" : (" value="+SchemaUtil.toString(type, value)));
     }
 }

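Note that the revised getTypeDisplayString also closes the parenthesis when scale is null (the previous version omitted it) and appends the offending value when one is available. The snippet below is illustrative only: it mirrors the string construction in the patched method with hypothetical inputs, it is not Phoenix code.

    public class CapacityMessageSketch {
        static String display(String type, Integer precision, Integer scale, String value) {
            return type + "(" + precision + (scale == null ? "" : ("," + scale)) + ")"
                    + (value == null ? "" : (" value=" + value));
        }

        public static void main(String[] args) {
            System.out.println(display("DECIMAL", 5, 2, "12345.67")); // DECIMAL(5,2) value=12345.67
            System.out.println(display("VARCHAR", 20, null, null));   // VARCHAR(20), paren now closed
        }
    }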
http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionInfo.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionInfo.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionInfo.java
index 50dffde..1c3694d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionInfo.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionInfo.java
@@ -120,9 +120,14 @@ public class SQLExceptionInfo {
 
     @Override
     public String toString() {
-        StringBuilder builder = new StringBuilder(code.toString());
+        String baseMessage = code.toString();
+        StringBuilder builder = new StringBuilder(baseMessage);
         if (message != null) {
-            builder.append(" ").append(message);
+            if (message.startsWith(baseMessage)) {
+                builder.append(message.substring(baseMessage.length()));
+            } else {
+                builder.append(" ").append(message);
+            }
         }
         if (functionName != null) {
             builder.append(" ").append(FUNCTION_NAME).append("=").append(functionName);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java
index 77790b9..85655c6 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayConcatFunction.java
@@ -22,6 +22,7 @@ import java.util.List;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.parse.FunctionParseNode;
+import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TypeMismatchException;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PArrayDataType;
@@ -52,16 +53,18 @@ public class ArrayConcatFunction extends ArrayModifierFunction {
         }
         boolean isLHSRowKeyOrderOptimized = PArrayDataType.isRowKeyOrderOptimized(getLHSExpr().getDataType(), getLHSExpr().getSortOrder(), ptr);
 
+        SortOrder sortOrder = getRHSExpr().getSortOrder();
         int actualLengthOfArray1 = Math.abs(PArrayDataType.getArrayLength(ptr, getLHSBaseType(), getLHSExpr().getMaxLength()));
         int lengthArray1 = ptr.getLength();
         int offsetArray1 = ptr.getOffset();
         byte[] array1Bytes = ptr.get();
         if (!getRHSExpr().evaluate(tuple, ptr)|| ptr.getLength() == 0){
+            sortOrder = getLHSExpr().getSortOrder();
             ptr.set(array1Bytes, offsetArray1, lengthArray1);
             return true;
         }
 
-        checkSizeCompatibility(ptr, getLHSExpr(), getLHSExpr().getDataType(), getRHSExpr(),getRHSExpr().getDataType());
+        checkSizeCompatibility(ptr, sortOrder, getLHSExpr(), getLHSExpr().getDataType(), getRHSExpr(),getRHSExpr().getDataType());
 
         // FIXME: calling version of coerceBytes that takes into account the separator used by LHS
         // If the RHS does not have the same separator, it'll be coerced to use it. It's unclear

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java
index 9bd7372..bcf2a5a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/expression/function/ArrayModifierFunction.java
@@ -104,7 +104,7 @@ public abstract class ArrayModifierFunction extends ScalarFunction {
 
         otherExpr.evaluate(tuple, ptr);
 
-        checkSizeCompatibility(ptr, arrayExpr, baseDataType, otherExpr, otherExpressionType);
+        checkSizeCompatibility(ptr, otherExpr.getSortOrder(), arrayExpr, baseDataType, otherExpr, otherExpressionType);
         coerceBytes(ptr, arrayExpr, baseDataType, otherExpr, otherExpressionType);
         return modifierFunction(ptr, length, offset, arrayBytes, baseDataType, arrayLength, getMaxLength(),
                 arrayExpr);
@@ -117,11 +117,11 @@ public abstract class ArrayModifierFunction extends ScalarFunction {
         return false;
     }
 
-    protected void checkSizeCompatibility(ImmutableBytesWritable ptr, Expression arrayExpr,
+    protected void checkSizeCompatibility(ImmutableBytesWritable ptr, SortOrder sortOrder, Expression arrayExpr,
                                           PDataType baseDataType, Expression otherExpr, PDataType otherExpressionType) {
         if (!baseDataType.isSizeCompatible(ptr, null, otherExpressionType,
-                otherExpr.getMaxLength(), otherExpr.getScale(), arrayExpr.getMaxLength(),
-                arrayExpr.getScale())) {
+                sortOrder, otherExpr.getMaxLength(), otherExpr.getScale(),
+                arrayExpr.getMaxLength(), arrayExpr.getScale())) {
             throw new DataExceedsCapacityException("Values are not size compatible");
         }
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
index d6adc71..ac1e2e4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexBuilder.java
@@ -223,8 +223,8 @@ public class PhoenixIndexBuilder extends NonTxIndexBuilder {
                     // We are guaranteed that the two column will have the
                     // same type.
                     if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
-                            expression.getMaxLength(), expression.getScale(), column.getMaxLength(),
-                            column.getScale())) {
+                            expression.getSortOrder(), expression.getMaxLength(), expression.getScale(),
+                            column.getMaxLength(), column.getScale())) {
                         throw new DataExceedsCapacityException(column.getDataType(), column.getMaxLength(),
                             column.getScale());
                     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java
index 4b148dd..0be7c16 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/parse/ColumnDef.java
@@ -281,8 +281,8 @@ public class ColumnDef {
             throw e;
         }
         if (!targetType.isSizeCompatible(ptr, defaultValue.getValue(), sourceType, 
-                defaultValue.getMaxLength(), defaultValue.getScale(), 
-                this.getMaxLength(), this.getScale())) {
+                sortOrder, defaultValue.getMaxLength(), 
+                defaultValue.getScale(), this.getMaxLength(), this.getScale())) {
             throw new SQLExceptionInfo.Builder(
                     SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setColumnName(this.getColumnDefName().getColumnName())
                     .setMessage("DEFAULT " + this.getExpression()).build()

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
index 627740b..98a0b99 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java
@@ -66,7 +66,6 @@ import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.SizedUtil;
-import org.apache.phoenix.util.StringUtil;
 import org.apache.phoenix.util.TrustedByteArrayOutputStream;
 import org.apache.tephra.TxConstants;
 
@@ -671,19 +670,14 @@ public class PTableImpl implements PTable {
                     throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null");
                 }
                 Integer	maxLength = column.getMaxLength();
-                if (maxLength != null && type.isFixedWidth() && byteValue.length < maxLength) {
-                    if (rowKeyOrderOptimizable()) {
-                        key.set(byteValue);
-                        type.pad(key, maxLength, sortOrder);
-                        byteValue = ByteUtil.copyKeyBytesIfNecessary(key);
-                    } else {
-                        // TODO: remove this incorrect code and move StringUtil.padChar() to TestUtil
-                        // once we require tables to have been upgraded
-                        byteValue = StringUtil.padChar(byteValue, maxLength);
-                    }
-                } else if (maxLength != null && !type.isArrayType() && byteValue.length > maxLength) {
-                    throw new DataExceedsCapacityException(name.getString() + "." + column.getName().getString() + " may not exceed " + maxLength + " bytes (" + SchemaUtil.toString(type, byteValue) + ")");
+                Integer scale = column.getScale();
+                key.set(byteValue);
+                if (!type.isSizeCompatible(key, null, type, sortOrder, null, null, maxLength, scale)) {
+                    throw new DataExceedsCapacityException(name.getString() + "." + column.getName().getString() + " may not exceed " + maxLength + " (" + SchemaUtil.toString(type, byteValue) + ")");
                 }
+                key.set(byteValue);
+                type.pad(key, maxLength, sortOrder);
+                byteValue = ByteUtil.copyKeyBytesIfNecessary(key);
                 os.write(byteValue, 0, byteValue.length);
             }
             // Need trailing byte for DESC columns
@@ -853,11 +847,14 @@ public class PTableImpl implements PTable {
             byte[] qualifier = column.getName().getBytes();
             PDataType<?> type = column.getDataType();
             // Check null, since some types have no byte representation for null
+            if (byteValue == null) {
+                byteValue = ByteUtil.EMPTY_BYTE_ARRAY;
+            }
             boolean isNull = type.isNull(byteValue);
             if (isNull && !column.isNullable()) {
-                throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + " may not be null");
-            } else if (isNull && PTableImpl.this.isImmutableRows()
-                    && column.getExpressionStr() == null) {
+                throw new ConstraintViolationException(name.getString() + "." + column.getName().getString() + 
+                        " may not be null");
+            } else if (isNull && PTableImpl.this.isImmutableRows() && column.getExpressionStr() == null) {
                 // Store nulls for immutable tables otherwise default value would be used
                 removeIfPresent(setValues, family, qualifier);
                 removeIfPresent(unsetValues, family, qualifier);
@@ -869,16 +866,16 @@ public class PTableImpl implements PTable {
                 deleteQuietly(unsetValues, kvBuilder, kvBuilder.buildDeleteColumns(keyPtr, column
                             .getFamilyName().getBytesPtr(), column.getName().getBytesPtr(), ts));
             } else {
-                ImmutableBytesWritable ptr = new ImmutableBytesWritable(byteValue == null ?
-                        HConstants.EMPTY_BYTE_ARRAY : byteValue);
+                ImmutableBytesWritable ptr = new ImmutableBytesWritable(byteValue);
                 Integer	maxLength = column.getMaxLength();
-            	if (!isNull && type.isFixedWidth() && maxLength != null) {
-    				if (ptr.getLength() < maxLength) {
-                        type.pad(ptr, maxLength, column.getSortOrder());
-                    } else if (ptr.getLength() > maxLength) {
-                        throw new DataExceedsCapacityException(name.getString() + "." + column.getName().getString() + " may not exceed " + maxLength + " bytes (" + type.toObject(byteValue) + ")");
-                    }
-            	}
+                Integer scale = column.getScale();
+                SortOrder sortOrder = column.getSortOrder();
+                if (!type.isSizeCompatible(ptr, null, type, sortOrder, null, null, maxLength, scale)) {
+                    throw new DataExceedsCapacityException(name.getString() + "." + column.getName().getString() + 
+                            " may not exceed " + maxLength + " (" + SchemaUtil.toString(type, byteValue) + ")");
+                }
+                ptr.set(byteValue);
+                type.pad(ptr, maxLength, sortOrder);
                 removeIfPresent(unsetValues, family, qualifier);
                 addQuietly(setValues, kvBuilder, kvBuilder.buildPut(keyPtr,
                         column.getFamilyName().getBytesPtr(), column.getName().getBytesPtr(),

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
index eb1a7ff..1d2cfb2 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PArrayDataType.java
@@ -256,15 +256,18 @@ public abstract class PArrayDataType<T> extends PDataType<T> {
     }
 
     @Override
-    public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, Integer maxLength,
-            Integer scale, Integer desiredMaxLength, Integer desiredScale) {
+    public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, SortOrder sortOrder,
+            Integer maxLength, Integer scale, Integer desiredMaxLength, Integer desiredScale) {
         if (value == null) return true;
         PhoenixArray pArr = (PhoenixArray)value;
         PDataType baseType = PDataType.fromTypeId(srcType.getSqlType() - PDataType.ARRAY_TYPE_BASE);
+        // Since we only have a value and no byte[], use an empty length byte[] as otherwise
+        // isSizeCompatible will attempt to interpret the array ptr as a ptr to an element.
+        ImmutableBytesWritable elementPtr = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY);
         for (int i = 0; i < pArr.numElements; i++) {
             Object val = pArr.getElement(i);
-            if (!baseType.isSizeCompatible(ptr, val, baseType, srcType.getMaxLength(val), scale, desiredMaxLength,
-                    desiredScale)) { return false; }
+            if (!baseType.isSizeCompatible(elementPtr, val, baseType, sortOrder, srcType.getMaxLength(val), scale,
+                    desiredMaxLength, desiredScale)) { return false; }
         }
         return true;
     }
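
The element loop above derives the base type from the array's SQL type id and hands every element the same empty pointer, so the element type never tries to read the serialized array bytes. Below is a rough sketch of that shape over plain Java strings rather than a PhoenixArray; PVarcharArray, the sample values, and the per-element source length are illustrative, and what each base type returns is governed by its own isSizeCompatible override.

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PVarcharArray;
import org.apache.phoenix.util.ByteUtil;

public class ArrayElementCheckSketch {
    public static void main(String[] args) {
        PDataType<?> arrayType = PVarcharArray.INSTANCE;
        // Same derivation as above: array SQL type id minus ARRAY_TYPE_BASE yields the element type.
        PDataType<?> baseType = PDataType.fromTypeId(arrayType.getSqlType() - PDataType.ARRAY_TYPE_BASE);

        // Empty pointer, so the element check cannot misread the array's own bytes.
        ImmutableBytesWritable elementPtr = new ImmutableBytesWritable(ByteUtil.EMPTY_BYTE_ARRAY);
        Integer desiredMaxLength = 3; // e.g. a VARCHAR(3) ARRAY target
        for (String val : new String[] { "ab", "abcd" }) {
            Integer srcMaxLength = val == null ? null : val.length(); // stands in for srcType.getMaxLength(val)
            boolean ok = baseType.isSizeCompatible(elementPtr, val, baseType, SortOrder.ASC,
                    srcMaxLength, null, desiredMaxLength, null);
            System.out.println(val + " -> " + ok);
        }
    }
}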

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
index 7b4aa38..43906f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinary.java
@@ -30,26 +30,26 @@ import org.apache.phoenix.schema.SortOrder;
 
 public class PBinary extends PBinaryBase {
 
-  public static final PBinary INSTANCE = new PBinary();
+    public static final PBinary INSTANCE = new PBinary();
 
-  private PBinary() {
-    super("BINARY", Types.BINARY, byte[].class, null, 23);
-  }
+    private PBinary() {
+        super("BINARY", Types.BINARY, byte[].class, null, 23);
+    }
 
-  @Override
-  public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType, Integer actualMaxLength,
-          Integer actualScale, SortOrder actualModifier, Integer desiredMaxLength, Integer desiredScale,
-          SortOrder expectedModifier) {
-    PVarbinary.INSTANCE.coerceBytes(ptr, o, actualType, actualMaxLength, actualScale, actualModifier, desiredMaxLength, desiredScale, expectedModifier);
-    if (null != desiredMaxLength && null != expectedModifier) {
-      pad(ptr, desiredMaxLength, expectedModifier);
+    @Override
+    public void coerceBytes(ImmutableBytesWritable ptr, Object o, PDataType actualType, Integer actualMaxLength,
+            Integer actualScale, SortOrder actualModifier, Integer desiredMaxLength, Integer desiredScale,
+            SortOrder expectedModifier) {
+        PVarbinary.INSTANCE.coerceBytes(ptr, o, actualType, actualMaxLength, actualScale, actualModifier, desiredMaxLength, desiredScale, expectedModifier);
+        if (null != desiredMaxLength && null != expectedModifier) {
+            pad(ptr, desiredMaxLength, expectedModifier);
+        }
     }
-  }
 
-  @Override
-  public byte[] pad(byte[] b, Integer maxLength, SortOrder sortOrder) {
-      if (b == null || b.length >= maxLength) {
-          return b;
+    @Override
+    public byte[] pad(byte[] b, Integer maxLength, SortOrder sortOrder) {
+        if (b == null || b.length >= maxLength) {
+            return b;
         }
         byte[] newBytes = new byte[maxLength];
         System.arraycopy(b, 0, newBytes, 0, b.length);
@@ -57,164 +57,152 @@ public class PBinary extends PBinaryBase {
             Arrays.fill(newBytes, b.length, maxLength, QueryConstants.DESC_SEPARATOR_BYTE);
         }
         return newBytes;
-  }
-  
-  @Override
-  public void pad(ImmutableBytesWritable ptr, Integer maxLength, SortOrder sortOrder) {
-    if (ptr.getLength() >= maxLength) {
-      return;
-    }
-    byte[] newBytes = new byte[maxLength];
-    System.arraycopy(ptr.get(), ptr.getOffset(), newBytes, 0, ptr.getLength());
-    if (sortOrder == SortOrder.DESC) {
-        Arrays.fill(newBytes, ptr.getLength(), maxLength, QueryConstants.DESC_SEPARATOR_BYTE);
-    }
-    ptr.set(newBytes);
-  }
-
-  @Override
-  public Object pad(Object object, Integer maxLength) {
-    byte[] b = (byte[]) object;
-    int length = (b == null ? 0 : b.length);
-    if (length == maxLength) {
-      return object;
-    }
-    if (length > maxLength) {
-      throw new DataExceedsCapacityException(this, maxLength, null);
-    }
-    byte[] newBytes = new byte[maxLength];
-    System.arraycopy(b, 0, newBytes, 0, length);
-
-    return newBytes;
-  }
-
-  @Override
-  public byte[] toBytes(Object object) { // Delegate to VARBINARY
-    if (object == null) {
-      throw newIllegalDataException(this + " may not be null");
-    }
-    return PVarbinary.INSTANCE.toBytes(object);
-  }
-
-  @Override
-  public int toBytes(Object object, byte[] bytes, int offset) {
-    if (object == null) {
-      throw newIllegalDataException(this + " may not be null");
-    }
-    return PVarbinary.INSTANCE.toBytes(object, bytes, offset);
-
-  }
-
-  @Override
-  public byte[] toBytes(Object object, SortOrder sortOrder) {
-    byte[] bytes = toBytes(object);
-    if (sortOrder == SortOrder.DESC) {
-      return SortOrder.invert(bytes, 0, new byte[bytes.length], 0, bytes.length);
-    }
-    return bytes;
-  }
-
-  @Override
-  public Object toObject(byte[] bytes, int offset, int length, PDataType actualType,
-      SortOrder sortOrder, Integer maxLength, Integer scale) {
-    if (!actualType.isCoercibleTo(this)) {
-      throwConstraintViolationException(actualType, this);
-    }
-    return PVarbinary.INSTANCE.toObject(bytes, offset, length, actualType, sortOrder);
-  }
-
-  @Override
-  public Object toObject(Object object, PDataType actualType) {
-    return actualType.toBytes(object);
-  }
-
-  @Override
-  public boolean isFixedWidth() {
-    return true;
-  }
-
-  @Override
-  public int estimateByteSize(Object o) {
-    byte[] value = (byte[]) o;
-    return value == null ? 1 : value.length;
-  }
-
-  @Override
-  public boolean isCoercibleTo(PDataType targetType) {
-    return equalsAny(targetType, this, PVarbinary.INSTANCE);
-  }
-
-  @Override
-  public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType,
-      Integer maxLength, Integer scale, Integer desiredMaxLength,
-      Integer desiredScale) {
-    if (ptr.getLength() != 0 && (
-        (srcType.equals(PVarbinary.INSTANCE) && ((String) value).length() != ptr.getLength()) ||
-            (maxLength != null && desiredMaxLength != null && maxLength > desiredMaxLength))) {
-      return false;
-    }
-    return true;
-  }
-
-  @Override
-  public Integer estimateByteSizeFromLength(Integer length) {
-    return length;
-  }
-
-  @Override
-  public Integer getByteSize() {
-    return null;
-  }
-
-  @Override
-  public int compareTo(Object lhs, Object rhs, PDataType rhsType) {
-    if (lhs == null && rhs == null) {
-      return 0;
-    } else if (lhs == null) {
-      return -1;
-    } else if (rhs == null) {
-      return 1;
-    }
-    if (equalsAny(rhsType, PVarbinary.INSTANCE, PBinary.INSTANCE)) {
-      return Bytes.compareTo((byte[]) lhs, (byte[]) rhs);
-    } else {
-      byte[] rhsBytes = rhsType.toBytes(rhs);
-      return Bytes.compareTo((byte[]) lhs, rhsBytes);
-    }
-  }
-
-  @Override
-  public Integer getMaxLength(Object o) {
-    if (o == null) {
-      return null;
-    }
-    byte[] value = (byte[]) o;
-    return value.length;
-  }
-
-  @Override
-  public Object toObject(String value) {
-    if (value == null || value.length() == 0) {
-      return null;
-    }
-    return Base64.decode(value);
-  }
-
-  @Override
-  public String toStringLiteral(byte[] b, int offset, int length, Format formatter) {
-    if (length == 1) {
-      return Integer.toString(0xFF & b[offset]);
-    }
-    return PVarbinary.INSTANCE.toStringLiteral(b, offset, length, formatter);
-  }
-
-  @Override
-  public String toStringLiteral(Object o, Format formatter) {
-    return toStringLiteral((byte[])o, 0, ((byte[]) o).length, formatter);
-  }
-
-  @Override
-  public Object getSampleValue(Integer maxLength, Integer arrayLength) {
-    return PVarbinary.INSTANCE.getSampleValue(maxLength, arrayLength);
-  }
+    }
+
+    @Override
+    public void pad(ImmutableBytesWritable ptr, Integer maxLength, SortOrder sortOrder) {
+        if (ptr.getLength() >= maxLength) {
+            return;
+        }
+        byte[] newBytes = new byte[maxLength];
+        System.arraycopy(ptr.get(), ptr.getOffset(), newBytes, 0, ptr.getLength());
+        if (sortOrder == SortOrder.DESC) {
+            Arrays.fill(newBytes, ptr.getLength(), maxLength, QueryConstants.DESC_SEPARATOR_BYTE);
+        }
+        ptr.set(newBytes);
+    }
+
+    @Override
+    public Object pad(Object object, Integer maxLength) {
+        byte[] b = (byte[]) object;
+        int length = (b == null ? 0 : b.length);
+        if (length == maxLength) {
+            return object;
+        }
+        if (length > maxLength) {
+            throw new DataExceedsCapacityException(this, maxLength, null);
+        }
+        byte[] newBytes = new byte[maxLength];
+        System.arraycopy(b, 0, newBytes, 0, length);
+
+        return newBytes;
+    }
+
+    @Override
+    public byte[] toBytes(Object object) { // Delegate to VARBINARY
+        if (object == null) {
+            throw newIllegalDataException(this + " may not be null");
+        }
+        return PVarbinary.INSTANCE.toBytes(object);
+    }
+
+    @Override
+    public int toBytes(Object object, byte[] bytes, int offset) {
+        if (object == null) {
+            throw newIllegalDataException(this + " may not be null");
+        }
+        return PVarbinary.INSTANCE.toBytes(object, bytes, offset);
+
+    }
+
+    @Override
+    public byte[] toBytes(Object object, SortOrder sortOrder) {
+        byte[] bytes = toBytes(object);
+        if (sortOrder == SortOrder.DESC) {
+            return SortOrder.invert(bytes, 0, new byte[bytes.length], 0, bytes.length);
+        }
+        return bytes;
+    }
+
+    @Override
+    public Object toObject(byte[] bytes, int offset, int length, PDataType actualType,
+            SortOrder sortOrder, Integer maxLength, Integer scale) {
+        if (!actualType.isCoercibleTo(this)) {
+            throwConstraintViolationException(actualType, this);
+        }
+        return PVarbinary.INSTANCE.toObject(bytes, offset, length, actualType, sortOrder);
+    }
+
+    @Override
+    public Object toObject(Object object, PDataType actualType) {
+        return actualType.toBytes(object);
+    }
+
+    @Override
+    public boolean isFixedWidth() {
+        return true;
+    }
+
+    @Override
+    public int estimateByteSize(Object o) {
+        byte[] value = (byte[]) o;
+        return value == null ? 1 : value.length;
+    }
+
+    @Override
+    public boolean isCoercibleTo(PDataType targetType) {
+        return equalsAny(targetType, this, PVarbinary.INSTANCE);
+    }
+
+    @Override
+    public Integer estimateByteSizeFromLength(Integer length) {
+        return length;
+    }
+
+    @Override
+    public Integer getByteSize() {
+        return null;
+    }
+
+    @Override
+    public int compareTo(Object lhs, Object rhs, PDataType rhsType) {
+        if (lhs == null && rhs == null) {
+            return 0;
+        } else if (lhs == null) {
+            return -1;
+        } else if (rhs == null) {
+            return 1;
+        }
+        if (equalsAny(rhsType, PVarbinary.INSTANCE, PBinary.INSTANCE)) {
+            return Bytes.compareTo((byte[]) lhs, (byte[]) rhs);
+        } else {
+            byte[] rhsBytes = rhsType.toBytes(rhs);
+            return Bytes.compareTo((byte[]) lhs, rhsBytes);
+        }
+    }
+
+    @Override
+    public Integer getMaxLength(Object o) {
+        if (o == null) {
+            return null;
+        }
+        byte[] value = (byte[]) o;
+        return value.length;
+    }
+
+    @Override
+    public Object toObject(String value) {
+        if (value == null || value.length() == 0) {
+            return null;
+        }
+        return Base64.decode(value);
+    }
+
+    @Override
+    public String toStringLiteral(byte[] b, int offset, int length, Format formatter) {
+        if (length == 1) {
+            return Integer.toString(0xFF & b[offset]);
+        }
+        return PVarbinary.INSTANCE.toStringLiteral(b, offset, length, formatter);
+    }
+
+    @Override
+    public String toStringLiteral(Object o, Format formatter) {
+        return toStringLiteral((byte[])o, 0, ((byte[]) o).length, formatter);
+    }
+
+    @Override
+    public Object getSampleValue(Integer maxLength, Integer arrayLength) {
+        return PVarbinary.INSTANCE.getSampleValue(maxLength, arrayLength);
+    }
 }
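
The PBinary changes above are largely re-indentation, and its isSizeCompatible override moves into PBinaryBase below; the padding behavior itself is unchanged. A small sketch of that behavior, using only the methods visible above (the sample bytes are illustrative):

import java.util.Arrays;

import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PBinary;

public class BinaryPadSketch {
    public static void main(String[] args) {
        byte[] value = new byte[] { 1, 2 };

        // ASC: the copy is zero-filled up to maxLength.
        byte[] asc = PBinary.INSTANCE.pad(value, 4, SortOrder.ASC);
        System.out.println(Arrays.toString(asc)); // [1, 2, 0, 0]

        // DESC: the tail is filled with the DESC separator byte instead.
        byte[] desc = PBinary.INSTANCE.pad(value, 4, SortOrder.DESC);
        System.out.println(desc.length);          // 4

        // The Object variant rejects values longer than the declared length.
        try {
            PBinary.INSTANCE.pad((Object) new byte[] { 1, 2, 3, 4, 5 }, 4);
        } catch (RuntimeException e) {            // DataExceedsCapacityException
            System.out.println("too long: " + e.getMessage());
        }
    }
}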

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinaryBase.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinaryBase.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinaryBase.java
index 0ad4ce1..562875d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinaryBase.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PBinaryBase.java
@@ -95,4 +95,21 @@ public abstract class PBinaryBase extends PDataType<byte[]> {
         PInteger.INSTANCE.getCodec().encodeInt(length, bytes, 0);
         outPtr.set(bytes);
     }
+
+    @Override
+    public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType,
+        SortOrder sortOrder, Integer maxLength, Integer scale, Integer desiredMaxLength, Integer desiredScale) {
+        if (ptr.getLength() != 0 && desiredMaxLength != null) {
+            if (maxLength == null) { // If not specified, compute
+                if (value != null && srcType instanceof PBinaryBase) { // Use value if provided
+                    maxLength = ((byte[])value).length;
+                } else { // Else use ptr, coercing (which is likely a noop)
+                    this.coerceBytes(ptr, value, srcType, maxLength, scale, sortOrder, desiredMaxLength, desiredScale, sortOrder, true);
+                    maxLength = ptr.getLength();
+                }
+            }
+            return maxLength <= desiredMaxLength;
+        }
+        return true;
+    }
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
index 2853bc4..fa97992 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PChar.java
@@ -153,8 +153,19 @@ public class PChar extends PDataType<String> {
 
     @Override
     public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType,
-        Integer maxLength, Integer scale, Integer desiredMaxLength, Integer desiredScale) {
-      return PVarchar.INSTANCE.isSizeCompatible(ptr, value, srcType, maxLength, scale, desiredMaxLength, desiredScale);
+        SortOrder sortOrder, Integer maxLength, Integer scale, Integer desiredMaxLength, Integer desiredScale) {
+        if (ptr.getLength() != 0 && desiredMaxLength != null) {
+            if (maxLength == null) {
+                if (value != null && srcType == INSTANCE) { // Use value if provided
+                    maxLength = ((String)value).length();
+                } else {
+                    this.coerceBytes(ptr, value, srcType, maxLength, scale, sortOrder, desiredMaxLength, desiredScale, sortOrder, true);
+                    maxLength = ptr.getLength(); // Only single byte characters
+                }
+            }
+            return maxLength <= desiredMaxLength;
+        }
+        return true;
     }
 
     @Override
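
CHAR no longer delegates to VARCHAR's check: when the String value is supplied its character count is compared against the target length, and otherwise the byte length of the coerced pointer is used, which is equivalent because CHAR only holds single-byte characters. A short sketch of the value path (the sample literals are illustrative):

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PChar;

public class CharSizeCheckSketch {
    public static void main(String[] args) {
        String value = "abc";
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(PChar.INSTANCE.toBytes(value));

        // srcType == PChar.INSTANCE, so maxLength becomes value.length() (3 characters).
        boolean fitsChar3 = PChar.INSTANCE.isSizeCompatible(
                ptr, value, PChar.INSTANCE, SortOrder.ASC, null, null, 3, null);
        boolean fitsChar2 = PChar.INSTANCE.isSizeCompatible(
                ptr, value, PChar.INSTANCE, SortOrder.ASC, null, null, 2, null);

        System.out.println(fitsChar3); // true  (3 <= 3)
        System.out.println(fitsChar2); // false (3 >  2)
    }
}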

http://git-wip-us.apache.org/repos/asf/phoenix/blob/a54a06cf/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
index 5d611e9..58018ac 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/types/PDataType.java
@@ -747,14 +747,15 @@ public abstract class PDataType<T> implements DataType<T>, Comparable<PDataType<
      * @param ptr bytes pointer for the value
      * @param value object representation of the value. May be null in which case ptr will be used
      * @param srcType the type of the value
+     * @param sortOrder the sort order of the value
      * @param maxLength the max length of the source value or null if not applicable
      * @param scale the scale of the source value or null if not applicable
      * @param desiredMaxLength the desired max length for the value to be coerced
      * @param desiredScale the desired scale for the value to be coerced 
      * @return true if the value may be coerced without losing precision and false otherwise.
      */
-    public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, Integer maxLength,
-            Integer scale, Integer desiredMaxLength, Integer desiredScale) {
+    public boolean isSizeCompatible(ImmutableBytesWritable ptr, Object value, PDataType srcType, SortOrder sortOrder,
+            Integer maxLength, Integer scale, Integer desiredMaxLength, Integer desiredScale) {
         return true;
     }
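
Callers therefore move from the old seven-argument form to this eight-argument form that also carries the value's sort order. Below is a sketch of a helper wrapping the new contract, with the parameters labeled as in the javadoc above; the helper itself, its message, and the VARCHAR(10) target are illustrative, not part of the patch.

import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.exception.DataExceedsCapacityException;
import org.apache.phoenix.schema.SortOrder;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.schema.types.PVarchar;

public class SizeCompatibleHelperSketch {
    // Throws if the given bytes/value would not fit a target column of the same type
    // with the desired maxLength and scale.
    static void checkSizeCompatible(PDataType<?> type, byte[] bytes, Object value, SortOrder sortOrder,
            Integer desiredMaxLength, Integer desiredScale) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable(bytes);
        if (!type.isSizeCompatible(ptr, value, type, sortOrder,
                null /* source maxLength */, null /* source scale */,
                desiredMaxLength, desiredScale)) {
            throw new DataExceedsCapacityException(
                    type + " value does not fit in maxLength=" + desiredMaxLength);
        }
    }

    public static void main(String[] args) {
        String s = "abcd";
        // Expected to pass for a VARCHAR(10) target; lowering the limit below the value's
        // length is expected to throw.
        checkSizeCompatible(PVarchar.INSTANCE, PVarchar.INSTANCE.toBytes(s), s, SortOrder.ASC, 10, null);
    }
}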