Posted to commits@phoenix.apache.org by td...@apache.org on 2017/01/25 21:55:04 UTC

[4/4] phoenix git commit: PHOENIX-3586 Add StorageScheme table property to allow users to specify their custom storage schemes

PHOENIX-3586 Add StorageScheme table property to allow users to specify their custom storage schemes
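
For illustration, a minimal sketch (modeled on the ImmutableTablePropertiesIT tests added below) of how the new table properties are exercised through JDBC; the JDBC URL and table name are placeholders, not part of this commit:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class ImmutableStorageSchemeExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                // Pack all columns of a column family into a single cell per row
                // (SINGLE_CELL_ARRAY_WITH_OFFSETS) and use 4-byte encoded qualifiers.
                stmt.execute("CREATE IMMUTABLE TABLE EXAMPLE_TABLE "
                        + "(a_string VARCHAR NOT NULL, col1 INTEGER "
                        + "CONSTRAINT pk PRIMARY KEY (a_string)) "
                        + "COLUMN_ENCODED_BYTES=4, IMMUTABLE_STORAGE_SCHEME=SINGLE_CELL_ARRAY_WITH_OFFSETS");

                // Changing the storage scheme to/from ONE_CELL_PER_COLUMN after table
                // creation is rejected (see testAlterImmutableStorageSchemeProp below).
                try {
                    stmt.execute("ALTER TABLE EXAMPLE_TABLE "
                            + "SET IMMUTABLE_STORAGE_SCHEME=ONE_CELL_PER_COLUMN");
                } catch (SQLException e) {
                    System.out.println("Expected failure: " + e.getMessage());
                }
            }
        }
    }

ONE_CELL_PER_COLUMN keeps the existing one-KeyValue-per-column layout, while SINGLE_CELL_ARRAY_WITH_OFFSETS is only valid together with encoded column qualifiers (COLUMN_ENCODED_BYTES > 0), as the new ImmutableTablePropertiesIT below asserts.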


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/1f3f7323
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/1f3f7323
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/1f3f7323

Branch: refs/heads/encodecolumns2
Commit: 1f3f7323a54673bf3a10ffd0be96253a5cfcbb86
Parents: 61d9035
Author: Thomas D'Silva <td...@salesforce.com>
Authored: Mon Jan 9 19:22:19 2017 -0800
Committer: Thomas D'Silva <td...@salesforce.com>
Committed: Wed Jan 25 13:52:41 2017 -0800

----------------------------------------------------------------------
 .../AlterMultiTenantTableWithViewsIT.java       |  25 +-
 .../apache/phoenix/end2end/AlterTableIT.java    |  39 ++-
 .../phoenix/end2end/AlterTableWithViewsIT.java  |   4 +-
 .../phoenix/end2end/ImmutableTablePropIT.java   | 130 ---------
 .../end2end/ImmutableTablePropertiesIT.java     | 189 +++++++++++++
 .../phoenix/end2end/StatsCollectorIT.java       |  22 +-
 .../apache/phoenix/end2end/StoreNullsIT.java    |  18 +-
 .../apache/phoenix/end2end/index/IndexIT.java   |   1 -
 .../phoenix/compile/CreateTableCompiler.java    |  11 +-
 .../phoenix/compile/ExpressionCompiler.java     |   2 +-
 .../apache/phoenix/compile/FromCompiler.java    |   4 +-
 .../apache/phoenix/compile/JoinCompiler.java    |   6 +-
 .../compile/PostLocalIndexDDLCompiler.java      |   4 +-
 .../compile/TupleProjectionCompiler.java        |   4 +-
 .../apache/phoenix/compile/UnionCompiler.java   |   4 +-
 .../apache/phoenix/compile/WhereCompiler.java   |   4 +-
 .../coprocessor/BaseScannerRegionObserver.java  |   1 +
 .../GroupedAggregateRegionObserver.java         |   5 +-
 .../coprocessor/MetaDataEndpointImpl.java       |   4 +-
 .../phoenix/coprocessor/ScanRegionObserver.java |   6 +-
 .../UngroupedAggregateRegionObserver.java       |   3 +-
 .../phoenix/exception/SQLExceptionCode.java     |   2 +
 .../apache/phoenix/execute/BaseQueryPlan.java   |   4 +-
 .../apache/phoenix/execute/UnnestArrayPlan.java |   3 +-
 .../expression/ArrayColumnExpression.java       | 165 -----------
 .../expression/ArrayConstructorExpression.java  |  67 ++---
 .../phoenix/expression/ExpressionType.java      |   2 +-
 .../expression/SingleCellColumnExpression.java  | 171 ++++++++++++
 .../SingleCellConstructorExpression.java        | 102 +++++++
 .../function/ArrayElemRefExpression.java        |   4 +-
 .../expression/function/ArrayIndexFunction.java |   4 +-
 .../expression/util/regex/JONIPattern.java      |  18 +-
 .../visitor/BaseExpressionVisitor.java          |   6 +
 .../visitor/CloneExpressionVisitor.java         |  10 +-
 .../CloneNonDeterministicExpressionVisitor.java |   1 +
 .../expression/visitor/ExpressionVisitor.java   |   8 +-
 .../StatelessTraverseAllExpressionVisitor.java  |  10 +-
 .../StatelessTraverseNoExpressionVisitor.java   |  10 +-
 .../filter/MultiKeyValueComparisonFilter.java   |   2 +-
 .../filter/SingleKeyValueComparisonFilter.java  |   2 +-
 .../apache/phoenix/index/IndexMaintainer.java   |  55 ++--
 .../phoenix/iterate/BaseResultIterators.java    |   2 +-
 .../phoenix/jdbc/PhoenixDatabaseMetaData.java   |   4 +-
 .../query/ConnectionQueryServicesImpl.java      |   2 +-
 .../apache/phoenix/query/QueryConstants.java    |   4 +-
 .../org/apache/phoenix/query/QueryServices.java |   1 +
 .../phoenix/query/QueryServicesOptions.java     |   5 +-
 .../org/apache/phoenix/schema/ColumnRef.java    |   8 +-
 .../phoenix/schema/ColumnValueDecoder.java      |  31 +++
 .../phoenix/schema/ColumnValueEncoder.java      |  45 +++
 .../apache/phoenix/schema/DelegateTable.java    |   4 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  97 +++++--
 .../java/org/apache/phoenix/schema/PTable.java  |  62 ++++-
 .../org/apache/phoenix/schema/PTableImpl.java   |  80 +++---
 .../apache/phoenix/schema/TableProperty.java    |  23 +-
 .../tuple/EncodedColumnQualiferCellsList.java   |   3 +-
 .../phoenix/schema/types/PArrayDataType.java    | 272 +------------------
 .../schema/types/PArrayDataTypeDecoder.java     | 102 +++++++
 .../schema/types/PArrayDataTypeEncoder.java     | 170 ++++++++++++
 .../apache/phoenix/util/EncodedColumnsUtil.java |  35 ++-
 .../java/org/apache/phoenix/util/IndexUtil.java |  15 +-
 .../org/apache/phoenix/util/SchemaUtil.java     |   2 +-
 .../phoenix/execute/CorrelatePlanTest.java      |   4 +-
 .../execute/LiteralResultIteratorPlanTest.java  |   4 +-
 .../phoenix/execute/MutationStateTest.java      |   4 +-
 .../ArrayConstructorExpressionTest.java         | 102 +------
 .../schema/ImmutableStorageSchemeTest.java      | 182 +++++++++++++
 .../schema/types/PDataTypeForArraysTest.java    |  38 +--
 68 files changed, 1451 insertions(+), 982 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
index adadca7..506e503 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterMultiTenantTableWithViewsIT.java
@@ -476,14 +476,15 @@ public class AlterMultiTenantTableWithViewsIT extends ParallelStatsDisabledIT {
             // For a diverged view, only base table's pk column will be added and that too at the end.
             assertTableDefinition(conn, divergedView, PTableType.VIEW, baseTable, 2, 7, DIVERGED_VIEW_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V3", "VIEW_COL1", "VIEW_COL2", "PK2");
             
-            // Add existing column VIEW_COL2 to the base table
-            alterBaseTable = "ALTER TABLE " + baseTable + " ADD VIEW_COL2 CHAR(256)";
-            conn.createStatement().execute(alterBaseTable);
-            
-            // For the non-diverged view, adding the column VIEW_COL2 will end up changing its ordinal position in the view.
-            assertTableDefinition(conn, view1, PTableType.VIEW, baseTable, 2, 9, 8, "TENANT_ID", "PK1", "V1", "V2", "V3", "KV", "PK2", "VIEW_COL2", "VIEW_COL1");
-            // For the diverged view, adding the column VIEW_COL2 will not change its ordinal position in the view. It also won't change the base column count or the sequence number
-            assertTableDefinition(conn, divergedView, PTableType.VIEW, baseTable, 2, 7, DIVERGED_VIEW_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V3", "VIEW_COL1", "VIEW_COL2", "PK2");
+            // Adding existing column VIEW_COL2 to the base table isn't allowed.
+            try {
+                alterBaseTable = "ALTER TABLE " + baseTable + " ADD VIEW_COL2 CHAR(256)";
+                conn.createStatement().execute(alterBaseTable);
+                fail();
+            }
+            catch (SQLException e) {
+                assertEquals("Unexpected exception", SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
+            }
         }
     }
     
@@ -500,13 +501,13 @@ public class AlterMultiTenantTableWithViewsIT extends ParallelStatsDisabledIT {
                 tenant1Conn.createStatement().execute(view1DDL);
             }
 
-            assertTableDefinition(conn, baseTable, PTableType.TABLE, null, 0, 5, BASE_TABLE_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V2", "V3");
+            assertTableDefinition(conn, baseTable, PTableType.TABLE, null, 1, 5, BASE_TABLE_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V2", "V3");
             assertTableDefinition(conn, view1, PTableType.VIEW, baseTable, 0, 7, 5, "TENANT_ID", "PK1", "V1", "V2", "V3", "VIEW_COL1", "VIEW_COL2");
 
             String alterBaseTable = "ALTER TABLE " + baseTable + " ADD KV VARCHAR, PK2 VARCHAR PRIMARY KEY";
             conn.createStatement().execute(alterBaseTable);
 
-            assertTableDefinition(conn, baseTable, PTableType.TABLE, null, 1, 7, BASE_TABLE_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V2", "V3", "KV", "PK2");
+            assertTableDefinition(conn, baseTable, PTableType.TABLE, null, 2, 7, BASE_TABLE_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V2", "V3", "KV", "PK2");
             assertTableDefinition(conn, view1, PTableType.VIEW, baseTable, 1, 9, 7, "TENANT_ID", "PK1", "V1", "V2", "V3", "KV", "PK2", "VIEW_COL1", "VIEW_COL2");
 
             // verify that the both columns were added to view1
@@ -530,13 +531,13 @@ public class AlterMultiTenantTableWithViewsIT extends ParallelStatsDisabledIT {
                 tenant1Conn.createStatement().execute(view1DDL);
             }
 
-            assertTableDefinition(conn, baseTable, PTableType.TABLE, null, 0, 5, BASE_TABLE_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V2", "V3");
+            assertTableDefinition(conn, baseTable, PTableType.TABLE, null, 1, 5, BASE_TABLE_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V2", "V3");
             assertTableDefinition(conn, view1, PTableType.VIEW, baseTable, 0, 7, 5, "TENANT_ID", "PK1", "V1", "V2", "V3", "VIEW_COL1", "VIEW_COL2");
 
             String alterBaseTable = "ALTER TABLE " + baseTable + " DROP COLUMN V2";
             conn.createStatement().execute(alterBaseTable);
 
-            assertTableDefinition(conn, baseTable, PTableType.TABLE, null, 1, 4, BASE_TABLE_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V3");
+            assertTableDefinition(conn, baseTable, PTableType.TABLE, null, 2, 4, BASE_TABLE_BASE_COLUMN_COUNT, "TENANT_ID", "PK1", "V1", "V3");
             assertTableDefinition(conn, view1, PTableType.VIEW, baseTable, 1, 6, 4, "TENANT_ID", "PK1", "V1", "V3", "VIEW_COL1", "VIEW_COL2");
 
             // verify that the dropped columns aren't visible

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 9799b08..b4e1abe 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -105,7 +105,7 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
     
     public AlterTableIT(boolean columnEncoded) {
         this.columnEncoded = columnEncoded;
-        this.tableDDLOptions = columnEncoded ? "COLUMN_ENCODED_BYTES=4" : "";
+        this.tableDDLOptions = columnEncoded ? "" : "COLUMN_ENCODED_BYTES=0";
     }
     
     @Parameters(name="AlterTableIT_columnEncoded={0}") // name is used by failsafe as file name in reports
@@ -301,7 +301,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
         conn.createStatement().execute(
           "CREATE TABLE " + dataTableFullName
               + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) "
-              + generateDDLOptions((immutable ? "IMMUTABLE_ROWS = true" : "")));
+              + generateDDLOptions(immutable ? "IMMUTABLE_ROWS = true" : "")
+              + (!columnEncoded ? ",IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : ""));
         query = "SELECT * FROM " + dataTableFullName;
         rs = conn.createStatement().executeQuery(query);
         assertFalse(rs.next());
@@ -795,7 +796,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
                             "CREATE TABLE " + dataTableFullName
                                     + "  (a_string varchar not null, col1 integer, cf1.col2 integer, col3 integer , cf2.col4 integer "
                                     + "  CONSTRAINT pk PRIMARY KEY (a_string)) "
-                                    + generateDDLOptions("immutable_rows=true, disable_wal=true"));
+                                    + generateDDLOptions("immutable_rows=true, disable_wal=true"
+                                    + (!columnEncoded ? ",IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : "")));
 
             Connection conn2 = DriverManager.getConnection(getUrl(), props);
             String query = "SELECT * FROM " + dataTableFullName;
@@ -828,7 +830,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
                             "CREATE TABLE " + dataTableFullName
                                     + "  (a_string varchar not null, col1 integer, cf1.col2 integer, col3 integer , cf2.col4 integer "
                                     + "  CONSTRAINT pk PRIMARY KEY (a_string))" 
-                                    + generateDDLOptions("immutable_rows=true"));
+                                    + generateDDLOptions("immutable_rows=true"
+                                    + (!columnEncoded ? ",IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : "")));
 
             Connection conn2 = DriverManager.getConnection(getUrl(), props);
             String query = "SELECT * FROM " + dataTableFullName;
@@ -900,7 +903,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
                             "CREATE TABLE " + dataTableFullName
                                     + "  (a_string varchar not null, col1 integer, cf1.col2 integer, col3 integer , cf2.col4 integer "
                                     + "  CONSTRAINT pk PRIMARY KEY (a_string)) " 
-                                    + generateDDLOptions("immutable_rows=true , SALT_BUCKETS=3 "));
+                                    + generateDDLOptions("immutable_rows=true , SALT_BUCKETS=3 "
+                                    + (!columnEncoded ? ",IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : "")));
 
             String query = "SELECT * FROM " + dataTableFullName;
             ResultSet rs = conn.createStatement().executeQuery(query);
@@ -1220,7 +1224,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
                 +"CREATED_DATE DATE,\n"
                 +"CF1.CREATION_TIME BIGINT,\n"
                 +"CF2.LAST_USED DATE,\n"
-                +"CONSTRAINT PK PRIMARY KEY (ID1, ID2)) " + generateDDLOptions("IMMUTABLE_ROWS=true");
+                +"CONSTRAINT PK PRIMARY KEY (ID1, ID2)) " + generateDDLOptions("IMMUTABLE_ROWS=true"
+                + (!columnEncoded ? ",IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : ""));
         Connection conn = DriverManager.getConnection(getUrl(), props);
         conn.createStatement().execute(ddl);
         assertImmutableRows(conn, dataTableFullName, true);
@@ -1432,7 +1437,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
                 +"CREATED_DATE DATE,\n"
                 +"CREATION_TIME BIGINT,\n"
                 +"CF.LAST_USED DATE,\n"
-                +"CONSTRAINT PK PRIMARY KEY (ID1, ID2)) " + generateDDLOptions("IMMUTABLE_ROWS=true, DEFAULT_COLUMN_FAMILY = 'XYZ'");
+                +"CONSTRAINT PK PRIMARY KEY (ID1, ID2)) " + generateDDLOptions("IMMUTABLE_ROWS=true, DEFAULT_COLUMN_FAMILY = 'XYZ'"
+                + (!columnEncoded ? ",IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : ""));
         Connection conn = DriverManager.getConnection(getUrl(), props);
         conn.createStatement().execute(ddl);
         assertImmutableRows(conn, dataTableFullName, true);
@@ -1678,7 +1684,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
                             "CREATE TABLE "
                                     + dataTableFullName
     						+ "  (a_string varchar not null, col1 integer, cf1.col2 integer, col3 integer , cf2.col4 integer "
-    						+ "  CONSTRAINT pk PRIMARY KEY (a_string)) " + generateDDLOptions("immutable_rows=true , SALT_BUCKETS=3 "));
+    						+ "  CONSTRAINT pk PRIMARY KEY (a_string)) " + generateDDLOptions("immutable_rows=true , SALT_BUCKETS=3 "
+    						+ (!columnEncoded ? ",IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : ""))); 
 
             String ddl = "Alter table " + dataTableFullName + " add cf3.col5 integer, cf4.col6 integer in_memory=true";
     		conn.createStatement().execute(ddl);
@@ -1716,7 +1723,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
                             "CREATE TABLE "
                                     + dataTableFullName
     						+ "  (a_string varchar not null, col1 integer, cf1.col2 integer, col3 integer , cf2.col4 integer "
-    						+ "  CONSTRAINT pk PRIMARY KEY (a_string)) " + generateDDLOptions("immutable_rows=true , SALT_BUCKETS=3 "));
+    						+ "  CONSTRAINT pk PRIMARY KEY (a_string)) " + generateDDLOptions("immutable_rows=true , SALT_BUCKETS=3 "
+    						+ (!columnEncoded ? ",IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : "")));    
 
             String ddl = "Alter table " + dataTableFullName + " add cf1.col5 integer in_memory=true";
     		conn.createStatement().execute(ddl);
@@ -2307,7 +2315,8 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
 	                + " COL2 bigint NOT NULL,"
 	                + " KV1 VARCHAR"
 	                + " CONSTRAINT NAME_PK PRIMARY KEY (ID, COL1, COL2)"
-	                + " ) " + generateDDLOptions("IMMUTABLE_ROWS = true"));
+	                + " ) " + generateDDLOptions("IMMUTABLE_ROWS = true"
+	                + (!columnEncoded ? ",IMMUTABLE_STORAGE_SCHEME="+ PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : "")));
 	        PTable baseTable = phxConn.getTable(new PTableKey(phxConn.getTenantId(), fullTableName));
 	        long initBaseTableSeqNumber = baseTable.getSequenceNumber(); 
 
@@ -2315,7 +2324,6 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
 	        EncodedCQCounter cqCounter = baseTable.getEncodedCQCounter();
 	        assertEquals( columnEncoded ? (Integer)(ENCODED_CQ_COUNTER_INITIAL_VALUE + 1) : null, cqCounter.getNextQualifier(QueryConstants.DEFAULT_COLUMN_FAMILY));
 	        
-	        
 	        // assert that the server side metadata is updated correctly.
 	        assertEncodedCQCounter(DEFAULT_COLUMN_FAMILY, schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE + 1);
 	        assertEncodedCQValue(DEFAULT_COLUMN_FAMILY, "KV1", schemaName, baseTableName, ENCODED_CQ_COUNTER_INITIAL_VALUE);
@@ -2568,19 +2576,22 @@ public class AlterTableIT extends ParallelStatsDisabledIT {
                 +"ID VARCHAR(15) NOT NULL,\n"
                 +"CREATED_DATE DATE,\n"
                 +"CREATION_TIME BIGINT,\n"
-                +"CONSTRAINT PK PRIMARY KEY (ID)) " + generateDDLOptions("COLUMN_ENCODED_BYTES=4, IMMUTABLE_ROWS=true");
+                +"CONSTRAINT PK PRIMARY KEY (ID)) " + generateDDLOptions("COLUMN_ENCODED_BYTES=4, IMMUTABLE_ROWS=true"
+                + (!columnEncoded ? ",IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN : ""));
         Connection conn = DriverManager.getConnection(getUrl(), props);
         conn.createStatement().execute(ddl);
         assertImmutableRows(conn, dataTableFullName, true);
         try {
 	        ddl = "ALTER TABLE " + dataTableFullName + " SET IMMUTABLE_ROWS = false";
 	        conn.createStatement().execute(ddl);
-	        fail();
+	        if (columnEncoded) {
+	            fail();
+	        }
         }
         catch(SQLException e) {
         	assertEquals(SQLExceptionCode.CANNOT_ALTER_IMMUTABLE_ROWS_PROPERTY.getErrorCode(), e.getErrorCode());
         }
-        assertImmutableRows(conn, dataTableFullName, true);
+        assertImmutableRows(conn, dataTableFullName, columnEncoded);
     }
     
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
index 73b3756..45c1dba 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableWithViewsIT.java
@@ -77,10 +77,10 @@ public class AlterTableWithViewsIT extends ParallelStatsDisabledIT {
     
     private String generateDDL(String options, String format) {
         StringBuilder optionsBuilder = new StringBuilder(options);
-        if (columnEncoded) {
+        if (!columnEncoded) {
             if (optionsBuilder.length()!=0)
                 optionsBuilder.append(",");
-            optionsBuilder.append("COLUMN_ENCODED_BYTES=4");
+            optionsBuilder.append("COLUMN_ENCODED_BYTES=0");
         }
         if (isMultiTenant) {
             if (optionsBuilder.length()!=0)

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTablePropIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTablePropIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTablePropIT.java
deleted file mode 100644
index 78be892..0000000
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTablePropIT.java
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.end2end;
-
-import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.Properties;
-
-import org.apache.phoenix.exception.SQLExceptionCode;
-import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableKey;
-import org.apache.phoenix.util.PropertiesUtil;
-import org.apache.phoenix.util.SchemaUtil;
-import org.junit.Test;
-
-public class ImmutableTablePropIT extends ParallelStatsDisabledIT {
-
-    @Test
-    public void testImmutableKeyword() throws Exception {
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        String immutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
-        String mutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
-        try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
-            Statement stmt = conn.createStatement();
-            // create table with immutable keyword
-            String ddl = "CREATE IMMUTABLE TABLE  " + immutableDataTableFullName +
-                    "  (a_string varchar not null, col1 integer" +
-                    "  CONSTRAINT pk PRIMARY KEY (a_string)) STORE_NULLS=true";
-            stmt.execute(ddl);
-            
-            // create table without immutable keyword
-            ddl = "CREATE TABLE  " + mutableDataTableFullName +
-                    "  (a_string varchar not null, col1 integer" +
-                    "  CONSTRAINT pk PRIMARY KEY (a_string)) STORE_NULLS=true";
-            stmt.execute(ddl);
-            
-            PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
-            PTable immutableTable = phxConn.getTable(new PTableKey(null, immutableDataTableFullName));
-            assertTrue("IMMUTABLE_ROWS should be set to true", immutableTable.isImmutableRows());
-            PTable mutableTable = phxConn.getTable(new PTableKey(null, mutableDataTableFullName));
-            assertFalse("IMMUTABLE_ROWS should be set to false", mutableTable.isImmutableRows());
-        } 
-    }
-    
-    @Test
-    public void testImmutableProperty() throws Exception {
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        String immutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
-        String mutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
-        try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
-            Statement stmt = conn.createStatement();
-            // create table with immutable table property set to true
-            String ddl = "CREATE TABLE  " + immutableDataTableFullName +
-                    "  (a_string varchar not null, col1 integer" +
-                    "  CONSTRAINT pk PRIMARY KEY (a_string)) IMMUTABLE_ROWS=true";
-            stmt.execute(ddl);
-            
-            // create table with immutable table property set to false
-            ddl = "CREATE TABLE  " + mutableDataTableFullName +
-                    "  (a_string varchar not null, col1 integer" +
-                    "  CONSTRAINT pk PRIMARY KEY (a_string))  IMMUTABLE_ROWS=false";
-            stmt.execute(ddl);
-            
-            PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
-            PTable immutableTable = phxConn.getTable(new PTableKey(null, immutableDataTableFullName));
-            assertTrue("IMMUTABLE_ROWS should be set to true", immutableTable.isImmutableRows());
-            PTable mutableTable = phxConn.getTable(new PTableKey(null, mutableDataTableFullName));
-            assertFalse("IMMUTABLE_ROWS should be set to false", mutableTable.isImmutableRows());
-        } 
-    }
-    
-    @Test
-    public void testImmutableKeywordAndProperty() throws Exception {
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        String immutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
-        String mutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
-        try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
-            Statement stmt = conn.createStatement();
-            try {
-                // create immutable table with immutable table property set to true 
-                String ddl = "CREATE IMMUTABLE TABLE  " + immutableDataTableFullName +
-                        "  (a_string varchar not null, col1 integer" +
-                        "  CONSTRAINT pk PRIMARY KEY (a_string)) IMMUTABLE_ROWS=true";
-                stmt.execute(ddl);
-                fail();
-            }
-            catch (SQLException e) {
-                assertEquals(SQLExceptionCode.IMMUTABLE_TABLE_PROPERTY_INVALID.getErrorCode(), e.getErrorCode());
-            }
-            
-            try {
-                // create immutable table with immutable table property set to false
-                String ddl = "CREATE IMMUTABLE TABLE  " + mutableDataTableFullName +
-                        "  (a_string varchar not null, col1 integer" +
-                        "  CONSTRAINT pk PRIMARY KEY (a_string))  IMMUTABLE_ROWS=false";
-                stmt.execute(ddl);
-                fail();
-            }
-            catch (SQLException e) {
-                assertEquals(SQLExceptionCode.IMMUTABLE_TABLE_PROPERTY_INVALID.getErrorCode(), e.getErrorCode());
-            }
-            
-        } 
-    }
-    
-}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTablePropertiesIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTablePropertiesIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTablePropertiesIT.java
new file mode 100644
index 0000000..52cfe9c
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/ImmutableTablePropertiesIT.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end;
+
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Properties;
+
+import org.apache.phoenix.exception.SQLExceptionCode;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.junit.Test;
+
+public class ImmutableTablePropertiesIT extends ParallelStatsDisabledIT {
+
+    @Test
+    public void testImmutableKeyword() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String immutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
+        String mutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
+        try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
+            Statement stmt = conn.createStatement();
+            // create table with immutable keyword
+            String ddl = "CREATE IMMUTABLE TABLE  " + immutableDataTableFullName +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string)) STORE_NULLS=true";
+            stmt.execute(ddl);
+            
+            // create table without immutable keyword
+            ddl = "CREATE TABLE  " + mutableDataTableFullName +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string)) STORE_NULLS=true";
+            stmt.execute(ddl);
+            
+            PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
+            PTable immutableTable = phxConn.getTable(new PTableKey(null, immutableDataTableFullName));
+            assertTrue("IMMUTABLE_ROWS should be set to true", immutableTable.isImmutableRows());
+            PTable mutableTable = phxConn.getTable(new PTableKey(null, mutableDataTableFullName));
+            assertFalse("IMMUTABLE_ROWS should be set to false", mutableTable.isImmutableRows());
+        } 
+    }
+    
+    @Test
+    public void testImmutableProperty() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String immutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
+        String mutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
+        try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
+            Statement stmt = conn.createStatement();
+            // create table with immutable table property set to true
+            String ddl = "CREATE TABLE  " + immutableDataTableFullName +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string)) IMMUTABLE_ROWS=true";
+            stmt.execute(ddl);
+            
+            // create table with immutable table property set to false
+            ddl = "CREATE TABLE  " + mutableDataTableFullName +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string))  IMMUTABLE_ROWS=false";
+            stmt.execute(ddl);
+            
+            PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
+            PTable immutableTable = phxConn.getTable(new PTableKey(null, immutableDataTableFullName));
+            assertTrue("IMMUTABLE_ROWS should be set to true", immutableTable.isImmutableRows());
+            PTable mutableTable = phxConn.getTable(new PTableKey(null, mutableDataTableFullName));
+            assertFalse("IMMUTABLE_ROWS should be set to false", mutableTable.isImmutableRows());
+        } 
+    }
+    
+    @Test
+    public void testImmutableKeywordAndProperty() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String immutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
+        String mutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
+        try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
+            Statement stmt = conn.createStatement();
+            try {
+                // create immutable table with immutable table property set to true 
+                String ddl = "CREATE IMMUTABLE TABLE  " + immutableDataTableFullName +
+                        "  (a_string varchar not null, col1 integer" +
+                        "  CONSTRAINT pk PRIMARY KEY (a_string)) IMMUTABLE_ROWS=true";
+                stmt.execute(ddl);
+                fail();
+            }
+            catch (SQLException e) {
+                assertEquals(SQLExceptionCode.IMMUTABLE_TABLE_PROPERTY_INVALID.getErrorCode(), e.getErrorCode());
+            }
+            
+            try {
+                // create immutable table with immutable table property set to false
+                String ddl = "CREATE IMMUTABLE TABLE  " + mutableDataTableFullName +
+                        "  (a_string varchar not null, col1 integer" +
+                        "  CONSTRAINT pk PRIMARY KEY (a_string))  IMMUTABLE_ROWS=false";
+                stmt.execute(ddl);
+                fail();
+            }
+            catch (SQLException e) {
+                assertEquals(SQLExceptionCode.IMMUTABLE_TABLE_PROPERTY_INVALID.getErrorCode(), e.getErrorCode());
+            }
+            
+        } 
+    }
+    
+    @Test
+    public void testImmutableTableWithStorageSchemeAndColumnEncodingProps() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String immutableDataTableFullName = SchemaUtil.getTableName("", generateUniqueName());
+        try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
+            Statement stmt = conn.createStatement();
+            try {
+                // create immutable table with immutable table property set to true 
+                String ddl = "CREATE IMMUTABLE TABLE  " + immutableDataTableFullName +
+                        "  (a_string varchar not null, col1 integer" +
+                        "  CONSTRAINT pk PRIMARY KEY (a_string)) COLUMN_ENCODED_BYTES=0, IMMUTABLE_STORAGE_SCHEME="
+                        + PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS;
+                stmt.execute(ddl);
+                fail();
+            }
+            catch (SQLException e) {
+                assertEquals(SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES.getErrorCode(), e.getErrorCode());
+            }
+        } 
+    }
+    
+    @Test
+    public void testAlterImmutableStorageSchemeProp() throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+        String immutableDataTableFullName1 = SchemaUtil.getTableName("", generateUniqueName());
+        String immutableDataTableFullName2 = SchemaUtil.getTableName("", generateUniqueName());
+        try (Connection conn = DriverManager.getConnection(getUrl(), props);) {
+            Statement stmt = conn.createStatement();
+            // create an immutable table with  ONE_CELL_PER_COLUMN storage scheme
+            String ddl = "CREATE IMMUTABLE TABLE  " + immutableDataTableFullName1 +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string)) COLUMN_ENCODED_BYTES=0, IMMUTABLE_STORAGE_SCHEME="
+                    + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN;
+            stmt.execute(ddl);
+            // create an immutable table with  SINGLE_CELL_ARRAY_WITH_OFFSETS storage scheme
+            ddl = "CREATE IMMUTABLE TABLE  " + immutableDataTableFullName2 +
+                    "  (a_string varchar not null, col1 integer" +
+                    "  CONSTRAINT pk PRIMARY KEY (a_string)) COLUMN_ENCODED_BYTES=4, IMMUTABLE_STORAGE_SCHEME="
+                    + PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS;
+            stmt.execute(ddl);
+            
+            // changing the storage scheme from/to ONE_CELL_PER_COLUMN should fail
+            try {
+                stmt.execute("ALTER TABLE " + immutableDataTableFullName1 + " SET IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS);
+                fail();
+            }
+            catch (SQLException e) {
+                assertEquals(SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_CHANGE.getErrorCode(), e.getErrorCode());
+            }
+            try {
+                stmt.execute("ALTER TABLE " + immutableDataTableFullName2 + " SET IMMUTABLE_STORAGE_SCHEME=" + PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN);
+                fail();
+            }
+            catch (SQLException e) {
+                assertEquals(SQLExceptionCode.INVALID_IMMUTABLE_STORAGE_SCHEME_CHANGE.getErrorCode(), e.getErrorCode());
+            }
+        } 
+    }
+    
+}

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
index 752ace0..135bf09 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StatsCollectorIT.java
@@ -52,6 +52,7 @@ import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.stats.GuidePostsKey;
@@ -86,17 +87,20 @@ public class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
         if (transactional) {
             sb.append("TRANSACTIONAL=true");
         }
-        if (columnEncoded) {
+        if (!columnEncoded) {
             if (sb.length()>0) {
                 sb.append(",");
             }
-            sb.append("COLUMN_ENCODED_BYTES=4");
+            sb.append("COLUMN_ENCODED_BYTES=0");
         }
         if (!mutable) {
             if (sb.length()>0) {
                 sb.append(",");
             }
             sb.append("IMMUTABLE_ROWS=true");
+            if (!columnEncoded) {
+                sb.append(",IMMUTABLE_STORAGE_SCHEME="+PTableImpl.ImmutableStorageScheme.ONE_CELL_PER_COLUMN);
+            }
         }
         this.tableDDLOptions = sb.toString();
         this.userTableNamespaceMapped = userTableNamespaceMapped;
@@ -184,7 +188,7 @@ public class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
         rs = conn.createStatement().executeQuery("EXPLAIN SELECT v2 FROM " + fullTableName + " WHERE v2='foo'");
         explainPlan = QueryUtil.getExplainPlan(rs);
         // if we are using the ONE_CELL_PER_COLUMN_FAMILY storage scheme, we will have the single kv even though there are no values for col family v2 
-        String stats = columnEncoded && !mutable  ? "4-CHUNK 1 ROWS 58 BYTES" : "3-CHUNK 0 ROWS 0 BYTES";
+        String stats = columnEncoded && !mutable  ? "4-CHUNK 1 ROWS 38 BYTES" : "3-CHUNK 0 ROWS 0 BYTES";
         assertEquals(
                 "CLIENT " + stats + " PARALLEL 3-WAY FULL SCAN OVER " + physicalTableName + "\n" +
                 "    SERVER FILTER BY B.V2 = 'foo'\n" + 
@@ -533,7 +537,7 @@ public class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
         List<KeyRange> keyRanges = getAllSplits(conn, fullTableName);
         assertEquals(26, keyRanges.size());
         rs = conn.createStatement().executeQuery("EXPLAIN SELECT * FROM " + fullTableName);
-        assertEquals("CLIENT 26-CHUNK 25 ROWS " + (columnEncoded ? ( mutable ? "12530" : "14422" ) : "12420") + " BYTES PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName,
+        assertEquals("CLIENT 26-CHUNK 25 ROWS " + (columnEncoded ? ( mutable ? "12530" : "13902" ) : "12420") + " BYTES PARALLEL 1-WAY FULL SCAN OVER " + physicalTableName,
                 QueryUtil.getExplainPlan(rs));
 
         ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
@@ -557,25 +561,25 @@ public class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
         assertTrue(rs.next());
         assertEquals("A", rs.getString(1));
         assertEquals(24, rs.getInt(2));
-        assertEquals(columnEncoded ? ( mutable ? 12252 : 14144 ) : 12144, rs.getInt(3));
+        assertEquals(columnEncoded ? ( mutable ? 12252 : 13624 ) : 12144, rs.getInt(3));
         assertEquals(oneCellPerColFamliyStorageScheme ? 12 : 11, rs.getInt(4));
 
         assertTrue(rs.next());
         assertEquals("B", rs.getString(1));
         assertEquals(oneCellPerColFamliyStorageScheme ? 24 : 20, rs.getInt(2));
-        assertEquals(columnEncoded ? ( mutable ? 5600 : 7492 ) : 5540, rs.getInt(3));
+        assertEquals(columnEncoded ? ( mutable ? 5600 : 6972 ) : 5540, rs.getInt(3));
         assertEquals(oneCellPerColFamliyStorageScheme ? 6 : 5, rs.getInt(4));
 
         assertTrue(rs.next());
         assertEquals("C", rs.getString(1));
         assertEquals(24, rs.getInt(2));
-        assertEquals(columnEncoded ? ( mutable ? 6724 : 7516 ) : 6652, rs.getInt(3));
+        assertEquals(columnEncoded ? ( mutable ? 6724 : 6988 ) : 6652, rs.getInt(3));
         assertEquals(6, rs.getInt(4));
 
         assertTrue(rs.next());
         assertEquals("D", rs.getString(1));
         assertEquals(24, rs.getInt(2));
-        assertEquals(columnEncoded ? ( mutable ? 6724 : 7516 ) : 6652, rs.getInt(3));
+        assertEquals(columnEncoded ? ( mutable ? 6724 : 6988 ) : 6652, rs.getInt(3));
         assertEquals(6, rs.getInt(4));
 
         assertFalse(rs.next());
@@ -620,7 +624,7 @@ public class StatsCollectorIT extends BaseUniqueNamesOwnClusterIT {
             int startIndex = r.nextInt(strings.length);
             int endIndex = r.nextInt(strings.length - startIndex) + startIndex;
             long rows = endIndex - startIndex;
-            long c2Bytes = rows * (columnEncoded ? ( mutable ? 37 : 70 ) : 35);
+            long c2Bytes = rows * (columnEncoded ? ( mutable ? 37 : 48 ) : 35);
             String physicalTableName = SchemaUtil.getPhysicalHBaseTableName(fullTableName, userTableNamespaceMapped, PTableType.TABLE).getString();
             rs = conn.createStatement().executeQuery(
                     "SELECT COLUMN_FAMILY,SUM(GUIDE_POSTS_ROW_COUNT),SUM(GUIDE_POSTS_WIDTH) from SYSTEM.STATS where PHYSICAL_NAME = '"

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java
index ed294bf..e015a8d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/StoreNullsIT.java
@@ -34,14 +34,15 @@ import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.phoenix.expression.ArrayColumnExpression;
 import org.apache.phoenix.expression.KeyValueColumnExpression;
+import org.apache.phoenix.expression.SingleCellColumnExpression;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTable.StorageScheme;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
+import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.tuple.ResultTuple;
 import org.apache.phoenix.schema.types.PVarchar;
@@ -76,11 +77,14 @@ public class StoreNullsIT extends ParallelStatsDisabledIT {
         this.storeNulls = storeNulls;
         
         StringBuilder sb = new StringBuilder("CREATE TABLE %s (id SMALLINT NOT NULL PRIMARY KEY, name VARCHAR) VERSIONS = 1000, KEEP_DELETED_CELLS = false ");
+        if (!columnEncoded) {
+            sb.append(",").append("COLUMN_ENCODED_BYTES=0");
+        }
         if (!mutable) {
             sb.append(",").append("IMMUTABLE_ROWS=true");
-        }
-        if (columnEncoded) {
-            sb.append(",").append("COLUMN_ENCODED_BYTES=4");
+            if (!columnEncoded) {
+                sb.append(",IMMUTABLE_STORAGE_SCHEME="+PTableImpl.ImmutableStorageScheme.ONE_CELL_PER_COLUMN);
+            }
         }
         if (storeNulls) {
             sb.append(",").append("STORE_NULLS=true");
@@ -132,10 +136,10 @@ public class StoreNullsIT extends ParallelStatsDisabledIT {
         Result rs = scanner.next();
         PTable table = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, dataTableName));
         PColumn nameColumn = table.getPColumnForColumnName("NAME");
-        byte[] qualifier = table.getStorageScheme()== StorageScheme.ONE_CELL_PER_COLUMN_FAMILY ? QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES : nameColumn.getColumnQualifierBytes();
+        byte[] qualifier = table.getImmutableStorageScheme()== ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS ? QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES : nameColumn.getColumnQualifierBytes();
         assertTrue(rs.containsColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, qualifier));
         assertTrue(rs.size() == 2); // 2 because it also includes the empty key value column
-        KeyValueColumnExpression colExpression = table.getStorageScheme() == StorageScheme.ONE_CELL_PER_COLUMN_FAMILY ? new ArrayColumnExpression(nameColumn, "NAME", table.getEncodingScheme()) : new KeyValueColumnExpression(nameColumn);
+        KeyValueColumnExpression colExpression = table.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS ? new SingleCellColumnExpression(nameColumn, "NAME", table.getEncodingScheme()) : new KeyValueColumnExpression(nameColumn);
         ImmutableBytesPtr ptr = new ImmutableBytesPtr();
         colExpression.evaluate(new ResultTuple(rs), ptr);
         assertEquals(new ImmutableBytesPtr(PVarchar.INSTANCE.toBytes("v1")), ptr);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexIT.java
index 7615935..3a72088 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/IndexIT.java
@@ -84,7 +84,6 @@ public class IndexIT extends ParallelStatsDisabledIT {
     private final boolean mutable;
     private final String tableDDLOptions;
 
-
     public IndexIT(boolean localIndex, boolean mutable, boolean transactional) {
         this.localIndex = localIndex;
         this.transactional = transactional;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
index fae53e2..b482998 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/CreateTableCompiler.java
@@ -31,7 +31,6 @@ import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
 import org.apache.phoenix.execute.MutationState;
 import org.apache.phoenix.expression.AndExpression;
-import org.apache.phoenix.expression.ArrayColumnExpression;
 import org.apache.phoenix.expression.ComparisonExpression;
 import org.apache.phoenix.expression.Determinism;
 import org.apache.phoenix.expression.Expression;
@@ -39,6 +38,7 @@ import org.apache.phoenix.expression.IsNullExpression;
 import org.apache.phoenix.expression.KeyValueColumnExpression;
 import org.apache.phoenix.expression.LiteralExpression;
 import org.apache.phoenix.expression.RowKeyColumnExpression;
+import org.apache.phoenix.expression.SingleCellColumnExpression;
 import org.apache.phoenix.expression.visitor.StatelessTraverseNoExpressionVisitor;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
@@ -327,13 +327,8 @@ public class CreateTableCompiler {
         }
         
         @Override
-        public Boolean visit(ArrayColumnExpression node) {
-            try {
-                this.position = table.getColumnFamily(node.getColumnFamily()).getPColumnForColumnQualifier(node.getPositionInArray()).getPosition();
-            } catch (SQLException e) {
-                throw new RuntimeException(e);
-            }
-            return Boolean.TRUE;
+        public Boolean visit(SingleCellColumnExpression node) {
+            return visit(node.getKeyValueExpression());
         }
         
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
index 846e966..fb4c542 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/ExpressionCompiler.java
@@ -117,7 +117,7 @@ import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PDatum;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
-import org.apache.phoenix.schema.PTable.StorageScheme;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.RowKeyValueAccessor;
 import org.apache.phoenix.schema.SortOrder;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
index 1a14c7f..8ba0e12 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/FromCompiler.java
@@ -75,7 +75,7 @@ import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
-import org.apache.phoenix.schema.PTable.StorageScheme;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
 import org.apache.phoenix.schema.PTableType;
@@ -793,7 +793,7 @@ public class FromCompiler {
                     MetaDataProtocol.MIN_TABLE_TIMESTAMP, PTable.INITIAL_SEQ_NUM, null, null, columns, null, null,
                     Collections.<PTable> emptyList(), false, Collections.<PName> emptyList(), null, null, false, false,
                     false, null, null, null, false, false, 0, 0L, SchemaUtil
-                            .isNamespaceMappingEnabled(PTableType.SUBQUERY, connection.getQueryServices().getProps()), null, false, StorageScheme.ONE_CELL_PER_KEYVALUE_COLUMN, QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER);
+                            .isNamespaceMappingEnabled(PTableType.SUBQUERY, connection.getQueryServices().getProps()), null, false, ImmutableStorageScheme.ONE_CELL_PER_COLUMN, QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER);
 
             String alias = subselectNode.getAlias();
             TableRef tableRef = new TableRef(alias, t, MetaDataProtocol.MIN_TABLE_TIMESTAMP, false);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
index 1db28f5..9a2651d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/JoinCompiler.java
@@ -18,7 +18,7 @@
 package org.apache.phoenix.compile;
 
 import static org.apache.phoenix.schema.PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
-import static org.apache.phoenix.schema.PTable.StorageScheme.ONE_CELL_PER_KEYVALUE_COLUMN;
+import static org.apache.phoenix.schema.PTable.ImmutableStorageScheme.ONE_CELL_PER_COLUMN;
 
 import java.sql.SQLException;
 import java.util.ArrayList;
@@ -80,7 +80,7 @@ import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
-import org.apache.phoenix.schema.PTable.StorageScheme;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.ProjectedColumn;
@@ -1316,7 +1316,7 @@ public class JoinCompiler {
                 left.isMultiTenant(), left.getStoreNulls(), left.getViewType(), left.getViewIndexId(),
                 left.getIndexType(), left.rowKeyOrderOptimizable(), left.isTransactional(),
                 left.getUpdateCacheFrequency(), left.getIndexDisableTimestamp(), left.isNamespaceMapped(), 
-                left.getAutoPartitionSeqName(), left.isAppendOnlySchema(), ONE_CELL_PER_KEYVALUE_COLUMN, NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER);
+                left.getAutoPartitionSeqName(), left.isAppendOnlySchema(), ONE_CELL_PER_COLUMN, NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER);
     }
 
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/compile/PostLocalIndexDDLCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostLocalIndexDDLCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostLocalIndexDDLCompiler.java
index 8677dc2..843ed68 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostLocalIndexDDLCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostLocalIndexDDLCompiler.java
@@ -31,7 +31,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
 import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTable.StorageScheme;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.tuple.Tuple;
@@ -83,7 +83,7 @@ public class PostLocalIndexDDLCompiler {
             // However, in this case, we need to project all of the data columns that contribute to the index.
             IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, connection);
             for (ColumnReference columnRef : indexMaintainer.getAllColumns()) {
-                if (index.getStorageScheme() == StorageScheme.ONE_CELL_PER_COLUMN_FAMILY) {
+                if (index.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) {
                     scan.addFamily(columnRef.getFamily());
                 } else {
                     scan.addColumn(columnRef.getFamily(), columnRef.getQualifier());

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
index b0e715c..49fa53d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/TupleProjectionCompiler.java
@@ -157,7 +157,7 @@ public class TupleProjectionCompiler {
                 null, null, table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(),
                 table.getViewIndexId(),
                 table.getIndexType(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), 
-                table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter());
+                table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getImmutableStorageScheme(), table.getEncodingScheme(), table.getEncodedCQCounter());
     }
     
     public static PTable createProjectedTable(TableRef tableRef, List<ColumnRef> sourceColumnRefs, boolean retainPKColumns) throws SQLException {
@@ -190,7 +190,7 @@ public class TupleProjectionCompiler {
                 Collections.<PTable> emptyList(), table.isImmutableRows(), Collections.<PName> emptyList(), null, null,
                 table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(),
                 table.getViewIndexId(), null, table.rowKeyOrderOptimizable(), table.isTransactional(),
-                table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getStorageScheme(), table.getEncodingScheme(), cqCounter);
+                table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getImmutableStorageScheme(), table.getEncodingScheme(), cqCounter);
     }
 
     // For extracting column references from single select statement

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
index 13b58d1..e5e18e3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/UnionCompiler.java
@@ -40,7 +40,7 @@ import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
-import org.apache.phoenix.schema.PTable.StorageScheme;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.util.SchemaUtil;
 
@@ -101,7 +101,7 @@ public class UnionCompiler {
             null, null, projectedColumns, null, null, null, true, null, null, null, true,
             true, true, null, null, null, false, false, 0, 0L,
             SchemaUtil.isNamespaceMappingEnabled(PTableType.SUBQUERY,
-                statement.getConnection().getQueryServices().getProps()), null, false, StorageScheme.ONE_CELL_PER_KEYVALUE_COLUMN, QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER);
+                statement.getConnection().getQueryServices().getProps()), null, false, ImmutableStorageScheme.ONE_CELL_PER_COLUMN, QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, PTable.EncodedCQCounter.NULL_COUNTER);
         TableRef tableRef = new TableRef(null, tempTable, 0, false);
         return tableRef;
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
index a6ff1c5..3026514 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/WhereCompiler.java
@@ -52,7 +52,7 @@ import org.apache.phoenix.schema.ColumnNotFoundException;
 import org.apache.phoenix.schema.ColumnRef;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
-import org.apache.phoenix.schema.PTable.StorageScheme;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.schema.TypeMismatchException;
@@ -174,7 +174,7 @@ public class WhereCompiler {
             TableRef tableRef = ref.getTableRef();
             Expression newColumnExpression = ref.newColumnExpression(node.isTableNameCaseSensitive(), node.isCaseSensitive());
             if (tableRef.equals(context.getCurrentTable()) && !SchemaUtil.isPKColumn(ref.getColumn())) {
-                byte[] cq = tableRef.getTable().getStorageScheme() == StorageScheme.ONE_CELL_PER_COLUMN_FAMILY 
+                byte[] cq = tableRef.getTable().getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS 
                 		? QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES : ref.getColumn().getColumnQualifierBytes();
                 // track the where condition columns. Later we need to ensure the Scan in HRS scans these column CFs
                 context.addWhereCoditionColumn(ref.getColumn().getFamilyName().getBytes(), cq);
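
The hunk above reduces to a single qualifier-selection rule: when the table's immutable storage scheme packs all columns of a family into one cell, the scan has to track the shared single-keyvalue qualifier rather than the per-column qualifier. A condensed sketch of that rule, written against the ImmutableStorageScheme/PColumn accessors visible in this patch (the helper class and method name are illustrative only, not part of the commit):

    import org.apache.phoenix.query.QueryConstants;
    import org.apache.phoenix.schema.PColumn;
    import org.apache.phoenix.schema.PTable;
    import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;

    final class QualifierSelectionSketch {
        // Returns the column qualifier a scan should track for a non-PK column.
        static byte[] qualifierToTrack(PTable table, PColumn column) {
            return table.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS
                    ? QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES
                    : column.getColumnQualifierBytes();
        }
    }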

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index a64ed64..06e4f53 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -108,6 +108,7 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
     public final static String MIN_QUALIFIER = "_MinQualifier";
     public final static String MAX_QUALIFIER = "_MaxQualifier";
     public final static String QUALIFIER_ENCODING_SCHEME = "_QualifierEncodingScheme";
+    public final static String IMMUTABLE_STORAGE_ENCODING_SCHEME = "_ImmutableStorageEncodingScheme";
     
     /**
      * Attribute name used to pass custom annotations in Scans and Mutations (later). Custom annotations

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
index ee79586..658bb82 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/GroupedAggregateRegionObserver.java
@@ -24,7 +24,6 @@ import static org.apache.phoenix.query.QueryServices.GROUPBY_ESTIMATED_DISTINCT_
 import static org.apache.phoenix.query.QueryServices.GROUPBY_SPILLABLE_ATTRIB;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_GROUPBY_ESTIMATED_DISTINCT_VALUES;
 import static org.apache.phoenix.query.QueryServicesOptions.DEFAULT_GROUPBY_SPILLABLE;
-import static org.apache.phoenix.util.EncodedColumnsUtil.getEncodingScheme;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -408,7 +407,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                 acquiredLock = true;
                 synchronized (scanner) {
                     do {
-                        List<Cell> results = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), getEncodingScheme(scan)) : new ArrayList<Cell>();
+                        List<Cell> results = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), EncodedColumnsUtil.getQualifierEncodingScheme(scan)) : new ArrayList<Cell>();
                         // Results are potentially returned even when the return
                         // value of s.next is false
                         // since this is an indication of whether or not there are
@@ -482,7 +481,7 @@ public class GroupedAggregateRegionObserver extends BaseScannerRegionObserver {
                     acquiredLock = true;
                     synchronized (scanner) {
                         do {
-                            List<Cell> kvs = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), getEncodingScheme(scan)) : new ArrayList<Cell>();
+                            List<Cell> kvs = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), EncodedColumnsUtil.getQualifierEncodingScheme(scan)) : new ArrayList<Cell>();
                             // Results are potentially returned even when the return
                             // value of s.next is false
                             // since this is an indication of whether or not there

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index b46cb26..84de00d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -196,7 +196,7 @@ import org.apache.phoenix.schema.PTable.EncodedCQCounter;
 import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTable.LinkType;
 import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
-import org.apache.phoenix.schema.PTable.StorageScheme;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTable.ViewType;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableType;
@@ -956,7 +956,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     isAppendOnlySchemaKv.getValueOffset(), isAppendOnlySchemaKv.getValueLength()));
         Cell storageSchemeKv = tableKeyValues[STORAGE_SCHEME_INDEX];
         //TODO: change this once we start having other values for storage schemes
-        StorageScheme storageScheme = storageSchemeKv == null ? StorageScheme.ONE_CELL_PER_KEYVALUE_COLUMN : StorageScheme
+        ImmutableStorageScheme storageScheme = storageSchemeKv == null ? ImmutableStorageScheme.ONE_CELL_PER_COLUMN : ImmutableStorageScheme
                 .fromSerializedValue((byte)PTinyint.INSTANCE.toObject(storageSchemeKv.getValueArray(),
                         storageSchemeKv.getValueOffset(), storageSchemeKv.getValueLength()));
         Cell encodingSchemeKv = tableKeyValues[QUALIFIER_ENCODING_SCHEME_INDEX];

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
index a35d53a..ad5c84c 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/ScanRegionObserver.java
@@ -17,8 +17,8 @@
  */
 package org.apache.phoenix.coprocessor;
 
-import static org.apache.phoenix.util.EncodedColumnsUtil.getEncodingScheme;
 import static org.apache.phoenix.util.EncodedColumnsUtil.getMinMaxQualifiersFromScan;
+import static org.apache.phoenix.util.EncodedColumnsUtil.getQualifierEncodingScheme;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -130,7 +130,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
                 orderByExpression.readFields(input);
                 orderByExpressions.add(orderByExpression);
             }
-            QualifierEncodingScheme encodingScheme = getEncodingScheme(scan);
+            QualifierEncodingScheme encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan);
             ResultIterator inner = new RegionScannerResultIterator(s, EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan), encodingScheme);
             return new OrderedResultIterator(inner, orderByExpressions, thresholdBytes, limit >= 0 ? limit : null, null,
                     estimatedRowSize);
@@ -238,7 +238,7 @@ public class ScanRegionObserver extends BaseScannerRegionObserver {
         }
         if (scanOffset != null) {
             innerScanner = getOffsetScanner(c, innerScanner,
-                    new OffsetResultIterator(new RegionScannerResultIterator(innerScanner, getMinMaxQualifiersFromScan(scan), getEncodingScheme(scan)), scanOffset),
+                    new OffsetResultIterator(new RegionScannerResultIterator(innerScanner, getMinMaxQualifiersFromScan(scan), getQualifierEncodingScheme(scan)), scanOffset),
                     scan.getAttribute(QueryConstants.LAST_SCAN) != null);
         }
         final OrderedResultIterator iterator = deserializeFromScan(scan, innerScanner);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index dadfaeb..6289f5a 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -24,7 +24,6 @@ import static org.apache.phoenix.query.QueryConstants.UNGROUPED_AGG_ROW_KEY;
 import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB;
 import static org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker.COMPACTION_UPDATE_STATS_ROW_COUNT;
 import static org.apache.phoenix.schema.stats.StatisticsCollectionRunTracker.CONCURRENT_UPDATE_STATS_ROW_COUNT;
-import static org.apache.phoenix.util.EncodedColumnsUtil.getEncodingScheme;
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
@@ -401,7 +400,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver
             acquiredLock = true;
             synchronized (innerScanner) {
                 do {
-                    List<Cell> results = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), getEncodingScheme(scan)) : new ArrayList<Cell>();
+                    List<Cell> results = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), EncodedColumnsUtil.getQualifierEncodingScheme(scan)) : new ArrayList<Cell>();
                     // Results are potentially returned even when the return value of s.next is false
                     // since this is an indication of whether or not there are more values after the
                     // ones returned

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
index 2c3c663..be1a5ef 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
@@ -370,6 +370,8 @@ public enum SQLExceptionCode {
     IMMUTABLE_TABLE_PROPERTY_INVALID(1135, "XCL35", "IMMUTABLE table property cannot be used with CREATE IMMUTABLE TABLE statement "),
     
     MAX_COLUMNS_EXCEEDED(1136, "XCL36", "The number of columns exceed the maximum supported by the table's qualifier encoding scheme"),
+    INVALID_IMMUTABLE_STORAGE_SCHEME_AND_COLUMN_QUALIFIER_BYTES(1137, "XCL37", "If IMMUTABLE_STORAGE_SCHEME property is not set to ONE_CELL_PER_COLUMN COLUMN_ENCODED_BYTES cannot be 0"),
+    INVALID_IMMUTABLE_STORAGE_SCHEME_CHANGE(1138, "XCL38", "IMMUTABLE_STORAGE_SCHEME property cannot be changed from/to ONE_CELL_PER_COLUMN "),
 
     /**
      * Implementation defined class. Phoenix internal error. (errorcode 20, sqlstate INT).
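
The two codes added above guard the interaction between the IMMUTABLE_STORAGE_SCHEME and COLUMN_ENCODED_BYTES table properties: a storage scheme other than ONE_CELL_PER_COLUMN requires encoded column qualifiers (XCL37), and the scheme cannot later be switched from or to ONE_CELL_PER_COLUMN (XCL38). A minimal JDBC sketch of DDL that would be expected to fail with XCL37 under this change; the connection URL and the exact property syntax are illustrative assumptions, not taken from the patch:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class ImmutableStorageSchemeCheck {
        public static void main(String[] args) throws Exception {
            // Illustrative JDBC URL; point it at the cluster under test.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
                 Statement stmt = conn.createStatement()) {
                try {
                    // A single-cell storage scheme combined with COLUMN_ENCODED_BYTES = 0
                    // (non-encoded qualifiers) should be rejected with the new code.
                    stmt.execute("CREATE TABLE T (K VARCHAR PRIMARY KEY, V VARCHAR) "
                            + "IMMUTABLE_ROWS = true, "
                            + "IMMUTABLE_STORAGE_SCHEME = SINGLE_CELL_ARRAY_WITH_OFFSETS, "
                            + "COLUMN_ENCODED_BYTES = 0");
                } catch (SQLException e) {
                    // Expected: error code 1137, SQLState XCL37.
                    System.out.println(e.getErrorCode() + " " + e.getSQLState());
                }
            }
        }
    }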

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
index 6c7d05b..3c45295 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/BaseQueryPlan.java
@@ -64,7 +64,7 @@ import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.IndexType;
-import org.apache.phoenix.schema.PTable.StorageScheme;
+import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.TableRef;
 import org.apache.phoenix.trace.TracingIterator;
@@ -431,7 +431,7 @@ public abstract class BaseQueryPlan implements QueryPlan {
         ByteArrayOutputStream stream = new ByteArrayOutputStream();
         try {
             DataOutputStream output = new DataOutputStream(stream);
-            boolean storeColsInSingleCell = dataTable.getStorageScheme() == StorageScheme.ONE_CELL_PER_COLUMN_FAMILY;
+            boolean storeColsInSingleCell = dataTable.getImmutableStorageScheme() == ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS;
             if (storeColsInSingleCell) {
                 // if storeColsInSingleCell is true all columns of a given column family are stored in a single cell
                 scan.setAttribute(BaseScannerRegionObserver.COLUMNS_STORED_IN_SINGLE_CELL, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/execute/UnnestArrayPlan.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnnestArrayPlan.java b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnnestArrayPlan.java
index bda1b96..51cb67e 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/execute/UnnestArrayPlan.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/execute/UnnestArrayPlan.java
@@ -33,6 +33,7 @@ import org.apache.phoenix.iterate.ParallelScanGrouper;
 import org.apache.phoenix.iterate.ResultIterator;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.schema.types.PArrayDataType;
+import org.apache.phoenix.schema.types.PArrayDataTypeDecoder;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PInteger;
 
@@ -138,7 +139,7 @@ public class UnnestArrayPlan extends DelegateQueryPlan {
         @Override
         public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
             ptr.set(arrayPtr.get(), arrayPtr.getOffset(), arrayPtr.getLength());
-            PArrayDataType.positionAtArrayElement(ptr, index++, getDataType(), getMaxLength());
+            PArrayDataTypeDecoder.positionAtArrayElement(ptr, index++, getDataType(), getMaxLength());
             return true;
         }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/1f3f7323/phoenix-core/src/main/java/org/apache/phoenix/expression/ArrayColumnExpression.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/expression/ArrayColumnExpression.java b/phoenix-core/src/main/java/org/apache/phoenix/expression/ArrayColumnExpression.java
deleted file mode 100644
index 330aee0..0000000
--- a/phoenix-core/src/main/java/org/apache/phoenix/expression/ArrayColumnExpression.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.phoenix.expression;
-
-import static org.apache.phoenix.query.QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES;
-import static org.apache.phoenix.schema.PTable.QualifierEncodingScheme.NON_ENCODED_QUALIFIERS;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.phoenix.compile.CreateTableCompiler.ViewWhereExpressionVisitor;
-import org.apache.phoenix.expression.visitor.ExpressionVisitor;
-import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.PDatum;
-import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
-import org.apache.phoenix.schema.SortOrder;
-import org.apache.phoenix.schema.tuple.Tuple;
-import org.apache.phoenix.schema.types.PArrayDataType;
-import org.apache.phoenix.schema.types.PDataType;
-import org.apache.phoenix.schema.types.PVarbinary;
-import org.apache.phoenix.util.SchemaUtil;
-
-import com.google.common.base.Preconditions;
-
-/**
- * 
- * Class to access a column that is stored in a KeyValue that contains all
- * columns for a given column family (stored in an array).
- *
- */
-public class ArrayColumnExpression extends KeyValueColumnExpression {
-    
-    private int positionInArray;
-    private String arrayColDisplayName;
-    private KeyValueColumnExpression keyValueColumnExpression;
-    private QualifierEncodingScheme encodingScheme;
-    
-    public ArrayColumnExpression() {
-    }
-    
-    public ArrayColumnExpression(PDatum column, byte[] cf, byte[] cq, QualifierEncodingScheme encodingScheme) {
-        super(column, cf, SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES);
-        Preconditions.checkNotNull(encodingScheme);
-        Preconditions.checkArgument(encodingScheme != NON_ENCODED_QUALIFIERS);
-        this.positionInArray = encodingScheme.decode(cq);
-        this.encodingScheme = encodingScheme;
-        setKeyValueExpression();
-    }
-    
-    public ArrayColumnExpression(PColumn column, String displayName, QualifierEncodingScheme encodingScheme) {
-        super(column, column.getFamilyName().getBytes(), SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES);
-        Preconditions.checkNotNull(encodingScheme);
-        Preconditions.checkArgument(encodingScheme != NON_ENCODED_QUALIFIERS);
-        this.arrayColDisplayName = displayName;
-        this.positionInArray = encodingScheme.decode(column.getColumnQualifierBytes());
-        this.encodingScheme = encodingScheme;
-        setKeyValueExpression();
-    }
-
-    @Override
-    public boolean evaluate(Tuple tuple, ImmutableBytesWritable ptr) {
-    	if (!super.evaluate(tuple, ptr)) {
-            return false;
-        } else if (ptr.getLength() == 0) { 
-        	return true; 
-        }
-
-        // Given a ptr to the entire array, set ptr to point to a particular element within that array
-        // given the type of an array element (see comments in PDataTypeForArray)
-    	return PArrayDataType.positionAtArrayElement(ptr, positionInArray, PVarbinary.INSTANCE, null);
-    }
-
-    @Override
-    public void readFields(DataInput input) throws IOException {
-        super.readFields(input);
-        this.positionInArray = WritableUtils.readVInt(input);
-        this.encodingScheme = WritableUtils.readEnum(input, QualifierEncodingScheme.class);
-        setKeyValueExpression();
-    }
-
-    @Override
-    public void write(DataOutput output) throws IOException {
-        super.write(output);
-        WritableUtils.writeVInt(output, positionInArray);
-        WritableUtils.writeEnum(output, encodingScheme);
-    }
-    
-    public KeyValueColumnExpression getKeyValueExpression() {
-        return keyValueColumnExpression;
-    }
-    
-    private void setKeyValueExpression() {
-        final boolean isNullable = isNullable();
-        final SortOrder sortOrder = getSortOrder();
-        final Integer scale = getScale();
-        final Integer maxLength = getMaxLength();
-        final PDataType datatype = getDataType();
-    	this.keyValueColumnExpression = new KeyValueColumnExpression(new PDatum() {
-			@Override
-			public boolean isNullable() {
-				return isNullable;
-			}
-			
-			@Override
-			public SortOrder getSortOrder() {
-				return sortOrder;
-			}
-			
-			@Override
-			public Integer getScale() {
-				return scale;
-			}
-			
-			@Override
-			public Integer getMaxLength() {
-				return maxLength;
-			}
-			
-			@Override
-			public PDataType getDataType() {
-				return datatype;
-			}
-		}, getColumnFamily(), getPositionInArray());
-    }
-    
-    @Override
-    public String toString() {
-        if (arrayColDisplayName == null) {
-            arrayColDisplayName = SchemaUtil.getColumnDisplayName(getColumnFamily(), getColumnQualifier());
-        }
-        return arrayColDisplayName;
-    }
-    
-    public byte[] getPositionInArray() {
-        return encodingScheme.encode(positionInArray);
-    }
-    
-    @Override
-    public <T> T accept(ExpressionVisitor<T> visitor) {
-        //FIXME: this is ugly but can't think of a good solution.
-        if (visitor instanceof ViewWhereExpressionVisitor) {
-            return visitor.visit(this);
-        } else {
-            return super.accept(visitor);
-        }
-    }
-}