Posted to commits@phoenix.apache.org by sa...@apache.org on 2015/06/24 09:26:36 UTC

[1/3] phoenix git commit: PHOENIX-2056 Ensure PK column from base table is added to any indexes on views

Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-0.98 4a7022066 -> 6e56eddc9


PHOENIX-2056 Ensure PK column from base table is added to any indexes on views


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/b95907ef
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/b95907ef
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/b95907ef

Branch: refs/heads/4.x-HBase-0.98
Commit: b95907ef64c3d93c8c263d5a9f5b1533e4256ee6
Parents: 4a70220
Author: Samarth <sa...@salesforce.com>
Authored: Tue Jun 23 11:07:36 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Tue Jun 23 11:07:36 2015 -0700

----------------------------------------------------------------------
 .../apache/phoenix/end2end/AlterTableIT.java    | 170 ++++++++++++++++++-
 .../coprocessor/MetaDataEndpointImpl.java       | 145 +++++++++++++++-
 .../java/org/apache/phoenix/util/ByteUtil.java  |  10 +-
 3 files changed, 305 insertions(+), 20 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/b95907ef/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 61dd6a9..1302c60 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -2303,13 +2303,23 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
 
             String alterBaseTable = "ALTER TABLE " + baseTable + " ADD NEW_PK varchar primary key ";
             globalConn.createStatement().execute(alterBaseTable);
-
+            
             // verify that the new column new_pk is now part of the primary key for the entire hierarchy
-            assertTrue(checkColumnPartOfPk(globalConn.unwrap(PhoenixConnection.class), "PK1", baseTable));
-            assertTrue(checkColumnPartOfPk(tenant1Conn.unwrap(PhoenixConnection.class), "PK1", view1));
-            assertTrue(checkColumnPartOfPk(tenant1Conn.unwrap(PhoenixConnection.class), "PK1", view2));
-            assertTrue(checkColumnPartOfPk(tenant2Conn.unwrap(PhoenixConnection.class), "PK1", view3));
-            assertTrue(checkColumnPartOfPk(globalConn.unwrap(PhoenixConnection.class), "PK1", view4));
+            
+            globalConn.createStatement().execute("SELECT * FROM " + baseTable);
+            assertTrue(checkColumnPartOfPk(globalConn.unwrap(PhoenixConnection.class), "NEW_PK", baseTable));
+            
+            tenant1Conn.createStatement().execute("SELECT * FROM " + view1);
+            assertTrue(checkColumnPartOfPk(tenant1Conn.unwrap(PhoenixConnection.class), "NEW_PK", view1));
+            
+            tenant1Conn.createStatement().execute("SELECT * FROM " + view2);
+            assertTrue(checkColumnPartOfPk(tenant1Conn.unwrap(PhoenixConnection.class), "NEW_PK", view2));
+            
+            tenant2Conn.createStatement().execute("SELECT * FROM " + view3);
+            assertTrue(checkColumnPartOfPk(tenant2Conn.unwrap(PhoenixConnection.class), "NEW_PK", view3));
+            
+            globalConn.createStatement().execute("SELECT * FROM " + view4);
+            assertTrue(checkColumnPartOfPk(globalConn.unwrap(PhoenixConnection.class), "NEW_PK", view4));
 
         } finally {
             if (tenant1Conn != null) {
@@ -2344,4 +2354,152 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
         tenantProps.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
         return DriverManager.getConnection(getUrl(), tenantProps);
     }
+    
+    @Test
+    public void testAddPKColumnToBaseTableWhoseViewsHaveIndices() throws Exception {
+        String baseTable = "testAddPKColumnToBaseTableWhoseViewsHaveIndices";
+        String view1 = "view1";
+        String view2 = "view2";
+        String view3 = "view3";
+        String tenant1 = "tenant1";
+        String tenant2 = "tenant2";
+        String view2Index = view2 + "_idx";
+        String view3Index = view3 + "_idx";
+        /*                          baseTable(multi-tenant)
+                                 /                           \                
+                         view1(tenant1)                  view3(tenant2, index) 
+                          /
+                        view2(tenant1, index)  
+         */
+        try (Connection globalConn = DriverManager.getConnection(getUrl())) {
+            // create the multi-tenant base table
+            globalConn
+            .createStatement()
+            .execute(
+                    "CREATE TABLE "
+                            + baseTable
+                            + " (TENANT_ID VARCHAR NOT NULL, K1 varchar not null, V1 VARCHAR, V2 VARCHAR CONSTRAINT NAME_PK PRIMARY KEY(TENANT_ID, K1)) MULTI_TENANT = true ");
+
+        }
+        try (Connection tenantConn = getTenantConnection(tenant1)) {
+            // create tenant specific view for tenant1 - view1
+            tenantConn.createStatement().execute("CREATE VIEW " + view1 + " AS SELECT * FROM " + baseTable);
+            PhoenixConnection phxConn = tenantConn.unwrap(PhoenixConnection.class);
+            assertEquals(0, getTableSequenceNumber(phxConn, view1));
+            assertEquals(2, getMaxKeySequenceNumber(phxConn, view1));
+
+            // create a view - view2 on view - view1
+            tenantConn.createStatement().execute("CREATE VIEW " + view2 + " AS SELECT * FROM " + view1);
+            assertEquals(0, getTableSequenceNumber(phxConn, view2));
+            assertEquals(2, getMaxKeySequenceNumber(phxConn, view2));
+
+
+            // create an index on view2
+            tenantConn.createStatement().execute("CREATE INDEX " + view2Index + " ON " + view2 + " (v1) include (v2)");
+            assertEquals(0, getTableSequenceNumber(phxConn, view2Index));
+            assertEquals(4, getMaxKeySequenceNumber(phxConn, view2Index));
+
+        }
+        try (Connection tenantConn = getTenantConnection(tenant2)) {
+            // create tenant specific view for tenant2 - view3
+            tenantConn.createStatement().execute("CREATE VIEW " + view3 + " AS SELECT * FROM " + baseTable);
+            PhoenixConnection phxConn = tenantConn.unwrap(PhoenixConnection.class);
+            assertEquals(0, getTableSequenceNumber(phxConn, view3));
+            assertEquals(2, getMaxKeySequenceNumber(phxConn, view3));
+            
+
+            // create an index on view3
+            tenantConn.createStatement().execute("CREATE INDEX " + view3Index + " ON " + view3 + " (v1) include (v2)");
+            assertEquals(0, getTableSequenceNumber(phxConn, view3Index));
+            assertEquals(4, getMaxKeySequenceNumber(phxConn, view3Index));
+            
+
+        }
+
+        // alter the base table by adding 1 non-pk and 2 pk columns
+        try (Connection globalConn = DriverManager.getConnection(getUrl())) {
+            globalConn.createStatement().execute("ALTER TABLE " + baseTable + " ADD v3 VARCHAR, k2 VARCHAR PRIMARY KEY, k3 VARCHAR PRIMARY KEY");
+            assertEquals(4, getMaxKeySequenceNumber(globalConn.unwrap(PhoenixConnection.class), baseTable));
+            
+            // Upsert records in the base table
+            String upsert = "UPSERT INTO " + baseTable + " (TENANT_ID, K1, K2, K3, V1, V2, V3) VALUES (?, ?, ?, ?, ?, ?, ?)";
+            PreparedStatement stmt = globalConn.prepareStatement(upsert);
+            stmt.setString(1, tenant1);
+            stmt.setString(2, "K1");
+            stmt.setString(3, "K2");
+            stmt.setString(4, "K3");
+            stmt.setString(5, "V1");
+            stmt.setString(6, "V2");
+            stmt.setString(7, "V3");
+            stmt.executeUpdate();
+            stmt.setString(1, tenant2);
+            stmt.setString(2, "K11");
+            stmt.setString(3, "K22");
+            stmt.setString(4, "K33");
+            stmt.setString(5, "V11");
+            stmt.setString(6, "V22");
+            stmt.setString(7, "V33");
+            stmt.executeUpdate();
+            globalConn.commit();
+        }
+
+        // Verify now that the sequence number of data table, indexes and views have changed.
+        // Also verify that the newly added pk columns show up as pk columns of data table, indexes and views.
+        try (Connection tenantConn = getTenantConnection(tenant1)) {
+
+            ResultSet rs = tenantConn.createStatement().executeQuery("SELECT K2, K3, V3 FROM " + view1);
+            PhoenixConnection phxConn = tenantConn.unwrap(PhoenixConnection.class);
+            assertTrue(checkColumnPartOfPk(phxConn, "k2", view1));
+            assertTrue(checkColumnPartOfPk(phxConn, "k3", view1));
+            assertEquals(1, getTableSequenceNumber(phxConn, view1));
+            assertEquals(4, getMaxKeySequenceNumber(phxConn, view1));
+            verifyNewColumns(rs, "K2", "K3", "V3");
+            
+
+            rs = tenantConn.createStatement().executeQuery("SELECT K2, K3, V3 FROM " + view2);
+            assertTrue(checkColumnPartOfPk(phxConn, "k2", view2));
+            assertTrue(checkColumnPartOfPk(phxConn, "k3", view2));
+            assertEquals(1, getTableSequenceNumber(phxConn, view2));
+            assertEquals(4, getMaxKeySequenceNumber(phxConn, view2));
+            verifyNewColumns(rs, "K2", "K3", "V3");
+            
+            assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k2"), view2Index));
+            assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k3"), view2Index));
+            assertEquals(1, getTableSequenceNumber(phxConn, view2Index));
+            assertEquals(6, getMaxKeySequenceNumber(phxConn, view2Index));
+        }
+        try (Connection tenantConn = getTenantConnection(tenant2)) {
+            ResultSet rs = tenantConn.createStatement().executeQuery("SELECT K2, K3, V3 FROM " + view3);
+            PhoenixConnection phxConn = tenantConn.unwrap(PhoenixConnection.class);
+            assertTrue(checkColumnPartOfPk(phxConn, "k2", view3));
+            assertTrue(checkColumnPartOfPk(phxConn, "k3", view3));
+            assertEquals(1, getTableSequenceNumber(phxConn, view3));
+            verifyNewColumns(rs, "K22", "K33", "V33");
+            
+            assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k2"), view3Index));
+            assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k3"), view3Index));
+            assertEquals(1, getTableSequenceNumber(phxConn, view3Index));
+            assertEquals(6, getMaxKeySequenceNumber(phxConn, view3Index));
+        }
+    }
+    
+    private static long getTableSequenceNumber(PhoenixConnection conn, String tableName) throws SQLException {
+        PTable table = conn.getMetaDataCache().getTable(new PTableKey(conn.getTenantId(), SchemaUtil.normalizeIdentifier(tableName)));
+        return table.getSequenceNumber();
+    }
+    
+    private static short getMaxKeySequenceNumber(PhoenixConnection conn, String tableName) throws SQLException {
+        PTable table = conn.getMetaDataCache().getTable(new PTableKey(conn.getTenantId(), SchemaUtil.normalizeIdentifier(tableName)));
+        return SchemaUtil.getMaxKeySeq(table);
+    }
+    
+    private static void verifyNewColumns(ResultSet rs, String ... values) throws SQLException {
+        assertTrue(rs.next());
+        int i = 1;
+        for (String value : values) {
+            assertEquals(value, rs.getString(i++));
+        }
+        assertFalse(rs.next());
+    }
+    
 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b95907ef/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index f80a69d..af0c7de 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -62,7 +62,9 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_BYTE
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE_BYTES;
 import static org.apache.phoenix.query.QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.query.QueryConstants.SEPARATOR_BYTE_ARRAY;
 import static org.apache.phoenix.schema.PTableType.INDEX;
+import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY;
 import static org.apache.phoenix.util.SchemaUtil.getVarCharLength;
 import static org.apache.phoenix.util.SchemaUtil.getVarChars;
 
@@ -166,11 +168,13 @@ import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PInteger;
 import org.apache.phoenix.schema.types.PLong;
+import org.apache.phoenix.schema.types.PSmallint;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
 import org.apache.phoenix.trace.util.Tracing;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.EnvironmentEdgeManager;
+import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.KeyValueUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.QueryUtil;
@@ -1580,13 +1584,13 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             // lock the rows corresponding to views so that no other thread can modify the view meta-data
             RowLock viewRowLock = acquireLock(region, viewKey, locks);
             PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
-
             if (view.getBaseColumnCount() == QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT) {
                 // if a view has divorced itself from the base table, we don't allow schema changes
                 // to be propagated to it.
                 return;
             }
             int deltaNumberOfColumns = 0;
+            short deltaNumPkColsSoFar = 0;
             for (Mutation m : tableMetadata) {
                 byte[][] rkmd = new byte[5][];
                 int pkCount = getVarChars(m.getRow(), rkmd);
@@ -1595,16 +1599,133 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                         && Bytes.compareTo(tableName, rkmd[TABLE_NAME_INDEX]) == 0) {
                     Put p = (Put)m;
 
-                    byte[] k = ByteUtil.concat(viewKey, QueryConstants.SEPARATOR_BYTE_ARRAY, rkmd[COLUMN_NAME_INDEX],
-                            QueryConstants.SEPARATOR_BYTE_ARRAY, rkmd[FAMILY_NAME_INDEX]);
-                    Put viewColumnDefinitionPut = new Put(k, clientTimeStamp);
+                    byte[] columnKey = ByteUtil.concat(viewKey, QueryConstants.SEPARATOR_BYTE_ARRAY, rkmd[COLUMN_NAME_INDEX]);
+                    if (rkmd[FAMILY_NAME_INDEX] != null) {
+                        columnKey = ByteUtil.concat(columnKey, QueryConstants.SEPARATOR_BYTE_ARRAY, rkmd[FAMILY_NAME_INDEX]);
+                    }
+                    Put viewColumnDefinitionPut = new Put(columnKey, clientTimeStamp);
                     for (Cell cell : p.getFamilyCellMap().values().iterator().next()) {
-                        viewColumnDefinitionPut.add(CellUtil.createCell(k, CellUtil.cloneFamily(cell),
+                        viewColumnDefinitionPut.add(CellUtil.createCell(columnKey, CellUtil.cloneFamily(cell),
                                 CellUtil.cloneQualifier(cell), cell.getTimestamp(), cell.getTypeByte(),
                                 CellUtil.cloneValue(cell)));
                     }
                     deltaNumberOfColumns++;
                     mutationsForAddingColumnsToViews.add(viewColumnDefinitionPut);
+                    if (rkmd[FAMILY_NAME_INDEX] == null && rkmd[COLUMN_NAME_INDEX] != null) {
+                        /*
+                         * If adding a pk column to the base table (and hence the view), see if there are any indexes on
+                         * the view. If yes, then generate puts for the index header row and column rows.
+                         */
+                        deltaNumPkColsSoFar++;
+                        for (PTable index : view.getIndexes()) {
+                            int oldNumberOfColsInIndex = index.getColumns().size();
+                            
+                            byte[] indexColumnKey = ByteUtil.concat(getViewIndexHeaderRowKey(index),
+                                    QueryConstants.SEPARATOR_BYTE_ARRAY,
+                                    IndexUtil.getIndexColumnName(rkmd[FAMILY_NAME_INDEX], rkmd[COLUMN_NAME_INDEX]));
+                            Put indexColumnDefinitionPut = new Put(indexColumnKey, clientTimeStamp);
+                            
+                            // Set the index specific data type for the column
+                            List<Cell> dataTypes = viewColumnDefinitionPut
+                                    .get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                            PhoenixDatabaseMetaData.DATA_TYPE_BYTES);
+                            if (dataTypes != null && dataTypes.size() > 0) {
+                                Cell dataType = dataTypes.get(0);
+                                int dataColumnDataType = PInteger.INSTANCE.getCodec().decodeInt(
+                                        dataType.getValueArray(), dataType.getValueOffset(), SortOrder.ASC);
+                                int indexColumnDataType = IndexUtil.getIndexColumnDataType(true,
+                                        PDataType.fromTypeId(dataColumnDataType)).getSqlType();
+                                byte[] indexColumnDataTypeBytes = new byte[PInteger.INSTANCE.getByteSize()];
+                                PInteger.INSTANCE.getCodec().encodeInt(indexColumnDataType, indexColumnDataTypeBytes, 0);
+                                indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                        PhoenixDatabaseMetaData.DATA_TYPE_BYTES, indexColumnDataTypeBytes);
+                            }
+                            
+                            // Set precision
+                            List<Cell> decimalDigits = viewColumnDefinitionPut.get(
+                                    PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                    PhoenixDatabaseMetaData.DECIMAL_DIGITS_BYTES);
+                            if (decimalDigits != null && decimalDigits.size() > 0) {
+                                Cell decimalDigit = decimalDigits.get(0);
+                                indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                        PhoenixDatabaseMetaData.DECIMAL_DIGITS_BYTES, decimalDigit.getValueArray());
+                            }
+                            
+                            // Set size
+                            List<Cell> columnSizes = viewColumnDefinitionPut.get(
+                                    PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                    PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES);
+                            if (columnSizes != null && columnSizes.size() > 0) {
+                                Cell columnSize = columnSizes.get(0);
+                                indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                        PhoenixDatabaseMetaData.COLUMN_SIZE_BYTES, columnSize.getValueArray());
+                            }
+                            
+                            // Set sort order
+                            List<Cell> sortOrders = viewColumnDefinitionPut.get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                    PhoenixDatabaseMetaData.SORT_ORDER_BYTES);
+                            if (sortOrders != null && sortOrders.size() > 0) {
+                                Cell sortOrder = sortOrders.get(0);
+                                indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                        PhoenixDatabaseMetaData.SORT_ORDER_BYTES, sortOrder.getValueArray());
+                            }
+                            
+                            // Set data table name
+                            List<Cell> dataTableNames = viewColumnDefinitionPut.get(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                    PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES);
+                            if (dataTableNames != null && dataTableNames.size() > 0) {
+                                Cell dataTableName = dataTableNames.get(0);
+                                indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                        PhoenixDatabaseMetaData.DATA_TABLE_NAME_BYTES, dataTableName.getValueArray());
+                            }
+                            
+                            // Set the ordinal position of the new column.
+                            byte[] ordinalPositionBytes = new byte[PInteger.INSTANCE.getByteSize()];
+                            int ordinalPositionOfNewCol = oldNumberOfColsInIndex + deltaNumPkColsSoFar;
+                            PInteger.INSTANCE.getCodec().encodeInt(ordinalPositionOfNewCol, ordinalPositionBytes, 0);
+                            indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                        PhoenixDatabaseMetaData.ORDINAL_POSITION_BYTES, ordinalPositionBytes);
+                            
+                            // New PK columns have to be nullable after the first DDL
+                            byte[] isNullableBytes = PBoolean.INSTANCE.toBytes(true);
+                            indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                        PhoenixDatabaseMetaData.NULLABLE_BYTES, isNullableBytes);
+                            
+                            // Set the key sequence for the pk column to be added
+                            short currentKeySeq = SchemaUtil.getMaxKeySeq(index);
+                            short newKeySeq = (short)(currentKeySeq + deltaNumPkColsSoFar);
+                            byte[] keySeqBytes = new byte[PSmallint.INSTANCE.getByteSize()];
+                            PSmallint.INSTANCE.getCodec().encodeShort(newKeySeq, keySeqBytes, 0);
+                            indexColumnDefinitionPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                                    PhoenixDatabaseMetaData.KEY_SEQ_BYTES, keySeqBytes);
+                            
+                            mutationsForAddingColumnsToViews.add(indexColumnDefinitionPut);
+                        }
+                    }
+                }
+            }
+            if (deltaNumPkColsSoFar > 0) {
+                for (PTable index : view.getIndexes()) {
+                    byte[] indexHeaderRowKey = getViewIndexHeaderRowKey(index);
+                    Put indexHeaderRowMutation = new Put(indexHeaderRowKey);
+                    
+                    // increment sequence number
+                    long newSequenceNumber = index.getSequenceNumber() + 1;
+                    byte[] newSequenceNumberPtr = new byte[PLong.INSTANCE.getByteSize()];
+                    PLong.INSTANCE.getCodec().encodeLong(newSequenceNumber, newSequenceNumberPtr, 0);
+                    indexHeaderRowMutation.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                            PhoenixDatabaseMetaData.TABLE_SEQ_NUM_BYTES, newSequenceNumberPtr);
+                    
+                    // increase the column count
+                    int newColumnCount = index.getColumns().size() + deltaNumPkColsSoFar;
+                    byte[] newColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
+                    PInteger.INSTANCE.getCodec().encodeInt(newColumnCount, newColumnCountPtr, 0);
+                    indexHeaderRowMutation.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
+                            PhoenixDatabaseMetaData.COLUMN_COUNT_BYTES, newColumnCountPtr);
+                    
+                    // add index row header key to the invalidate list to force clients to fetch the latest meta-data
+                    invalidateList.add(new ImmutableBytesPtr(indexHeaderRowKey));
+                    mutationsForAddingColumnsToViews.add(indexHeaderRowMutation);
                 }
             }
 
@@ -1631,7 +1752,10 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     int newPosition = column.getPosition() + deltaNumberOfColumns + 1;
 
                     byte[] k = ByteUtil.concat(viewKey, QueryConstants.SEPARATOR_BYTE_ARRAY, column.getName()
-                            .getBytes(), QueryConstants.SEPARATOR_BYTE_ARRAY, column.getFamilyName() != null ? column.getFamilyName().getBytes() : null);
+                            .getBytes());
+                    if (column.getFamilyName() != null) {
+                        k = ByteUtil.concat(k, QueryConstants.SEPARATOR_BYTE_ARRAY, column.getFamilyName().getBytes());
+                    }
 
                     Put positionUpdatePut = new Put(k, clientTimeStamp);
                     byte[] ptr = new byte[PInteger.INSTANCE.getByteSize()];
@@ -1644,7 +1768,14 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             invalidateList.add(new ImmutableBytesPtr(viewKey));
         }
     }
-
+    
+    private byte[] getViewIndexHeaderRowKey(PTable index) {
+        byte[] tenantIdBytes = index.getKey().getTenantId() != null ? index.getKey().getTenantId().getBytes() : EMPTY_BYTE_ARRAY;
+        byte[] schemaNameBytes = index.getSchemaName() != null ? index.getSchemaName().getBytes() : EMPTY_BYTE_ARRAY; 
+        byte[] tableNameBytes = index.getTableName().getBytes();
+        return ByteUtil.concat(tenantIdBytes, SEPARATOR_BYTE_ARRAY, schemaNameBytes, SEPARATOR_BYTE_ARRAY, tableNameBytes);
+    }
+    
     @Override
     public void addColumn(RpcController controller, AddColumnRequest request,
             RpcCallback<MetaDataResponse> done) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/b95907ef/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
index 1f4a285..1e3516d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/ByteUtil.java
@@ -253,17 +253,13 @@ public class ByteUtil {
     public static byte[] concat(byte[] first, byte[]... rest) {
         int totalLength = first.length;
         for (byte[] array : rest) {
-            if (array != null) {
-                totalLength += array.length;
-            }
+            totalLength += array.length;
         }
         byte[] result = Arrays.copyOf(first, totalLength);
         int offset = first.length;
         for (byte[] array : rest) {
-            if (array != null) {
-                System.arraycopy(array, 0, result, offset, array.length);
-                offset += array.length;
-            }
+            System.arraycopy(array, 0, result, offset, array.length);
+            offset += array.length;
         }
         return result;
     }


[3/3] phoenix git commit: PHOENIX-2056 Additional test to verify index usage after adding pk columns

Posted by sa...@apache.org.
PHOENIX-2056 Additional test to verify index usage after adding pk columns


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/6e56eddc
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/6e56eddc
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/6e56eddc

Branch: refs/heads/4.x-HBase-0.98
Commit: 6e56eddc984fc1abe9dadba91954cc148e7455bf
Parents: 97f0d62
Author: Samarth <sa...@salesforce.com>
Authored: Wed Jun 24 00:07:32 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Wed Jun 24 00:07:32 2015 -0700

----------------------------------------------------------------------
 .../apache/phoenix/end2end/AlterTableIT.java    | 28 +++++++++++++++-----
 1 file changed, 21 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/6e56eddc/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 3a9517d..946aaab 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -46,10 +46,12 @@ import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeepDeletedCells;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
+import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PTable;
@@ -2402,7 +2404,6 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
             tenantConn.createStatement().execute("CREATE INDEX " + view2Index + " ON " + view2 + " (v1) include (v2)");
             assertEquals(0, getTableSequenceNumber(phxConn, view2Index));
             assertEquals(4, getMaxKeySequenceNumber(phxConn, view2Index));
-
         }
         try (Connection tenantConn = getTenantConnection(tenant2)) {
             // create tenant specific view for tenant2 - view3
@@ -2410,13 +2411,13 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
             PhoenixConnection phxConn = tenantConn.unwrap(PhoenixConnection.class);
             assertEquals(0, getTableSequenceNumber(phxConn, view3));
             assertEquals(2, getMaxKeySequenceNumber(phxConn, view3));
-            
+
 
             // create an index on view3
             tenantConn.createStatement().execute("CREATE INDEX " + view3Index + " ON " + view3 + " (v1) include (v2)");
             assertEquals(0, getTableSequenceNumber(phxConn, view3Index));
             assertEquals(4, getMaxKeySequenceNumber(phxConn, view3Index));
-            
+
 
         }
 
@@ -2424,7 +2425,7 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
         try (Connection globalConn = DriverManager.getConnection(getUrl())) {
             globalConn.createStatement().execute("ALTER TABLE " + baseTable + " ADD v3 VARCHAR, k2 VARCHAR PRIMARY KEY, k3 VARCHAR PRIMARY KEY");
             assertEquals(4, getMaxKeySequenceNumber(globalConn.unwrap(PhoenixConnection.class), baseTable));
-            
+
             // Upsert records in the base table
             String upsert = "UPSERT INTO " + baseTable + " (TENANT_ID, K1, K2, K3, V1, V2, V3) VALUES (?, ?, ?, ?, ?, ?, ?)";
             PreparedStatement stmt = globalConn.prepareStatement(upsert);
@@ -2458,7 +2459,7 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
             assertEquals(1, getTableSequenceNumber(phxConn, view1));
             assertEquals(4, getMaxKeySequenceNumber(phxConn, view1));
             verifyNewColumns(rs, "K2", "K3", "V3");
-            
+
 
             rs = tenantConn.createStatement().executeQuery("SELECT K2, K3, V3 FROM " + view2);
             assertTrue(checkColumnPartOfPk(phxConn, "k2", view2));
@@ -2466,7 +2467,7 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
             assertEquals(1, getTableSequenceNumber(phxConn, view2));
             assertEquals(4, getMaxKeySequenceNumber(phxConn, view2));
             verifyNewColumns(rs, "K2", "K3", "V3");
-            
+
             assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k2"), view2Index));
             assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k3"), view2Index));
             assertEquals(1, getTableSequenceNumber(phxConn, view2Index));
@@ -2479,12 +2480,25 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
             assertTrue(checkColumnPartOfPk(phxConn, "k3", view3));
             assertEquals(1, getTableSequenceNumber(phxConn, view3));
             verifyNewColumns(rs, "K22", "K33", "V33");
-            
+
             assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k2"), view3Index));
             assertTrue(checkColumnPartOfPk(phxConn, IndexUtil.getIndexColumnName(null, "k3"), view3Index));
             assertEquals(1, getTableSequenceNumber(phxConn, view3Index));
             assertEquals(6, getMaxKeySequenceNumber(phxConn, view3Index));
         }
+        // Verify that the index is actually being used when using newly added pk col
+        try (Connection tenantConn = getTenantConnection(tenant1)) {
+            String upsert = "UPSERT INTO " + view2 + " (K1, K2, K3, V1, V2, V3) VALUES ('key1', 'key2', 'key3', 'value1', 'value2', 'value3')";
+            tenantConn.createStatement().executeUpdate(upsert);
+            tenantConn.commit();
+            Statement stmt = tenantConn.createStatement();
+            String sql = "SELECT V2 FROM " + view2 + " WHERE V1 = 'value1' AND K3 = 'key3'";
+            QueryPlan plan = stmt.unwrap(PhoenixStatement.class).optimizeQuery(sql);
+            assertTrue(plan.getTableRef().getTable().getName().getString().equals(SchemaUtil.normalizeIdentifier(view2Index)));
+            ResultSet rs = tenantConn.createStatement().executeQuery(sql);
+            verifyNewColumns(rs, "value2");
+        }
+
     }
     
     private static long getTableSequenceNumber(PhoenixConnection conn, String tableName) throws SQLException {


[2/3] phoenix git commit: PHOENIX-2055 Allow view with views to add column

Posted by sa...@apache.org.
PHOENIX-2055 Allow view with views to add column


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/97f0d626
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/97f0d626
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/97f0d626

Branch: refs/heads/4.x-HBase-0.98
Commit: 97f0d626079321fff78d1b117dbc3cfc16d7e2ee
Parents: b95907e
Author: Samarth <sa...@salesforce.com>
Authored: Tue Jun 23 15:33:17 2015 -0700
Committer: Samarth <sa...@salesforce.com>
Committed: Tue Jun 23 15:33:17 2015 -0700

----------------------------------------------------------------------
 .../apache/phoenix/end2end/AlterTableIT.java    | 16 +++---
 .../org/apache/phoenix/end2end/UpgradeIT.java   | 12 ++---
 .../coprocessor/MetaDataEndpointImpl.java       | 51 ++++++++++++--------
 .../apache/phoenix/query/QueryConstants.java    |  2 +-
 .../org/apache/phoenix/util/UpgradeUtil.java    |  6 +--
 5 files changed, 50 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/97f0d626/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
index 1302c60..3a9517d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/AlterTableIT.java
@@ -2127,8 +2127,8 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
     }
     
     @Test
-    public void testAlteringViewThatHasChildViewsNotAllowed() throws Exception {
-        String baseTable = "testAlteringViewThatHasChildViewsNotAllowed";
+    public void testAlteringViewThatHasChildViews() throws Exception {
+        String baseTable = "testAlteringViewThatHasChildViews";
         String childView = "childView";
         String grandChildView = "grandChildView";
         try (Connection conn = DriverManager.getConnection(getUrl())) {
@@ -2165,13 +2165,17 @@ public class AlterTableIT extends BaseOwnClusterHBaseManagedTimeIT {
                 assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
             }
             
-            // Adding column to view that has child views should fail
+            // Adding column to view that has child views is allowed
             String addColumnToChildView = "ALTER VIEW " + childView + " ADD V5 VARCHAR";
+            conn.createStatement().execute(addColumnToChildView);
+            // V5 column should be visible now for childView
+            conn.createStatement().execute("SELECT V5 FROM " + childView);    
+            
+            // However, column V5 shouldn't have propagated to grandChildView. Not till PHOENIX-2054 is fixed.
             try {
-                conn.createStatement().execute(addColumnToChildView);
-                fail("Adding columns to a view that has child views on it is not allowed");
+                conn.createStatement().execute("SELECT V5 FROM " + grandChildView);
             } catch (SQLException e) {
-                assertEquals(SQLExceptionCode.CANNOT_MUTATE_TABLE.getErrorCode(), e.getErrorCode());
+                assertEquals(SQLExceptionCode.COLUMN_NOT_FOUND.getErrorCode(), e.getErrorCode());
             }
 
             // dropping column from the grand child view, however, should work.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/97f0d626/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
index 886e567..094816c 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/UpgradeIT.java
@@ -20,7 +20,7 @@ package org.apache.phoenix.end2end;
 import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkNotNull;
 import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT;
-import static org.apache.phoenix.query.QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT;
 import static org.apache.phoenix.util.UpgradeUtil.SELECT_BASE_COLUMN_COUNT_FROM_HEADER_ROW;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
@@ -187,14 +187,14 @@ public class UpgradeIT extends BaseHBaseManagedTimeIT {
             for (int i = 1; i <=2 ; i++) {
                 String tenantId = "tenant" + i;
                 checkBaseColumnCount(tenantId, null, "TENANT_VIEW1", 4);
-                checkBaseColumnCount(tenantId, null, "TENANT_VIEW2", DIVORCED_VIEW_BASE_COLUMN_COUNT);
-                checkBaseColumnCount(tenantId, null, "TENANT_VIEW3", DIVORCED_VIEW_BASE_COLUMN_COUNT);
+                checkBaseColumnCount(tenantId, null, "TENANT_VIEW2", DIVERGED_VIEW_BASE_COLUMN_COUNT);
+                checkBaseColumnCount(tenantId, null, "TENANT_VIEW3", DIVERGED_VIEW_BASE_COLUMN_COUNT);
             }
             
             // Verify base column count for global views
             checkBaseColumnCount(null, null, "GLOBAL_VIEW1", 4);
-            checkBaseColumnCount(null, null, "GLOBAL_VIEW2", DIVORCED_VIEW_BASE_COLUMN_COUNT);
-            checkBaseColumnCount(null, null, "GLOBAL_VIEW3", DIVORCED_VIEW_BASE_COLUMN_COUNT);
+            checkBaseColumnCount(null, null, "GLOBAL_VIEW2", DIVERGED_VIEW_BASE_COLUMN_COUNT);
+            checkBaseColumnCount(null, null, "GLOBAL_VIEW3", DIVERGED_VIEW_BASE_COLUMN_COUNT);
         }
         
         
@@ -243,7 +243,7 @@ public class UpgradeIT extends BaseHBaseManagedTimeIT {
                     conn3.createStatement().execute(
                         "ALTER VIEW " + fullViewName + " DROP COLUMN CF2.V2");
                 }
-                expectedBaseColumnCount = DIVORCED_VIEW_BASE_COLUMN_COUNT;
+                expectedBaseColumnCount = DIVERGED_VIEW_BASE_COLUMN_COUNT;
             } else {
                 expectedBaseColumnCount = 6;
             }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/97f0d626/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
index af0c7de..e13c635 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataEndpointImpl.java
@@ -61,7 +61,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_CONSTANT_BYTE
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_INDEX_ID_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_STATEMENT_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.VIEW_TYPE_BYTES;
-import static org.apache.phoenix.query.QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT;
 import static org.apache.phoenix.query.QueryConstants.SEPARATOR_BYTE_ARRAY;
 import static org.apache.phoenix.schema.PTableType.INDEX;
 import static org.apache.phoenix.util.ByteUtil.EMPTY_BYTE_ARRAY;
@@ -1584,7 +1584,7 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
             // lock the rows corresponding to views so that no other thread can modify the view meta-data
             RowLock viewRowLock = acquireLock(region, viewKey, locks);
             PTable view = doGetTable(viewKey, clientTimeStamp, viewRowLock);
-            if (view.getBaseColumnCount() == QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT) {
+            if (view.getBaseColumnCount() == QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT) {
                 // if a view has divorced itself from the base table, we don't allow schema changes
                 // to be propagated to it.
                 return;
@@ -1790,21 +1790,29 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                     byte[] schemaName = rowKeyMetaData[SCHEMA_NAME_INDEX];
                     byte[] tableName = rowKeyMetaData[TABLE_NAME_INDEX];
                     PTableType type = table.getType();
-                    TableViewFinderResult childViewsResult = findChildViews(region, tenantId, table,
-                            (type == PTableType.VIEW ? PARENT_TABLE_BYTES : PHYSICAL_TABLE_BYTES));
                     List<Mutation> mutationsForAddingColumnsToViews = Collections.emptyList();
-                    if (childViewsResult.hasViews()) {
-                        /*
-                         * Adding a column is not allowed if: 1) Meta-data for child view/s spans over more than one
-                         * region. 2) Adding column to a views that has child view/s.
-                         */
-                        if (!childViewsResult.allViewsInSingleRegion() || type == PTableType.VIEW) {
-                            return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
-                                    EnvironmentEdgeManager.currentTimeMillis(), null);
-                        } else {
-                            mutationsForAddingColumnsToViews = new ArrayList<>(childViewsResult.getResults().size() * tableMetaData.size());
-                            addRowsToChildViews(tableMetaData, mutationsForAddingColumnsToViews, schemaName, tableName, invalidateList, clientTimeStamp,
-                                    childViewsResult, region, locks);
+                    /*
+                     * If adding a column to a view, we don't want to propagate those meta-data changes to the child
+                     * view hierarchy. This is because our check for finding child views is expensive and we want
+                     * meta-data changes to views to be light-weight. The side-effect of this change is that a child
+                     * won't have its parent view's columns, i.e. it will have diverged from the parent view. See
+                     * https://issues.apache.org/jira/browse/PHOENIX-2051 for a proper way to fix the performance issue
+                     * and https://issues.apache.org/jira/browse/PHOENIX-2054 for enabling meta-data changes to a view
+                     * to be propagated to its view hierarchy.
+                     */
+                    if (type == PTableType.TABLE) {
+                        TableViewFinderResult childViewsResult = findChildViews(region, tenantId, table, PHYSICAL_TABLE_BYTES);
+                        if (childViewsResult.hasViews()) {
+                            // Adding a column is not allowed if the meta-data for child view/s spans over
+                            // more than one region (since the changes cannot be done in a transactional fashion)
+                            if (!childViewsResult.allViewsInSingleRegion()) {
+                                return new MetaDataMutationResult(MutationCode.UNALLOWED_TABLE_MUTATION,
+                                        EnvironmentEdgeManager.currentTimeMillis(), null);
+                            } else {
+                                mutationsForAddingColumnsToViews = new ArrayList<>(childViewsResult.getResults().size() * tableMetaData.size());
+                                addRowsToChildViews(tableMetaData, mutationsForAddingColumnsToViews, schemaName, tableName, invalidateList, clientTimeStamp,
+                                        childViewsResult, region, locks);
+                            }
                         }
                     }
                     for (Mutation m : tableMetaData) {
@@ -2037,18 +2045,19 @@ public class MetaDataEndpointImpl extends MetaDataProtocol implements Coprocesso
                                         continue;
                                     }
                                     if (table.getType() == PTableType.VIEW) {
-                                        if (table.getBaseColumnCount() != DIVORCED_VIEW_BASE_COLUMN_COUNT
+                                        if (table.getBaseColumnCount() != DIVERGED_VIEW_BASE_COLUMN_COUNT
                                                 && columnToDelete.getPosition() < table.getBaseColumnCount()) {
                                             /*
                                              * If the column being dropped is inherited from the base table, then the
-                                             * view is about to divorce itself from the base table. Divorce here means
-                                             * that any further meta-data changes made to the base table will not be
-                                             * propagated to the hierarchy of views on the base table.
+                                             * view is about to diverge from the base table. The consequence of
+                                             * this divergence is that any further meta-data changes made to the
+                                             * base table will not be propagated to the hierarchy of views where this 
+                                             * view is the root.
                                              */
                                             byte[] viewKey = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
                                             Put updateBaseColumnCountPut = new Put(viewKey);
                                             byte[] baseColumnCountPtr = new byte[PInteger.INSTANCE.getByteSize()];
-                                            PInteger.INSTANCE.getCodec().encodeInt(DIVORCED_VIEW_BASE_COLUMN_COUNT,
+                                            PInteger.INSTANCE.getCodec().encodeInt(DIVERGED_VIEW_BASE_COLUMN_COUNT,
                                                     baseColumnCountPtr, 0);
                                             updateBaseColumnCountPut.add(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                                                     PhoenixDatabaseMetaData.BASE_COLUMN_COUNT_BYTES, clientTimeStamp,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/97f0d626/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
index f82c594..d095049 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryConstants.java
@@ -185,7 +185,7 @@ public interface QueryConstants {
     public static final BigDecimal BD_MILLIS_IN_DAY = BigDecimal.valueOf(QueryConstants.MILLIS_IN_DAY);
     public static final int MAX_ALLOWED_NANOS = 999999999;
     public static final int NANOS_IN_SECOND = BigDecimal.valueOf(Math.pow(10, 9)).intValue();
-    public static final int DIVORCED_VIEW_BASE_COLUMN_COUNT = -100;
+    public static final int DIVERGED_VIEW_BASE_COLUMN_COUNT = -100;
     public static final int BASE_TABLE_BASE_COLUMN_COUNT = -1;
     public static final String CREATE_TABLE_METADATA =
             // Do not use IF NOT EXISTS as we sometimes catch the TableAlreadyExists

http://git-wip-us.apache.org/repos/asf/phoenix/blob/97f0d626/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
index dff6598..8d574ce 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/UpgradeUtil.java
@@ -32,7 +32,7 @@ import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID;
 import static org.apache.phoenix.query.QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT;
-import static org.apache.phoenix.query.QueryConstants.DIVORCED_VIEW_BASE_COLUMN_COUNT;
+import static org.apache.phoenix.query.QueryConstants.DIVERGED_VIEW_BASE_COLUMN_COUNT;
 
 import java.io.IOException;
 import java.sql.PreparedStatement;
@@ -595,7 +595,7 @@ public class UpgradeUtil {
                         // We are about to iterate through columns of a different view. Check whether base column count was upserted.
                         // If it wasn't then it is likely the case that a column inherited from the base table was dropped from view.
                         if (currentViewName != null && !baseColumnCountUpserted && numBaseTableColsMatched < numColsInBaseTable) {
-                            upsertBaseColumnCountInHeaderRow(metaConnection, currentTenantId, currentViewSchema, currentViewName, DIVORCED_VIEW_BASE_COLUMN_COUNT);
+                            upsertBaseColumnCountInHeaderRow(metaConnection, currentTenantId, currentViewSchema, currentViewName, DIVERGED_VIEW_BASE_COLUMN_COUNT);
                         }
                         // reset the values as we are now going to iterate over columns of a new view.
                         numBaseTableColsMatched = 0;
@@ -641,7 +641,7 @@ public class UpgradeUtil {
                             }
                         } else {
                             // special value to denote that the view has divorced itself from the base physical table.
-                            upsertBaseColumnCountInHeaderRow(metaConnection, viewTenantId, viewSchema, viewName, DIVORCED_VIEW_BASE_COLUMN_COUNT);
+                            upsertBaseColumnCountInHeaderRow(metaConnection, viewTenantId, viewSchema, viewName, DIVERGED_VIEW_BASE_COLUMN_COUNT);
                             baseColumnCountUpserted = true;
                             // ignore rest of the rows for the view.
                             ignore = true;