Posted to commits@phoenix.apache.org by td...@apache.org on 2017/03/09 23:02:54 UTC

[41/50] [abbrv] phoenix git commit: PHOENIX-3680 Do not issue delete markers when dropping a column from an immutable encoded table

PHOENIX-3680 Do not issue delete markers when dropping a column from an immutable encoded table
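
For context, a minimal JDBC sketch of the scenario this change targets (the connection URL, table, and column names are hypothetical, and it assumes a Phoenix version where column encoding is enabled by default for new tables): dropping a column from an immutable table should now be a metadata-only operation rather than one that writes per-row delete markers.

    import java.sql.Connection;
    import java.sql.DriverManager;

    public class DropColumnFromImmutableEncodedTable {
        public static void main(String[] args) throws Exception {
            // Hypothetical ZooKeeper quorum in the URL; adjust for your cluster.
            try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
                conn.setAutoCommit(true);
                // Immutable table; with column encoding on, values are stored in encoded/packed form.
                conn.createStatement().execute(
                    "CREATE TABLE T (K VARCHAR NOT NULL PRIMARY KEY, V1 VARCHAR, V2 VARCHAR) IMMUTABLE_ROWS=true");
                conn.createStatement().execute("UPSERT INTO T VALUES('a','x','1')");
                // With this change, dropping V2 should not issue delete markers against existing rows.
                conn.createStatement().execute("ALTER TABLE T DROP COLUMN V2");
            }
        }
    }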


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/023f8631
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/023f8631
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/023f8631

Branch: refs/heads/omid
Commit: 023f8631fc1688eaecef475d47d9e926fe7d3014
Parents: 8f6d02f
Author: Thomas <td...@salesforce.com>
Authored: Wed Mar 1 11:05:21 2017 -0800
Committer: Thomas D'Silva <td...@tdsilva-ltm2.internal.salesforce.com>
Committed: Mon Mar 6 10:29:40 2017 -0800

----------------------------------------------------------------------
 .../phoenix/end2end/index/DropColumnIT.java     | 517 +++++++++++++++++++
 .../phoenix/end2end/index/DropMetadataIT.java   | 220 --------
 .../EndToEndCoveredColumnsIndexBuilderIT.java   |   2 +-
 .../hbase/index/covered/LocalTableState.java    |  14 +-
 .../phoenix/hbase/index/covered/TableState.java |   4 +-
 .../example/CoveredColumnIndexCodec.java        |   4 +-
 .../hbase/index/scanner/ScannerBuilder.java     |   9 +-
 .../hbase/index/util/IndexManagementUtil.java   |   2 +-
 .../apache/phoenix/index/IndexMaintainer.java   |  10 +-
 .../apache/phoenix/index/PhoenixIndexCodec.java |  25 +-
 .../index/PhoenixTransactionalIndexer.java      |   2 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  45 +-
 .../index/covered/TestLocalTableState.java      |  10 +-
 13 files changed, 587 insertions(+), 277 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
new file mode 100644
index 0000000..4f6c37e
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropColumnIT.java
@@ -0,0 +1,517 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Properties;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.expression.KeyValueColumnExpression;
+import org.apache.phoenix.expression.SingleCellColumnExpression;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.query.QueryConstants;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.schema.ColumnNotFoundException;
+import org.apache.phoenix.schema.PColumn;
+import org.apache.phoenix.schema.PName;
+import org.apache.phoenix.schema.PNameFactory;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableImpl;
+import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.TableNotFoundException;
+import org.apache.phoenix.schema.tuple.ResultTuple;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.PropertiesUtil;
+import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.StringUtil;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Test;
+import org.junit.internal.ArrayComparisonFailure;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.junit.runners.Parameterized.Parameters;
+
+@RunWith(Parameterized.class)
+public class DropColumnIT extends ParallelStatsDisabledIT {
+    
+    private static final String PRINCIPAL = "dropColumn";
+    public static final String SCHEMA_NAME = "";
+    private final String TENANT_ID = "tenant1";
+    private String tableDDLOptions;
+    private boolean columnEncoded;
+    private boolean mutable;
+    
+    private Connection getConnection() throws Exception {
+        return getConnection(PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES));
+    }
+    
+    private Connection getConnection(Properties props) throws Exception {
+        props.setProperty(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
+        // Force the real driver to be used as the test one doesn't handle creating
+        // more than one ConnectionQueryServices
+        props.setProperty(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, StringUtil.EMPTY_STRING);
+        // Create new ConnectionQueryServices so that we can set DROP_METADATA_ATTRIB
+        String url = QueryUtil.getConnectionUrl(props, config, PRINCIPAL);
+        return DriverManager.getConnection(url, props);
+    }
+    
+    public DropColumnIT(boolean mutable, boolean columnEncoded) {
+        StringBuilder optionBuilder = new StringBuilder();
+        if (!columnEncoded) {
+            optionBuilder.append("COLUMN_ENCODED_BYTES=0");
+        }
+        if (!mutable) {
+            if (optionBuilder.length()>0)
+                optionBuilder.append(",");
+            optionBuilder.append("IMMUTABLE_ROWS=true");
+            if (!columnEncoded) {
+                optionBuilder.append(",IMMUTABLE_STORAGE_SCHEME="+PTableImpl.ImmutableStorageScheme.ONE_CELL_PER_COLUMN);
+            }
+        }
+        this.mutable = mutable;
+        this.columnEncoded = columnEncoded;
+        this.tableDDLOptions = optionBuilder.toString();
+    }
+    
+    @Parameters(name="DropColumnIT_mutable={0}, columnEncoded={1}") // name is used by failsafe as file name in reports
+    public static Collection<Boolean[]> data() {
+        return Arrays.asList(new Boolean[][] {
+                { false, false }, { false, true }, { true, false }, { true, true }, 
+           });
+    }
+    
+    @Test
+    public void testDropCol() throws Exception {
+        String indexTableName = generateUniqueName();
+        String dataTableName = generateUniqueName();
+        String localIndexTableName = "LOCAL_" + indexTableName;
+        try (Connection conn = getConnection()) {
+            conn.setAutoCommit(false);
+            conn.createStatement().execute(
+                "CREATE TABLE " + dataTableName
+                        + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR, v3 VARCHAR) " + tableDDLOptions);
+            // create one global and one local index
+            conn.createStatement().execute(
+                "CREATE INDEX " + indexTableName + " ON " + dataTableName + " (v1) INCLUDE (v2, v3)");
+            conn.createStatement().execute(
+                "CREATE LOCAL INDEX " + localIndexTableName + " ON " + dataTableName + " (v1) INCLUDE (v2, v3)");
+            
+            // upsert a single row
+            PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTableName + " VALUES(?,?,?,?)");
+            stmt.setString(1, "a");
+            stmt.setString(2, "x");
+            stmt.setString(3, "1");
+            stmt.setString(4, "2");
+            stmt.execute();
+            conn.commit();
+            
+            // verify v2 exists in the data table
+            PTable dataTable = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, dataTableName));
+            PColumn dataColumn = dataTable.getColumnForColumnName("V2");
+            byte[] dataCq = dataColumn.getColumnQualifierBytes();
+            
+            // verify v2 exists in the global index table
+            PTable globalIndexTable = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, indexTableName));
+            PColumn globalIndexCol = globalIndexTable.getColumnForColumnName("0:V2");
+            byte[] globalIndexCq = globalIndexCol.getColumnQualifierBytes();
+            
+            // verify v2 exists in the local index table
+            PTable localIndexTable = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, localIndexTableName));
+            PColumn localIndexCol = localIndexTable.getColumnForColumnName("0:V2");
+            byte[] localIndexCq = localIndexCol.getColumnQualifierBytes();
+            
+            verifyColValue(indexTableName, dataTableName, conn, dataTable, dataColumn, dataCq,
+                    globalIndexTable, globalIndexCol, globalIndexCq, localIndexTable,
+                    localIndexCol, localIndexCq);
+            
+            // drop v2 column
+            conn.createStatement().execute("ALTER TABLE " + dataTableName + " DROP COLUMN v2 ");
+            conn.createStatement().execute("SELECT * FROM " + dataTableName);
+
+            // verify that the column was dropped from the data table
+            dataTable = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, dataTableName));
+            try {
+                dataTable.getColumnForColumnName("V2");
+                fail("Column V2 should have been dropped from data table");
+            }
+            catch (ColumnNotFoundException e){
+            }
+            
+            // verify that the column was dropped from the global index table
+            globalIndexTable = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, indexTableName));
+            try {
+                globalIndexTable.getColumnForColumnName("V2");
+                fail("Column V2 should have been dropped from global index table");
+            }
+            catch (ColumnNotFoundException e){
+            }
+            
+            // verify that the column was dropped from the local index table
+            localIndexTable = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, localIndexTableName));
+            try {
+                localIndexTable.getColumnForColumnName("V2");
+                fail("Column V2 should have been dropped from local index table");
+            }
+            catch (ColumnNotFoundException e){
+            }
+            
+            if (mutable || !columnEncoded) {
+                byte[] key = Bytes.toBytes("a");
+                Scan scan = new Scan();
+                scan.setRaw(true);
+                scan.setStartRow(key);
+                scan.setStopRow(key);
+                HTable table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(dataTableName.getBytes());
+                ResultScanner results = table.getScanner(scan);
+                Result result = results.next();
+                assertNotNull(result);
+                
+                assertEquals("data table column value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dataCq).get(0).getTypeByte());
+                assertNull(results.next());
+                
+                // key value for v2 should have been deleted from the global index table
+                scan = new Scan();
+                scan.setRaw(true);
+                table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(indexTableName.getBytes());
+                results = table.getScanner(scan);
+                result = results.next();
+                assertNotNull(result);
+                assertEquals("global index table column value should have been deleted", KeyValue.Type.DeleteColumn.getCode(), result.getColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, globalIndexCq).get(0).getTypeByte());
+                assertNull(results.next());
+                
+                // key value for v2 should have been deleted from the local index table
+                scan = new Scan();
+                scan.setRaw(true);
+                scan.addFamily(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES);
+                table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(dataTableName.getBytes());
+                results = table.getScanner(scan);
+                result = results.next();
+                assertNotNull(result);
+                assertEquals("local index column value should have been deleted",
+                        KeyValue.Type.DeleteColumn.getCode(), result.getColumn(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES, localIndexCq).get(0).getTypeByte());
+                assertNull(results.next()); 
+            }
+            else {
+                // verify we don't issue deletes when we drop a column from an immutable encoded table
+                verifyColValue(indexTableName, dataTableName, conn, dataTable, dataColumn, dataCq,
+                    globalIndexTable, globalIndexCol, globalIndexCq, localIndexTable,
+                    localIndexCol, localIndexCq);
+            }
+        }
+    }
+
+    private void verifyColValue(String indexTableName, String dataTableName, Connection conn,
+            PTable dataTable, PColumn dataColumn, byte[] dataCq, PTable globalIndexTable,
+            PColumn globalIndexCol, byte[] globalIndexCq, PTable localIndexTable,
+            PColumn localIndexCol, byte[] localIndexCq)
+            throws SQLException, IOException, ArrayComparisonFailure {
+        // key value for v2 should exist in the data table
+        Scan scan = new Scan();
+        scan.setRaw(true);
+        byte[] key = Bytes.toBytes("a");
+        scan.setStartRow(key);
+        scan.setStopRow(key);
+        HTable table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(dataTableName.getBytes());
+        ResultScanner results = table.getScanner(scan);
+        Result result = results.next();
+        assertNotNull(result);
+        byte[] colValue;
+        if (!mutable && columnEncoded) {
+            KeyValueColumnExpression colExpression = new SingleCellColumnExpression(dataColumn, "V2", dataTable.getEncodingScheme());
+            ImmutableBytesPtr ptr = new ImmutableBytesPtr();
+            colExpression.evaluate(new ResultTuple(result), ptr);
+            colValue = ptr.copyBytesIfNecessary();
+        }
+        else {
+            colValue = result.getValue(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dataCq);
+        }
+        assertArrayEquals("wrong column value for v2", Bytes.toBytes("1"), colValue);
+        assertNull(results.next());
+        
+        // key value for v2 should exist in the global index table
+        scan = new Scan();
+        scan.setRaw(true);
+        table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(indexTableName.getBytes());
+        results = table.getScanner(scan);
+        result = results.next();
+        assertNotNull(result);
+        if (!mutable && columnEncoded) {
+            KeyValueColumnExpression colExpression = new SingleCellColumnExpression(globalIndexCol, "0:V2", globalIndexTable.getEncodingScheme());
+            ImmutableBytesPtr ptr = new ImmutableBytesPtr();
+            colExpression.evaluate(new ResultTuple(result), ptr);
+            colValue = ptr.copyBytesIfNecessary();
+        }
+        else {
+            colValue = result.getValue(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, globalIndexCq);
+        }
+        assertArrayEquals("wrong column value for v2", Bytes.toBytes("1"), colValue);
+        assertNull(results.next());
+        
+        // key value for v2 should exist in the local index table
+        scan = new Scan();
+        scan.setRaw(true);
+        scan.addFamily(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES);
+        table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(dataTableName.getBytes());
+        results = table.getScanner(scan);
+        result = results.next();
+        assertNotNull(result);
+        if (!mutable && columnEncoded) {
+            KeyValueColumnExpression colExpression = new SingleCellColumnExpression(localIndexCol, "0:V2", localIndexTable.getEncodingScheme());
+            ImmutableBytesPtr ptr = new ImmutableBytesPtr();
+            colExpression.evaluate(new ResultTuple(result), ptr);
+            colValue = ptr.copyBytesIfNecessary();
+        }
+        else {
+            colValue = result.getValue(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES, localIndexCq);
+        }
+        assertArrayEquals("wrong column value for v2", Bytes.toBytes("1"), colValue);
+        assertNull(results.next());
+    }
+    
+    @Test
+    public void testDroppingIndexedColDropsIndex() throws Exception {
+        String indexTableName = generateUniqueName();
+        String dataTableFullName = SchemaUtil.getTableName(SCHEMA_NAME, generateUniqueName());
+        String localIndexTableName1 = "LOCAL_" + indexTableName + "_1";
+        String localIndexTableName2 = "LOCAL_" + indexTableName + "_2";
+        try (Connection conn = getConnection()) {
+            conn.setAutoCommit(false);
+            conn.createStatement().execute(
+                "CREATE TABLE " + dataTableFullName
+                        + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR) " + tableDDLOptions);
+            // create one regular and two local indexes
+            conn.createStatement().execute(
+                "CREATE INDEX " + indexTableName + " ON " + dataTableFullName + " (v2) INCLUDE (v1)");
+            conn.createStatement().execute(
+                "CREATE LOCAL INDEX " + localIndexTableName1 + " ON " + dataTableFullName + " (v2) INCLUDE (v1)");
+            conn.createStatement().execute(
+                "CREATE LOCAL INDEX " + localIndexTableName2 + " ON " + dataTableFullName + " (k) INCLUDE (v1)");
+            
+            // upsert a single row
+            PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTableFullName + " VALUES(?,?,?)");
+            stmt.setString(1, "a");
+            stmt.setString(2, "x");
+            stmt.setString(3, "1");
+            stmt.execute();
+            conn.commit();
+            
+            // verify the indexes were created
+            PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
+            PTable dataTable = pconn.getTable(new PTableKey(null, dataTableFullName));
+            assertEquals("Unexpected number of indexes ", 3, dataTable.getIndexes().size());
+            PTable indexTable = dataTable.getIndexes().get(0);
+            byte[] indexTablePhysicalName = indexTable.getPhysicalName().getBytes();
+            PName localIndexTablePhysicalName = dataTable.getIndexes().get(1).getPhysicalName();
+            
+            // drop v2 which causes the regular index and first local index to be dropped
+            conn.createStatement().execute(
+                "ALTER TABLE " + dataTableFullName + " DROP COLUMN v2 ");
+
+            // verify that both of the indexes' metadata were dropped
+            conn.createStatement().execute("SELECT * FROM "+dataTableFullName);
+            try {
+                conn.createStatement().execute("SELECT * FROM "+indexTableName);
+                fail("Index should have been dropped");
+            } catch (TableNotFoundException e) {
+            }
+            pconn = conn.unwrap(PhoenixConnection.class);
+            dataTable = pconn.getTable(new PTableKey(null, dataTableFullName));
+            try {
+                pconn.getTable(new PTableKey(null, indexTableName));
+                fail("index should have been dropped");
+            } catch (TableNotFoundException e) {
+            }
+            try {
+                pconn.getTable(new PTableKey(null, localIndexTableName1));
+                fail("index should have been dropped");
+            } catch (TableNotFoundException e) {
+            }
+            assertEquals("Unexpected number of indexes ", 1, dataTable.getIndexes().size());
+            
+            // verify that the regular index physical table was dropped
+            try {
+                conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(indexTablePhysicalName);
+                fail("Index table should have been dropped");
+            } catch (TableNotFoundException e) {
+            }
+            
+            // verify that the local index physical table was *not* dropped
+            conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(localIndexTablePhysicalName.getBytes());
+            PTable localIndex2 = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, localIndexTableName2));
+            
+            // there should be a single row belonging to localIndexTableName2 
+            Scan scan = new Scan();
+            scan.addFamily(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES);
+            HTable table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(localIndexTablePhysicalName.getBytes());
+            ResultScanner results = table.getScanner(scan);
+            Result result = results.next();
+            assertNotNull(result);
+            String indexColumnName = IndexUtil.getIndexColumnName(QueryConstants.DEFAULT_COLUMN_FAMILY, "V1");
+            PColumn localIndexCol = localIndex2.getColumnForColumnName(indexColumnName);
+            byte[] colValue;
+            if (!mutable && columnEncoded) {
+                KeyValueColumnExpression colExpression = new SingleCellColumnExpression(localIndexCol, indexColumnName, localIndex2.getEncodingScheme());
+                ImmutableBytesPtr ptr = new ImmutableBytesPtr();
+                colExpression.evaluate(new ResultTuple(result), ptr);
+                colValue = ptr.copyBytesIfNecessary();
+            }
+            else {
+                colValue = result.getValue(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES, localIndexCol.getColumnQualifierBytes());
+            }
+            assertNotNull("localIndexTableName2 row is missing", colValue);
+            assertNull(results.next());
+        }
+    }
+    
+    @Test
+    public void testDroppingIndexedColDropsViewIndex() throws Exception {
+        helpTestDroppingIndexedColDropsViewIndex(false);
+    }
+    
+    @Test
+    public void testDroppingIndexedColDropsMultiTenantViewIndex() throws Exception {
+        helpTestDroppingIndexedColDropsViewIndex(true);
+    }
+    
+    public void helpTestDroppingIndexedColDropsViewIndex(boolean isMultiTenant) throws Exception {
+        Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
+        props.setProperty(TENANT_ID_ATTRIB, TENANT_ID);
+        try (Connection conn = getConnection();
+                Connection viewConn = isMultiTenant ? getConnection(props) : conn ) {
+            String tableWithView = generateUniqueName();
+            String viewOfTable = generateUniqueName();
+            String viewIndex1 = generateUniqueName();
+            String viewIndex2 = generateUniqueName();
+            
+            conn.setAutoCommit(false);
+            viewConn.setAutoCommit(false);
+            String ddlFormat = "CREATE TABLE " + tableWithView + " (%s k VARCHAR NOT NULL, v1 VARCHAR, v2 VARCHAR, v3 VARCHAR, v4 VARCHAR CONSTRAINT PK PRIMARY KEY(%s k))%s";
+            String ddl = String.format(ddlFormat, isMultiTenant ? "TENANT_ID VARCHAR NOT NULL, " : "",
+                    isMultiTenant ? "TENANT_ID, " : "", isMultiTenant ? "MULTI_TENANT=true" : "");
+            conn.createStatement().execute(ddl);
+            viewConn.createStatement()
+                    .execute(
+                        "CREATE VIEW " + viewOfTable + " ( VIEW_COL1 DECIMAL(10,2), VIEW_COL2 VARCHAR ) AS SELECT * FROM " + tableWithView );
+            // create an index with the column that will be dropped
+            viewConn.createStatement().execute("CREATE INDEX " + viewIndex1 + " ON " + viewOfTable + "(v2) INCLUDE (v4)");
+            // create an index without the column that will be dropped
+            viewConn.createStatement().execute("CREATE INDEX " + viewIndex2 + " ON " + viewOfTable + "(v1) INCLUDE (v4)");
+            // verify index was created
+            try {
+                viewConn.createStatement().execute("SELECT * FROM " + viewIndex1 );
+            } catch (TableNotFoundException e) {
+                fail("Index on view was not created");
+            }
+            
+            // upsert a single row
+            PreparedStatement stmt = viewConn.prepareStatement("UPSERT INTO " + viewOfTable + " VALUES(?,?,?,?,?,?,?)");
+            stmt.setString(1, "a");
+            stmt.setString(2, "b");
+            stmt.setString(3, "c");
+            stmt.setString(4, "d");
+            stmt.setString(5, "e");
+            stmt.setInt(6, 1);
+            stmt.setString(7, "g");
+            stmt.execute();
+            viewConn.commit();
+
+            // verify the index was created
+            PhoenixConnection pconn = viewConn.unwrap(PhoenixConnection.class);
+            PName tenantId = isMultiTenant ? PNameFactory.newName("tenant1") : null; 
+            PTable view = pconn.getTable(new PTableKey(tenantId,  viewOfTable ));
+            PTable viewIndex = pconn.getTable(new PTableKey(tenantId,  viewIndex1 ));
+            byte[] viewIndexPhysicalTable = viewIndex.getPhysicalName().getBytes();
+            assertNotNull("Can't find view index", viewIndex);
+            assertEquals("Unexpected number of indexes ", 2, view.getIndexes().size());
+            assertEquals("Unexpected index ",  viewIndex1 , view.getIndexes().get(0).getName()
+                    .getString());
+            assertEquals("Unexpected index ",  viewIndex2 , view.getIndexes().get(1).getName()
+                .getString());
+            
+            // drop two columns
+            conn.createStatement().execute("ALTER TABLE " + tableWithView + " DROP COLUMN v2, v3 ");
+            
+            // verify columns were dropped
+            try {
+                conn.createStatement().execute("SELECT v2 FROM " + tableWithView );
+                fail("Column should have been dropped");
+            } catch (ColumnNotFoundException e) {
+            }
+            try {
+                conn.createStatement().execute("SELECT v3 FROM " + tableWithView );
+                fail("Column should have been dropped");
+            } catch (ColumnNotFoundException e) {
+            }
+            
+            // verify index metadata was dropped
+            try {
+                viewConn.createStatement().execute("SELECT * FROM " + viewIndex1 );
+                fail("Index metadata should have been dropped");
+            } catch (TableNotFoundException e) {
+            }
+            
+            pconn = viewConn.unwrap(PhoenixConnection.class);
+            view = pconn.getTable(new PTableKey(tenantId,  viewOfTable ));
+            try {
+                viewIndex = pconn.getTable(new PTableKey(tenantId,  viewIndex1 ));
+                fail("View index should have been dropped");
+            } catch (TableNotFoundException e) {
+            }
+            assertEquals("Unexpected number of indexes ", 1, view.getIndexes().size());
+            assertEquals("Unexpected index ",  viewIndex2 , view.getIndexes().get(0).getName().getString());
+            
+            // verify that the view index physical table is *not* dropped
+            conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(viewIndexPhysicalTable);
+            
+            // scan the physical table and verify there is a single row for the second view index
+            Scan scan = new Scan();
+            HTable table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(viewIndexPhysicalTable);
+            ResultScanner results = table.getScanner(scan);
+            Result result = results.next();
+            assertNotNull(result);
+            PTable viewIndexPTable = pconn.getTable(new PTableKey(pconn.getTenantId(), viewIndex2));
+            PColumn column = viewIndexPTable.getColumnForColumnName(IndexUtil.getIndexColumnName(QueryConstants.DEFAULT_COLUMN_FAMILY, "V4"));
+            byte[] cq = column.getColumnQualifierBytes();
+            // there should be a single row belonging to VIEWINDEX2 
+            assertNotNull(viewIndex2 + " row is missing", result.getValue(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, cq));
+            assertNull(results.next());
+        }
+    }
+
+}
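
The assertions above distinguish the mutable/non-encoded and immutable-encoded cases with raw HBase scans. A condensed, hedged sketch of that check follows (the table name and row key are hypothetical; the client calls mirror the ones the test itself uses):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.client.HTable;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RawScanDeleteMarkerCheck {
        public static void main(String[] args) throws Exception {
            byte[] row = Bytes.toBytes("a");   // hypothetical row key
            Scan scan = new Scan();
            scan.setRaw(true);                 // raw scans return delete markers as well as live cells
            scan.setStartRow(row);
            scan.setStopRow(row);              // start == stop behaves like a Get
            try (HTable table = new HTable(HBaseConfiguration.create(), "T")) { // hypothetical table name
                Result result = table.getScanner(scan).next();
                if (result != null) {
                    for (Cell cell : result.rawCells()) {
                        // Mutable or non-encoded tables: expect a DeleteColumn marker for the dropped column.
                        // Immutable encoded tables: the original cell should still be the latest entry.
                        System.out.println(cell + " -> " + KeyValue.Type.codeToType(cell.getTypeByte()));
                    }
                }
            }
        }
    }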

http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
index 3d0ba8a..b92ed8d 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/DropMetadataIT.java
@@ -17,40 +17,20 @@
  */
 package org.apache.phoenix.end2end.index;
 
-import static org.apache.phoenix.util.PhoenixRuntime.TENANT_ID_ATTRIB;
 import static org.apache.phoenix.util.TestUtil.HBASE_NATIVE_SCHEMA_NAME;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
-import java.sql.PreparedStatement;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
-import org.apache.hadoop.hbase.client.HTable;
-import org.apache.hadoop.hbase.client.Result;
-import org.apache.hadoop.hbase.client.ResultScanner;
-import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.schema.ColumnNotFoundException;
-import org.apache.phoenix.schema.PColumn;
-import org.apache.phoenix.schema.PName;
-import org.apache.phoenix.schema.PNameFactory;
-import org.apache.phoenix.schema.PTable;
-import org.apache.phoenix.schema.PTableKey;
-import org.apache.phoenix.schema.TableNotFoundException;
-import org.apache.phoenix.util.EncodedColumnsUtil;
-import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
 import org.apache.phoenix.util.SchemaUtil;
@@ -62,7 +42,6 @@ public class DropMetadataIT extends ParallelStatsDisabledIT {
     private static final String PRINCIPAL = "dropMetaData";
     private static final byte[] FAMILY_NAME = Bytes.toBytes(SchemaUtil.normalizeIdentifier("1"));
     public static final String SCHEMA_NAME = "";
-    private final String TENANT_ID = "tenant1";
 
     private Connection getConnection() throws Exception {
         return getConnection(PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES));
@@ -107,204 +86,5 @@ public class DropMetadataIT extends ParallelStatsDisabledIT {
         conn.createStatement().execute("drop view " + hbaseNativeViewName);
         conn.close();
     }
-    
-    @Test
-    public void testDroppingIndexedColDropsIndex() throws Exception {
-        String indexTableName = generateUniqueName();
-        String dataTableFullName = SchemaUtil.getTableName(SCHEMA_NAME, generateUniqueName());
-        String localIndexTableName1 = "LOCAL_" + indexTableName + "_1";
-        String localIndexTableName2 = "LOCAL_" + indexTableName + "_2";
-        try (Connection conn = getConnection()) {
-            conn.setAutoCommit(false);
-            conn.createStatement().execute(
-                "CREATE TABLE " + dataTableFullName
-                        + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
-            // create one regular and two local indexes
-            conn.createStatement().execute(
-                "CREATE INDEX " + indexTableName + " ON " + dataTableFullName + " (v2) INCLUDE (v1)");
-            conn.createStatement().execute(
-                "CREATE LOCAL INDEX " + localIndexTableName1 + " ON " + dataTableFullName + " (v2) INCLUDE (v1)");
-            conn.createStatement().execute(
-                "CREATE LOCAL INDEX " + localIndexTableName2 + " ON " + dataTableFullName + " (k) INCLUDE (v1)");
-            
-            // upsert a single row
-            PreparedStatement stmt = conn.prepareStatement("UPSERT INTO " + dataTableFullName + " VALUES(?,?,?)");
-            stmt.setString(1, "a");
-            stmt.setString(2, "x");
-            stmt.setString(3, "1");
-            stmt.execute();
-            conn.commit();
-            
-            // verify the indexes were created
-            PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
-            PTable dataTable = pconn.getTable(new PTableKey(null, dataTableFullName));
-            assertEquals("Unexpected number of indexes ", 3, dataTable.getIndexes().size());
-            PTable indexTable = dataTable.getIndexes().get(0);
-            byte[] indexTablePhysicalName = indexTable.getPhysicalName().getBytes();
-            PName localIndexTablePhysicalName = dataTable.getIndexes().get(1).getPhysicalName();
-            
-            // drop v2 which causes the regular index and first local index to be dropped
-            conn.createStatement().execute(
-                "ALTER TABLE " + dataTableFullName + " DROP COLUMN v2 ");
-
-            // verify the both of the indexes' metadata were dropped
-            conn.createStatement().execute("SELECT * FROM "+dataTableFullName);
-            try {
-                conn.createStatement().execute("SELECT * FROM "+indexTableName);
-                fail("Index should have been dropped");
-            } catch (TableNotFoundException e) {
-            }
-            pconn = conn.unwrap(PhoenixConnection.class);
-            dataTable = pconn.getTable(new PTableKey(null, dataTableFullName));
-            try {
-                pconn.getTable(new PTableKey(null, indexTableName));
-                fail("index should have been dropped");
-            } catch (TableNotFoundException e) {
-            }
-            try {
-                pconn.getTable(new PTableKey(null, localIndexTableName1));
-                fail("index should have been dropped");
-            } catch (TableNotFoundException e) {
-            }
-            assertEquals("Unexpected number of indexes ", 1, dataTable.getIndexes().size());
-            
-            // verify that the regular index physical table was dropped
-            try {
-                conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(indexTablePhysicalName);
-                fail("Index table should have been dropped");
-            } catch (TableNotFoundException e) {
-            }
-            
-            // verify that the local index physical table was *not* dropped
-            conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(localIndexTablePhysicalName.getBytes());
-            PTable localIndex2 = conn.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, localIndexTableName2));
-            
-            // there should be a single row belonging to localIndexTableName2 
-            Scan scan = new Scan();
-            scan.addFamily(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES);
-            HTable table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(localIndexTablePhysicalName.getBytes());
-            ResultScanner results = table.getScanner(scan);
-            Result result = results.next();
-            assertNotNull(result);
-            assertNotNull("localIndexTableName2 row is missing", result.getValue(QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES, 
-                localIndex2.getColumnForColumnName(IndexUtil.getIndexColumnName(QueryConstants.DEFAULT_COLUMN_FAMILY, "V1")).getColumnQualifierBytes()));
-            assertNull(results.next());
-        }
-    }
-    
-    @Test
-    public void testDroppingIndexedColDropsViewIndex() throws Exception {
-        helpTestDroppingIndexedColDropsViewIndex(false);
-    }
-    
-    @Test
-    public void testDroppingIndexedColDropsMultiTenantViewIndex() throws Exception {
-        helpTestDroppingIndexedColDropsViewIndex(true);
-    }
-    
-    public void helpTestDroppingIndexedColDropsViewIndex(boolean isMultiTenant) throws Exception {
-        Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
-        props.setProperty(TENANT_ID_ATTRIB, TENANT_ID);
-        try (Connection conn = getConnection();
-                Connection viewConn = isMultiTenant ? getConnection(props) : conn ) {
-            String tableWithView = generateUniqueName();
-            String viewOfTable = generateUniqueName();
-            String viewIndex1 = generateUniqueName();
-            String viewIndex2 = generateUniqueName();
-            
-            conn.setAutoCommit(false);
-            viewConn.setAutoCommit(false);
-            String ddlFormat = "CREATE TABLE " + tableWithView + " (%s k VARCHAR NOT NULL, v1 VARCHAR, v2 VARCHAR, v3 VARCHAR, v4 VARCHAR CONSTRAINT PK PRIMARY KEY(%s k))%s";
-            String ddl = String.format(ddlFormat, isMultiTenant ? "TENANT_ID VARCHAR NOT NULL, " : "",
-                    isMultiTenant ? "TENANT_ID, " : "", isMultiTenant ? "MULTI_TENANT=true" : "");
-            conn.createStatement().execute(ddl);
-            viewConn.createStatement()
-                    .execute(
-                        "CREATE VIEW " + viewOfTable + " ( VIEW_COL1 DECIMAL(10,2), VIEW_COL2 VARCHAR ) AS SELECT * FROM " + tableWithView );
-            // create an index with the column that will be dropped
-            viewConn.createStatement().execute("CREATE INDEX " + viewIndex1 + " ON " + viewOfTable + "(v2) INCLUDE (v4)");
-            // create an index without the column that will be dropped
-            viewConn.createStatement().execute("CREATE INDEX " + viewIndex2 + " ON " + viewOfTable + "(v1) INCLUDE (v4)");
-            // verify index was created
-            try {
-                viewConn.createStatement().execute("SELECT * FROM " + viewIndex1 );
-            } catch (TableNotFoundException e) {
-                fail("Index on view was not created");
-            }
-            
-            // upsert a single row
-            PreparedStatement stmt = viewConn.prepareStatement("UPSERT INTO " + viewOfTable + " VALUES(?,?,?,?,?,?,?)");
-            stmt.setString(1, "a");
-            stmt.setString(2, "b");
-            stmt.setString(3, "c");
-            stmt.setString(4, "d");
-            stmt.setString(5, "e");
-            stmt.setInt(6, 1);
-            stmt.setString(7, "g");
-            stmt.execute();
-            viewConn.commit();
-
-            // verify the index was created
-            PhoenixConnection pconn = viewConn.unwrap(PhoenixConnection.class);
-            PName tenantId = isMultiTenant ? PNameFactory.newName("tenant1") : null; 
-            PTable view = pconn.getTable(new PTableKey(tenantId,  viewOfTable ));
-            PTable viewIndex = pconn.getTable(new PTableKey(tenantId,  viewIndex1 ));
-            byte[] viewIndexPhysicalTable = viewIndex.getPhysicalName().getBytes();
-            assertNotNull("Can't find view index", viewIndex);
-            assertEquals("Unexpected number of indexes ", 2, view.getIndexes().size());
-            assertEquals("Unexpected index ",  viewIndex1 , view.getIndexes().get(0).getName()
-                    .getString());
-            assertEquals("Unexpected index ",  viewIndex2 , view.getIndexes().get(1).getName()
-                .getString());
-            
-            // drop two columns
-            conn.createStatement().execute("ALTER TABLE " + tableWithView + " DROP COLUMN v2, v3 ");
-            
-            // verify columns were dropped
-            try {
-                conn.createStatement().execute("SELECT v2 FROM " + tableWithView );
-                fail("Column should have been dropped");
-            } catch (ColumnNotFoundException e) {
-            }
-            try {
-                conn.createStatement().execute("SELECT v3 FROM " + tableWithView );
-                fail("Column should have been dropped");
-            } catch (ColumnNotFoundException e) {
-            }
-            
-            // verify index metadata was dropped
-            try {
-                viewConn.createStatement().execute("SELECT * FROM " + viewIndex1 );
-                fail("Index metadata should have been dropped");
-            } catch (TableNotFoundException e) {
-            }
-            
-            pconn = viewConn.unwrap(PhoenixConnection.class);
-            view = pconn.getTable(new PTableKey(tenantId,  viewOfTable ));
-            try {
-                viewIndex = pconn.getTable(new PTableKey(tenantId,  viewIndex1 ));
-                fail("View index should have been dropped");
-            } catch (TableNotFoundException e) {
-            }
-            assertEquals("Unexpected number of indexes ", 1, view.getIndexes().size());
-            assertEquals("Unexpected index ",  viewIndex2 , view.getIndexes().get(0).getName().getString());
-            
-            // verify that the physical index view table is *not* dropped
-            conn.unwrap(PhoenixConnection.class).getQueryServices().getTableDescriptor(viewIndexPhysicalTable);
-            
-            // scan the physical table and verify there is a single row for the second local index
-            Scan scan = new Scan();
-            HTable table = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(viewIndexPhysicalTable);
-            ResultScanner results = table.getScanner(scan);
-            Result result = results.next();
-            assertNotNull(result);
-            PTable viewIndexPTable = pconn.getTable(new PTableKey(pconn.getTenantId(), viewIndex2));
-            PColumn column = viewIndexPTable.getColumnForColumnName(IndexUtil.getIndexColumnName(QueryConstants.DEFAULT_COLUMN_FAMILY, "V4"));
-            byte[] cq = column.getColumnQualifierBytes();
-            // there should be a single row belonging to VIEWINDEX2 
-            assertNotNull(viewIndex2 + " row is missing", result.getValue(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, cq));
-            assertNull(results.next());
-        }
-    }
 }
         

http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
index fe2f1b4..00157b1 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/hbase/index/covered/EndToEndCoveredColumnsIndexBuilderIT.java
@@ -148,7 +148,7 @@ public class EndToEndCoveredColumnsIndexBuilderIT {
     public void verify(TableState state) {
       try {
         Scanner kvs =
-            ((LocalTableState) state).getIndexedColumnsTableState(Arrays.asList(columns), false).getFirst();
+            ((LocalTableState) state).getIndexedColumnsTableState(Arrays.asList(columns), false, false).getFirst();
 
         int count = 0;
         Cell kv;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
index 3a7a7eb..59e7801 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/LocalTableState.java
@@ -18,13 +18,9 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Mutation;
-import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
-import org.apache.hadoop.hbase.regionserver.KeyValueScanner;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.hbase.index.ValueGetter;
 import org.apache.phoenix.hbase.index.covered.data.IndexMemStore;
@@ -147,7 +143,7 @@ public class LocalTableState implements TableState {
      * @throws IOException
      */
     public Pair<Scanner, IndexUpdate> getIndexedColumnsTableState(
-        Collection<? extends ColumnReference> indexedColumns, boolean ignoreNewerMutations) throws IOException {
+        Collection<? extends ColumnReference> indexedColumns, boolean ignoreNewerMutations, boolean returnNullScannerIfRowNotFound) throws IOException {
         ensureLocalStateInitialized(indexedColumns, ignoreNewerMutations);
         // filter out things with a newer timestamp and track the column references to which it applies
         ColumnTracker tracker = new ColumnTracker(indexedColumns);
@@ -158,7 +154,7 @@ public class LocalTableState implements TableState {
             }
         }
 
-        Scanner scanner = this.scannerBuilder.buildIndexedColumnScanner(indexedColumns, tracker, ts);
+        Scanner scanner = this.scannerBuilder.buildIndexedColumnScanner(indexedColumns, tracker, ts, returnNullScannerIfRowNotFound);
 
         return new Pair<Scanner, IndexUpdate>(scanner, new IndexUpdate(tracker));
     }
@@ -222,7 +218,7 @@ public class LocalTableState implements TableState {
         this.kvs.clear();
         this.kvs.addAll(update);
     }
-
+    
     /**
      * Apply the {@link KeyValue}s set in {@link #setPendingUpdates(Collection)}.
      */
@@ -242,9 +238,9 @@ public class LocalTableState implements TableState {
     }
 
     @Override
-    public Pair<ValueGetter, IndexUpdate> getIndexUpdateState(Collection<? extends ColumnReference> indexedColumns, boolean ignoreNewerMutations)
+    public Pair<ValueGetter, IndexUpdate> getIndexUpdateState(Collection<? extends ColumnReference> indexedColumns, boolean ignoreNewerMutations, boolean returnNullScannerIfRowNotFound)
             throws IOException {
-        Pair<Scanner, IndexUpdate> pair = getIndexedColumnsTableState(indexedColumns, ignoreNewerMutations);
+        Pair<Scanner, IndexUpdate> pair = getIndexedColumnsTableState(indexedColumns, ignoreNewerMutations, returnNullScannerIfRowNotFound);
         ValueGetter valueGetter = IndexManagementUtil.createGetterFromScanner(pair.getFirst(), getCurrentRowKey());
         return new Pair<ValueGetter, IndexUpdate>(valueGetter, pair.getSecond());
     }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java
index bd4bdfb..aa3c39d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/TableState.java
@@ -58,12 +58,12 @@ public interface TableState {
   /**
    * Get a getter interface for the state of the index row
    * @param indexedColumns list of indexed columns.
-   * @param ignoreNewerMutations ignore mutations newer than m when determining current state. Useful
+ * @param ignoreNewerMutations ignore mutations newer than m when determining current state. Useful
    *        when replaying mutation state for partial index rebuild where writes succeeded to the data
    *        table, but not to the index table.
    */
   Pair<ValueGetter, IndexUpdate> getIndexUpdateState(
-      Collection<? extends ColumnReference> indexedColumns, boolean ignoreNewerMutations) throws IOException;
+      Collection<? extends ColumnReference> indexedColumns, boolean ignoreNewerMutations, boolean returnNullScannerIfRowNotFound) throws IOException;
 
   /**
    * @return the row key for the current row for which we are building an index update.

http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java
index 0f960e4..6f8d1be 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/covered/example/CoveredColumnIndexCodec.java
@@ -77,7 +77,7 @@ public class CoveredColumnIndexCodec extends BaseIndexCodec {
     private IndexUpdate getIndexUpdateForGroup(ColumnGroup group, TableState state) {
         List<CoveredColumn> refs = group.getColumns();
         try {
-            Pair<Scanner, IndexUpdate> stateInfo = ((LocalTableState)state).getIndexedColumnsTableState(refs, false);
+            Pair<Scanner, IndexUpdate> stateInfo = ((LocalTableState)state).getIndexedColumnsTableState(refs, false, false);
             Scanner kvs = stateInfo.getFirst();
             Pair<Integer, List<ColumnEntry>> columns = getNextEntries(refs, kvs, state.getCurrentRowKey());
             // make sure we close the scanner
@@ -132,7 +132,7 @@ public class CoveredColumnIndexCodec extends BaseIndexCodec {
     private IndexUpdate getDeleteForGroup(ColumnGroup group, TableState state) {
         List<CoveredColumn> refs = group.getColumns();
         try {
-            Pair<Scanner, IndexUpdate> kvs = ((LocalTableState)state).getIndexedColumnsTableState(refs, false);
+            Pair<Scanner, IndexUpdate> kvs = ((LocalTableState)state).getIndexedColumnsTableState(refs, false, false);
             Pair<Integer, List<ColumnEntry>> columns = getNextEntries(refs, kvs.getFirst(), state.getCurrentRowKey());
             // make sure we close the scanner reference
             kvs.getFirst().close();

http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
index f8d0cf1..000ea1f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/scanner/ScannerBuilder.java
@@ -25,7 +25,6 @@ import java.util.Set;
 
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.filter.BinaryComparator;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
@@ -57,7 +56,7 @@ public class ScannerBuilder {
     this.update = update;
   }
 
-  public Scanner buildIndexedColumnScanner(Collection<? extends ColumnReference> indexedColumns, ColumnTracker tracker, long ts) {
+  public Scanner buildIndexedColumnScanner(Collection<? extends ColumnReference> indexedColumns, ColumnTracker tracker, long ts, boolean returnNullIfRowNotFound) {
 
     Filter columnFilters = getColumnFilters(indexedColumns);
     FilterList filters = new FilterList(Lists.newArrayList(columnFilters));
@@ -71,7 +70,7 @@ public class ScannerBuilder {
     filters.addFilter(new ApplyAndFilterDeletesFilter(getAllFamilies(indexedColumns)));
 
     // combine the family filters and the rest of the filters as a
-    return getFilteredScanner(filters);
+    return getFilteredScanner(filters, returnNullIfRowNotFound);
   }
 
   /**
@@ -108,14 +107,14 @@ public class ScannerBuilder {
     return families;
   }
 
-  private Scanner getFilteredScanner(Filter filters) {
+  private Scanner getFilteredScanner(Filter filters, boolean returnNullIfRowNotFound) {
     // create a scanner and wrap it as an iterator, meaning you can only go forward
     final FilteredKeyValueScanner kvScanner = new FilteredKeyValueScanner(filters, memstore);
     // seek the scanner to initialize it
     KeyValue start = KeyValueUtil.createFirstOnRow(update.getRow());
     try {
       if (!kvScanner.seek(start)) {
-        return new EmptyScanner();
+        return returnNullIfRowNotFound ? null : new EmptyScanner();
       }
     } catch (IOException e) {
       // This should never happen - everything should explode if so.
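
The new returnNullIfRowNotFound flag lets callers distinguish a data row that no longer exists (null) from a row whose cells were all filtered out (an empty scanner). A small self-contained illustration of that design choice follows; none of this is Phoenix API, the names are made up:

    import java.util.Collections;
    import java.util.Iterator;
    import java.util.Map;

    public class NullVsEmptyScanner {
        // Returns null when the row is absent and the flag is set; otherwise a (possibly empty) iterator.
        static Iterator<String> scan(Map<String, String> store, String rowKey, boolean returnNullIfRowNotFound) {
            String value = store.get(rowKey);
            if (value == null) {
                return returnNullIfRowNotFound ? null : Collections.<String>emptyIterator();
            }
            return Collections.singletonList(value).iterator();
        }

        public static void main(String[] args) {
            Map<String, String> store = Collections.singletonMap("a", "1");
            Iterator<String> scanner = scan(store, "missing-row", true);
            if (scanner == null) {
                // A null scanner propagates as a null ValueGetter, which downstream code treats
                // as "new index row": emit a Put rather than delete markers.
                System.out.println("row not found: build a fresh index row, no deletes issued");
            }
        }
    }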

http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
index c6642e7..a60adef 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/util/IndexManagementUtil.java
@@ -98,7 +98,7 @@ public class IndexManagementUtil {
     }
 
     public static ValueGetter createGetterFromScanner(Scanner scanner, byte[] currentRow) {
-        return new LazyValueGetter(scanner, currentRow);
+        return scanner!=null ? new LazyValueGetter(scanner, currentRow) : null;
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
index 6061dd9..2224e38 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/IndexMaintainer.java
@@ -953,7 +953,7 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
         byte[] indexRowKey = this.buildRowKey(valueGetter, dataRowKeyPtr, regionStartKey, regionEndKey);
         Put put = null;
         // New row being inserted: add the empty key value
-        if (valueGetter.getLatestValue(dataEmptyKeyValueRef) == null) {
+        if (valueGetter==null || valueGetter.getLatestValue(dataEmptyKeyValueRef) == null) {
             put = new Put(indexRowKey);
             // add the keyvalue for the empty row
             put.add(kvBuilder.buildPut(new ImmutableBytesPtr(indexRowKey),
@@ -1563,7 +1563,13 @@ public class IndexMaintainer implements Writable, Iterable<ColumnReference> {
             expression.accept(visitor);
         }
         allColumns.addAll(indexedColumns);
-        allColumns.addAll(coveredColumnsMap.keySet());
+        for (ColumnReference colRef : coveredColumnsMap.keySet()) {
+            if (immutableStorageScheme==ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
+                allColumns.add(colRef);
+            } else {
+                allColumns.add(new ColumnReference(colRef.getFamily(), QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES));
+            }
+        }
         
         int dataPkOffset = (isDataTableSalted ? 1 : 0) + (isMultiTenant ? 1 : 0);
         int nIndexPkColumns = getIndexPkColumnCount();
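
Two things change in IndexMaintainer above. First, a null ValueGetter is now treated like a missing empty key value, i.e. as a brand new index row, so the Put carrying the empty key value is still generated. Second, when the immutable storage scheme is not ONE_CELL_PER_COLUMN the covered columns are collapsed to a single reference per column family using SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES, since all covered values live in one packed cell. A toy sketch of that collapsing loop; the enum values mirror Phoenix's ImmutableStorageScheme, everything else is made up for the example.

// Toy sketch of collapsing covered columns when values are packed into one cell per family.
import java.util.LinkedHashSet;
import java.util.Set;

public class CoveredColumnsSketch {

    enum ImmutableStorageScheme { ONE_CELL_PER_COLUMN, SINGLE_CELL_ARRAY_WITH_OFFSETS }

    record ColRef(String family, String qualifier) {}

    // Stand-in for QueryConstants.SINGLE_KEYVALUE_COLUMN_QUALIFIER_BYTES.
    static final String SINGLE_KEYVALUE_QUALIFIER = "0";

    static Set<ColRef> allColumns(Set<ColRef> coveredColumns, ImmutableStorageScheme scheme) {
        Set<ColRef> all = new LinkedHashSet<>();
        for (ColRef col : coveredColumns) {
            if (scheme == ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
                all.add(col);                                                  // track every covered column
            } else {
                all.add(new ColRef(col.family(), SINGLE_KEYVALUE_QUALIFIER)); // one packed cell per family
            }
        }
        return all;
    }

    public static void main(String[] args) {
        Set<ColRef> covered = Set.of(new ColRef("CF", "V1"), new ColRef("CF", "V2"));
        System.out.println(allColumns(covered, ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS));
        // prints a single ColRef[family=CF, qualifier=0] because the set de-duplicates the packed reference
    }
}

With the packed scheme there is no per-column cell to reference once a column is dropped, which is why the single key value qualifier is tracked instead.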

http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
index 4116101..2f162e3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexCodec.java
@@ -13,6 +13,7 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.Mutation;
@@ -26,10 +27,12 @@ import org.apache.phoenix.hbase.index.covered.IndexCodec;
 import org.apache.phoenix.hbase.index.covered.IndexMetaData;
 import org.apache.phoenix.hbase.index.covered.IndexUpdate;
 import org.apache.phoenix.hbase.index.covered.TableState;
+import org.apache.phoenix.hbase.index.covered.update.ColumnReference;
 import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
 import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
 
 import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 
 /**
  * Phoenix-based {@link IndexCodec}. Manages all the logic of how to cleanup an index (
@@ -69,7 +72,7 @@ public class PhoenixIndexCodec extends BaseIndexCodec {
         ptr.set(state.getCurrentRowKey());
         List<IndexUpdate> indexUpdates = Lists.newArrayList();
         for (IndexMaintainer maintainer : indexMaintainers) {
-            Pair<ValueGetter, IndexUpdate> statePair = state.getIndexUpdateState(maintainer.getAllColumns(), metaData.ignoreNewerMutations());
+            Pair<ValueGetter, IndexUpdate> statePair = state.getIndexUpdateState(maintainer.getAllColumns(), metaData.ignoreNewerMutations(), false);
             ValueGetter valueGetter = statePair.getFirst();
             IndexUpdate indexUpdate = statePair.getSecond();
             indexUpdate.setTable(maintainer.isLocalIndex() ? state.getEnvironment().getRegion()
@@ -94,15 +97,19 @@ public class PhoenixIndexCodec extends BaseIndexCodec {
             // to aid in rollback if there's a KeyValue column in the index. The alternative would be
             // to hold on to all uncommitted index row keys (even ones already sent to HBase) on the
             // client side.
-            Pair<ValueGetter, IndexUpdate> statePair = state.getIndexUpdateState(maintainer.getAllColumns(), metaData.ignoreNewerMutations());
+            Set<ColumnReference> cols = Sets.newHashSet(maintainer.getAllColumns());
+            cols.add(new ColumnReference(indexMaintainers.get(0).getDataEmptyKeyValueCF(), indexMaintainers.get(0).getEmptyKeyValueQualifier()));
+            Pair<ValueGetter, IndexUpdate> statePair = state.getIndexUpdateState(cols, metaData.ignoreNewerMutations(), true);
             ValueGetter valueGetter = statePair.getFirst();
-            IndexUpdate indexUpdate = statePair.getSecond();
-            indexUpdate.setTable(maintainer.isLocalIndex() ? state.getEnvironment().getRegion()
-                    .getTableDesc().getName() : maintainer.getIndexTableName());
-            Delete delete = maintainer.buildDeleteMutation(KV_BUILDER, valueGetter, ptr, state.getPendingUpdate(),
-                    state.getCurrentTimestamp(), env.getRegion().getRegionInfo().getStartKey(), env.getRegion().getRegionInfo().getEndKey());
-            indexUpdate.setUpdate(delete);
-            indexUpdates.add(indexUpdate);
+            if (valueGetter!=null) {
+                IndexUpdate indexUpdate = statePair.getSecond();
+                indexUpdate.setTable(maintainer.isLocalIndex() ? state.getEnvironment().getRegion()
+                        .getTableDesc().getName() : maintainer.getIndexTableName());
+                Delete delete = maintainer.buildDeleteMutation(KV_BUILDER, valueGetter, ptr, state.getPendingUpdate(),
+                        state.getCurrentTimestamp(), env.getRegion().getRegionInfo().getStartKey(), env.getRegion().getRegionInfo().getEndKey());
+                indexUpdate.setUpdate(delete);
+                indexUpdates.add(indexUpdate);
+            }
         }
         return indexUpdates;
     }
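
The PhoenixIndexCodec hunk above is where the flag pays off for getIndexDeletes: the data table's empty key value column is added to the set of columns to scan, the table state is asked to return a null ValueGetter when the data row cannot be found, and no Delete is built in that case. That is the behaviour the commit title describes: no delete markers are issued for rows the scanner cannot see. A toy sketch of the guard; readRow and the strings are illustrative, the real code builds HBase Delete mutations.

// Toy sketch of the "skip the delete when the data row cannot be read" guard; illustrative only.
import java.util.ArrayList;
import java.util.List;
import java.util.function.Function;

public class IndexDeleteGuardSketch {

    static List<String> getIndexDeletes(List<String> pendingRows, Function<String, String> readRow) {
        List<String> deletes = new ArrayList<>();
        for (String row : pendingRows) {
            String valueGetter = readRow.apply(row);   // null models "row not found" from the scanner
            if (valueGetter != null) {                 // only emit a delete marker for rows we could read
                deletes.add("DELETE index row for " + row);
            }
        }
        return deletes;
    }

    public static void main(String[] args) {
        List<String> deletes = getIndexDeletes(List.of("existing", "missing"),
                row -> row.equals("existing") ? "cells" : null);
        System.out.println(deletes); // only the delete for "existing"
    }
}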

http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
index a41dbf0..b7153a3 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixTransactionalIndexer.java
@@ -531,7 +531,7 @@ public class PhoenixTransactionalIndexer extends BaseRegionObserver {
         }
 
         @Override
-        public Pair<ValueGetter, IndexUpdate> getIndexUpdateState(Collection<? extends ColumnReference> indexedColumns, boolean ignoreNewerMutations)
+        public Pair<ValueGetter, IndexUpdate> getIndexUpdateState(Collection<? extends ColumnReference> indexedColumns, boolean ignoreNewerMutations, boolean returnNullScannerIfRowNotFound)
                 throws IOException {
             // TODO: creating these objects over and over again is wasteful
             ColumnTracker tracker = new ColumnTracker(indexedColumns);
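
The PhoenixTransactionalIndexer hunk above only widens its TableState implementation's signature so the transactional path exposes the same returnNullScannerIfRowNotFound flag as the non-transactional one. A compilable sketch of the widened contract; Pair and the generic types are stand-ins, only the parameter names mirror the diff.

// Compilable sketch of the widened getIndexUpdateState contract; types are stand-ins.
import java.io.IOException;
import java.util.Collection;
import java.util.List;

public class TableStateContractSketch {

    record Pair<A, B>(A first, B second) {}

    interface TableState<Col> {
        // returnNullScannerIfRowNotFound == true means a missing data row yields a null
        // getter (first element of the pair) instead of one backed by an empty scanner.
        Pair<String, String> getIndexUpdateState(Collection<? extends Col> indexedColumns,
                boolean ignoreNewerMutations, boolean returnNullScannerIfRowNotFound) throws IOException;
    }

    public static void main(String[] args) throws IOException {
        TableState<String> missingRow = (cols, ignoreNewer, returnNull) ->
                new Pair<>(returnNull ? null : "getterOverEmptyScanner", "indexUpdate");
        System.out.println(missingRow.getIndexUpdateState(List.of("CF:COL"), false, true).first());  // null
        System.out.println(missingRow.getIndexUpdateState(List.of("CF:COL"), false, false).first()); // getterOverEmptyScanner
    }
}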

http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index f2820f2..d09284f 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -230,6 +230,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableListMultimap;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.ListMultimap;
 import com.google.common.collect.Lists;
@@ -3812,7 +3813,7 @@ public class MetaDataClient {
                             tableRefsToDrop.addAll(indexesToDrop);
                         }
                         // Drop any index tables that had the dropped column in the PK
-                        connection.getQueryServices().updateData(compiler.compile(tableRefsToDrop, null, null, Collections.<PColumn>emptyList(), ts));
+                        state = connection.getQueryServices().updateData(compiler.compile(tableRefsToDrop, null, null, Collections.<PColumn>emptyList(), ts));
 
                         // Drop any tenant-specific indexes
                         if (!tenantIdTableRefMap.isEmpty()) {
@@ -3822,29 +3823,33 @@ public class MetaDataClient {
                                 props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, indexTenantId);
                                 try (PhoenixConnection tenantConn = new PhoenixConnection(connection, connection.getQueryServices(), props)) {
                                     PostDDLCompiler dropCompiler = new PostDDLCompiler(tenantConn);
-                                    tenantConn.getQueryServices().updateData(dropCompiler.compile(entry.getValue(), null, null, Collections.<PColumn>emptyList(), ts));
+                                    state = tenantConn.getQueryServices().updateData(dropCompiler.compile(entry.getValue(), null, null, Collections.<PColumn>emptyList(), ts));
                                 }
                             }
                         }
 
-                        // Update empty key value column if necessary
-                        for (ColumnRef droppedColumnRef : columnsToDrop) {
-                            // Painful, but we need a TableRef with a pre-set timestamp to prevent attempts
-                            // to get any updates from the region server.
-                            // TODO: move this into PostDDLCompiler
-                            // TODO: consider filtering mutable indexes here, but then the issue is that
-                            // we'd need to force an update of the data row empty key value if a mutable
-                            // secondary index is changing its empty key value family.
-                            droppedColumnRef = droppedColumnRef.cloneAtTimestamp(ts);
-                            TableRef droppedColumnTableRef = droppedColumnRef.getTableRef();
-                            PColumn droppedColumn = droppedColumnRef.getColumn();
-                            MutationPlan plan = compiler.compile(
-                                    Collections.singletonList(droppedColumnTableRef),
-                                    getNewEmptyColumnFamilyOrNull(droppedColumnTableRef.getTable(), droppedColumn),
-                                    null,
-                                    Collections.singletonList(droppedColumn),
-                                    ts);
-                            state = connection.getQueryServices().updateData(plan);
+                        // TODO For immutable tables, if the storage scheme is not ONE_CELL_PER_COLUMN we will remove the column values at compaction time
+                        // See https://issues.apache.org/jira/browse/PHOENIX-3605
+                        if (!table.isImmutableRows() || table.getImmutableStorageScheme()==ImmutableStorageScheme.ONE_CELL_PER_COLUMN) {
+                            // Update empty key value column if necessary
+                            for (ColumnRef droppedColumnRef : columnsToDrop) {
+                                // Painful, but we need a TableRef with a pre-set timestamp to prevent attempts
+                                // to get any updates from the region server.
+                                // TODO: move this into PostDDLCompiler
+                                // TODO: consider filtering mutable indexes here, but then the issue is that
+                                // we'd need to force an update of the data row empty key value if a mutable
+                                // secondary index is changing its empty key value family.
+                                droppedColumnRef = droppedColumnRef.cloneAtTimestamp(ts);
+                                TableRef droppedColumnTableRef = droppedColumnRef.getTableRef();
+                                PColumn droppedColumn = droppedColumnRef.getColumn();
+                                MutationPlan plan = compiler.compile(
+                                        Collections.singletonList(droppedColumnTableRef),
+                                        getNewEmptyColumnFamilyOrNull(droppedColumnTableRef.getTable(), droppedColumn),
+                                        null,
+                                        Collections.singletonList(droppedColumn),
+                                        ts);
+                                state = connection.getQueryServices().updateData(plan);
+                            }
                         }
                         // Return the last MutationState
                         return state;
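
The MetaDataClient hunks above carry the client side of the fix. state now captures the MutationState returned by every updateData call so the last one can be returned, and the per-column PostDDL pass that deletes the dropped column's cells (and rewrites the empty key value column if its family changes) only runs when the table is mutable or stores one cell per column. For immutable tables with a packed storage scheme the dropped values stay in place and are removed at compaction time instead (PHOENIX-3605). A toy sketch of that decision; the enum mirrors Phoenix's ImmutableStorageScheme, the method itself is illustrative.

// Toy sketch of the drop-column cleanup decision in MetaDataClient; illustrative only.
public class DropColumnCleanupSketch {

    enum ImmutableStorageScheme { ONE_CELL_PER_COLUMN, SINGLE_CELL_ARRAY_WITH_OFFSETS }

    static boolean issueDeleteMarkers(boolean immutableRows, ImmutableStorageScheme scheme) {
        // Immutable tables with packed storage defer removal of the dropped column's values
        // to compaction (PHOENIX-3605), so no delete markers are written at DROP COLUMN time.
        return !immutableRows || scheme == ImmutableStorageScheme.ONE_CELL_PER_COLUMN;
    }

    public static void main(String[] args) {
        System.out.println(issueDeleteMarkers(false, ImmutableStorageScheme.ONE_CELL_PER_COLUMN));           // true
        System.out.println(issueDeleteMarkers(true, ImmutableStorageScheme.ONE_CELL_PER_COLUMN));            // true
        System.out.println(issueDeleteMarkers(true, ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS)); // false
    }
}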

http://git-wip-us.apache.org/repos/asf/phoenix/blob/023f8631/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
index a2e45af..db7b354 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/hbase/index/covered/TestLocalTableState.java
@@ -91,7 +91,7 @@ public class TestLocalTableState {
     ColumnReference col = new ColumnReference(fam, qual);
     table.setCurrentTimestamp(ts);
     //check that our value still shows up first on scan, even though this is a lazy load
-    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false);
+    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false);
     Scanner s = p.getFirst();
     assertEquals("Didn't get the pending mutation's value first", m.get(fam, qual).get(0), s.next());
   }
@@ -135,13 +135,13 @@ public class TestLocalTableState {
     ColumnReference col = new ColumnReference(fam, qual);
     table.setCurrentTimestamp(ts);
     // check that the value is there
-    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false);
+    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false);
     Scanner s = p.getFirst();
     assertEquals("Didn't get the pending mutation's value first", kv, s.next());
 
     // rollback that value
     table.rollback(Arrays.asList(kv));
-    p = table.getIndexedColumnsTableState(Arrays.asList(col), false);
+    p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false);
     s = p.getFirst();
     assertEquals("Didn't correctly rollback the row - still found it!", null, s.next());
     Mockito.verify(env, Mockito.times(1)).getRegion();
@@ -179,14 +179,14 @@ public class TestLocalTableState {
     ColumnReference col = new ColumnReference(fam, qual);
     table.setCurrentTimestamp(ts);
     // check that the value is there
-    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false);
+    Pair<Scanner, IndexUpdate> p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false);
     Scanner s = p.getFirst();
     // make sure it read the table the one time
     assertEquals("Didn't get the stored keyvalue!", storedKv, s.next());
 
     // on the second lookup it shouldn't access the underlying table again - the cached columns
     // should know they are done
-    p = table.getIndexedColumnsTableState(Arrays.asList(col), false);
+    p = table.getIndexedColumnsTableState(Arrays.asList(col), false, false);
     s = p.getFirst();
     assertEquals("Lost already loaded update!", storedKv, s.next());
     Mockito.verify(env, Mockito.times(1)).getRegion();