Posted to commits@phoenix.apache.org by gj...@apache.org on 2019/11/11 22:46:52 UTC
[phoenix] branch 4.14-HBase-1.4 updated: PHOENIX-5508 - ALTER INDEX REBUILD removes all rows from a simple global index
This is an automated email from the ASF dual-hosted git repository.
gjacoby pushed a commit to branch 4.14-HBase-1.4
in repository https://gitbox.apache.org/repos/asf/phoenix.git
The following commit(s) were added to refs/heads/4.14-HBase-1.4 by this push:
new a78fd7f PHOENIX-5508 - ALTER INDEX REBUILD removes all rows from a simple global index
a78fd7f is described below
commit a78fd7fbe38cfb4e8e953206273a844a4263f956
Author: Geoffrey Jacoby <gj...@apache.org>
AuthorDate: Fri Oct 4 15:44:18 2019 -0700
PHOENIX-5508 - ALTER INDEX REBUILD removes all rows from a simple global index
---
.../apache/phoenix/end2end/index/AlterIndexIT.java | 73 ++++
.../apache/phoenix/compile/PostDDLCompiler.java | 482 +++++++++++----------
.../org/apache/phoenix/schema/MetaDataClient.java | 32 +-
3 files changed, 362 insertions(+), 225 deletions(-)
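
For context, here is a minimal standalone JDBC sketch of the scenario this patch addresses, mirroring the new integration test below. The connection URL and the T1/I1 names are illustrative, not part of the commit: before the fix, the synchronous REBUILD path deleted the existing index rows but could leave a simple global index empty afterwards.

// Hypothetical repro sketch; "jdbc:phoenix:localhost", T1, and I1 are illustrative.
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;

public class RebuildRepro {
    public static void main(String[] args) throws Exception {
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            conn.createStatement().execute(
                "CREATE TABLE T1 (id VARCHAR(10) NOT NULL PRIMARY KEY, val1 VARCHAR(10))");
            conn.createStatement().execute("UPSERT INTO T1 VALUES ('a', 'ab')");
            conn.commit();
            // A simple global index on a plain base table (not local, not a view index)
            conn.createStatement().execute("CREATE INDEX I1 ON T1 (val1)");
            // Synchronous rebuild: clears the index rows, then repopulates them
            conn.createStatement().execute("ALTER INDEX I1 ON T1 REBUILD");
            try (ResultSet rs = conn.createStatement()
                    .executeQuery("SELECT COUNT(*) FROM I1")) {
                rs.next();
                // Expect 1 after the fix; before PHOENIX-5508 this could be 0
                System.out.println("index rows: " + rs.getLong(1));
            }
        }
    }
}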
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AlterIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AlterIndexIT.java
new file mode 100644
index 0000000..a01de4d
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AlterIndexIT.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+
+public class AlterIndexIT extends ParallelStatsDisabledIT {
+
+ @Test
+ public void testAlterIndexRebuildNoAsync() throws Exception {
+ String indexName = "I_" + generateUniqueName();
+ String tableName = "T_" + generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createAndPopulateTable(conn, tableName);
+ Assert.assertEquals(2, TestUtil.getRowCount(conn, tableName));
+ createIndex(conn, indexName, tableName, "val1", "val2, val3");
+ Assert.assertEquals(2, TestUtil.getRowCount(conn, indexName));
+ rebuildIndex(conn, indexName, tableName, false);
+ Assert.assertEquals(2, TestUtil.getRowCount(conn, indexName));
+ }
+ }
+
+ private void createAndPopulateTable(Connection conn, String tableName) throws Exception {
+ conn.createStatement().execute("create table " + tableName +
+ " (id varchar(10) not null primary key, val1 varchar(10), " +
+ "val2 varchar(10), val3 varchar(10))");
+ conn.createStatement().execute("upsert into " + tableName + " " +
+ "values ('a', 'ab', 'abc', 'abcd')");
+ conn.commit();
+ conn.createStatement().execute("upsert into " + tableName +
+ " values ('b', 'bc', 'bcd', 'bcde')");
+ conn.commit();
+ }
+
+ private void createIndex(Connection conn, String indexName, String tableName,
+ String columns, String includeColumns)
+ throws SQLException {
+ String ddl = "CREATE INDEX " + indexName + " ON " + tableName + " (" + columns + ")" +
+ " INCLUDE (" + includeColumns + ")";
+ conn.createStatement().execute(ddl);
+ }
+
+ private void rebuildIndex(Connection conn, String indexName, String tableName, boolean async)
+ throws SQLException {
+ String format = "ALTER INDEX %s ON %s REBUILD" + (async ? " ASYNC" : "");
+ String sql = String.format(format, indexName, tableName);
+ conn.createStatement().execute(sql);
+ conn.commit();
+
+ }
+}
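
The test exercises only the synchronous path; with async=true the helper would issue ALTER INDEX ... REBUILD ASYNC, which in Phoenix 4.x only marks the index for rebuild and leaves the actual build to the MapReduce IndexTool. A hedged sketch of driving that tool programmatically follows; the table names and output path are illustrative, and the options assume IndexTool's standard long flags:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.util.ToolRunner;
import org.apache.phoenix.mapreduce.index.IndexTool;

public class RunAsyncRebuild {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent to running: hbase org.apache.phoenix.mapreduce.index.IndexTool ...
        int rc = ToolRunner.run(conf, new IndexTool(), new String[] {
            "--data-table", "T1",        // base table (illustrative name)
            "--index-table", "I1",       // index left in a building state by REBUILD ASYNC
            "--output-path", "/tmp/I1"   // HDFS staging directory for the bulk load
        });
        System.exit(rc);
    }
}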
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
index 709534e..74a293d 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/compile/PostDDLCompiler.java
@@ -87,248 +87,290 @@ public class PostDDLCompiler {
final long timestamp) throws SQLException {
PhoenixStatement statement = new PhoenixStatement(connection);
final StatementContext context = new StatementContext(
- statement,
- new ColumnResolver() {
+ statement,
+ new PostDDLMultiColumnResolver(tableRefs),
+ scan,
+ new SequenceManager(statement));
+ return new PostDDLMutationPlan(context, tableRefs, timestamp, emptyCF, deleteList, projectCFs);
+ }
- @Override
- public List<TableRef> getTables() {
- return tableRefs;
- }
+ private static class PostDDLMultiColumnResolver implements ColumnResolver {
- @Override
- public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
- throw new UnsupportedOperationException();
- }
+ private final List<TableRef> tableRefs;
- @Override
- public ColumnRef resolveColumn(String schemaName, String tableName, String colName)
- throws SQLException {
- throw new UnsupportedOperationException();
- }
+ public PostDDLMultiColumnResolver(List<TableRef> tableRefs) {
+ this.tableRefs = tableRefs;
+ }
- @Override
- public List<PFunction> getFunctions() {
- return Collections.<PFunction>emptyList();
- }
-
- @Override
- public PFunction resolveFunction(String functionName)
- throws SQLException {
- throw new FunctionNotFoundException(functionName);
- }
-
- @Override
- public boolean hasUDFs() {
- return false;
- }
-
- @Override
- public PSchema resolveSchema(String schemaName) throws SQLException {
- throw new SchemaNotFoundException(schemaName);
- }
-
- @Override
- public List<PSchema> getSchemas() {
- throw new UnsupportedOperationException();
- }
-
- },
- scan,
- new SequenceManager(statement));
- return new BaseMutationPlan(context, Operation.UPSERT /* FIXME */) {
-
- @Override
- public MutationState execute() throws SQLException {
- if (tableRefs.isEmpty()) {
- return new MutationState(0, 1000, connection);
- }
- boolean wasAutoCommit = connection.getAutoCommit();
- try {
- connection.setAutoCommit(true);
- SQLException sqlE = null;
- /*
- * Handles:
- * 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows for a DROP INDEX;
- * 2) deletion of all column values for a ALTER TABLE DROP COLUMN
- * 3) updating the necessary rows to have an empty KV
- * 4) updating table stats
- */
- long totalMutationCount = 0;
- for (final TableRef tableRef : tableRefs) {
- Scan scan = ScanUtil.newScan(context.getScan());
- SelectStatement select = SelectStatement.COUNT_ONE;
- // We need to use this tableRef
- ColumnResolver resolver = new ColumnResolver() {
- @Override
- public List<TableRef> getTables() {
- return Collections.singletonList(tableRef);
- }
-
- @Override
- public java.util.List<PFunction> getFunctions() {
- return Collections.emptyList();
- };
-
- @Override
- public TableRef resolveTable(String schemaName, String tableName)
- throws SQLException {
- throw new UnsupportedOperationException();
- }
- @Override
- public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
- PColumn column = tableName != null
- ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName)
- : tableRef.getTable().getColumnForColumnName(colName);
- return new ColumnRef(tableRef, column.getPosition());
- }
-
- @Override
- public PFunction resolveFunction(String functionName) throws SQLException {
- throw new UnsupportedOperationException();
- };
-
- @Override
- public boolean hasUDFs() {
- return false;
- }
+ @Override
+ public List<TableRef> getTables() {
+ return tableRefs;
+ }
- @Override
- public List<PSchema> getSchemas() {
- throw new UnsupportedOperationException();
- }
+ @Override
+ public TableRef resolveTable(String schemaName, String tableName) throws SQLException {
+ throw new UnsupportedOperationException();
+ }
- @Override
- public PSchema resolveSchema(String schemaName) throws SQLException {
- throw new SchemaNotFoundException(schemaName);
- }
- };
- PhoenixStatement statement = new PhoenixStatement(connection);
- StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
- long ts = timestamp;
- // FIXME: DDL operations aren't transactional, so we're basing the timestamp on a server timestamp.
- // Not sure what the fix should be. We don't need conflict detection nor filtering of invalid transactions
- // in this case, so maybe this is ok.
- if (ts!=HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
- ts = TransactionUtil.convertToNanoseconds(ts);
- }
- ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
- if (emptyCF != null) {
- scan.setAttribute(BaseScannerRegionObserver.EMPTY_CF, emptyCF);
- scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
- }
- ServerCache cache = null;
- try {
- if (deleteList != null) {
- if (deleteList.isEmpty()) {
- scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
- // In the case of a row deletion, add index metadata so mutable secondary indexing works
- /* TODO: we currently manually run a scan to delete the index data here
- ImmutableBytesWritable ptr = context.getTempPtr();
- tableRef.getTable().getIndexMaintainers(ptr);
- if (ptr.getLength() > 0) {
- IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
- cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
- byte[] uuidValue = cache.getId();
- scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
- }
- */
- } else {
- // In the case of the empty key value column family changing, do not send the index
- // metadata, as we're currently managing this from the client. It's possible for the
- // data empty column family to stay the same, while the index empty column family
- // changes.
- PColumn column = deleteList.get(0);
- byte[] cq = column.getColumnQualifierBytes();
- if (emptyCF == null) {
- scan.addColumn(column.getFamilyName().getBytes(), cq);
- }
- scan.setAttribute(BaseScannerRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
- scan.setAttribute(BaseScannerRegionObserver.DELETE_CQ, cq);
- }
- }
- List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
- if (projectCFs == null) {
- for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
- columnFamilies.add(family.getName().getBytes());
+ @Override
+ public ColumnRef resolveColumn(String schemaName, String tableName, String colName)
+ throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public List<PFunction> getFunctions() {
+ return Collections.<PFunction>emptyList();
+ }
+
+ @Override
+ public PFunction resolveFunction(String functionName)
+ throws SQLException {
+ throw new FunctionNotFoundException(functionName);
+ }
+
+ @Override
+ public boolean hasUDFs() {
+ return false;
+ }
+
+ @Override
+ public PSchema resolveSchema(String schemaName) throws SQLException {
+ throw new SchemaNotFoundException(schemaName);
+ }
+
+ @Override
+ public List<PSchema> getSchemas() {
+ throw new UnsupportedOperationException();
+ }
+
+ }
+
+ private class PostDDLMutationPlan extends BaseMutationPlan {
+
+ private final StatementContext context;
+ private final List<TableRef> tableRefs;
+ private final long timestamp;
+ private final byte[] emptyCF;
+ private final List<PColumn> deleteList;
+ private final List<byte[]> projectCFs;
+
+ public PostDDLMutationPlan(StatementContext context, List<TableRef> tableRefs,
+ long timestamp, byte[] emptyCF, List<PColumn> deleteList,
+ List<byte[]> projectCFs) {
+ super(context, Operation.UPSERT);
+ this.context = context;
+ this.tableRefs = tableRefs;
+ this.timestamp = timestamp;
+ this.emptyCF = emptyCF;
+ this.deleteList = deleteList;
+ this.projectCFs = projectCFs;
+ }
+
+ @Override
+ public MutationState execute() throws SQLException {
+ if (tableRefs.isEmpty()) {
+ return new MutationState(0, 1000, connection);
+ }
+ boolean wasAutoCommit = connection.getAutoCommit();
+ try {
+ connection.setAutoCommit(true);
+ SQLException sqlE = null;
+ /*
+ * Handles:
+ * 1) deletion of all rows for a DROP TABLE and subsequently deletion of all rows for a DROP INDEX;
+ * 2) deletion of all column values for an ALTER TABLE DROP COLUMN
+ * 3) updating the necessary rows to have an empty KV
+ * 4) updating table stats
+ */
+ long totalMutationCount = 0;
+ for (final TableRef tableRef : tableRefs) {
+ Scan scan = ScanUtil.newScan(context.getScan());
+ SelectStatement select = SelectStatement.COUNT_ONE;
+ // We need to use this tableRef
+ ColumnResolver resolver = new PostDDLSingleTableColumnResolver(tableRef);
+ PhoenixStatement statement = new PhoenixStatement(connection);
+ StatementContext context = new StatementContext(statement, resolver, scan, new SequenceManager(statement));
+ long ts = timestamp;
+ // FIXME: DDL operations aren't transactional, so we're basing the timestamp on a server timestamp.
+ // Not sure what the fix should be. We don't need conflict detection nor filtering of invalid transactions
+ // in this case, so maybe this is ok.
+ if (ts != HConstants.LATEST_TIMESTAMP && tableRef.getTable().isTransactional()) {
+ ts = TransactionUtil.convertToNanoseconds(ts);
+ }
+ ScanUtil.setTimeRange(scan, scan.getTimeRange().getMin(), ts);
+ if (emptyCF != null) {
+ scan.setAttribute(BaseScannerRegionObserver.EMPTY_CF, emptyCF);
+ scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER, EncodedColumnsUtil.getEmptyKeyValueInfo(tableRef.getTable()).getFirst());
+ }
+ ServerCache cache = null;
+ try {
+ if (deleteList != null) {
+ if (deleteList.isEmpty()) {
+ scan.setAttribute(BaseScannerRegionObserver.DELETE_AGG, QueryConstants.TRUE);
+ // In the case of a row deletion, add index metadata so mutable secondary indexing works
+ /* TODO: we currently manually run a scan to delete the index data here
+ ImmutableBytesWritable ptr = context.getTempPtr();
+ tableRef.getTable().getIndexMaintainers(ptr);
+ if (ptr.getLength() > 0) {
+ IndexMetaDataCacheClient client = new IndexMetaDataCacheClient(connection, tableRef);
+ cache = client.addIndexMetadataCache(context.getScanRanges(), ptr);
+ byte[] uuidValue = cache.getId();
+ scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
}
+ */
} else {
- for (byte[] projectCF : projectCFs) {
- columnFamilies.add(projectCF);
+ // In the case of the empty key value column family changing, do not send the index
+ // metadata, as we're currently managing this from the client. It's possible for the
+ // data empty column family to stay the same, while the index empty column family
+ // changes.
+ PColumn column = deleteList.get(0);
+ byte[] cq = column.getColumnQualifierBytes();
+ if (emptyCF == null) {
+ scan.addColumn(column.getFamilyName().getBytes(), cq);
}
+ scan.setAttribute(BaseScannerRegionObserver.DELETE_CF, column.getFamilyName().getBytes());
+ scan.setAttribute(BaseScannerRegionObserver.DELETE_CQ, cq);
}
- // Need to project all column families into the scan, since we haven't yet created our empty key value
- RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
- context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
- // Explicitly project these column families and don't project the empty key value,
- // since at this point we haven't added the empty key value everywhere.
- if (columnFamilies != null) {
- scan.getFamilyMap().clear();
- for (byte[] family : columnFamilies) {
- scan.addFamily(family);
- }
- projector = new RowProjector(projector,false);
+ }
+ List<byte[]> columnFamilies = Lists.newArrayListWithExpectedSize(tableRef.getTable().getColumnFamilies().size());
+ if (projectCFs == null) {
+ for (PColumnFamily family : tableRef.getTable().getColumnFamilies()) {
+ columnFamilies.add(family.getName().getBytes());
}
- // Ignore exceptions due to not being able to resolve any view columns,
- // as this just means the view is invalid. Continue on and try to perform
- // any other Post DDL operations.
- try {
- // Since dropping a VIEW does not affect the underlying data, we do
- // not need to pass through the view statement here.
- WhereCompiler.compile(context, select); // Push where clause into scan
- } catch (ColumnFamilyNotFoundException e) {
- continue;
- } catch (ColumnNotFoundException e) {
- continue;
- } catch (AmbiguousColumnException e) {
- continue;
+ } else {
+ for (byte[] projectCF : projectCFs) {
+ columnFamilies.add(projectCF);
}
- QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null,
- OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, null);
+ }
+ // Need to project all column families into the scan, since we haven't yet created our empty key value
+ RowProjector projector = ProjectionCompiler.compile(context, SelectStatement.COUNT_ONE, GroupBy.EMPTY_GROUP_BY);
+ context.getAggregationManager().compile(context, GroupBy.EMPTY_GROUP_BY);
+ // Explicitly project these column families and don't project the empty key value,
+ // since at this point we haven't added the empty key value everywhere.
+ if (columnFamilies != null) {
+ scan.getFamilyMap().clear();
+ for (byte[] family : columnFamilies) {
+ scan.addFamily(family);
+ }
+ projector = new RowProjector(projector, false);
+ }
+ // Ignore exceptions due to not being able to resolve any view columns,
+ // as this just means the view is invalid. Continue on and try to perform
+ // any other Post DDL operations.
+ try {
+ // Since dropping a VIEW does not affect the underlying data, we do
+ // not need to pass through the view statement here.
+ WhereCompiler.compile(context, select); // Push where clause into scan
+ } catch (ColumnFamilyNotFoundException e) {
+ continue;
+ } catch (ColumnNotFoundException e) {
+ continue;
+ } catch (AmbiguousColumnException e) {
+ continue;
+ }
+ QueryPlan plan = new AggregatePlan(context, select, tableRef, projector, null, null,
+ OrderBy.EMPTY_ORDER_BY, null, GroupBy.EMPTY_GROUP_BY, null, null);
+ try {
+ ResultIterator iterator = plan.iterator();
try {
- ResultIterator iterator = plan.iterator();
+ Tuple row = iterator.next();
+ ImmutableBytesWritable ptr = context.getTempPtr();
+ totalMutationCount += (Long)projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
+ } catch (SQLException e) {
+ sqlE = e;
+ } finally {
try {
- Tuple row = iterator.next();
- ImmutableBytesWritable ptr = context.getTempPtr();
- totalMutationCount += (Long)projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
+ iterator.close();
} catch (SQLException e) {
- sqlE = e;
+ if (sqlE == null) {
+ sqlE = e;
+ } else {
+ sqlE.setNextException(e);
+ }
} finally {
- try {
- iterator.close();
- } catch (SQLException e) {
- if (sqlE == null) {
- sqlE = e;
- } else {
- sqlE.setNextException(e);
- }
- } finally {
- if (sqlE != null) {
- throw sqlE;
- }
+ if (sqlE != null) {
+ throw sqlE;
}
}
- } catch (TableNotFoundException e) {
- // Ignore and continue, as HBase throws when table hasn't been written to
- // FIXME: Remove if this is fixed in 0.96
- }
- } finally {
- if (cache != null) { // Remove server cache if there is one
- cache.close();
}
+ } catch (TableNotFoundException e) {
+ // Ignore and continue, as HBase throws when table hasn't been written to
+ // FIXME: Remove if this is fixed in 0.96
}
-
- }
- final long count = totalMutationCount;
- return new MutationState(1, 1000, connection) {
- @Override
- public long getUpdateCount() {
- return count;
+ } finally {
+ if (cache != null) { // Remove server cache if there is one
+ cache.close();
}
- };
- } finally {
- if (!wasAutoCommit) connection.setAutoCommit(wasAutoCommit);
+ }
+
}
+ final long count = totalMutationCount;
+ return new MutationState(1, 1000, connection) {
+ @Override
+ public long getUpdateCount() {
+ return count;
+ }
+ };
+ } finally {
+ if (!wasAutoCommit) connection.setAutoCommit(wasAutoCommit);
+ }
+ }
+
+ private class PostDDLSingleTableColumnResolver implements ColumnResolver {
+ private final TableRef tableRef;
+
+ public PostDDLSingleTableColumnResolver(TableRef tableRef) {
+ this.tableRef = tableRef;
+ }
+
+ @Override
+ public List<TableRef> getTables() {
+ return Collections.singletonList(tableRef);
+ }
+
+ @Override
+ public List<PFunction> getFunctions() {
+ return Collections.emptyList();
+ }
+
+ @Override
+ public TableRef resolveTable(String schemaName, String tableName)
+ throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public ColumnRef resolveColumn(String schemaName, String tableName, String colName) throws SQLException {
+ PColumn column = tableName != null
+ ? tableRef.getTable().getColumnFamily(tableName).getPColumnForColumnName(colName)
+ : tableRef.getTable().getColumnForColumnName(colName);
+ return new ColumnRef(tableRef, column.getPosition());
+ }
+
+ @Override
+ public PFunction resolveFunction(String functionName) throws SQLException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean hasUDFs() {
+ return false;
+ }
+
+ @Override
+ public List<PSchema> getSchemas() {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public PSchema resolveSchema(String schemaName) throws SQLException {
+ throw new SchemaNotFoundException(schemaName);
}
- };
+ }
}
}
\ No newline at end of file
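
Taken as a whole, the PostDDLCompiler diff above is an extract-class refactor: the two anonymous ColumnResolver implementations and the anonymous BaseMutationPlan become the named inner classes PostDDLMultiColumnResolver, PostDDLSingleTableColumnResolver, and PostDDLMutationPlan, with their captured locals passed through constructors and no intended behavior change. A minimal self-contained sketch of the pattern (the Resolver interface and names here are illustrative, not Phoenix's):

import java.util.Collections;
import java.util.List;

interface Resolver {
    List<String> getTables();
}

class Before {
    Resolver make(final String table) {
        // Anonymous class: the dependency is captured implicitly by the closure
        return new Resolver() {
            @Override public List<String> getTables() {
                return Collections.singletonList(table);
            }
        };
    }
}

class After {
    static final class SingleTableResolver implements Resolver {
        private final String table; // dependency made explicit and testable
        SingleTableResolver(String table) { this.table = table; }
        @Override public List<String> getTables() {
            return Collections.singletonList(table);
        }
    }

    Resolver make(String table) {
        return new SingleTableResolver(table);
    }
}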
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index 60147fb..dda8895 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -133,6 +133,7 @@ import java.util.Set;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -4008,11 +4009,31 @@ public class MetaDataClient {
if (newIndexState == PIndexState.BUILDING && !isAsync) {
PTable index = indexRef.getTable();
// First delete any existing rows of the index
- Long scn = connection.getSCN();
- long ts = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
- MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(indexRef), null, null, Collections.<PColumn>emptyList(), ts);
- connection.getQueryServices().updateData(plan);
- NamedTableNode dataTableNode = NamedTableNode.create(null, TableName.create(schemaName, dataTableName), Collections.<ColumnDef>emptyList());
+ if (index.getIndexType().equals(IndexType.GLOBAL) && index.getViewIndexId() == null) {
+ // for a global index of a normal base table, it's safe to just truncate and
+ // rebuild. We preserve splits to reduce the amount of splitting we need to do
+ // during rebuild
+ org.apache.hadoop.hbase.TableName physicalTableName =
+ org.apache.hadoop.hbase.TableName.valueOf(index.getPhysicalName().getBytes());
+ try (Admin admin = connection.getQueryServices().getAdmin()) {
+ admin.disableTable(physicalTableName);
+ admin.truncateTable(physicalTableName, true);
+ // truncateTable automatically re-enables the table when it's done
+ } catch (IOException ie) {
+ String failedTable = physicalTableName.getNameAsString();
+ throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNKNOWN_ERROR_CODE).
+ setMessage("Error when truncating index table [" + failedTable +
+ "] before rebuilding: " + ie.getMessage()).
+ setTableName(failedTable).build().buildException();
+ }
+ } else {
+ Long scn = connection.getSCN();
+ long ts = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
+ MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(indexRef), null, null, Collections.<PColumn>emptyList(), ts);
+ connection.getQueryServices().updateData(plan);
+ }
+ NamedTableNode dataTableNode = NamedTableNode.create(null,
+ TableName.create(schemaName, dataTableName), Collections.<ColumnDef>emptyList());
// Next rebuild the index
connection.setAutoCommit(true);
if (connection.getSCN() != null) {
@@ -4021,6 +4042,7 @@ public class MetaDataClient {
TableRef dataTableRef = FromCompiler.getResolver(dataTableNode, connection).getTables().get(0);
return buildIndex(index, dataTableRef);
}
+
return new MutationState(1, 1000, connection);
} catch (TableNotFoundException e) {
if (!statement.ifExists()) {
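
For reference, a minimal standalone sketch of the HBase 1.x Admin calls the MetaDataClient change relies on; the configuration and table name are illustrative, and Phoenix itself obtains its Admin from the query services rather than building a connection like this. truncateTable with preserveSplits=true keeps the existing region boundaries and re-enables the table when it finishes:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TruncatePreservingSplits {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName indexTable = TableName.valueOf("I1"); // physical index table (illustrative)
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.disableTable(indexTable);        // truncate requires a disabled table
            admin.truncateTable(indexTable, true); // true = preserve existing split points
        }
    }
}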