You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@phoenix.apache.org by gj...@apache.org on 2019/11/11 22:22:58 UTC

[phoenix] 01/02: PHOENIX-5508 - ALTER INDEX REBUILD removes all rows from a simple global index

This is an automated email from the ASF dual-hosted git repository.

gjacoby pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 99cfa7dfdaeb52022ec452f04633ed2a416ce86c
Author: Geoffrey Jacoby <gj...@apache.org>
AuthorDate: Fri Nov 8 14:13:27 2019 -0800

    PHOENIX-5508 - ALTER INDEX REBUILD removes all rows from a simple global index
---
 .../apache/phoenix/end2end/index/AlterIndexIT.java | 73 ++++++++++++++++++++++
 .../org/apache/phoenix/schema/MetaDataClient.java  | 32 ++++++++--
 2 files changed, 100 insertions(+), 5 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AlterIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AlterIndexIT.java
new file mode 100644
index 0000000..a01de4d
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AlterIndexIT.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+
+public class AlterIndexIT extends ParallelStatsDisabledIT {
+
+    @Test
+    public void testAlterIndexRebuildNoAsync() throws Exception {
+        String indexName = "I_" + generateUniqueName();
+        String tableName = "T_" + generateUniqueName();
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            createAndPopulateTable(conn, tableName);
+            Assert.assertEquals(2, TestUtil.getRowCount(conn, tableName));
+            createIndex(conn, indexName, tableName, "val1", "val2, val3");
+            Assert.assertEquals(2, TestUtil.getRowCount(conn, indexName));
+            rebuildIndex(conn, indexName, tableName, false);
+            Assert.assertEquals(2, TestUtil.getRowCount(conn, indexName));
+        }
+    }
+
+    private void createAndPopulateTable(Connection conn, String tableName) throws Exception {
+        conn.createStatement().execute("create table " + tableName +
+            " (id varchar(10) not null primary key, val1 varchar(10), " +
+            "val2 varchar(10), val3 varchar(10))");
+        conn.createStatement().execute("upsert into " + tableName + " " +
+            "values ('a', 'ab', 'abc', 'abcd')");
+        conn.commit();
+        conn.createStatement().execute("upsert into " + tableName +
+            " values ('b', 'bc', 'bcd', 'bcde')");
+        conn.commit();
+    }
+
+    private void createIndex(Connection conn, String indexName, String tableName,
+                                      String columns, String includeColumns)
+        throws SQLException {
+        String ddl = "CREATE INDEX " + indexName + " ON " + tableName + " (" + columns + ")" +
+            " INCLUDE (" + includeColumns + ")";
+        conn.createStatement().execute(ddl);
+    }
+
+    private void rebuildIndex(Connection conn, String indexName, String tableName, boolean async)
+        throws SQLException {
+        String format = "ALTER INDEX %s ON %s REBUILD" + (async ? " ASYNC" : "");
+        String sql = String.format(format, indexName, tableName);
+            conn.createStatement().execute(sql);
+            conn.commit();
+
+    }
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index d290333..28e3441 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -137,6 +137,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import com.google.gson.JsonObject;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -4410,11 +4411,31 @@ public class MetaDataClient {
             if (newIndexState == PIndexState.BUILDING && !isAsync) {
                 PTable index = indexRef.getTable();
                 // First delete any existing rows of the index
-                Long scn = connection.getSCN();
-                long ts = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
-                MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(indexRef), null, null, Collections.<PColumn>emptyList(), ts);
-                connection.getQueryServices().updateData(plan);
-                NamedTableNode dataTableNode = NamedTableNode.create(null, TableName.create(schemaName, dataTableName), Collections.<ColumnDef>emptyList());
+                if (index.getIndexType().equals(IndexType.GLOBAL) && index.getViewIndexId() == null){
+                    //for a global index of a normal base table, it's safe to just truncate and
+                    //rebuild. We preserve splits to reduce the amount of splitting we need to do
+                    //during rebuild
+                    org.apache.hadoop.hbase.TableName physicalTableName =
+                        org.apache.hadoop.hbase.TableName.valueOf(index.getPhysicalName().getBytes());
+                    try (Admin admin = connection.getQueryServices().getAdmin()) {
+                        admin.disableTable(physicalTableName);
+                        admin.truncateTable(physicalTableName, true);
+                        //truncateTable automatically re-enables when it's done
+                    } catch(IOException ie) {
+                        String failedTable = physicalTableName.getNameAsString();
+                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNKNOWN_ERROR_CODE).
+                            setMessage("Error when truncating index table [" + failedTable +
+                                "] before rebuilding: " + ie.getMessage()).
+                            setTableName(failedTable).build().buildException();
+                    }
+                } else {
+                    Long scn = connection.getSCN();
+                    long ts = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
+                    MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(indexRef), null, null, Collections.<PColumn>emptyList(), ts);
+                    connection.getQueryServices().updateData(plan);
+                }
+                NamedTableNode dataTableNode = NamedTableNode.create(null,
+                    TableName.create(schemaName, dataTableName), Collections.<ColumnDef>emptyList());
                 // Next rebuild the index
                 connection.setAutoCommit(true);
                 if (connection.getSCN() != null) {
@@ -4423,6 +4444,7 @@ public class MetaDataClient {
                 TableRef dataTableRef = FromCompiler.getResolver(dataTableNode, connection).getTables().get(0);
                 return buildIndex(index, dataTableRef);
             }
+
             return new MutationState(1, 1000, connection);
         } catch (TableNotFoundException e) {
             if (!statement.ifExists()) {