Posted to commits@phoenix.apache.org by gj...@apache.org on 2019/11/11 22:22:57 UTC

[phoenix] branch 4.x-HBase-1.5 updated (42ffee3 -> 113e9cf)

This is an automated email from the ASF dual-hosted git repository.

gjacoby pushed a change to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git.


    from 42ffee3  PHOENIX-5562 Simplify detection of concurrent updates on data tables with indexes
     new 99cfa7d  PHOENIX-5508 - ALTER INDEX REBUILD removes all rows from a simple global index
     new 113e9cf  PHOENIX-5560 - View Index Tables created incorrectly with IndexRegionObserver

The 2 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../apache/phoenix/end2end/index/AlterIndexIT.java | 73 ++++++++++++++++++++++
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 68 +++++++++++++++++---
 .../org/apache/phoenix/schema/MetaDataClient.java  | 32 ++++++++--
 3 files changed, 159 insertions(+), 14 deletions(-)
 create mode 100644 phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AlterIndexIT.java


[phoenix] 02/02: PHOENIX-5560 - View Index Tables created incorrectly with IndexRegionObserver

Posted by gj...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gjacoby pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 113e9cf3f9e80c4daa0c8d2f6a6801a2f7931dac
Author: Geoffrey Jacoby <gj...@apache.org>
AuthorDate: Fri Nov 8 13:49:59 2019 -0800

    PHOENIX-5560 - View Index Tables created incorrectly with IndexRegionObserver
---
 .../apache/phoenix/end2end/index/ViewIndexIT.java  | 68 +++++++++++++++++++---
 1 file changed, 59 insertions(+), 9 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
index 3126ee4..9da0e50 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/ViewIndexIT.java
@@ -25,6 +25,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.Date;
 import java.sql.DriverManager;
@@ -37,10 +38,17 @@ import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
 
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.TableDescriptor;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.end2end.IndexToolIT;
 import org.apache.phoenix.end2end.SplitSystemCatalogIT;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.hbase.index.Indexer;
+import org.apache.phoenix.index.GlobalIndexChecker;
+import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.query.KeyRange;
@@ -70,13 +78,16 @@ public class ViewIndexIT extends SplitSystemCatalogIT {
         return Arrays.asList(true, false);
     }
 
-    private void createBaseTable(String schemaName, String tableName, boolean multiTenant, Integer saltBuckets, String splits)
+    private void createBaseTable(String schemaName, String tableName, boolean multiTenant,
+                                 Integer saltBuckets, String splits, boolean mutable)
             throws SQLException {
         Connection conn = getConnection();
         if (isNamespaceMapped) {
             conn.createStatement().execute("CREATE SCHEMA IF NOT EXISTS " + schemaName);
         }
-        String ddl = "CREATE TABLE " + SchemaUtil.getTableName(schemaName, tableName) + " (t_id VARCHAR NOT NULL,\n" +
+        String ddl = "CREATE " + (mutable ? "" : "IMMUTABLE") +
+            " TABLE " + SchemaUtil.getTableName(schemaName, tableName) +
+            " (t_id VARCHAR NOT NULL,\n" +
                 "k1 VARCHAR NOT NULL,\n" +
                 "k2 INTEGER NOT NULL,\n" +
                 "v1 VARCHAR,\n" +
@@ -118,10 +129,10 @@ public class ViewIndexIT extends SplitSystemCatalogIT {
         conn.commit();
     }
     
-    private Connection getConnection() throws SQLException{
+    private PhoenixConnection getConnection() throws SQLException{
         Properties props = new Properties();
         props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(isNamespaceMapped));
-        return DriverManager.getConnection(getUrl(),props);
+        return (PhoenixConnection) DriverManager.getConnection(getUrl(),props);
     }
 
     private Connection getTenantConnection(String tenant) throws SQLException {
@@ -145,7 +156,7 @@ public class ViewIndexIT extends SplitSystemCatalogIT {
         String viewName = "VIEW_" + generateUniqueName();
         String fullViewName = SchemaUtil.getTableName(viewSchemaName, viewName);
 
-        createBaseTable(schemaName, tableName, false, null, null);
+        createBaseTable(schemaName, tableName, false, null, null, true);
         Connection conn1 = getConnection();
         Connection conn2 = getConnection();
         conn1.createStatement().execute("CREATE VIEW " + fullViewName + " AS SELECT * FROM " + fullTableName);
@@ -169,7 +180,7 @@ public class ViewIndexIT extends SplitSystemCatalogIT {
         String fullTableName = SchemaUtil.getTableName(SCHEMA1, tableName);
         String fullViewName = SchemaUtil.getTableName(SCHEMA2, generateUniqueName());
         
-        createBaseTable(SCHEMA1, tableName, true, null, null);
+        createBaseTable(SCHEMA1, tableName, true, null, null, true);
         Connection conn = DriverManager.getConnection(getUrl());
         PreparedStatement stmt = conn.prepareStatement(
                 "UPSERT INTO " + fullTableName
@@ -240,8 +251,47 @@ public class ViewIndexIT extends SplitSystemCatalogIT {
         QueryPlan plan = stmt.unwrap(PhoenixStatement.class).getQueryPlan();
         assertEquals(4, plan.getSplits().size());
     }
-    
-    
+
+    @Test
+    public void testCoprocsOnGlobalMTImmutableViewIndex() throws Exception {
+        testCoprocsOnGlobalViewIndexHelper(true, false);
+    }
+
+    @Test
+    public void testCoprocsOnGlobalNonMTMutableViewIndex() throws Exception {
+        testCoprocsOnGlobalViewIndexHelper(false, true);
+    }
+
+    @Test
+    public void testCoprocsOnGlobalMTMutableViewIndex() throws Exception {
+        testCoprocsOnGlobalViewIndexHelper(true, true);
+    }
+
+    @Test
+    public void testCoprocsOnGlobalNonMTImmutableViewIndex() throws Exception {
+        testCoprocsOnGlobalViewIndexHelper(false, false);
+    }
+
+    private void testCoprocsOnGlobalViewIndexHelper(boolean multiTenant, boolean mutable) throws SQLException, IOException {
+        String schemaName = generateUniqueName();
+        String baseTable =  generateUniqueName();
+        String globalView = generateUniqueName();
+        String globalViewIdx =  generateUniqueName();
+        createBaseTable(schemaName, baseTable, multiTenant, null, null, mutable);
+        try (PhoenixConnection conn = getConnection()) {
+            createView(conn, schemaName, globalView, baseTable);
+            createViewIndex(conn, schemaName, globalViewIdx, globalView, "K1");
+            //now check that the right coprocs are installed
+            Admin admin = conn.getQueryServices().getAdmin();
+            TableDescriptor td = admin.getTableDescriptor(TableName.valueOf(
+                MetaDataUtil.getViewIndexPhysicalName(SchemaUtil.getPhysicalHBaseTableName(
+                    schemaName, baseTable, isNamespaceMapped).getString())));
+            assertTrue(td.hasCoprocessor(GlobalIndexChecker.class.getName()));
+            assertFalse(td.hasCoprocessor(IndexRegionObserver.class.getName()));
+            assertFalse(td.hasCoprocessor(Indexer.class.getName()));
+        }
+    }
+
     @Test
     public void testMultiTenantViewGlobalIndex() throws Exception {
         String baseTable =  SchemaUtil.getTableName(SCHEMA1, generateUniqueName());
@@ -550,7 +600,7 @@ public class ViewIndexIT extends SplitSystemCatalogIT {
         String tenantViewIndexName = "TV_" + generateUniqueName();
         Connection globalConn = getConnection();
         Connection tenantConn = getTenantConnection(TENANT1);
-        createBaseTable(SCHEMA1, tableName, true, 0, null);
+        createBaseTable(SCHEMA1, tableName, true, 0, null, true);
         createView(globalConn, SCHEMA1, globalViewName, tableName);
         createViewIndex(globalConn, SCHEMA1, globalViewIndexName, globalViewName, "v1");
         createView(tenantConn, SCHEMA1, tenantViewName, tableName);

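For reference, the coprocessor check that testCoprocsOnGlobalViewIndexHelper performs can be run outside the test harness roughly as shown below. This is a minimal sketch rather than part of the commit: the JDBC URL and the MY_SCHEMA.MY_TABLE names are placeholders, and the Admin/TableDescriptor calls simply mirror the ones used in the test above.

import java.sql.DriverManager;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.phoenix.hbase.index.IndexRegionObserver;
import org.apache.phoenix.hbase.index.Indexer;
import org.apache.phoenix.index.GlobalIndexChecker;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.util.MetaDataUtil;
import org.apache.phoenix.util.SchemaUtil;

public class ViewIndexCoprocCheck {
    public static void main(String[] args) throws Exception {
        // Placeholder URL, schema, and table names; assumes MY_SCHEMA.MY_TABLE already
        // has at least one global view index, so its physical view-index table exists.
        try (PhoenixConnection conn = DriverManager
                .getConnection("jdbc:phoenix:localhost").unwrap(PhoenixConnection.class)) {
            Admin admin = conn.getQueryServices().getAdmin();
            // Shared physical HBase table backing all view indexes of the base table
            // (namespace mapping assumed to be off here).
            TableDescriptor td = admin.getTableDescriptor(TableName.valueOf(
                MetaDataUtil.getViewIndexPhysicalName(SchemaUtil.getPhysicalHBaseTableName(
                    "MY_SCHEMA", "MY_TABLE", false).getString())));
            // After PHOENIX-5560 the view-index table should carry GlobalIndexChecker,
            // but neither IndexRegionObserver nor the old Indexer.
            System.out.println("GlobalIndexChecker:  "
                + td.hasCoprocessor(GlobalIndexChecker.class.getName()));  // expect true
            System.out.println("IndexRegionObserver: "
                + td.hasCoprocessor(IndexRegionObserver.class.getName())); // expect false
            System.out.println("Indexer:             "
                + td.hasCoprocessor(Indexer.class.getName()));             // expect false
        }
    }
}

With the fix in place, the first line should print true and the other two false, matching the assertions in the test.
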

[phoenix] 01/02: PHOENIX-5508 - ALTER INDEX REBUILD removes all rows from a simple global index

Posted by gj...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

gjacoby pushed a commit to branch 4.x-HBase-1.5
in repository https://gitbox.apache.org/repos/asf/phoenix.git

commit 99cfa7dfdaeb52022ec452f04633ed2a416ce86c
Author: Geoffrey Jacoby <gj...@apache.org>
AuthorDate: Fri Nov 8 14:13:27 2019 -0800

    PHOENIX-5508 - ALTER INDEX REBUILD removes all rows from a simple global index
---
 .../apache/phoenix/end2end/index/AlterIndexIT.java | 73 ++++++++++++++++++++++
 .../org/apache/phoenix/schema/MetaDataClient.java  | 32 ++++++++--
 2 files changed, 100 insertions(+), 5 deletions(-)

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AlterIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AlterIndexIT.java
new file mode 100644
index 0000000..a01de4d
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/AlterIndexIT.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.end2end.index;
+
+import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+
+public class AlterIndexIT extends ParallelStatsDisabledIT {
+
+    @Test
+    public void testAlterIndexRebuildNoAsync() throws Exception {
+        String indexName = "I_" + generateUniqueName();
+        String tableName = "T_" + generateUniqueName();
+        try (Connection conn = DriverManager.getConnection(getUrl())) {
+            createAndPopulateTable(conn, tableName);
+            Assert.assertEquals(2, TestUtil.getRowCount(conn, tableName));
+            createIndex(conn, indexName, tableName, "val1", "val2, val3");
+            Assert.assertEquals(2, TestUtil.getRowCount(conn, indexName));
+            rebuildIndex(conn, indexName, tableName, false);
+            Assert.assertEquals(2, TestUtil.getRowCount(conn, indexName));
+        }
+    }
+
+    private void createAndPopulateTable(Connection conn, String tableName) throws Exception {
+        conn.createStatement().execute("create table " + tableName +
+            " (id varchar(10) not null primary key, val1 varchar(10), " +
+            "val2 varchar(10), val3 varchar(10))");
+        conn.createStatement().execute("upsert into " + tableName + " " +
+            "values ('a', 'ab', 'abc', 'abcd')");
+        conn.commit();
+        conn.createStatement().execute("upsert into " + tableName +
+            " values ('b', 'bc', 'bcd', 'bcde')");
+        conn.commit();
+    }
+
+    private void createIndex(Connection conn, String indexName, String tableName,
+                                      String columns, String includeColumns)
+        throws SQLException {
+        String ddl = "CREATE INDEX " + indexName + " ON " + tableName + " (" + columns + ")" +
+            " INCLUDE (" + includeColumns + ")";
+        conn.createStatement().execute(ddl);
+    }
+
+    private void rebuildIndex(Connection conn, String indexName, String tableName, boolean async)
+        throws SQLException {
+        String format = "ALTER INDEX %s ON %s REBUILD" + (async ? " ASYNC" : "");
+        String sql = String.format(format, indexName, tableName);
+        conn.createStatement().execute(sql);
+        conn.commit();
+
+    }
+}
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index d290333..28e3441 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -137,6 +137,7 @@ import org.apache.hadoop.hbase.HColumnDescriptor;
 import com.google.gson.JsonObject;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.ClusterConnection;
 import org.apache.hadoop.hbase.client.Delete;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
@@ -4410,11 +4411,31 @@ public class MetaDataClient {
             if (newIndexState == PIndexState.BUILDING && !isAsync) {
                 PTable index = indexRef.getTable();
                 // First delete any existing rows of the index
-                Long scn = connection.getSCN();
-                long ts = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
-                MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(indexRef), null, null, Collections.<PColumn>emptyList(), ts);
-                connection.getQueryServices().updateData(plan);
-                NamedTableNode dataTableNode = NamedTableNode.create(null, TableName.create(schemaName, dataTableName), Collections.<ColumnDef>emptyList());
+                if (index.getIndexType().equals(IndexType.GLOBAL) && index.getViewIndexId() == null){
+                    //for a global index of a normal base table, it's safe to just truncate and
+                    //rebuild. We preserve splits to reduce the amount of splitting we need to do
+                    //during rebuild
+                    org.apache.hadoop.hbase.TableName physicalTableName =
+                        org.apache.hadoop.hbase.TableName.valueOf(index.getPhysicalName().getBytes());
+                    try (Admin admin = connection.getQueryServices().getAdmin()) {
+                        admin.disableTable(physicalTableName);
+                        admin.truncateTable(physicalTableName, true);
+                        //truncateTable automatically re-enables when it's done
+                    } catch(IOException ie) {
+                        String failedTable = physicalTableName.getNameAsString();
+                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNKNOWN_ERROR_CODE).
+                            setMessage("Error when truncating index table [" + failedTable +
+                                "] before rebuilding: " + ie.getMessage()).
+                            setTableName(failedTable).build().buildException();
+                    }
+                } else {
+                    Long scn = connection.getSCN();
+                    long ts = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
+                    MutationPlan plan = new PostDDLCompiler(connection).compile(Collections.singletonList(indexRef), null, null, Collections.<PColumn>emptyList(), ts);
+                    connection.getQueryServices().updateData(plan);
+                }
+                NamedTableNode dataTableNode = NamedTableNode.create(null,
+                    TableName.create(schemaName, dataTableName), Collections.<ColumnDef>emptyList());
                 // Next rebuild the index
                 connection.setAutoCommit(true);
                 if (connection.getSCN() != null) {
@@ -4423,6 +4444,7 @@ public class MetaDataClient {
                 TableRef dataTableRef = FromCompiler.getResolver(dataTableNode, connection).getTables().get(0);
                 return buildIndex(index, dataTableRef);
             }
+
             return new MutationState(1, 1000, connection);
         } catch (TableNotFoundException e) {
             if (!statement.ifExists()) {
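
For context on the new truncate-and-rebuild branch in the MetaDataClient hunk above: Admin.truncateTable takes preserveSplits as its second argument, which is how the index table's existing region boundaries survive the rebuild. A minimal standalone sketch of that pattern follows; the HBase connection setup and the index table name are placeholders, not taken from the commit.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TruncatePreservingSplits {
    public static void main(String[] args) throws Exception {
        // Placeholder physical name of a global index table.
        TableName indexTable = TableName.valueOf("MY_SCHEMA.MY_GLOBAL_INDEX");
        try (Connection hbaseConn =
                 ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = hbaseConn.getAdmin()) {
            // truncateTable requires the table to be disabled first ...
            admin.disableTable(indexTable);
            // ... and re-enables it when finished; passing true preserves the existing
            // region splits, so the subsequent rebuild does not start from a single region.
            admin.truncateTable(indexTable, true);
        }
    }
}

The else branch keeps the previous delete-based path for local and view indexes, which share their physical HBase table with other data and therefore cannot simply be truncated.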