Posted to commits@phoenix.apache.org by la...@apache.org on 2019/06/18 23:23:54 UTC

[phoenix] branch 4.x-HBase-1.3 updated: PHOENIX-5355 Speed up BaseIndexIT.

This is an automated email from the ASF dual-hosted git repository.

larsh pushed a commit to branch 4.x-HBase-1.3
in repository https://gitbox.apache.org/repos/asf/phoenix.git


The following commit(s) were added to refs/heads/4.x-HBase-1.3 by this push:
     new 235678e  PHOENIX-5355 Speed up BaseIndexIT.
235678e is described below

commit 235678e97108d49989ff776b47c96b136556dada
Author: Lars Hofhansl <la...@apache.org>
AuthorDate: Tue Jun 18 16:24:19 2019 -0700

    PHOENIX-5355 Speed up BaseIndexIT.
---
 .../org/apache/phoenix/end2end/CreateTableIT.java  | 62 ++++++++++++++
 .../apache/phoenix/end2end/index/BaseIndexIT.java  | 98 ----------------------
 2 files changed, 62 insertions(+), 98 deletions(-)
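
Context for the change below: BaseIndexIT is an abstract, parameterized integration test, so every @Test it contains is re-run once per parameter combination (for example local vs. global index and mutable vs. immutable tables, as the removed localIndex/mutable/tableDDLOptions references suggest). testTableDescriptorPriority does not depend on those parameters, so this commit moves it into the non-parameterized CreateTableIT where it runs exactly once, and drops testMaxIndexesPerTable from BaseIndexIT. The following is only an illustrative sketch of that reasoning, assuming JUnit 4's Parameterized runner; the class and method names are hypothetical and not part of the commit.

    // Illustrative sketch only -- not the actual Phoenix test code.
    // Assumes JUnit 4's Parameterized runner, which re-runs every @Test
    // in the class once per parameter combination returned by data().
    import java.util.Arrays;
    import java.util.Collection;

    import org.junit.Test;
    import org.junit.runner.RunWith;
    import org.junit.runners.Parameterized;
    import org.junit.runners.Parameterized.Parameters;

    @RunWith(Parameterized.class)
    public class ParameterizedIndexExampleIT {
        private final boolean localIndex;
        private final boolean mutable;

        public ParameterizedIndexExampleIT(boolean localIndex, boolean mutable) {
            this.localIndex = localIndex;
            this.mutable = mutable;
        }

        // Four combinations => each @Test below executes four times.
        @Parameters(name = "localIndex={0},mutable={1}")
        public static Collection<Object[]> data() {
            return Arrays.asList(new Object[][] {
                    { false, false }, { false, true },
                    { true, false }, { true, true } });
        }

        @Test
        public void testDependsOnParameters() {
            // Behaviour differs per combination, so repeating it is useful.
        }

        @Test
        public void testIndependentOfParameters() {
            // Runs identically for every combination; relocating such a test
            // to a non-parameterized class (as this commit does) runs it once.
        }
    }
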

diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
index fb6a0ce..1b2b8bd 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/CreateTableIT.java
@@ -17,6 +17,7 @@
  */
 package org.apache.phoenix.end2end;
 
+import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotEquals;
@@ -24,6 +25,7 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.PreparedStatement;
@@ -34,12 +36,17 @@ import java.util.List;
 import java.util.Properties;
 
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryServices;
 import org.apache.phoenix.query.QueryServicesOptions;
@@ -47,10 +54,12 @@ import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
 import org.apache.phoenix.schema.PTable.QualifierEncodingScheme;
 import org.apache.phoenix.schema.PTableKey;
+import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SchemaNotFoundException;
 import org.apache.phoenix.schema.TableAlreadyExistsException;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.junit.Assert;
@@ -802,6 +811,59 @@ public class CreateTableIT extends ParallelStatsDisabledIT {
         }
     }
 
+    /**
+     * Ensure that HTD contains table priorities correctly.
+     */
+    @Test
+    public void testTableDescriptorPriority() throws SQLException, IOException {
+        String tableName = "TBL_" + generateUniqueName();
+        String indexName = "IND_" + generateUniqueName();
+        String fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
+        String fullIndexeName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, indexName);
+        // Check system tables priorities.
+        try (HBaseAdmin admin = driver.getConnectionQueryServices(null, null).getAdmin(); 
+                Connection c = DriverManager.getConnection(getUrl())) {
+            ResultSet rs = c.getMetaData().getTables("", 
+                    "\""+ PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA + "\"", 
+                    null, 
+                    new String[] {PTableType.SYSTEM.toString()});
+            ReadOnlyProps p = c.unwrap(PhoenixConnection.class).getQueryServices().getProps();
+            while (rs.next()) {
+                String schemaName = rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM);
+                String tName = rs.getString(PhoenixDatabaseMetaData.TABLE_NAME);
+                org.apache.hadoop.hbase.TableName hbaseTableName = SchemaUtil.getPhysicalTableName(SchemaUtil.getTableName(schemaName, tName), p);
+                HTableDescriptor htd = admin.getTableDescriptor(hbaseTableName);
+                String val = htd.getValue("PRIORITY");
+                assertNotNull("PRIORITY is not set for table:" + htd, val);
+                assertTrue(Integer.parseInt(val)
+                        >= PhoenixRpcSchedulerFactory.getMetadataPriority(config));
+            }
+            Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
+            String ddl ="CREATE TABLE " + fullTableName + TestUtil.TEST_TABLE_SCHEMA;
+            try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
+                conn.setAutoCommit(false);
+                Statement stmt = conn.createStatement();
+                stmt.execute(ddl);
+                BaseTest.populateTestTable(fullTableName);
+                ddl = "CREATE INDEX " + indexName
+                        + " ON " + fullTableName + " (long_col1, long_col2)"
+                        + " INCLUDE (decimal_col1, decimal_col2)";
+                stmt.execute(ddl);
+            }
+
+            HTableDescriptor dataTable = admin.getTableDescriptor(
+                    org.apache.hadoop.hbase.TableName.valueOf(fullTableName));
+            String val = dataTable.getValue("PRIORITY");
+            assertTrue(val == null || Integer.parseInt(val) < HConstants.HIGH_QOS);
+
+            HTableDescriptor indexTable = admin.getTableDescriptor(
+                    org.apache.hadoop.hbase.TableName.valueOf(fullIndexeName));
+            val = indexTable.getValue("PRIORITY");
+            assertNotNull("PRIORITY is not set for table:" + indexTable, val);
+            assertTrue(Integer.parseInt(val) >= PhoenixRpcSchedulerFactory.getIndexPriority(config));
+        }
+    }
+
     private int checkGuidePostWidth(String tableName) throws Exception {
         try (Connection conn = DriverManager.getConnection(getUrl())) {
             String query =
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
index e98b7bb..c4de820 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/BaseIndexIT.java
@@ -42,19 +42,15 @@ import java.util.List;
 import java.util.Properties;
 import java.util.Random;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HConstants;
-import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
-import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
 import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
-import org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.ColumnResolver;
@@ -62,14 +58,12 @@ import org.apache.phoenix.compile.FromCompiler;
 import org.apache.phoenix.end2end.ParallelStatsDisabledIT;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.jdbc.PhoenixConnection;
-import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.jdbc.PhoenixResultSet;
 import org.apache.phoenix.jdbc.PhoenixStatement;
 import org.apache.phoenix.parse.NamedTableNode;
 import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.query.BaseTest;
 import org.apache.phoenix.query.QueryServices;
-import org.apache.phoenix.query.QueryServicesOptions;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.PTableImpl;
 import org.apache.phoenix.schema.PTableKey;
@@ -81,7 +75,6 @@ import org.apache.phoenix.util.EnvironmentEdgeManager;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.PropertiesUtil;
 import org.apache.phoenix.util.QueryUtil;
-import org.apache.phoenix.util.ReadOnlyProps;
 import org.apache.phoenix.util.SchemaUtil;
 import org.apache.phoenix.util.TestUtil;
 import org.apache.phoenix.util.TransactionUtil;
@@ -1152,61 +1145,6 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT {
         assertNull(tableScanner.next());
     }
 
-    /**
-     * Ensure that HTD contains table priorities correctly.
-     */
-    @Test
-    public void testTableDescriptorPriority() throws SQLException, IOException {
-        String tableName = "TBL_" + generateUniqueName();
-        String indexName = "IND_" + generateUniqueName();
-        String fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
-        String fullIndexeName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, indexName);
-        // Check system tables priorities.
-        try (HBaseAdmin admin = driver.getConnectionQueryServices(null, null).getAdmin(); 
-                Connection c = DriverManager.getConnection(getUrl())) {
-            ResultSet rs = c.getMetaData().getTables("", 
-                    "\""+ PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA + "\"", 
-                    null, 
-                    new String[] {PTableType.SYSTEM.toString()});
-            ReadOnlyProps p = c.unwrap(PhoenixConnection.class).getQueryServices().getProps();
-            while (rs.next()) {
-                String schemaName = rs.getString(PhoenixDatabaseMetaData.TABLE_SCHEM);
-                String tName = rs.getString(PhoenixDatabaseMetaData.TABLE_NAME);
-                org.apache.hadoop.hbase.TableName hbaseTableName = SchemaUtil.getPhysicalTableName(SchemaUtil.getTableName(schemaName, tName), p);
-                HTableDescriptor htd = admin.getTableDescriptor(hbaseTableName);
-                String val = htd.getValue("PRIORITY");
-                assertNotNull("PRIORITY is not set for table:" + htd, val);
-                assertTrue(Integer.parseInt(val)
-                        >= PhoenixRpcSchedulerFactory.getMetadataPriority(config));
-            }
-            Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-            String ddl ="CREATE TABLE " + fullTableName + TestUtil.TEST_TABLE_SCHEMA + tableDDLOptions;
-            try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
-                conn.setAutoCommit(false);
-                Statement stmt = conn.createStatement();
-                stmt.execute(ddl);
-                BaseTest.populateTestTable(fullTableName);
-                ddl = "CREATE " + (localIndex ? "LOCAL" : "") + " INDEX " + indexName
-                        + " ON " + fullTableName + " (long_col1, long_col2)"
-                        + " INCLUDE (decimal_col1, decimal_col2)";
-                stmt.execute(ddl);
-            }
-
-            HTableDescriptor dataTable = admin.getTableDescriptor(
-                    org.apache.hadoop.hbase.TableName.valueOf(fullTableName));
-            String val = dataTable.getValue("PRIORITY");
-            assertTrue(val == null || Integer.parseInt(val) < HConstants.HIGH_QOS);
-
-            if (!localIndex && mutable) {
-                HTableDescriptor indexTable = admin.getTableDescriptor(
-                        org.apache.hadoop.hbase.TableName.valueOf(fullIndexeName));
-                val = indexTable.getValue("PRIORITY");
-                assertNotNull("PRIORITY is not set for table:" + indexTable, val);
-                assertTrue(Integer.parseInt(val) >= PhoenixRpcSchedulerFactory.getIndexPriority(config));
-            }
-        }
-    }
-
     @Test
     public void testQueryBackToDataTableWithDescPKColumn() throws SQLException {
         doTestQueryBackToDataTableWithDescPKColumn(true);
@@ -1304,40 +1242,4 @@ public abstract class BaseIndexIT extends ParallelStatsDisabledIT {
         }
     }
 
-    @Test
-    public void testMaxIndexesPerTable() throws SQLException {
-        String tableName = "TBL_" + generateUniqueName();
-        String indexName = "IND_" + generateUniqueName();
-        String fullTableName = SchemaUtil.getTableName(TestUtil.DEFAULT_SCHEMA_NAME, tableName);
-        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
-        try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
-            Configuration conf =
-                    conn.unwrap(PhoenixConnection.class).getQueryServices().getConfiguration();
-            int maxIndexes =
-                    conf.getInt(QueryServices.MAX_INDEXES_PER_TABLE,
-                        QueryServicesOptions.DEFAULT_MAX_INDEXES_PER_TABLE);
-            conn.createStatement()
-                    .execute("CREATE TABLE " + fullTableName
-                            + " (k VARCHAR NOT NULL PRIMARY KEY, \"V1\" VARCHAR, \"v2\" VARCHAR)"
-                            + tableDDLOptions);
-            for (int i = 0; i < maxIndexes; i++) {
-                conn.createStatement().execute("CREATE " + (localIndex ? "LOCAL " : "") + "INDEX "
-                        + indexName + i + " ON " + fullTableName + "(\"v2\") INCLUDE (\"V1\")");
-            }
-            try {
-                conn.createStatement()
-                        .execute("CREATE " + (localIndex ? "LOCAL " : "") + "INDEX " + indexName
-                                + maxIndexes + " ON " + fullTableName
-                                + "(\"v2\") INCLUDE (\"V1\")");
-                fail("Expected exception TOO_MANY_INDEXES");
-            } catch (SQLException e) {
-                assertEquals(e.getErrorCode(), SQLExceptionCode.TOO_MANY_INDEXES.getErrorCode());
-            }
-            conn.createStatement()
-                    .execute("CREATE " + (localIndex ? "LOCAL " : "") + "INDEX IF NOT EXISTS "
-                            + indexName + "0" + " ON " + fullTableName
-                            + "(\"v2\") INCLUDE (\"V1\")");
-        }
-    }
-
 }