You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by dk...@apache.org on 2022/03/21 07:50:52 UTC
[hive] branch master updated: HIVE-25956: Non blocking RENAME TABLE implementation (Denys Kuzmenko, reviewed by Laszlo Pinter)
This is an automated email from the ASF dual-hosted git repository.
dkuzmenko pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new ff26c39 HIVE-25956: Non blocking RENAME TABLE implementation (Denys Kuzmenko, reviewed by Laszlo Pinter)
ff26c39 is described below
commit ff26c392d58439267af48d80a59a92dabb5cb3ce
Author: Denys Kuzmenko <dk...@cloudera.com>
AuthorDate: Mon Mar 21 08:50:40 2022 +0100
HIVE-25956: Non blocking RENAME TABLE implementation (Denys Kuzmenko, reviewed by Laszlo Pinter)
Closes #3022
---
.../ddl/table/AbstractBaseAlterTableAnalyzer.java | 2 +-
.../drop/AlterTableDropConstraintAnalyzer.java | 2 +-
.../apache/hadoop/hive/ql/hooks/WriteEntity.java | 10 +-
.../org/apache/hadoop/hive/ql/io/AcidUtils.java | 5 +
.../hadoop/hive/ql/lockmgr/TestDbTxnManager2.java | 102 +++++++++++++++++++++
.../hadoop/hive/metastore/HiveAlterHandler.java | 1 +
6 files changed, 118 insertions(+), 4 deletions(-)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java
index e48926e..03aa022 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java
@@ -132,7 +132,7 @@ public abstract class AbstractBaseAlterTableAnalyzer extends BaseSemanticAnalyze
// See HIVE-16688 for use cases.
return WriteType.DDL_EXCLUSIVE;
}
- return WriteEntity.determineAlterTableWriteType(op);
+ return WriteEntity.determineAlterTableWriteType(op, table, conf);
}
protected void validateAlterTableType(Table table, AlterTableType op, boolean expectView)
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/drop/AlterTableDropConstraintAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/drop/AlterTableDropConstraintAnalyzer.java
index 2dfa4c4..8392d31 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/drop/AlterTableDropConstraintAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/drop/AlterTableDropConstraintAnalyzer.java
@@ -58,7 +58,7 @@ public class AlterTableDropConstraintAnalyzer extends AbstractAlterTableAnalyzer
setAcidDdlDesc(desc);
writeType = WriteType.DDL_EXCLUSIVE;
} else {
- writeType = WriteEntity.determineAlterTableWriteType(AlterTableType.DROP_CONSTRAINT);
+ writeType = WriteEntity.determineAlterTableWriteType(AlterTableType.DROP_CONSTRAINT, table, conf);
}
inputs.add(new ReadEntity(table));
WriteEntity alterTableOutput = new WriteEntity(table, writeType);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
index 677ef30..21ae79a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/WriteEntity.java
@@ -18,10 +18,13 @@
package org.apache.hadoop.hive.ql.hooks;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.DataConnector;
import org.apache.hadoop.hive.metastore.api.Database;
import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.metadata.DummyPartition;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
@@ -202,7 +205,7 @@ public class WriteEntity extends Entity implements Serializable {
* @param op Operation type from the alter table description
* @return the write type this should use.
*/
- public static WriteType determineAlterTableWriteType(AlterTableType op) {
+ public static WriteType determineAlterTableWriteType(AlterTableType op, Table table, Configuration conf) {
switch (op) {
case RENAME_COLUMN:
case CLUSTERED_BY:
@@ -222,7 +225,6 @@ public class WriteEntity extends Entity implements Serializable {
case INTO_BUCKETS:
case ALTERPARTITION:
case ADDCOLS:
- case RENAME:
case TRUNCATE:
case MERGEFILES:
case ADD_CONSTRAINT:
@@ -230,6 +232,10 @@ public class WriteEntity extends Entity implements Serializable {
case OWNER:
return WriteType.DDL_EXCLUSIVE;
+ case RENAME:
+ return AcidUtils.isLocklessReadsSupported(table, conf) ?
+ WriteType.DDL_EXCL_WRITE : WriteType.DDL_EXCLUSIVE;
+
case ADDPARTITION:
case SET_SERDE_PROPS:
case ADDPROPS:
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
index 3dda052..ebf2d79 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
@@ -409,6 +409,11 @@ public class AcidUtils {
+ String.format(DELTA_DIGITS, visibilityTxnId);
}
+ public static boolean isLocklessReadsSupported(Table table, Configuration conf) {
+ return HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_LOCKLESS_READS_ENABLED)
+ && AcidUtils.isTransactionalTable(table);
+ }
+
/**
* Represents bucketId and copy_N suffix
*/
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index b78a93f..c3f4cd6 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -3592,4 +3592,106 @@ public class TestDbTxnManager2 extends DbTxnManagerEndToEndTestBase{
driver.getFetchTask().fetch(res);
Assert.assertEquals("No records found", 2, res.size());
}
+
+ @Test
+ public void testRenameTableNonBlocking() throws Exception {
+ testRenameTable(false);
+ }
+ @Test
+ public void testRenameTableBlocking() throws Exception {
+ testRenameTable(true);
+ }
+
+ private void testRenameTable(boolean blocking) throws Exception {
+ dropTable(new String[] {"tab_acid", "tab_acid_v2"});
+ FileSystem fs = FileSystem.get(conf);
+
+ HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_ACID_LOCKLESS_READS_ENABLED, !blocking);
+ HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_LOCKS_PARTITION_THRESHOLD, 1);
+ driver = Mockito.spy(driver);
+
+ HiveConf.setBoolVar(driver2.getConf(), HiveConf.ConfVars.HIVE_ACID_LOCKLESS_READS_ENABLED, !blocking);
+ driver2 = Mockito.spy(driver2);
+
+ driver.run("create table if not exists tab_acid (a int, b int) partitioned by (p string) " +
+ "stored as orc TBLPROPERTIES ('transactional'='true')");
+ driver.run("insert into tab_acid partition(p) (a,b,p) values(1,2,'foo'),(3,4,'bar')");
+
+ driver.compileAndRespond("select * from tab_acid");
+ List<String> res = new ArrayList<>();
+
+ driver.lockAndRespond();
+ List<ShowLocksResponseElement> locks = getLocks();
+ Assert.assertEquals("Unexpected lock count", 1, locks.size());
+
+ checkLock(LockType.SHARED_READ,
+ LockState.ACQUIRED, "default", "tab_acid", null, locks);
+
+ DbTxnManager txnMgr2 = (DbTxnManager) TxnManagerFactory.getTxnManagerFactory().getTxnManager(conf);
+ swapTxnManager(txnMgr2);
+ driver2.compileAndRespond("alter table tab_acid rename to tab_acid_v2");
+
+ if (blocking) {
+ txnMgr2.acquireLocks(driver2.getPlan(), ctx, null, false);
+ locks = getLocks();
+
+ ShowLocksResponseElement checkLock = checkLock(LockType.EXCLUSIVE,
+ LockState.WAITING, "default", "tab_acid", null, locks);
+
+ swapTxnManager(txnMgr);
+ Mockito.doNothing().when(driver).lockAndRespond();
+ driver.run();
+
+ driver.getFetchTask().fetch(res);
+ swapTxnManager(txnMgr2);
+
+ FieldSetter.setField(txnMgr2, txnMgr2.getClass().getDeclaredField("numStatements"), 0);
+ txnMgr2.getMS().unlock(checkLock.getLockid());
+ }
+ driver2.lockAndRespond();
+ locks = getLocks();
+ Assert.assertEquals("Unexpected lock count", blocking ? 1 : 2, locks.size());
+
+ checkLock(blocking ? LockType.EXCLUSIVE : LockType.EXCL_WRITE,
+ LockState.ACQUIRED, "default", "tab_acid", null, locks);
+
+ Mockito.doNothing().when(driver2).lockAndRespond();
+ driver2.run();
+
+ if (!blocking) {
+ swapTxnManager(txnMgr);
+ Mockito.doNothing().when(driver).lockAndRespond();
+ driver.run();
+ }
+ Mockito.reset(driver, driver2);
+
+ FileStatus[] stat = fs.listStatus(new Path(getWarehouseDir()),
+ t -> t.getName().matches("tab_acid" + (blocking ? "_v2" : SOFT_DELETE_TABLE_PATTERN)));
+ if (1 != stat.length) {
+ Assert.fail("Table couldn't be found on FS");
+ }
+ driver.getFetchTask().fetch(res);
+ Assert.assertEquals("Expecting 2 rows and found " + res.size(), 2, res.size());
+
+ try {
+ driver.run("select * from tab_acid");
+ } catch (CommandProcessorException ex) {
+ Assert.assertEquals(ErrorMsg.INVALID_TABLE.getErrorCode(), ex.getResponseCode());
+ }
+
+ driver.run("select * from tab_acid_v2");
+ res = new ArrayList<>();
+ driver.getFetchTask().fetch(res);
+ Assert.assertEquals("Expecting 2 rows and found " + res.size(), 2, res.size());
+
+ //create table with the same name
+ driver.run("create table if not exists tab_acid (a int, b int) partitioned by (p string) " +
+ "stored as orc TBLPROPERTIES ('transactional'='true')");
+ driver.run("insert into tab_acid partition(p) (a,b,p) values(1,2,'foo'),(3,4,'bar')");
+
+ driver.run("select * from tab_acid ");
+ res = new ArrayList<>();
+ driver.getFetchTask().fetch(res);
+ Assert.assertEquals("Expecting 2 rows and found " + res.size(), 2, res.size());
+ }
}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 283ea5b..aa32d60 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -245,6 +245,7 @@ public class HiveAlterHandler implements AlterHandler {
// in the table rename, its data location should not be changed. We can check
// if the table directory was created directly under its database directory to tell
// if it is such a table
+ // The same applies to ACID tables whose directory is suffixed with the `txnId`, as in the `lockless reads` case.
String oldtRelativePath = wh.getDatabaseManagedPath(olddb).toUri()
.relativize(srcPath.toUri()).toString();
boolean tableInSpecifiedLoc = !oldtRelativePath.equalsIgnoreCase(name)