You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by vi...@apache.org on 2020/06/29 17:06:10 UTC
[hive] branch master updated: HIVE-23573: Advance the write id for
the table for DDL (Kishen Das reviewed by Peter Vary, Vihang Karajgaonkar)
This is an automated email from the ASF dual-hosted git repository.
vihangk1 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 6440d93 HIVE-23573: Advance the write id for the table for DDL (Kishen Das reviewed by Peter Vary, Vihang Karajgaonkar)
6440d93 is described below
commit 6440d93981e6d6aab59ecf2e77ffa45cd84d47de
Author: Kishen Das <ki...@cloudera.com>
AuthorDate: Mon Jun 29 09:46:12 2020 -0700
HIVE-23573: Advance the write id for the table for DDL (Kishen Das reviewed by Peter Vary, Vihang Karajgaonkar)
---
...estReplicationScenariosAcidTablesBootstrap.java | 4 ++--
.../add/AlterTableAddConstraintDesc.java | 2 +-
.../table/misc/owner/AlterTableSetOwnerDesc.java | 2 +-
.../add/AbstractAddPartitionAnalyzer.java | 17 +++++++++++++++++
.../add/AlterTableAddPartitionAnalyzer.java | 2 ++
.../partition/add/AlterTableAddPartitionDesc.java | 22 ++++++++++++++++++++--
.../drop/AlterTableDropPartitionDesc.java | 21 +++++++++++++++++++--
.../storage/cluster/AlterTableClusteredByDesc.java | 2 +-
.../storage/cluster/AlterTableIntoBucketsDesc.java | 2 +-
.../cluster/AlterTableNotClusteredDesc.java | 2 +-
.../storage/cluster/AlterTableNotSortedDesc.java | 2 +-
.../storage/compact/AlterTableCompactDesc.java | 20 ++++++++++++++++++--
.../storage/serde/AlterTableSetSerdeDesc.java | 2 +-
.../storage/serde/AlterTableSetSerdePropsDesc.java | 2 +-
.../fileformat/AlterTableSetFileFormatDesc.java | 2 +-
.../org/apache/hadoop/hive/ql/metadata/Table.java | 17 +++++++++++++++++
.../hive/ql/parse/RewriteSemanticAnalyzer.java | 8 +++-----
.../hadoop/hive/ql/stats/ColStatsProcessor.java | 8 ++++++--
.../apache/hadoop/hive/ql/TestTxnCommands2.java | 2 +-
.../udf/generic/TestGenericUDTFGetSQLSchema.java | 1 +
.../org/apache/hive/streaming/TestStreaming.java | 10 ++++++----
21 files changed, 121 insertions(+), 29 deletions(-)
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java
index b0c3a9e..5d94db7 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java
@@ -190,10 +190,10 @@ public class TestReplicationScenariosAcidTablesBootstrap
prepareIncNonAcidData(primaryDbName);
prepareIncAcidData(primaryDbName);
// Allocate write ids for tables t1 and t2 for all txns
- // t1=5+2(insert) and t2=5+5(insert, alter add column)
+ // t1=5+2(insert) and t2=5+6(insert, alter add column); ALTER now also creates a transaction
Map<String, Long> tables = new HashMap<>();
tables.put("t1", numTxns+2L);
- tables.put("t2", numTxns+5L);
+ tables.put("t2", numTxns+6L);
allocateWriteIdsForTables(primaryDbName, tables, txnHandler, txns, primaryConf);
// Bootstrap dump with open txn timeout as 1s.
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/add/AlterTableAddConstraintDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/add/AlterTableAddConstraintDesc.java
index c05abfa..4bcbf4a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/add/AlterTableAddConstraintDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/constraint/add/AlterTableAddConstraintDesc.java
@@ -41,6 +41,6 @@ public class AlterTableAddConstraintDesc extends AbstractAlterTableWithConstrain
@Override
public boolean mayNeedWriteId() {
- return false;
+ return true;
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerDesc.java
index 2eb8f99..c885e19 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/misc/owner/AlterTableSetOwnerDesc.java
@@ -47,6 +47,6 @@ public class AlterTableSetOwnerDesc extends AbstractAlterTableDesc {
@Override
public boolean mayNeedWriteId() {
- return false;
+ return true;
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java
index e1c8718..0736f16 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
import org.apache.hadoop.hive.ql.QueryState;
import org.apache.hadoop.hive.ql.ddl.DDLWork;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId;
import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer;
import org.apache.hadoop.hive.ql.ddl.table.AlterTableType;
import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils;
@@ -130,4 +131,20 @@ abstract class AbstractAddPartitionAnalyzer extends AbstractAlterTableAnalyzer {
protected abstract void postProcess(TableName tableName, Table table, AlterTableAddPartitionDesc desc,
Task<DDLWork> ddlTask) throws SemanticException;
+
+ // Equivalent to acidSinks, but for DDL operations that change data.
+ private DDLDescWithWriteId ddlDescWithWriteId;
+
+ protected void setAcidDdlDesc(DDLDescWithWriteId descWithWriteId) {
+ if (this.ddlDescWithWriteId != null) {
+ throw new IllegalStateException("ddlDescWithWriteId is already set: " + this.ddlDescWithWriteId);
+ }
+ this.ddlDescWithWriteId = descWithWriteId;
+ }
+
+ @Override
+ public DDLDescWithWriteId getAcidDdlDesc() {
+ return ddlDescWithWriteId;
+ }
+
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionAnalyzer.java
index a3ee65f..8d625a4 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionAnalyzer.java
@@ -68,6 +68,8 @@ public class AlterTableAddPartitionAnalyzer extends AbstractAddPartitionAnalyzer
return;
}
+ setAcidDdlDesc(desc);
+
Long writeId = null;
int stmtId = 0;
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionDesc.java
index 61af383..c4b2dab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AlterTableAddPartitionDesc.java
@@ -25,7 +25,8 @@ import java.util.Map;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Order;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
-import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId;
+import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
import org.apache.hadoop.hive.ql.plan.Explain;
import org.apache.hadoop.hive.ql.plan.Explain.Level;
@@ -34,7 +35,7 @@ import org.apache.hadoop.hive.ql.plan.Explain.Level;
* DDL task description for ALTER TABLE ... ADD PARTITION ... commands.
*/
@Explain(displayName = "Add Partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class AlterTableAddPartitionDesc implements DDLDesc, Serializable {
+public class AlterTableAddPartitionDesc implements DDLDescWithWriteId, Serializable {
private static final long serialVersionUID = 1L;
/**
@@ -167,6 +168,7 @@ public class AlterTableAddPartitionDesc implements DDLDesc, Serializable {
private final String tableName;
private final boolean ifNotExists;
private final List<PartitionDesc> partitions;
+ private Long writeId;
private ReplicationSpec replicationSpec = null; // TODO: make replicationSpec final too
@@ -217,4 +219,20 @@ public class AlterTableAddPartitionDesc implements DDLDesc, Serializable {
}
return replicationSpec;
}
+
+ @Override
+ public void setWriteId(long writeId) {
+ this.writeId = writeId;
+ }
+
+ @Override
+ public String getFullTableName() {
+ return AcidUtils.getFullTableName(dbName,tableName);
+ }
+
+ @Override
+ public boolean mayNeedWriteId() {
+ return true;
+ }
+
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionDesc.java
index dbb3289..bb64bd6 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/drop/AlterTableDropPartitionDesc.java
@@ -23,7 +23,7 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Map;
-import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId;
import org.apache.hadoop.hive.common.TableName;
import org.apache.hadoop.hive.ql.parse.ReplicationSpec;
import org.apache.hadoop.hive.ql.plan.Explain;
@@ -34,7 +34,7 @@ import org.apache.hadoop.hive.ql.plan.Explain.Level;
* DDL task description for ALTER TABLE ... DROP PARTITION ... commands.
*/
@Explain(displayName = "Drop Partition", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class AlterTableDropPartitionDesc implements DDLDesc, Serializable {
+public class AlterTableDropPartitionDesc implements DDLDescWithWriteId, Serializable {
private static final long serialVersionUID = 1L;
/**
@@ -65,6 +65,7 @@ public class AlterTableDropPartitionDesc implements DDLDesc, Serializable {
private final ArrayList<PartitionDesc> partSpecs;
private final boolean ifPurge;
private final ReplicationSpec replicationSpec;
+ private Long writeId;
public AlterTableDropPartitionDesc(TableName tableName, Map<Integer, List<ExprNodeGenericFuncDesc>> partSpecs,
boolean ifPurge, ReplicationSpec replicationSpec) {
@@ -100,4 +101,20 @@ public class AlterTableDropPartitionDesc implements DDLDesc, Serializable {
public ReplicationSpec getReplicationSpec() {
return replicationSpec;
}
+
+ @Override
+ public void setWriteId(long writeId) {
+ this.writeId = writeId;
+ }
+
+ @Override
+ public String getFullTableName() {
+ return getTableName();
+ }
+
+ @Override
+ public boolean mayNeedWriteId() {
+ return true;
+ }
+
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableClusteredByDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableClusteredByDesc.java
index 1922890..9ee01df 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableClusteredByDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableClusteredByDesc.java
@@ -74,6 +74,6 @@ public class AlterTableClusteredByDesc extends AbstractAlterTableDesc {
@Override
public boolean mayNeedWriteId() {
- return false;
+ return true;
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableIntoBucketsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableIntoBucketsDesc.java
index 7be0be1..5914f32 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableIntoBucketsDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableIntoBucketsDesc.java
@@ -49,6 +49,6 @@ public class AlterTableIntoBucketsDesc extends AbstractAlterTableDesc {
@Override
public boolean mayNeedWriteId() {
- return false;
+ return true;
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableNotClusteredDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableNotClusteredDesc.java
index 69f97b3..aa3418a 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableNotClusteredDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableNotClusteredDesc.java
@@ -40,6 +40,6 @@ public class AlterTableNotClusteredDesc extends AbstractAlterTableDesc {
@Override
public boolean mayNeedWriteId() {
- return false;
+ return true;
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableNotSortedDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableNotSortedDesc.java
index 3ff370a..eacc6b8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableNotSortedDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/cluster/AlterTableNotSortedDesc.java
@@ -40,6 +40,6 @@ public class AlterTableNotSortedDesc extends AbstractAlterTableDesc {
@Override
public boolean mayNeedWriteId() {
- return false;
+ return true;
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactDesc.java
index 90e2cd3..f770211 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactDesc.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.hive.ql.ddl.table.storage.compact;
import java.util.Map;
import org.apache.hadoop.hive.common.TableName;
-import org.apache.hadoop.hive.ql.ddl.DDLDesc;
+import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId;
import org.apache.hadoop.hive.ql.plan.Explain;
import org.apache.hadoop.hive.ql.plan.Explain.Level;
@@ -30,12 +30,13 @@ import org.apache.hadoop.hive.ql.plan.Explain.Level;
* commands.
*/
@Explain(displayName = "Compact", explainLevels = { Level.USER, Level.DEFAULT, Level.EXTENDED })
-public class AlterTableCompactDesc implements DDLDesc {
+public class AlterTableCompactDesc implements DDLDescWithWriteId {
private final String tableName;
private final Map<String, String> partitionSpec;
private final String compactionType;
private final boolean isBlocking;
private final Map<String, String> properties;
+ private Long writeId;
public AlterTableCompactDesc(TableName tableName, Map<String, String> partitionSpec, String compactionType,
boolean isBlocking, Map<String, String> properties) {
@@ -71,4 +72,19 @@ public class AlterTableCompactDesc implements DDLDesc {
public Map<String, String> getProperties() {
return properties;
}
+
+ @Override
+ public void setWriteId(long writeId) {
+ this.writeId = writeId;
+ }
+
+ @Override
+ public String getFullTableName() {
+ return tableName;
+ }
+
+ @Override
+ public boolean mayNeedWriteId() {
+ return true;
+ }
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/serde/AlterTableSetSerdeDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/serde/AlterTableSetSerdeDesc.java
index 61a706d..952c219 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/serde/AlterTableSetSerdeDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/serde/AlterTableSetSerdeDesc.java
@@ -49,6 +49,6 @@ public class AlterTableSetSerdeDesc extends AbstractAlterTableDesc {
@Override
public boolean mayNeedWriteId() {
- return false;
+ return true;
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/serde/AlterTableSetSerdePropsDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/serde/AlterTableSetSerdePropsDesc.java
index c0c21af..efdcaab 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/serde/AlterTableSetSerdePropsDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/serde/AlterTableSetSerdePropsDesc.java
@@ -41,6 +41,6 @@ public class AlterTableSetSerdePropsDesc extends AbstractAlterTableDesc {
@Override
public boolean mayNeedWriteId() {
- return false;
+ return true;
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/set/fileformat/AlterTableSetFileFormatDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/set/fileformat/AlterTableSetFileFormatDesc.java
index 0804f50..c260675 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/set/fileformat/AlterTableSetFileFormatDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/set/fileformat/AlterTableSetFileFormatDesc.java
@@ -63,6 +63,6 @@ public class AlterTableSetFileFormatDesc extends AbstractAlterTableDesc {
@Override
public boolean mayNeedWriteId() {
- return false;
+ return true;
}
}
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 61b9fb8..097035c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -1317,4 +1317,21 @@ public class Table implements Serializable {
this.isCheckFetched = tbl.isCheckFetched;
}
+ /**
+ * Compares this table with the given one while ignoring the write id of both.
+ *
+ * @param tbl table to compare with
+ * @return true if the tables are equal when their write ids are ignored
+ */
+ public boolean equalsWithIgnoreWriteId(Table tbl ) {
+ long targetWriteId = getTTable().getWriteId();
+ long entityWriteId = tbl.getTTable().getWriteId();
+ getTTable().setWriteId(0L);
+ tbl.getTTable().setWriteId(0L);
+ boolean result = equals(tbl);
+ getTTable().setWriteId(targetWriteId);
+ tbl.getTTable().setWriteId(entityWriteId);
+ return result;
+ }
+
};
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java
index 0b19f17..a53494b 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/RewriteSemanticAnalyzer.java
@@ -405,11 +405,9 @@ public abstract class RewriteSemanticAnalyzer extends CalcitePlanner {
*/
private boolean isTargetTable(Entity entity, Table targetTable) {
//todo: https://issues.apache.org/jira/browse/HIVE-15048
- /**
- * is this the right way to compare? Should it just compare paths?
- * equals() impl looks heavy weight
- */
- return targetTable.equals(entity.getTable());
+ // Since any DDL now advances the write id, we should ignore the write id
+ // when comparing two tables.
+ return targetTable.equalsWithIgnoreWriteId(entity.getTable());
}
/**
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
index 53c5b1d..2b34e76 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java
@@ -24,6 +24,7 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
+import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.conf.Constants;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
@@ -188,9 +189,12 @@ public class ColStatsProcessor implements IStatsProcessor {
HiveTxnManager txnMgr = AcidUtils.isTransactionalTable(tbl)
? SessionState.get().getTxnMgr() : null;
if (txnMgr != null) {
- request.setValidWriteIdList(AcidUtils.getTableValidWriteIdList(conf,
- AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName())).toString());
request.setWriteId(txnMgr.getAllocatedTableWriteId(tbl.getDbName(), tbl.getTableName()));
+ ValidWriteIdList validWriteIdList =
+ AcidUtils.getTableValidWriteIdList(conf, AcidUtils.getFullTableName(tbl.getDbName(), tbl.getTableName()));
+ if (validWriteIdList != null) {
+ request.setValidWriteIdList(validWriteIdList.toString());
+ }
}
db.setPartitionColumnStatistics(request);
return 0;
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 61f2bd2..cc8c5c8 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -1465,7 +1465,7 @@ public class TestTxnCommands2 {
List<String> r = runStatementOnDriver("select a,b from " + Table.ACIDTBLPART + " order by a,b");
int[][] targetVals = {{2,1},{4,3},{5,6},{7,8}};
Assert.assertEquals(stringifyValues(targetVals), r);
- //currently multi-insrt doesn't allow same table/partition in > 1 output branch
+ //currently multi-insert doesn't allow same table/partition in > 1 output branch
String s = "from " + Table.ACIDTBLPART + " target right outer join " +
Table.NONACIDPART2 + " source on target.a = source.a2 " +
" INSERT INTO TABLE " + Table.ACIDTBLPART + " PARTITION(p='even') select source.a2, source.b2 where source.a2=target.a " +
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDTFGetSQLSchema.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDTFGetSQLSchema.java
index 3615d2b..bce6f6c 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDTFGetSQLSchema.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDTFGetSQLSchema.java
@@ -45,6 +45,7 @@ public class TestGenericUDTFGetSQLSchema {
conf.set("hive.security.authorization.manager",
"org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider");
sessionState = SessionState.start(conf);
+ sessionState.initTxnMgr(conf);
}
@AfterClass
diff --git a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
index 5e0c386..1ec7dbe 100644
--- a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
+++ b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java
@@ -964,8 +964,9 @@ public class TestStreaming {
min = pd.getMinWriteId();
}
}
- Assert.assertEquals(minTxn, min);
- Assert.assertEquals(maxTxn, max);
+ // Add 1 because DDL operations now also advance the write id.
+ Assert.assertEquals(minTxn + 1, min);
+ Assert.assertEquals(maxTxn + 1, max);
InputFormat inf = new OrcInputFormat();
JobConf job = new JobConf();
@@ -1019,8 +1020,9 @@ public class TestStreaming {
min = pd.getMinWriteId();
}
}
- Assert.assertEquals(minTxn, min);
- Assert.assertEquals(maxTxn, max);
+ // Add 1 because DDL operations now also advance the write id.
+ Assert.assertEquals(minTxn + 1, min);
+ Assert.assertEquals(maxTxn + 1, max);
boolean isVectorizationEnabled = conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED);
if (vectorize) {
conf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, true);