Posted to commits@hive.apache.org by sa...@apache.org on 2018/02/23 16:31:13 UTC
[15/21] hive git commit: HIVE-18192: Introduce WriteID per table rather than using global transaction ID (Sankar Hariappan, reviewed by Eugene Koifman)
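
In brief: instead of stamping ACID deltas with the global transaction ID, each transaction is allocated a monotonically increasing write ID per table it writes, through the metastore's allocateTableWriteIds call; the response carries txn-to-write-ID pairs. A minimal sketch of that call, using only the API surface visible in the diffs below (a live TxnStore handler is assumed, and TxnToWriteId is the response element type):

    import java.util.Collections;
    import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest;
    import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse;
    import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
    import org.apache.hadoop.hive.metastore.txn.TxnStore;

    class WriteIdAllocationSketch {
      static long allocate(TxnStore txnHandler, long txnId, String db, String table)
          throws Exception {
        AllocateTableWriteIdsResponse resp = txnHandler.allocateTableWriteIds(
            new AllocateTableWriteIdsRequest(Collections.singletonList(txnId), db, table));
        TxnToWriteId t2w = resp.getTxnToWriteIds().get(0); // one entry per requested txn
        return t2w.getWriteId();                           // write IDs start at 1 per table
      }
    }

The golden-file updates at the end of this patch follow from the same change: in row__id.q.out, ROW__ID.transactionid values restart per table (1, 2, 3 rather than the global 3, 4, 5).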
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
index 68cde2d..709f021 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java
@@ -26,7 +26,7 @@ import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
@@ -117,8 +117,8 @@ public class TestVectorizedOrcAcidRowBatchReader {
.filesystem(fs)
.bucket(bucket)
.writingBase(false)
- .minimumTransactionId(1)
- .maximumTransactionId(NUM_OTID)
+ .minimumWriteId(1)
+ .maximumWriteId(NUM_OTID)
.inspector(inspector)
.reporter(Reporter.NULL)
.recordIdColumn(1)
@@ -141,7 +141,7 @@ public class TestVectorizedOrcAcidRowBatchReader {
// Create a delete delta that has rowIds divisible by 2 but not by 3. This will produce
// a delete delta file with 50,000 delete events.
long currTxnId = NUM_OTID + 1;
- options.minimumTransactionId(currTxnId).maximumTransactionId(currTxnId);
+ options.minimumWriteId(currTxnId).maximumWriteId(currTxnId);
updater = new OrcRecordUpdater(root, options);
for (long i = 1; i <= NUM_OTID; ++i) {
for (long j = 0; j < NUM_ROWID_PER_OTID; j += 1) {
@@ -154,7 +154,7 @@ public class TestVectorizedOrcAcidRowBatchReader {
// Now, create a delete delta that has rowIds divisible by 3 but not by 2. This will produce
// a delete delta file with 25,000 delete events.
currTxnId = NUM_OTID + 2;
- options.minimumTransactionId(currTxnId).maximumTransactionId(currTxnId);
+ options.minimumWriteId(currTxnId).maximumWriteId(currTxnId);
updater = new OrcRecordUpdater(root, options);
for (long i = 1; i <= NUM_OTID; ++i) {
for (long j = 0; j < NUM_ROWID_PER_OTID; j += 1) {
@@ -167,7 +167,7 @@ public class TestVectorizedOrcAcidRowBatchReader {
// Now, create a delete delta that has rowIds divisible by both 3 and 2. This will produce
// a delete delta file with 25,000 delete events.
currTxnId = NUM_OTID + 3;
- options.minimumTransactionId(currTxnId).maximumTransactionId(currTxnId);
+ options.minimumWriteId(currTxnId).maximumWriteId(currTxnId);
updater = new OrcRecordUpdater(root, options);
for (long i = 1; i <= NUM_OTID; ++i) {
for (long j = 0; j < NUM_ROWID_PER_OTID; j += 1) {
@@ -216,7 +216,7 @@ public class TestVectorizedOrcAcidRowBatchReader {
List<OrcSplit> splits = getSplits();
// Mark one of the transactions as an exception to test that invalid transactions
// are being handled properly.
- conf.set(ValidTxnList.VALID_TXNS_KEY, "14:1:1:5"); // Exclude transaction 5
+ conf.set(ValidWriteIdList.VALID_WRITEIDS_KEY, "tbl:14:1:1:5"); // Exclude write ID 5
VectorizedOrcAcidRowBatchReader vectorizedReader = new VectorizedOrcAcidRowBatchReader(splits.get(0), conf, Reporter.NULL, new VectorizedRowBatchCtx());
if (deleteEventRegistry.equals(ColumnizedDeleteEventRegistry.class.getName())) {
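
Reader-side, the configuration key moves from ValidTxnList.VALID_TXNS_KEY to ValidWriteIdList.VALID_WRITEIDS_KEY, and the serialized form now leads with the table name; judging from the test string, the layout is <table>:<highWatermark>:<minOpenWriteId>:<openWriteIds>:<abortedWriteIds>. A small sketch of consuming it, assuming ValidReaderWriteIdList's string-parsing constructor:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
    import org.apache.hadoop.hive.common.ValidWriteIdList;

    class ValidWriteIdListSketch {
      static void check(Configuration conf) {
        // With the string set above ("tbl:14:1:1:5"): high watermark 14, write ID 5 excluded.
        ValidWriteIdList writeIds =
            new ValidReaderWriteIdList(conf.get(ValidWriteIdList.VALID_WRITEIDS_KEY));
        System.out.println(writeIds.isWriteIdValid(5));  // false: excluded
        System.out.println(writeIds.isWriteIdValid(10)); // true: under the high watermark
      }
    }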
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
index 7650917..d411a8b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java
@@ -21,7 +21,14 @@ import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.hive.common.JavaUtils;
import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
+import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest;
+import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse;
import org.apache.hadoop.hive.metastore.api.DataOperationType;
+import org.apache.hadoop.hive.metastore.api.LockState;
+import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
import org.apache.hadoop.hive.metastore.txn.AcidWriteSetService;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;
@@ -29,11 +36,6 @@ import org.junit.After;
import org.junit.Assert;
import org.apache.hadoop.hive.common.FileUtils;
import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.LockState;
-import org.apache.hadoop.hive.metastore.api.LockType;
-import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
-import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
-import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
import org.apache.hadoop.hive.ql.Context;
import org.apache.hadoop.hive.ql.Driver;
@@ -965,13 +967,16 @@ public class TestDbTxnManager2 {
Assert.assertEquals("Unexpected lock count", 2, locks.size());
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB_PART", "p=blah", locks);
checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "TAB_PART", "p=blah", locks);
- AddDynamicPartitions adp = new AddDynamicPartitions(txnId, "default", "TAB_PART",
+ long writeId = txnMgr.getTableWriteId("default", "TAB_PART");
+ AddDynamicPartitions adp = new AddDynamicPartitions(txnId, writeId, "default", "TAB_PART",
Collections.singletonList("p=blah"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
txnMgr.commitTxn();
adp.setTxnid(txnId2);
+ writeId = txnMgr2.getTableWriteId("default", "TAB_PART");
+ adp.setWriteid(writeId);
txnHandler.addDynamicPartitions(adp);
LockException expectedException = null;
try {
@@ -1022,7 +1027,13 @@ public class TestDbTxnManager2 {
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", null, locks);
//update stmt has p=blah, thus nothing is actually updated and we generate an empty dyn part list
Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
- AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(),
+
+ AllocateTableWriteIdsResponse writeIds
+ = txnHandler.allocateTableWriteIds(new AllocateTableWriteIdsRequest(Collections.singletonList(txnMgr2.getCurrentTxnId()),
+ "default", "tab2"));
+ Assert.assertEquals(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getTxnId());
+
+ AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getWriteId(),
"default", "tab2", Collections.EMPTY_LIST);
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
@@ -1039,7 +1050,12 @@ public class TestDbTxnManager2 {
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", null, locks);//since TAB2 is empty
//update stmt has p=blah, thus nothing is actually updated and we generate an empty dyn part list
Assert.assertEquals(0, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
- adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(),
+
+ writeIds = txnHandler.allocateTableWriteIds(new AllocateTableWriteIdsRequest(Collections.singletonList(txnMgr2.getCurrentTxnId()),
+ "default", "tab2"));
+ Assert.assertEquals(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getTxnId());
+
+ adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getWriteId(),
"default", "tab2", Collections.singletonList("p=two"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);//simulate partition update
@@ -1054,8 +1070,13 @@ public class TestDbTxnManager2 {
Assert.assertEquals(1, TxnDbUtil.countQueryAgent(conf, "select count(*) from WRITE_SET"));
checkCmdOnDriver(driver.compileAndRespond("update TAB2 set b = 17 where a = 1"));//no rows match
txnMgr.acquireLocks(driver.getPlan(), ctx, "Long Running");
+
+ writeIds = txnHandler.allocateTableWriteIds(new AllocateTableWriteIdsRequest(Collections.singletonList(txnMgr.getCurrentTxnId()),
+ "default", "tab2"));
+ Assert.assertEquals(txnMgr.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getTxnId());
+
//so generate empty Dyn Part call
- adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(),
+ adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeIds.getTxnToWriteIds().get(0).getWriteId(),
"default", "tab2", Collections.EMPTY_LIST);
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
@@ -1095,7 +1116,12 @@ public class TestDbTxnManager2 {
checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "TAB_PART", "p=blah", locks);
txnMgr.rollbackTxn();
- AddDynamicPartitions adp = new AddDynamicPartitions(txnId, "default", "TAB_PART",
+ AllocateTableWriteIdsResponse writeIds
+ = txnHandler.allocateTableWriteIds(new AllocateTableWriteIdsRequest(Collections.singletonList(txnId), "default", "TAB_PART"));
+ Assert.assertEquals(txnId, writeIds.getTxnToWriteIds().get(0).getTxnId());
+
+ AddDynamicPartitions adp = new AddDynamicPartitions(txnId, writeIds.getTxnToWriteIds().get(0).getWriteId(),
+ "default", "TAB_PART",
Arrays.asList("p=blah"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
@@ -1172,8 +1198,9 @@ public class TestDbTxnManager2 {
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", "p=one", locks);
//this simulates the completion of txnid:2
//this simulates the completion of txnid:idTxnUpdate1
- AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), "default", "tab2",
- Collections.singletonList("p=two"));
+ long writeId = txnMgr2.getTableWriteId("default", "tab2");
+ AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab2",
+ Collections.singletonList("p=two"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
txnMgr2.commitTxn();//txnid:idTxnUpdate1
@@ -1181,7 +1208,8 @@ public class TestDbTxnManager2 {
Assert.assertEquals("Unexpected lock count", 1, locks.size());
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB2", "p=one", locks);
//completion of txnid:idTxnUpdate2
- adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), "default", "tab2",
+ writeId = txnMgr.getTableWriteId("default", "tab2");
+ adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab2",
Collections.singletonList("p=one"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
@@ -1224,7 +1252,8 @@ public class TestDbTxnManager2 {
checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "TAB1", "p=one", locks);
//this simulates the completion of txnid:idTxnUpdate3
- adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), "default", "tab1",
+ writeId = txnMgr2.getTableWriteId("default", "tab1");
+ adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1",
Collections.singletonList("p=one"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
@@ -1236,7 +1265,8 @@ public class TestDbTxnManager2 {
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks);
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=one", locks);
//completion of txnid:idTxnUpdate4
- adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), "default", "tab1",
+ writeId = txnMgr.getTableWriteId("default", "tab1");
+ adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1",
Collections.singletonList("p=two"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
@@ -1282,7 +1312,8 @@ public class TestDbTxnManager2 {
checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "TAB1", "p=two", locks);
//this simulates the completion of txnid:idTxnUpdate1
- AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), "default", "tab1",
+ long writeId = txnMgr2.getTableWriteId("default", "tab1");
+ AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1",
Collections.singletonList("p=one"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
@@ -1293,7 +1324,8 @@ public class TestDbTxnManager2 {
Assert.assertEquals("Unexpected lock count", 1, locks.size());
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks);
//completion of txnid:idTxnUpdate2
- adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), "default", "tab1",
+ writeId = txnMgr.getTableWriteId("default", "tab1");
+ adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1",
Collections.singletonList("p=two"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
@@ -1338,7 +1370,8 @@ public class TestDbTxnManager2 {
checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "TAB1", "p=two", locks);
//this simulates the completion of txnid:idTxnUpdate1
- AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), "default", "tab1",
+ long writeId = txnMgr2.getTableWriteId("default", "tab1");
+ AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1",
Collections.singletonList("p=one"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
@@ -1349,7 +1382,8 @@ public class TestDbTxnManager2 {
Assert.assertEquals("Unexpected lock count", 1, locks.size());
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks);
//completion of txnid:idTxnUpdate2
- adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), "default", "tab1",
+ writeId = txnMgr.getTableWriteId("default", "tab1");
+ adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1",
Collections.singletonList("p=two"));
adp.setOperationType(DataOperationType.DELETE);
txnHandler.addDynamicPartitions(adp);
@@ -1398,7 +1432,8 @@ public class TestDbTxnManager2 {
checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "TAB1", "p=two", locks);
//this simulates the completion of "Update tab2" txn
- AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), "default", "tab1",
+ long writeId = txnMgr2.getTableWriteId("default", "tab1");
+ AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1",
Collections.singletonList("p=two"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
@@ -1409,7 +1444,8 @@ public class TestDbTxnManager2 {
Assert.assertEquals("Unexpected lock count", 1, locks.size());
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks);
//completion of "delete from tab1" txn
- adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), "default", "tab1",
+ writeId = txnMgr.getTableWriteId("default", "tab1");
+ adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1",
Collections.singletonList("p=two"));
adp.setOperationType(DataOperationType.DELETE);
txnHandler.addDynamicPartitions(adp);
@@ -1467,7 +1503,8 @@ public class TestDbTxnManager2 {
checkLock(LockType.SHARED_WRITE, LockState.WAITING, "default", "TAB1", "p=two", locks);
//this simulates the completion of "delete from tab1" txn
- AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), "default", "tab1",
+ long writeId = txnMgr2.getTableWriteId("default", "tab1");
+ AddDynamicPartitions adp = new AddDynamicPartitions(txnMgr2.getCurrentTxnId(), writeId, "default", "tab1",
Collections.singletonList("p=two"));
adp.setOperationType(DataOperationType.DELETE);
txnHandler.addDynamicPartitions(adp);
@@ -1480,7 +1517,8 @@ public class TestDbTxnManager2 {
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "TAB1", "p=one", locks);
checkLock(LockType.SHARED_WRITE, LockState.ACQUIRED, "default", "TAB1", "p=two", locks);
//completion of txnid:txnIdSelect
- adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), "default", "tab1",
+ writeId = txnMgr.getTableWriteId("default", "tab1");
+ adp = new AddDynamicPartitions(txnMgr.getCurrentTxnId(), writeId, "default", "tab1",
Collections.singletonList("p=two"));
adp.setOperationType(DataOperationType.DELETE);
txnHandler.addDynamicPartitions(adp);
@@ -1653,15 +1691,16 @@ public class TestDbTxnManager2 {
0,
TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1));
//complete 1st txn
- AddDynamicPartitions adp = new AddDynamicPartitions(txnId1, "default", "target",
+ long writeId = txnMgr.getTableWriteId("default", "target");
+ AddDynamicPartitions adp = new AddDynamicPartitions(txnId1, writeId, "default", "target",
Collections.singletonList("p=1/q=3"));//update clause
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
- adp = new AddDynamicPartitions(txnId1, "default", "target",
+ adp = new AddDynamicPartitions(txnId1, writeId, "default", "target",
Arrays.asList("p=1/q=2","p=2/q=2"));//delete clause
adp.setOperationType(DataOperationType.DELETE);
txnHandler.addDynamicPartitions(adp);
- adp = new AddDynamicPartitions(txnId1, "default", "target",
+ adp = new AddDynamicPartitions(txnId1, writeId, "default", "target",
Arrays.asList("p=1/q=2","p=1/q=3","p=1/q=1"));//insert clause
adp.setOperationType(DataOperationType.INSERT);
txnHandler.addDynamicPartitions(adp);
@@ -1718,15 +1757,16 @@ public class TestDbTxnManager2 {
0,
TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId2));
//complete 2nd txn
- adp = new AddDynamicPartitions(txnId2, "default", "target",
+ writeId = txnMgr2.getTableWriteId("default", "target");
+ adp = new AddDynamicPartitions(txnId2, writeId, "default", "target",
Collections.singletonList(cc ? "p=1/q=3" : "p=1/p=2"));//update clause
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
- adp = new AddDynamicPartitions(txnId2, "default", "target",
+ adp = new AddDynamicPartitions(txnId2, writeId, "default", "target",
Arrays.asList("p=1/q=2","p=2/q=2"));//delete clause
adp.setOperationType(DataOperationType.DELETE);
txnHandler.addDynamicPartitions(adp);
- adp = new AddDynamicPartitions(txnId2, "default", "target",
+ adp = new AddDynamicPartitions(txnId2, writeId, "default", "target",
Arrays.asList("p=1/q=2","p=1/q=3","p=1/q=1"));//insert clause
adp.setOperationType(DataOperationType.INSERT);
txnHandler.addDynamicPartitions(adp);
@@ -1965,7 +2005,8 @@ public class TestDbTxnManager2 {
//Plan is using DummyPartition, so can only lock the table... unfortunately
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "default", "target", null, locks);
checkLock(LockType.SHARED_READ, LockState.ACQUIRED, "_dummy_database", "_dummy_table", null, locks);
- AddDynamicPartitions adp = new AddDynamicPartitions(txnid2, "default", "target", Arrays.asList("p=1/q=2","p=1/q=2"));
+ long writeId = txnMgr.getTableWriteId("default", "target");
+ AddDynamicPartitions adp = new AddDynamicPartitions(txnid2, writeId, "default", "target", Arrays.asList("p=1/q=2","p=1/q=2"));
adp.setOperationType(DataOperationType.INSERT);
txnHandler.addDynamicPartitions(adp);
Assert.assertEquals(
@@ -2038,7 +2079,8 @@ public class TestDbTxnManager2 {
0,//because it's using a DP write
TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnId1));
//complete T1 transaction (simulate writing to 2 partitions)
- AddDynamicPartitions adp = new AddDynamicPartitions(txnId1, "default", "target",
+ long writeId = txnMgr.getTableWriteId("default", "target");
+ AddDynamicPartitions adp = new AddDynamicPartitions(txnId1, writeId, "default", "target",
Arrays.asList("p=1/q=2","p=1/q=3"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
@@ -2074,7 +2116,8 @@ public class TestDbTxnManager2 {
TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2));
//complete T2 txn
//simulate Insert into 2 partitions
- adp = new AddDynamicPartitions(txnid2, "default", "target",
+ writeId = txnMgr2.getTableWriteId("default", "target");
+ adp = new AddDynamicPartitions(txnid2, writeId, "default", "target",
Arrays.asList("p=1/q=2","p=1/q=3"));
adp.setOperationType(DataOperationType.INSERT);
txnHandler.addDynamicPartitions(adp);
@@ -2085,7 +2128,7 @@ public class TestDbTxnManager2 {
TxnDbUtil.countQueryAgent(conf, "select count(*) from TXN_COMPONENTS where tc_txnid=" + txnid2 + " and tc_operation_type='i'"));
//simulate Update of 1 partitions; depending on causeConflict, choose one of the partitions
//which was modified by the T1 update stmt or choose a non-conflicting one
- adp = new AddDynamicPartitions(txnid2, "default", "target",
+ adp = new AddDynamicPartitions(txnid2, writeId, "default", "target",
Collections.singletonList(causeConflict ? "p=1/q=2" : "p=1/q=1"));
adp.setOperationType(DataOperationType.UPDATE);
txnHandler.addDynamicPartitions(adp);
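
Every call site above follows one of two routes to the new writeId argument of AddDynamicPartitions: HiveTxnManager.getTableWriteId for the transaction the manager currently holds, or an explicit allocateTableWriteIds round trip against the txn handler. A condensed sketch of the first route, with txnMgr and txnHandler standing in for the test fixtures:

    import java.util.Collections;
    import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
    import org.apache.hadoop.hive.metastore.api.DataOperationType;
    import org.apache.hadoop.hive.metastore.txn.TxnStore;
    import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;

    class DynPartReportSketch {
      static void report(HiveTxnManager txnMgr, TxnStore txnHandler) throws Exception {
        long writeId = txnMgr.getTableWriteId("default", "tab2"); // write ID for the current txn
        AddDynamicPartitions adp = new AddDynamicPartitions(
            txnMgr.getCurrentTxnId(), writeId, "default", "tab2",
            Collections.singletonList("p=two"));
        adp.setOperationType(DataOperationType.UPDATE); // records the partitions as updated
        txnHandler.addDynamicPartitions(adp);
      }
    }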
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java
index 3c007a7..a40ad24 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/TestGenMapRedUtilsCreateConditionalTask.java
@@ -164,7 +164,7 @@ public class TestGenMapRedUtilsCreateConditionalTask {
TableDesc tableDesc = new TableDesc();
reset(mockWork);
when(mockWork.getLoadTableWork()).thenReturn(new LoadTableDesc(
- condOutputPath, tableDesc, null, null));
+ condOutputPath, tableDesc, null));
newWork = GenMapRedUtils.mergeMovePaths(condInputPath, mockWork, lineageState);
assertNotNull(newWork);
assertNotEquals(newWork, mockWork);
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
index 1140391..ffd0445 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestUpdateDeleteSemanticAnalyzer.java
@@ -262,6 +262,7 @@ public class TestUpdateDeleteSemanticAnalyzer {
BaseSemanticAnalyzer sem = SemanticAnalyzerFactory.get(queryState, tree);
SessionState.get().initTxnMgr(conf);
+ SessionState.get().getTxnMgr().openTxn(ctx, conf.getUser());
db = sem.getDb();
// I have to create the tables here (rather than in setup()) because I need the Hive
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
index c337fd5..7ea017a 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
@@ -24,11 +24,15 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.common.ValidWriteIdList;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.TableType;
+import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
+import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest;
+import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse;
import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.MetaException;
@@ -41,6 +45,7 @@ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;
@@ -69,6 +74,7 @@ import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -140,6 +146,7 @@ public abstract class CompactorTest {
boolean isTemporary)
throws TException {
Table table = new Table();
+ table.setTableType(TableType.MANAGED_TABLE.name());
table.setTableName(tableName);
table.setDbName(dbName);
table.setOwner("me");
@@ -150,6 +157,16 @@ public abstract class CompactorTest {
table.setPartitionKeys(partKeys);
}
+ // Set the table as transactional for compaction to work
+ if (parameters == null) {
+ parameters = new HashMap<>();
+ }
+ parameters.put(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL, "true");
+ if (sortCols != null) {
+ // Sort columns are not allowed for a full ACID table, so change it to an insert-only table
+ parameters.put(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES,
+ TransactionalValidationListener.INSERTONLY_TRANSACTIONAL_PROPERTY);
+ }
table.setParameters(parameters);
if (isTemporary) table.setTemporary(true);
@@ -181,6 +198,14 @@ public abstract class CompactorTest {
return txns.get(0);
}
+ protected long allocateWriteId(String dbName, String tblName, long txnid)
+ throws MetaException, TxnAbortedException, NoSuchTxnException {
+ AllocateTableWriteIdsRequest awiRqst
+ = new AllocateTableWriteIdsRequest(Collections.singletonList(txnid), dbName, tblName);
+ AllocateTableWriteIdsResponse awiResp = txnHandler.allocateTableWriteIds(awiRqst);
+ return awiResp.getTxnToWriteIds().get(0).getWriteId();
+ }
+
protected void addDeltaFile(Table t, Partition p, long minTxn, long maxTxn, int numRecords)
throws Exception {
addFile(t, p, minTxn, maxTxn, numRecords, FileType.DELTA, 2, true);
@@ -220,15 +245,19 @@ public abstract class CompactorTest {
return paths;
}
- protected void burnThroughTransactions(int num)
+ protected void burnThroughTransactions(String dbName, String tblName, int num)
throws MetaException, NoSuchTxnException, TxnAbortedException {
- burnThroughTransactions(num, null, null);
+ burnThroughTransactions(dbName, tblName, num, null, null);
}
- protected void burnThroughTransactions(int num, Set<Long> open, Set<Long> aborted)
+ protected void burnThroughTransactions(String dbName, String tblName, int num, Set<Long> open, Set<Long> aborted)
throws MetaException, NoSuchTxnException, TxnAbortedException {
OpenTxnsResponse rsp = txnHandler.openTxns(new OpenTxnRequest(num, "me", "localhost"));
+ AllocateTableWriteIdsRequest awiRqst = new AllocateTableWriteIdsRequest(rsp.getTxn_ids(), dbName, tblName);
+ AllocateTableWriteIdsResponse awiResp = txnHandler.allocateTableWriteIds(awiRqst);
+ int i = 0;
for (long tid : rsp.getTxn_ids()) {
+ assert(awiResp.getTxnToWriteIds().get(i++).getTxnId() == tid);
if (aborted != null && aborted.contains(tid)) {
txnHandler.abortTxn(new AbortTxnRequest(tid));
} else if (open == null || (open != null && !open.contains(tid))) {
@@ -350,7 +379,7 @@ public abstract class CompactorTest {
@Override
public RawReader<Text> getRawReader(Configuration conf, boolean collapseEvents, int bucket,
- ValidTxnList validTxnList,
+ ValidWriteIdList validWriteIdList,
Path baseDirectory, Path... deltaDirectory) throws IOException {
List<Path> filesToRead = new ArrayList<Path>();
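
The helper changes keep global transaction IDs and per-table write IDs in lockstep for these tests: burnThroughTransactions(db, tbl, n) opens n transactions and allocates write IDs 1..n for the table before committing or aborting them, so a transaction opened afterwards gets write ID n+1. That arithmetic is exactly what the TestInitiator assertions below rely on (e.g. Assert.assertEquals(24, writeid) after burning 23 transactions); a condensed sketch of the pattern, inside a CompactorTest subclass:

    import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
    import org.junit.Assert;

    void lockstepExample() throws Exception {
      burnThroughTransactions("default", "cthdp", 23);           // txns 1..23 -> write IDs 1..23
      long txnid = openTxn();                                    // global txn 24
      long writeid = allocateWriteId("default", "cthdp", txnid); // next write ID for this table
      Assert.assertEquals(24, writeid);
      txnHandler.commitTxn(new CommitTxnRequest(txnid));
    }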
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
index db8e46c..3ca073c 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
@@ -76,7 +76,7 @@ public class TestCleaner extends CompactorTest {
addDeltaFile(t, null, 23L, 24L, 2);
addBaseFile(t, null, 25L, 25);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "camtc", 25);
CompactionRequest rqst = new CompactionRequest("default", "camtc", CompactionType.MAJOR);
txnHandler.compact(rqst);
@@ -107,7 +107,7 @@ public class TestCleaner extends CompactorTest {
addDeltaFile(t, p, 23L, 24L, 2);
addBaseFile(t, p, 25L, 25);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "campc", 25);
CompactionRequest rqst = new CompactionRequest("default", "campc", CompactionType.MAJOR);
rqst.setPartitionname("ds=today");
@@ -138,7 +138,7 @@ public class TestCleaner extends CompactorTest {
addDeltaFile(t, null, 23L, 24L, 2);
addDeltaFile(t, null, 21L, 24L, 4);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "camitc", 25);
CompactionRequest rqst = new CompactionRequest("default", "camitc", CompactionType.MINOR);
txnHandler.compact(rqst);
@@ -176,7 +176,7 @@ public class TestCleaner extends CompactorTest {
addDeltaFile(t, p, 23L, 24L, 2);
addDeltaFile(t, p, 21L, 24L, 4);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "camipc", 25);
CompactionRequest rqst = new CompactionRequest("default", "camipc", CompactionType.MINOR);
rqst.setPartitionname("ds=today");
@@ -214,7 +214,7 @@ public class TestCleaner extends CompactorTest {
addDeltaFile(t, null, 23L, 24L, 2);
addDeltaFile(t, null, 21L, 24L, 4);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "bblt", 25);
CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR);
txnHandler.compact(rqst);
@@ -251,7 +251,7 @@ public class TestCleaner extends CompactorTest {
addDeltaFile(t, p, 23L, 24L, 2);
addDeltaFile(t, p, 21L, 24L, 4);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "bblp", 25);
CompactionRequest rqst = new CompactionRequest("default", "bblp", CompactionType.MINOR);
rqst.setPartitionname("ds=today");
@@ -296,7 +296,7 @@ public class TestCleaner extends CompactorTest {
addDeltaFile(t, null, 23L, 24L, 2);
addDeltaFile(t, null, 21L, 24L, 4);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "bblt", 25);
CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR);
txnHandler.compact(rqst);
@@ -368,7 +368,7 @@ public class TestCleaner extends CompactorTest {
addDeltaFile(t, p, 23L, 24L, 2);
addDeltaFile(t, p, 21L, 24L, 4);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "bblt", 25);
CompactionRequest rqst = new CompactionRequest("default", "bblt", CompactionType.MINOR);
rqst.setPartitionname("ds=today");
@@ -439,7 +439,7 @@ public class TestCleaner extends CompactorTest {
addDeltaFile(t, p, 23L, 24L, 2);
addBaseFile(t, p, 25L, 25);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "campcnb", 25);
CompactionRequest rqst = new CompactionRequest("default", "campcnb", CompactionType.MAJOR);
rqst.setPartitionname("ds=today");
@@ -469,7 +469,7 @@ public class TestCleaner extends CompactorTest {
addDeltaFile(t, null, 23L, 24L, 2);
addBaseFile(t, null, 25L, 25);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "dt", 25);
CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MINOR);
txnHandler.compact(rqst);
@@ -477,14 +477,14 @@ public class TestCleaner extends CompactorTest {
txnHandler.markCompacted(ci);
txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
+ // Drop table will clean the table entry from the compaction queue and hence the cleaner has no effect
ms.dropTable("default", "dt");
startCleaner();
// Check there are no compactions requests left.
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
- Assert.assertEquals(1, rsp.getCompactsSize());
- Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState()));
+ Assert.assertEquals(0, rsp.getCompactsSize());
}
@Test
@@ -496,7 +496,7 @@ public class TestCleaner extends CompactorTest {
addDeltaFile(t, p, 23L, 24L, 2);
addBaseFile(t, p, 25L, 25);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "dp", 25);
CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MAJOR);
rqst.setPartitionname("ds=today");
@@ -505,14 +505,14 @@ public class TestCleaner extends CompactorTest {
txnHandler.markCompacted(ci);
txnHandler.setRunAs(ci.id, System.getProperty("user.name"));
+ // Drop partition will clean the partition entry from the compaction queue and hence the cleaner has no effect
ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
startCleaner();
// Check there are no compactions requests left.
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
- Assert.assertEquals(1, rsp.getCompactsSize());
- Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState()));
+ Assert.assertEquals(0, rsp.getCompactsSize());
}
@Override
boolean useHive130DeltaDirName() {
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
index f35826e..d2818db 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
@@ -339,7 +339,7 @@ public class TestInitiator extends CompactorTest {
addDeltaFile(t, null, 21L, 22L, 2);
addDeltaFile(t, null, 23L, 24L, 2);
- burnThroughTransactions(23);
+ burnThroughTransactions("default", "cthdp", 23);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
@@ -350,6 +350,8 @@ public class TestInitiator extends CompactorTest {
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
+ long writeid = allocateWriteId("default", "cthdp", txnid);
+ Assert.assertEquals(24, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
@@ -371,7 +373,7 @@ public class TestInitiator extends CompactorTest {
addDeltaFile(t, p, 21L, 22L, 2);
addDeltaFile(t, p, 23L, 24L, 2);
- burnThroughTransactions(23);
+ burnThroughTransactions("default", "cphdp", 23);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
@@ -383,6 +385,8 @@ public class TestInitiator extends CompactorTest {
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
+ long writeid = allocateWriteId("default", "cphdp", txnid);
+ Assert.assertEquals(24, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
@@ -404,7 +408,7 @@ public class TestInitiator extends CompactorTest {
addDeltaFile(t, null, 21L, 22L, 2);
addDeltaFile(t, null, 23L, 24L, 2);
- burnThroughTransactions(53);
+ burnThroughTransactions("default", "nctdpnhe", 53);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
@@ -415,6 +419,8 @@ public class TestInitiator extends CompactorTest {
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
+ long writeid = allocateWriteId("default", "nctdpnhe", txnid);
+ Assert.assertEquals(54, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
@@ -440,7 +446,7 @@ public class TestInitiator extends CompactorTest {
addDeltaFile(t, null, 210L, 210L, 1);
addDeltaFile(t, null, 211L, 211L, 1);
- burnThroughTransactions(210);
+ burnThroughTransactions("default", "cttmd", 210);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
@@ -451,6 +457,8 @@ public class TestInitiator extends CompactorTest {
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
+ long writeid = allocateWriteId("default", "cttmd", txnid);
+ Assert.assertEquals(211, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
@@ -481,7 +489,7 @@ public class TestInitiator extends CompactorTest {
addDeltaFile(t, p, 210L, 210L, 1);
addDeltaFile(t, p, 211L, 211L, 1);
- burnThroughTransactions(210);
+ burnThroughTransactions("default", "cptmd", 210);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
@@ -493,6 +501,8 @@ public class TestInitiator extends CompactorTest {
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
+ long writeid = allocateWriteId("default", "cptmd", txnid);
+ Assert.assertEquals(211, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
@@ -514,7 +524,7 @@ public class TestInitiator extends CompactorTest {
addDeltaFile(t, null, 201L, 205L, 5);
addDeltaFile(t, null, 206L, 211L, 6);
- burnThroughTransactions(210);
+ burnThroughTransactions("default", "nctned", 210);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
@@ -525,6 +535,8 @@ public class TestInitiator extends CompactorTest {
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
+ long writeid = allocateWriteId("default", "nctned", txnid);
+ Assert.assertEquals(211, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
@@ -550,7 +562,7 @@ public class TestInitiator extends CompactorTest {
addDeltaFile(t, null, 300L, 310L, 11);
addDeltaFile(t, null, 311L, 321L, 11);
- burnThroughTransactions(320);
+ burnThroughTransactions("default", "cmomwbv", 320);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
@@ -561,6 +573,8 @@ public class TestInitiator extends CompactorTest {
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
+ long writeid = allocateWriteId("default", "cmomwbv", txnid);
+ Assert.assertEquals(321, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
@@ -590,7 +604,7 @@ public class TestInitiator extends CompactorTest {
addDeltaFile(t, p, 210L, 210L, 1);
addDeltaFile(t, p, 211L, 211L, 1);
- burnThroughTransactions(210);
+ burnThroughTransactions("default", "ednb", 210);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
@@ -602,6 +616,8 @@ public class TestInitiator extends CompactorTest {
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
+ long writeid = allocateWriteId("default", "ednb", txnid);
+ Assert.assertEquals(211, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
@@ -624,7 +640,7 @@ public class TestInitiator extends CompactorTest {
addDeltaFile(t, p, 21L, 22L, 2);
addDeltaFile(t, p, 23L, 24L, 2);
- burnThroughTransactions(23);
+ burnThroughTransactions("default", "ttospgocr", 23);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
@@ -636,6 +652,8 @@ public class TestInitiator extends CompactorTest {
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
+ long writeid = allocateWriteId("default", "ttospgocr", txnid);
+ Assert.assertEquals(24, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
txnid = openTxn();
@@ -648,6 +666,8 @@ public class TestInitiator extends CompactorTest {
req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
res = txnHandler.lock(req);
+ writeid = allocateWriteId("default", "ttospgocr", txnid);
+ Assert.assertEquals(25, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
@@ -670,7 +690,7 @@ public class TestInitiator extends CompactorTest {
addDeltaFile(t, p, 21L, 22L, 2);
addDeltaFile(t, p, 23L, 24L, 2);
- burnThroughTransactions(23);
+ burnThroughTransactions("default", "nctdp", 23);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "default");
@@ -681,6 +701,8 @@ public class TestInitiator extends CompactorTest {
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
+ long writeid = allocateWriteId("default", "nctdp", txnid);
+ Assert.assertEquals(24, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
startInitiator();
@@ -698,7 +720,7 @@ public class TestInitiator extends CompactorTest {
addDeltaFile(t, null, 21L, 22L, 2);
addDeltaFile(t, null, 23L, 24L, 2);
- burnThroughTransactions(23);
+ burnThroughTransactions("default", "dt", 23);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
@@ -709,6 +731,8 @@ public class TestInitiator extends CompactorTest {
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
+ long writeid = allocateWriteId("default", "dt", txnid);
+ Assert.assertEquals(24, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
ms.dropTable("default", "dt");
@@ -729,7 +753,7 @@ public class TestInitiator extends CompactorTest {
addDeltaFile(t, p, 21L, 22L, 2);
addDeltaFile(t, p, 23L, 24L, 2);
- burnThroughTransactions(23);
+ burnThroughTransactions("default", "dp", 23);
long txnid = openTxn();
LockComponent comp = new LockComponent(LockType.SHARED_WRITE, LockLevel.PARTITION, "default");
@@ -741,6 +765,8 @@ public class TestInitiator extends CompactorTest {
LockRequest req = new LockRequest(components, "me", "localhost");
req.setTxnid(txnid);
LockResponse res = txnHandler.lock(req);
+ long writeid = allocateWriteId("default", "dp", txnid);
+ Assert.assertEquals(24, writeid);
txnHandler.commitTxn(new CommitTxnRequest(txnid));
ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
index 0638126..0353ebf 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.ql.io.AcidUtils;
import org.junit.After;
import org.junit.Assert;
@@ -236,7 +235,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, null, 23L, 24L, 2);
addDeltaFile(t, null, 21L, 24L, 4);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "st", 25);
CompactionRequest rqst = new CompactionRequest("default", "st", CompactionType.MINOR);
txnHandler.compact(rqst);
@@ -262,7 +261,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, p, 23L, 24L, 2);
addDeltaFile(t, p, 21L, 24L, 4);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "sp", 25);
CompactionRequest rqst = new CompactionRequest("default", "sp", CompactionType.MINOR);
rqst.setPartitionname("ds=today");
@@ -285,7 +284,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, null, 21L, 22L, 2);
addDeltaFile(t, null, 23L, 24L, 2);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "mtwb", 25);
CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR);
txnHandler.compact(rqst);
@@ -344,7 +343,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, null, 23L, 25L, 3);
addLengthFile(t, null, 23L, 25L, 3);
addDeltaFile(t, null, 26L, 27L, 2);
- burnThroughTransactions(27, new HashSet<Long>(Arrays.asList(23L)), null);
+ burnThroughTransactions("default", "mtwb", 27, new HashSet<Long>(Arrays.asList(23L)), null);
CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR);
txnHandler.compact(rqst);
@@ -380,7 +379,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, null, 23L, 25L, 3);
addLengthFile(t, null, 23L, 25L, 3);
addDeltaFile(t, null, 26L, 27L, 2);
- burnThroughTransactions(27, null, new HashSet<Long>(Arrays.asList(24L, 25L)));
+ burnThroughTransactions("default", "mtwb", 27, null, new HashSet<Long>(Arrays.asList(24L, 25L)));
CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MINOR);
txnHandler.compact(rqst);
@@ -416,7 +415,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, p, 21L, 22L, 2);
addDeltaFile(t, p, 23L, 24L, 2);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "mpwb", 25);
CompactionRequest rqst = new CompactionRequest("default", "mpwb", CompactionType.MINOR);
rqst.setPartitionname("ds=today");
@@ -469,7 +468,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, null, 1L, 2L, 2);
addDeltaFile(t, null, 3L, 4L, 2);
- burnThroughTransactions(5);
+ burnThroughTransactions("default", "mtnb", 5);
CompactionRequest rqst = new CompactionRequest("default", "mtnb", CompactionType.MINOR);
txnHandler.compact(rqst);
@@ -522,7 +521,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, null, 21L, 22L, 2);
addDeltaFile(t, null, 23L, 24L, 2);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "matwb", 25);
CompactionRequest rqst = new CompactionRequest("default", "matwb", CompactionType.MAJOR);
txnHandler.compact(rqst);
@@ -598,7 +597,7 @@ public class TestWorker extends CompactorTest {
* and then the 'requested'
* minor compaction to combine delta_21_23, delta_25_33 and delta_35_35 to make delta_21_35
* or major compaction to create base_35*/
- burnThroughTransactions(35);
+ burnThroughTransactions("default", "mapwb", 35);
CompactionRequest rqst = new CompactionRequest("default", "mapwb", type);
rqst.setPartitionname("ds=today");
txnHandler.compact(rqst);
@@ -690,7 +689,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, p, 21L, 22L, 2);
addDeltaFile(t, p, 23L, 24L, 2);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "mapwb", 25);
CompactionRequest rqst = new CompactionRequest("default", "mapwb", CompactionType.MAJOR);
rqst.setPartitionname("ds=today");
@@ -734,7 +733,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, null, 1L, 2L, 2);
addDeltaFile(t, null, 3L, 4L, 2);
- burnThroughTransactions(4);
+ burnThroughTransactions("default", "matnb", 4);
CompactionRequest rqst = new CompactionRequest("default", "matnb", CompactionType.MAJOR);
txnHandler.compact(rqst);
@@ -778,7 +777,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, null, 21L, 22L, 2);
addDeltaFile(t, null, 23L, 24L, 2);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "matl", 25);
CompactionRequest rqst = new CompactionRequest("default", "matl", CompactionType.MAJOR);
txnHandler.compact(rqst);
@@ -822,7 +821,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, null, 21L, 22L, 2);
addDeltaFile(t, null, 23L, 24L, 2);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "mtl", 25);
CompactionRequest rqst = new CompactionRequest("default", "mtl", CompactionType.MINOR);
txnHandler.compact(rqst);
@@ -865,7 +864,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, p, 21L, 22L, 2, 2, false);
addDeltaFile(t, p, 23L, 26L, 4);
- burnThroughTransactions(27);
+ burnThroughTransactions("default", "mapwbmb", 27);
CompactionRequest rqst = new CompactionRequest("default", "mapwbmb", CompactionType.MAJOR);
rqst.setPartitionname("ds=today");
@@ -919,7 +918,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, null, 23L, 25L, 3);
addLengthFile(t, null, 23L, 25L, 3);
addDeltaFile(t, null, 26L, 27L, 2);
- burnThroughTransactions(27, new HashSet<Long>(Arrays.asList(23L)), null);
+ burnThroughTransactions("default", "mtwb", 27, new HashSet<Long>(Arrays.asList(23L)), null);
CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MAJOR);
txnHandler.compact(rqst);
@@ -955,7 +954,7 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, null, 23L, 25L, 3);
addLengthFile(t, null, 23L, 25L, 3);
addDeltaFile(t, null, 26L, 27L, 2);
- burnThroughTransactions(27, null, new HashSet<Long>(Arrays.asList(24L, 25L)));
+ burnThroughTransactions("default", "mtwb", 27, null, new HashSet<Long>(Arrays.asList(24L, 25L)));
CompactionRequest rqst = new CompactionRequest("default", "mtwb", CompactionType.MAJOR);
txnHandler.compact(rqst);
@@ -991,19 +990,19 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, null, 1L, 2L, 2);
addDeltaFile(t, null, 3L, 4L, 2);
- burnThroughTransactions(4);
+ burnThroughTransactions("default", "dt", 4);
CompactionRequest rqst = new CompactionRequest("default", "dt", CompactionType.MAJOR);
txnHandler.compact(rqst);
+ // Drop table will clean the table entry from the compaction queue and hence the worker has no effect
ms.dropTable("default", "dt");
startWorker();
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
List<ShowCompactResponseElement> compacts = rsp.getCompacts();
- Assert.assertEquals(1, compacts.size());
- Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(compacts.get(0).getState()));
+ Assert.assertEquals(0, compacts.size());
}
@Test
@@ -1015,20 +1014,20 @@ public class TestWorker extends CompactorTest {
addDeltaFile(t, p, 21L, 22L, 2);
addDeltaFile(t, p, 23L, 24L, 2);
- burnThroughTransactions(25);
+ burnThroughTransactions("default", "dp", 25);
CompactionRequest rqst = new CompactionRequest("default", "dp", CompactionType.MINOR);
rqst.setPartitionname("ds=today");
txnHandler.compact(rqst);
+ // Drop partition will clean the partition entry from the compaction queue and hence the worker has no effect
ms.dropPartition("default", "dp", Collections.singletonList("today"), true);
startWorker();
ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
List<ShowCompactResponseElement> compacts = rsp.getCompacts();
- Assert.assertEquals(1, compacts.size());
- Assert.assertTrue(TxnStore.SUCCEEDED_RESPONSE.equals(rsp.getCompacts().get(0).getState()));
+ Assert.assertEquals(0, compacts.size());
}
@After
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/results/clientpositive/acid_nullscan.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_nullscan.q.out b/ql/src/test/results/clientpositive/acid_nullscan.q.out
index 7fcc831..76df2b6 100644
--- a/ql/src/test/results/clientpositive/acid_nullscan.q.out
+++ b/ql/src/test/results/clientpositive/acid_nullscan.q.out
@@ -42,12 +42,12 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid_vectorized
- Statistics: Num rows: 1 Data size: 24510 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 24540 Basic stats: COMPLETE Column stats: NONE
GatherStats: false
Filter Operator
isSamplingPred: false
predicate: false (type: boolean)
- Statistics: Num rows: 1 Data size: 24510 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 1 Data size: 24540 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: sum(a)
mode: hash
@@ -83,7 +83,7 @@ STAGE PLANS:
serialization.ddl struct acid_vectorized { i32 a, string b}
serialization.format 1
serialization.lib org.apache.hadoop.hive.serde2.NullStructSerDe
- totalSize 2451
+ totalSize 2454
transactional true
transactional_properties default
#### A masked pattern was here ####
@@ -106,7 +106,7 @@ STAGE PLANS:
serialization.ddl struct acid_vectorized { i32 a, string b}
serialization.format 1
serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
- totalSize 2451
+ totalSize 2454
transactional true
transactional_properties default
#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/results/clientpositive/acid_table_stats.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/acid_table_stats.q.out b/ql/src/test/results/clientpositive/acid_table_stats.q.out
index 74d4c44..fa6c666 100644
--- a/ql/src/test/results/clientpositive/acid_table_stats.q.out
+++ b/ql/src/test/results/clientpositive/acid_table_stats.q.out
@@ -95,7 +95,7 @@ Partition Parameters:
numFiles 2
numRows 0
rawDataSize 0
- totalSize 3950
+ totalSize 3949
#### A masked pattern was here ####
# Storage Information
@@ -133,9 +133,9 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: acid
- Statistics: Num rows: 81 Data size: 39500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 81 Data size: 39490 Basic stats: COMPLETE Column stats: NONE
Select Operator
- Statistics: Num rows: 81 Data size: 39500 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 81 Data size: 39490 Basic stats: COMPLETE Column stats: NONE
Group By Operator
aggregations: count()
mode: hash
@@ -210,7 +210,7 @@ Partition Parameters:
numFiles 2
numRows 1000
rawDataSize 208000
- totalSize 3950
+ totalSize 3949
#### A masked pattern was here ####
# Storage Information
@@ -261,7 +261,7 @@ Partition Parameters:
numFiles 2
numRows 1000
rawDataSize 208000
- totalSize 3950
+ totalSize 3949
#### A masked pattern was here ####
# Storage Information
@@ -386,7 +386,7 @@ Partition Parameters:
numFiles 4
numRows 1000
rawDataSize 208000
- totalSize 7904
+ totalSize 7890
#### A masked pattern was here ####
# Storage Information
@@ -433,7 +433,7 @@ Partition Parameters:
numFiles 4
numRows 2000
rawDataSize 416000
- totalSize 7904
+ totalSize 7890
#### A masked pattern was here ####
# Storage Information
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
index b3df04f..9c0e020 100644
--- a/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
+++ b/ql/src/test/results/clientpositive/autoColumnStats_4.q.out
@@ -241,7 +241,7 @@ Table Parameters:
numFiles 4
numRows 0
rawDataSize 0
- totalSize 2909
+ totalSize 2884
transactional true
transactional_properties default
#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
index 1195f20..fba0158 100644
--- a/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
+++ b/ql/src/test/results/clientpositive/llap/acid_bucket_pruning.q.out
@@ -103,7 +103,7 @@ STAGE PLANS:
serialization.ddl struct acidtbldefault { i32 a}
serialization.format 1
serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
- totalSize 32572
+ totalSize 32616
transactional true
transactional_properties default
#### A masked pattern was here ####
@@ -127,7 +127,7 @@ STAGE PLANS:
serialization.ddl struct acidtbldefault { i32 a}
serialization.format 1
serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
- totalSize 32572
+ totalSize 32616
transactional true
transactional_properties default
#### A masked pattern was here ####
http://git-wip-us.apache.org/repos/asf/hive/blob/cbb9233a/ql/src/test/results/clientpositive/row__id.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/row__id.q.out b/ql/src/test/results/clientpositive/row__id.q.out
index 87fb310..6659327 100644
--- a/ql/src/test/results/clientpositive/row__id.q.out
+++ b/ql/src/test/results/clientpositive/row__id.q.out
@@ -62,23 +62,23 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: hello_acid
- Statistics: Num rows: 74 Data size: 18600 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 72 Data size: 18410 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID.transactionid (type: bigint)
outputColumnNames: _col0
- Statistics: Num rows: 74 Data size: 18600 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 72 Data size: 18410 Basic stats: COMPLETE Column stats: NONE
Reduce Output Operator
key expressions: _col0 (type: bigint)
sort order: +
- Statistics: Num rows: 74 Data size: 18600 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 72 Data size: 18410 Basic stats: COMPLETE Column stats: NONE
Reduce Operator Tree:
Select Operator
expressions: KEY.reducesinkkey0 (type: bigint)
outputColumnNames: _col0
- Statistics: Num rows: 74 Data size: 18600 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 72 Data size: 18410 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 74 Data size: 18600 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 72 Data size: 18410 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
@@ -104,9 +104,9 @@ POSTHOOK: Input: default@hello_acid@load_date=2016-03-01
POSTHOOK: Input: default@hello_acid@load_date=2016-03-02
POSTHOOK: Input: default@hello_acid@load_date=2016-03-03
#### A masked pattern was here ####
+1
+2
3
-4
-5
PREHOOK: query: explain
select tid from (select row__id.transactionid as tid from hello_acid) sub where tid = 3
PREHOOK: type: QUERY
@@ -123,17 +123,17 @@ STAGE PLANS:
Map Operator Tree:
TableScan
alias: hello_acid
- Statistics: Num rows: 74 Data size: 18600 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 72 Data size: 18410 Basic stats: COMPLETE Column stats: NONE
Filter Operator
predicate: (ROW__ID.transactionid = 3) (type: boolean)
- Statistics: Num rows: 37 Data size: 9300 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 36 Data size: 9205 Basic stats: COMPLETE Column stats: NONE
Select Operator
expressions: ROW__ID.transactionid (type: bigint)
outputColumnNames: _col0
- Statistics: Num rows: 37 Data size: 9300 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 36 Data size: 9205 Basic stats: COMPLETE Column stats: NONE
File Output Operator
compressed: false
- Statistics: Num rows: 37 Data size: 9300 Basic stats: COMPLETE Column stats: NONE
+ Statistics: Num rows: 36 Data size: 9205 Basic stats: COMPLETE Column stats: NONE
table:
input format: org.apache.hadoop.mapred.SequenceFileInputFormat
output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat