You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@hive.apache.org by lp...@apache.org on 2022/09/13 07:33:10 UTC
[hive] branch master updated: HIVE-26482: Add test to check names after compaction on partition (#3532) (Zsolt Miskolczi, reviewed by Karen Coppage, Laszlo Pinter, Laszlo Vegh)
This is an automated email from the ASF dual-hosted git repository.
lpinter pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git
The following commit(s) were added to refs/heads/master by this push:
new 35f6a75c86e HIVE-26482: Add test to check names after compaction on partition (#3532) (Zsolt Miskolczi, reviewed by Karen Coppage, Laszlo Pinter, Laszlo Vegh)
35f6a75c86e is described below
commit 35f6a75c86e7b6df7460a046ce53e87a50d64860
Author: InvisibleProgrammer <zs...@gmail.com>
AuthorDate: Tue Sep 13 09:32:59 2022 +0200
HIVE-26482: Add test to check names after compaction on partition (#3532) (Zsolt Miskolczi, reviewed by Karen Coppage, Laszlo Pinter, Laszlo Vegh)
---
.../apache/hadoop/hive/ql/TestTxnCommands2.java | 42 +++++++++++++++++++++-
1 file changed, 41 insertions(+), 1 deletion(-)
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
index 0e1edcd9d86..d9f197b0919 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java
@@ -73,7 +73,6 @@ import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService;
import org.apache.hadoop.hive.ql.scheduled.ScheduledQueryExecutionContext;
import org.apache.hadoop.hive.ql.scheduled.ScheduledQueryExecutionService;
import org.apache.hadoop.hive.ql.schq.MockScheduledQueryService;
-import org.apache.hadoop.hive.ql.schq.TestScheduledQueryService;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.txn.compactor.CompactorMR;
import org.apache.hadoop.hive.ql.txn.compactor.Worker;
@@ -3291,6 +3290,47 @@ public class TestTxnCommands2 extends TxnCommandsBaseForTests {
Assert.assertEquals(resData, stringifyValues(actualData));
}
+ /**
+ * Verifies that running a MINOR compaction separately on two partitions of an
+ * ACID partitioned table produces compacted delta directories whose names carry
+ * the merged writeId range, and that the pre-compaction single-writeId delta
+ * directories are deleted afterwards (the helper also runs the cleaner).
+ * NOTE(review): the "_v0000021"/"_v0000022" visibility suffixes in the expected
+ * names look like they depend on the txnIds the compactor happens to allocate in
+ * this test class's run order — confirm they stay stable if tests are added or
+ * reordered before this one.
+ */
+ @Test
+ public void testCompactionOutputDirectoryNamesOnPartitionsAndOldDeltasDeleted() throws Exception {
+ String p1 = "p=p1";
+ String p2 = "p=p2";
+ // Delta directories produced by the four single-row inserts below; after
+ // compaction + cleaning, none of them may remain on disk.
+ String oldDelta1 = "delta_0000001_0000001_0000";
+ String oldDelta2 = "delta_0000002_0000002_0000";
+ String oldDelta3 = "delta_0000003_0000003_0000";
+ String oldDelta4 = "delta_0000004_0000004_0000";
+
+ // Expected compacted delta names: writeIds 1-2 merged for p1, 3-4 for p2.
+ String expectedDelta1 = p1 + "/delta_0000001_0000002_v0000021";
+ String expectedDelta2 = p2 + "/delta_0000003_0000004_v0000022";
+
+ // Two inserts per partition so each partition has two deltas to merge.
+ runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p1') (a,b) values(1,2)");
+ runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p1') (a,b) values(3,4)");
+ runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p2') (a,b) values(1,2)");
+ runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p2') (a,b) values(3,4)");
+
+ // Compact (and clean) each partition independently.
+ compactPartition(Table.ACIDTBLPART.name().toLowerCase(), CompactionType.MINOR, p1);
+ compactPartition(Table.ACIDTBLPART.name().toLowerCase(), CompactionType.MINOR, p2);
+
+ FileSystem fs = FileSystem.get(hiveConf);
+ String tablePath = getWarehouseDir() + "/" + Table.ACIDTBLPART.name().toLowerCase() + "/";
+
+ // The merged delta directories must exist under each partition...
+ Assert.assertTrue(fs.exists(new Path(tablePath + expectedDelta1)));
+ Assert.assertTrue(fs.exists(new Path(tablePath + expectedDelta2)));
+
+ // ...and the original per-insert deltas must have been removed by the cleaner.
+ // NOTE(review): these paths omit the partition directory (p=p1/p=p2) — they
+ // assert directly under the table path; confirm that is the intended check.
+ Assert.assertFalse(fs.exists(new Path(tablePath + oldDelta1)));
+ Assert.assertFalse(fs.exists(new Path(tablePath + oldDelta2)));
+ Assert.assertFalse(fs.exists(new Path(tablePath + oldDelta3)));
+ Assert.assertFalse(fs.exists(new Path(tablePath + oldDelta4)));
+ }
+
+ /**
+ * Submits a compaction request of the given type for a single partition of
+ * {@code table} in the "default" database, then drives it to completion
+ * synchronously: runWorker presumably executes the queued compaction and
+ * runCleaner presumably removes the now-obsolete delta directories (both are
+ * defined elsewhere in this test hierarchy — confirm their exact semantics).
+ */
+ private void compactPartition(String table, CompactionType type, String partition)
+ throws Exception {
+ CompactionRequest compactionRequest = new CompactionRequest("default", table, type);
+ compactionRequest.setPartitionname(partition);
+ txnHandler.compact(compactionRequest);
+ runWorker(hiveConf);
+ runCleaner(hiveConf);
+ }
+
/**
* takes raw data and turns it into a string as if from Driver.getResults()
* sorts rows in dictionary order