Posted to commits@hive.apache.org by ab...@apache.org on 2022/12/06 12:59:35 UTC

[hive] branch master updated: HIVE-26801: Query based compaction fails on tables having columns with keywords(i.e. row in this case) (#3828) (Gopinath Gangadharan reviewed by Denys Kuzmenko)

This is an automated email from the ASF dual-hosted git repository.

abstractdog pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new b4c18bfaa49 HIVE-26801: Query based compaction fails on tables having columns with keywords(i.e. row in this case) (#3828) (Gopinath Gangadharan reviewed by Denys Kuzmenko)
b4c18bfaa49 is described below

commit b4c18bfaa49d6b6a68d3f9935565fab369d73f03
Author: gopigowtham <gg...@cloudera.com>
AuthorDate: Tue Dec 6 18:29:23 2022 +0530

    HIVE-26801: Query based compaction fails on tables having columns with keywords(i.e. row in this case) (#3828) (Gopinath Gangadharan reviewed by Denys Kuzmenko)
---
 .../ql/txn/compactor/TestCrudCompactorOnTez.java   | 65 ++++++++++++++++++++++
 .../ql/txn/compactor/CompactionQueryBuilder.java   |  4 +-
 2 files changed, 67 insertions(+), 2 deletions(-)
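
For context, the failure can be reproduced outside the test harness: create a transactional ORC table that uses a reserved keyword (such as `row`) as a column name, insert a couple of rows so that multiple delta directories exist, and trigger a query-based major compaction. A rough HiveQL sketch of that scenario, assuming query-based compaction is enabled (the test toggles HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED):

    -- assumes query-based compaction is enabled for the compactor
    CREATE TABLE compact_hive_aggregated_data (
      `sessionid` string,
      `row` int,                 -- reserved keyword used as a column name
      `timeofoccurrence` bigint)
    STORED AS ORC
    TBLPROPERTIES ('transactional'='true');

    INSERT INTO TABLE compact_hive_aggregated_data VALUES ('abcd', 300, 21111111111);
    INSERT INTO TABLE compact_hive_aggregated_data VALUES ('abcd', 300, 21111111111);

    -- before this patch, the worker's generated rewrite query failed to parse
    ALTER TABLE compact_hive_aggregated_data COMPACT 'major';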

diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
index 6f4ddbff5d0..f63a6a8f922 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
@@ -2544,4 +2544,69 @@ public class TestCrudCompactorOnTez extends CompactorOnTezTest {
       }
     }
   }
+
+  @Test
+  public void testCompactionShouldNotFailOnKeywordField() throws Exception {
+    conf.setBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED, true);
+    String dbName = "default";
+    String tblName = "compact_hive_aggregated_data";
+
+    TxnStore txnHandler = TxnUtils.getTxnStore(conf);
+    TestDataProvider testDP = new TestDataProvider();
+
+    // Create test table
+    executeStatementOnDriver("drop table if exists " + tblName, driver);
+    executeStatementOnDriver("CREATE TABLE " + tblName + "(`sessionid` string,`row` int,`timeofoccurrence` bigint)" +
+            "STORED AS ORC TBLPROPERTIES('transactional'='true')", driver);
+
+    // Insert test data into test table
+    executeStatementOnDriver("INSERT INTO TABLE " + tblName +
+            " values (\"abcd\",300,21111111111)",driver);
+    executeStatementOnDriver("INSERT INTO TABLE " + tblName +
+            " values (\"abcd\",300,21111111111)",driver);
+
+    // Find the location of the table
+    IMetaStoreClient msClient = new HiveMetaStoreClient(conf);
+    Table table = msClient.getTable(dbName, tblName);
+    FileSystem fs = FileSystem.get(conf);
+    // Verify deltas (delta_0000001_0000001_0000, delta_0000002_0000002_0000) are present
+    Assert.assertEquals("Delta directories does not match before compaction",
+            Arrays.asList("delta_0000001_0000001_0000", "delta_0000002_0000002_0000"),
+            CompactorTestUtil.getBaseOrDeltaNames(fs, AcidUtils.deltaFileFilter, table, null));
+
+    // Get all data before compaction is run
+    List<String> expectedData = testDP.getAllData(tblName);
+
+    // Do a compaction directly and wait for it to finish
+    CompactionRequest rqst = new CompactionRequest(dbName, tblName, CompactionType.MAJOR);
+    CompactionResponse resp = txnHandler.compact(rqst);
+    runWorker(conf);
+
+    CompactorTestUtil.runCleaner(conf);
+
+    // Check if the compaction succeeded
+    ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
+    List<ShowCompactResponseElement> compacts = rsp.getCompacts();
+    Assert.assertEquals("Expecting 1 rows and found " + compacts.size(), 1, compacts.size());
+    Assert.assertEquals("Expecting compaction state 'succeeded' and found:" + compacts.get(0).getState(),
+            "succeeded", compacts.get(0).getState());
+    // Should contain only one base directory now
+    FileStatus[] status = fs.listStatus(new Path(table.getSd().getLocation()));
+    int inputFileCount = 0;
+    for (FileStatus file : status) {
+      inputFileCount++;
+    }
+    Assert.assertEquals("Expecting 1 file and found " + inputFileCount, 1, inputFileCount);
+
+    // Check bucket file name
+    List<String> baseDir = CompactorTestUtil.getBaseOrDeltaNames(fs, AcidUtils.baseFileFilter, table, null);
+    List<String> expectedBucketFiles = Arrays.asList("bucket_00000");
+    Assert.assertEquals("Bucket names are not matching after compaction", expectedBucketFiles,
+            CompactorTestUtil
+                    .getBucketFileNames(fs, table, null, baseDir.get(0)));
+
+    // Verify all contents
+    List<String> actualData = testDP.getAllData(tblName);
+    Assert.assertEquals(expectedData, actualData);
+  }
 }
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionQueryBuilder.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionQueryBuilder.java
index 8a297533888..52e357d259f 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionQueryBuilder.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionQueryBuilder.java
@@ -303,8 +303,8 @@ class CompactionQueryBuilder {
                 + "ROW__ID.writeId, ROW__ID.bucketId, ROW__ID.rowId, ROW__ID.writeId, "
                 + "NAMED_STRUCT(");
         for (int i = 0; i < cols.size(); ++i) {
-          query.append(i == 0 ? "'" : ", '").append(cols.get(i).getName()).append("', ")
-              .append(cols.get(i).getName());
+          query.append(i == 0 ? "'" : ", '").append(cols.get(i).getName()).append("', `")
+              .append(cols.get(i).getName()).append("`");
         }
         query.append(") ");
       } else { //minor
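
The effect of the two-line fix on the generated query: the major-compaction rewrite builds a NAMED_STRUCT projection over the table's columns and previously interpolated each column name unquoted, so a keyword column such as `row` made the statement unparseable. With the fix, every projected column reference is wrapped in backticks. Roughly, for the table above (illustrative fragment only, not the full compaction query):

    -- before (fails to parse: row is a reserved keyword)
    ... NAMED_STRUCT('sessionid', sessionid, 'row', row, 'timeofoccurrence', timeofoccurrence) ...

    -- after (column references are backtick-quoted)
    ... NAMED_STRUCT('sessionid', `sessionid`, 'row', `row`, 'timeofoccurrence', `timeofoccurrence`) ...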