Posted to commits@hive.apache.org by pv...@apache.org on 2020/05/27 18:43:33 UTC

[hive] branch master updated: HIVE-23531: Major CRUD QB compaction failing with ClassCastException when vectorization off (Karen Coppage reviewed by Laszlo Pinter and Peter Vary)

This is an automated email from the ASF dual-hosted git repository.

pvary pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new 4d16b4d  HIVE-23531: Major CRUD QB compaction failing with ClassCastException when vectorization off (Karen Coppage reviewed by Laszlo Pinter and Peter Vary)
4d16b4d is described below

commit 4d16b4dd8f915ac49b9a752e41d3a80d9b8a02a3
Author: Karen Coppage <ka...@cloudera.com>
AuthorDate: Wed May 27 20:42:34 2020 +0200

    HIVE-23531: Major CRUD QB compaction failing with ClassCastException when vectorization off (Karen Coppage reviewed by Laszlo Pinter and Peter Vary)
---
 .../hive/ql/txn/compactor/TestCrudCompactorOnTez.java   |  5 +++++
 .../hive/ql/txn/compactor/TestMmCompactorOnTez.java     |  5 +++++
 .../apache/hadoop/hive/ql/exec/FileSinkOperator.java    | 17 ++++++++++++++++-
 3 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
index 7bbc4bc..4fb7860 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java
@@ -965,6 +965,11 @@ public class TestCrudCompactorOnTez extends CompactorOnTezTest {
     Assert.assertEquals(expectedData, actualData);
   }
 
+  @Test public void testVectorizationOff() throws Exception {
+    conf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
+    testMinorCompactionAfterMajor();
+  }
+
   /**
    * Verify that the expected number of transactions have run, and their state is "succeeded".
    *
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java
index 2c717b7..451390a 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java
@@ -570,6 +570,11 @@ public class TestMmCompactorOnTez extends CompactorOnTezTest {
     Assert.assertEquals(expectedData, actualData);
   }
 
+  @Test public void testVectorizationOff() throws Exception {
+    conf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false);
+    testMmMinorCompactionAfterMajor();
+  }
+
   /**
    * Verify that the expected number of transactions have run, and their state is "succeeded".
    *
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 1b84ba2..22a24f8 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -962,7 +962,7 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
         }
       } else {
         if (conf.isCompactionTable()) {
-          int bucketProperty = ((IntWritable)((Object[])row)[2]).get();
+          int bucketProperty = getBucketProperty(row);
           bucketId = BucketCodec.determineVersion(bucketProperty).decodeWriterId(bucketProperty);
         }
         createBucketFiles(fsp);
@@ -1684,4 +1684,19 @@ public class FileSinkOperator extends TerminalOperator<FileSinkDesc> implements
       job.setBoolean(Utilities.ENSURE_OPERATORS_EXECUTED, true);
     }
   }
+
+  /**
+   * Get the bucket property as an int from the row. This is necessary because
+   * VectorFileSinkOperator wraps row values in Writable objects.
+   * @param row as Object
+   * @return bucket property as int
+   */
+  private int getBucketProperty(Object row) {
+    Object bucketProperty = ((Object[]) row)[2];
+    if (bucketProperty instanceof Writable) {
+      return ((IntWritable) bucketProperty).get();
+    } else {
+      return (int) bucketProperty;
+    }
+  }
 }
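
For context, a minimal sketch of the dispatch the patched FileSinkOperator now performs when it reads the bucket property from a compaction row. The class name BucketPropertyDemo, the main method, and the sample value 536870912 are illustrative only and not part of the commit; the non-vectorized branch assumes the unwrapped value is a boxed Integer, which is consistent with the (int) cast in the patch but is an assumption beyond what the diff states.

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;

public class BucketPropertyDemo {

  // Mirrors the dispatch in the new FileSinkOperator#getBucketProperty helper.
  static int getBucketProperty(Object row) {
    Object bucketProperty = ((Object[]) row)[2];
    if (bucketProperty instanceof Writable) {
      // Vectorized path: VectorFileSinkOperator wraps row values in Writable objects.
      return ((IntWritable) bucketProperty).get();
    }
    // Non-vectorized path: assumed to be a boxed Integer (illustrative assumption).
    return (int) bucketProperty;
  }

  public static void main(String[] args) {
    Object[] vectorizedRow = {null, null, new IntWritable(536870912)};
    Object[] nonVectorizedRow = {null, null, 536870912};
    // The pre-patch expression, ((IntWritable) ((Object[]) row)[2]).get(),
    // would throw ClassCastException for the non-vectorized row.
    System.out.println(getBucketProperty(vectorizedRow));
    System.out.println(getBucketProperty(nonVectorizedRow));
  }
}

Note on the design choice as it appears in the diff: the helper tests instanceof Writable but casts to IntWritable; this appears sufficient because, per the helper's javadoc, the only wrapped value expected at that row position is the IntWritable bucket property produced on the vectorized path.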