You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by do...@apache.org on 2022/06/09 02:59:44 UTC
[spark] branch master updated: [SPARK-39387][FOLLOWUP][TESTS] Add a test case for HIVE-25190
This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 6f9997ca9f3 [SPARK-39387][FOLLOWUP][TESTS] Add a test case for HIVE-25190
6f9997ca9f3 is described below
commit 6f9997ca9f3639f01b25a9cff4985a5b3b224578
Author: sychen <sy...@ctrip.com>
AuthorDate: Wed Jun 8 19:59:26 2022 -0700
[SPARK-39387][FOLLOWUP][TESTS] Add a test case for HIVE-25190
### What changes were proposed in this pull request?
Add a unit test to verify that the overflow of newLength problem is fixed.
### Why are the changes needed?
https://github.com/apache/spark/pull/36772#pullrequestreview-996975725
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Added a unit test.
Closes #36787 from cxzl25/SPARK-39387-FOLLOWUP.
Authored-by: sychen <sy...@ctrip.com>
Signed-off-by: Dongjoon Hyun <do...@apache.org>
---
.../spark/sql/execution/datasources/orc/OrcQuerySuite.scala | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
index a289a94fdce..2c1120baa7c 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
@@ -832,6 +832,18 @@ abstract class OrcQuerySuite extends OrcQueryTest with SharedSparkSession {
}
}
}
+
+ test("SPARK-39387: BytesColumnVector should not throw RuntimeException due to overflow") {
+ withTempPath { dir =>
+ val path = dir.getCanonicalPath
+ val df = spark.range(1, 22, 1, 1).map { _ =>
+ val byteData = Array.fill[Byte](1024 * 1024)('X')
+ val mapData = (1 to 100).map(i => (i, byteData))
+ mapData
+ }.toDF()
+ df.write.format("orc").save(path)
+ }
+ }
}
class OrcV1QuerySuite extends OrcQuerySuite {
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org