You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by do...@apache.org on 2020/11/02 16:39:31 UTC
[spark] branch master updated: [SPARK-33319][SQL][TEST] Add all
built-in SerDes to HiveSerDeReadWriteSuite
This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 789d19c [SPARK-33319][SQL][TEST] Add all built-in SerDes to HiveSerDeReadWriteSuite
789d19c is described below
commit 789d19cab5caa20d35dcdd700ed7fe53ca1893fe
Author: Yuming Wang <yu...@ebay.com>
AuthorDate: Mon Nov 2 08:34:50 2020 -0800
[SPARK-33319][SQL][TEST] Add all built-in SerDes to HiveSerDeReadWriteSuite
### What changes were proposed in this pull request?
This PR adds all built-in SerDes to `HiveSerDeReadWriteSuite`.
https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL#LanguageManualDDL-RowFormats&SerDe
### Why are the changes needed?
We will upgrade Parquet, ORC and Avro, so we need to ensure compatibility.
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
N/A
Closes #30228 from wangyum/SPARK-33319.
Authored-by: Yuming Wang <yu...@ebay.com>
Signed-off-by: Dongjoon Hyun <dh...@apple.com>
---
.../spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala
index ac9ae8c..aae49f7 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveSerDeReadWriteSuite.scala
@@ -135,11 +135,12 @@ class HiveSerDeReadWriteSuite extends QueryTest with SQLTestUtils with TestHiveS
}
// MAP<primitive_type, data_type>
withTable("hive_serde") {
- hiveClient.runSqlHive(s"CREATE TABLE hive_serde (c1 MAP <INT, STRING>) STORED AS $fileFormat")
- hiveClient.runSqlHive("INSERT INTO TABLE hive_serde SELECT MAP(1, 'a') FROM (SELECT 1) t")
- checkAnswer(spark.table("hive_serde"), Row(Map(1 -> "a")))
- spark.sql("INSERT INTO TABLE hive_serde SELECT MAP(2, 'b')")
- checkAnswer(spark.table("hive_serde"), Seq(Row(Map(1 -> "a")), Row(Map(2 -> "b"))))
+ hiveClient.runSqlHive(
+ s"CREATE TABLE hive_serde (c1 MAP <STRING, STRING>) STORED AS $fileFormat")
+ hiveClient.runSqlHive("INSERT INTO TABLE hive_serde SELECT MAP('1', 'a') FROM (SELECT 1) t")
+ checkAnswer(spark.table("hive_serde"), Row(Map("1" -> "a")))
+ spark.sql("INSERT INTO TABLE hive_serde SELECT MAP('2', 'b')")
+ checkAnswer(spark.table("hive_serde"), Seq(Row(Map("1" -> "a")), Row(Map("2" -> "b"))))
}
// STRUCT<col_name : data_type [COMMENT col_comment], ...>
@@ -154,7 +155,7 @@ class HiveSerDeReadWriteSuite extends QueryTest with SQLTestUtils with TestHiveS
}
}
- Seq("PARQUET", "ORC", "TEXTFILE").foreach { fileFormat =>
+ Seq("SEQUENCEFILE", "TEXTFILE", "RCFILE", "ORC", "PARQUET", "AVRO").foreach { fileFormat =>
test(s"Read/Write Hive $fileFormat serde table") {
// Numeric Types
checkNumericTypes(fileFormat, "TINYINT", 2)
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org