You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by gu...@apache.org on 2019/07/02 05:57:45 UTC
[spark] branch master updated: [SPARK-28054][SQL][FOLLOW-UP] Fix
error when insert Hive partitioned table dynamically where partition name
is upper case
This is an automated email from the ASF dual-hosted git repository.
gurwls223 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new f148674 [SPARK-28054][SQL][FOLLOW-UP] Fix error when insert Hive partitioned table dynamically where partition name is upper case
f148674 is described below
commit f1486742fa032d838c0730fcb968e42ac145acc8
Author: Liang-Chi Hsieh <vi...@gmail.com>
AuthorDate: Tue Jul 2 14:57:24 2019 +0900
[SPARK-28054][SQL][FOLLOW-UP] Fix error when insert Hive partitioned table dynamically where partition name is upper case
## What changes were proposed in this pull request?
This is a small follow-up for SPARK-28054 to fix wrong indent and use `withSQLConf` as suggested by gatorsmile.
## How was this patch tested?
Existing tests.
Closes #24971 from viirya/SPARK-28054-followup.
Authored-by: Liang-Chi Hsieh <vi...@gmail.com>
Signed-off-by: HyukjinKwon <gu...@apache.org>
---
.../spark/sql/hive/execution/SaveAsHiveFile.scala | 2 +-
.../spark/sql/hive/execution/HiveQuerySuite.scala | 21 +++++++++++----------
2 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/SaveAsHiveFile.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/SaveAsHiveFile.scala
index 234acb7..62d3bad 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/SaveAsHiveFile.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/SaveAsHiveFile.scala
@@ -89,7 +89,7 @@ private[hive] trait SaveAsHiveFile extends DataWritingCommand {
     // we also need to lowercase the column names in written partition paths.
     // scalastyle:off caselocale
     val hiveCompatiblePartitionColumns = partitionAttributes.map { attr =>
-        attr.withName(attr.name.toLowerCase)
+      attr.withName(attr.name.toLowerCase)
     }
     // scalastyle:on caselocale
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
index 13a533c..6986963 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveQuerySuite.scala
@@ -1190,19 +1190,20 @@ class HiveQuerySuite extends HiveComparisonTest with SQLTestUtils with BeforeAnd
}
   test("SPARK-28054: Unable to insert partitioned table when partition name is upper case") {
-    withTable("spark_28054_test") {
-      sql("set hive.exec.dynamic.partition.mode=nonstrict")
-      sql("CREATE TABLE spark_28054_test (KEY STRING, VALUE STRING) PARTITIONED BY (DS STRING)")
+    withSQLConf("hive.exec.dynamic.partition.mode" -> "nonstrict") {
+      withTable("spark_28054_test") {
+        sql("CREATE TABLE spark_28054_test (KEY STRING, VALUE STRING) PARTITIONED BY (DS STRING)")
-      sql("INSERT INTO TABLE spark_28054_test PARTITION(DS) SELECT 'k' KEY, 'v' VALUE, '1' DS")
+        sql("INSERT INTO TABLE spark_28054_test PARTITION(DS) SELECT 'k' KEY, 'v' VALUE, '1' DS")
-      assertResult(Array(Row("k", "v", "1"))) {
-        sql("SELECT * from spark_28054_test").collect()
-      }
+        assertResult(Array(Row("k", "v", "1"))) {
+          sql("SELECT * from spark_28054_test").collect()
+        }
-      sql("INSERT INTO TABLE spark_28054_test PARTITION(ds) SELECT 'k' key, 'v' value, '2' ds")
-      assertResult(Array(Row("k", "v", "1"), Row("k", "v", "2"))) {
-        sql("SELECT * from spark_28054_test").collect()
+        sql("INSERT INTO TABLE spark_28054_test PARTITION(ds) SELECT 'k' key, 'v' value, '2' ds")
+        assertResult(Array(Row("k", "v", "1"), Row("k", "v", "2"))) {
+          sql("SELECT * from spark_28054_test").collect()
+        }
       }
     }
   }
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org