Posted to commits@iceberg.apache.org by ao...@apache.org on 2019/11/19 13:22:39 UTC

[incubator-iceberg] branch master updated: Spark: Fix case sensitivity setting (#656)

This is an automated email from the ASF dual-hosted git repository.

aokolnychyi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-iceberg.git


The following commit(s) were added to refs/heads/master by this push:
     new a21d70c  Spark: Fix case sensitivity setting (#656)
a21d70c is described below

commit a21d70c83539d121a2d04702dbd61b569a9ae09b
Author: Ryan Blue <rd...@users.noreply.github.com>
AuthorDate: Tue Nov 19 05:22:30 2019 -0800

    Spark: Fix case sensitivity setting (#656)
---
 .../src/main/java/org/apache/iceberg/spark/source/IcebergSource.java  | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/spark/src/main/java/org/apache/iceberg/spark/source/IcebergSource.java b/spark/src/main/java/org/apache/iceberg/spark/source/IcebergSource.java
index 7a1f58d..f97de81 100644
--- a/spark/src/main/java/org/apache/iceberg/spark/source/IcebergSource.java
+++ b/spark/src/main/java/org/apache/iceberg/spark/source/IcebergSource.java
@@ -71,9 +71,9 @@ public class IcebergSource implements DataSourceV2, ReadSupport, WriteSupport, D
   public DataSourceReader createReader(StructType readSchema, DataSourceOptions options) {
     Configuration conf = new Configuration(lazyBaseConf());
     Table table = getTableAndResolveHadoopConfiguration(options, conf);
-    String caseSensitive = lazySparkSession().conf().get("spark.sql.caseSensitive", "true");
+    String caseSensitive = lazySparkSession().conf().get("spark.sql.caseSensitive");
 
-    Reader reader = new Reader(table, Boolean.valueOf(caseSensitive), options);
+    Reader reader = new Reader(table, Boolean.parseBoolean(caseSensitive), options);
     if (readSchema != null) {
       // convert() will fail if readSchema contains fields not in table.schema()
       SparkSchemaUtil.convert(table.schema(), readSchema);
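
For context on the change itself: Spark's built-in default for spark.sql.caseSensitive is false, so the old fallback of "true" inverted the engine's default whenever the property was unset in the session; calling conf().get(key) with no fallback lets the Spark session supply its own default instead. Boolean.parseBoolean also returns a primitive boolean directly, avoiding the boxed Boolean produced by Boolean.valueOf. A minimal, hypothetical sketch of the before/after behavior (not part of the commit; getWithFallback stands in for the session conf lookup):

    // CaseSensitivityFixSketch.java -- illustrative only, not commit code.
    public class CaseSensitivityFixSketch {

        // Hypothetical stand-in for lazySparkSession().conf().get(key, fallback):
        // returns the fallback whenever the key is unset in the session.
        static String getWithFallback(String sessionValue, String fallback) {
            return sessionValue != null ? sessionValue : fallback;
        }

        public static void main(String[] args) {
            // Before the fix: an unset spark.sql.caseSensitive fell back to
            // "true", even though Spark's default for this property is false.
            String before = getWithFallback(null, "true");
            System.out.println(Boolean.valueOf(before));     // true -- inverts Spark's default

            // After the fix: dropping the hardcoded fallback lets Spark supply
            // its own default ("false"), modeled here by passing it directly.
            String after = getWithFallback(null, "false");
            System.out.println(Boolean.parseBoolean(after)); // false -- matches Spark
        }
    }

Running the sketch prints "true" for the old path and "false" for the new one, showing how a reader with spark.sql.caseSensitive unset would previously have been made case-sensitive against Spark's documented default.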