Posted to commits@spark.apache.org by hv...@apache.org on 2016/08/23 10:59:33 UTC

spark git commit: [SPARK-17199] Use CatalystConf.resolver for case-sensitivity comparison

Repository: spark
Updated Branches:
  refs/heads/master cc33460a5 -> 9d376ad76


[SPARK-17199] Use CatalystConf.resolver for case-sensitivity comparison

## What changes were proposed in this pull request?

Use `CatalystConf.resolver` consistently for case-sensitivity comparison, removing the duplicated resolver definitions at each call site. A sketch of the consolidation follows below.
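
For context, a minimal, self-contained Scala sketch of the pattern being consolidated: the if/else over `caseSensitiveAnalysis` that several call sites carried, and the single definition they now delegate to via `conf.resolver`. The `Resolver` alias and the two resolution functions below mirror names from `org.apache.spark.sql.catalyst.analysis`, but this is an illustrative stand-in, not the actual Spark sources.

```scala
object ResolverSketch {
  // In Spark, Resolver is an alias for (String, String) => Boolean.
  type Resolver = (String, String) => Boolean

  // Stand-ins for caseSensitiveResolution / caseInsensitiveResolution.
  val caseSensitiveResolution: Resolver = (a, b) => a == b
  val caseInsensitiveResolution: Resolver = (a, b) => a.equalsIgnoreCase(b)

  // The duplicated pattern this patch removes from each call site;
  // after the patch, call sites simply use conf.resolver instead.
  def resolverBefore(caseSensitiveAnalysis: Boolean): Resolver =
    if (caseSensitiveAnalysis) caseSensitiveResolution
    else caseInsensitiveResolution

  def main(args: Array[String]): Unit = {
    val resolver = resolverBefore(caseSensitiveAnalysis = false)
    println(resolver("Name", "name")) // true under case-insensitive analysis
  }
}
```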

## How was this patch tested?

Local build; waiting for Jenkins to confirm a clean build and passing tests.

Author: Jacek Laskowski <ja...@japila.pl>

Closes #14771 from jaceklaskowski/17199-catalystconf-resolver.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/9d376ad7
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/9d376ad7
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/9d376ad7

Branch: refs/heads/master
Commit: 9d376ad76ca702ae3fc6ffd0567e7590d9a8daf3
Parents: cc33460
Author: Jacek Laskowski <ja...@japila.pl>
Authored: Tue Aug 23 12:59:25 2016 +0200
Committer: Herman van Hovell <hv...@databricks.com>
Committed: Tue Aug 23 12:59:25 2016 +0200

----------------------------------------------------------------------
 .../org/apache/spark/sql/catalyst/analysis/Analyzer.scala |  8 +-------
 .../spark/sql/execution/datasources/DataSource.scala      | 10 ++--------
 .../sql/execution/datasources/DataSourceStrategy.scala    |  8 +-------
 .../spark/sql/execution/streaming/FileStreamSink.scala    |  6 +-----
 4 files changed, 5 insertions(+), 27 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/9d376ad7/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 41e0e6d..e559f23 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -64,13 +64,7 @@ class Analyzer(
     this(catalog, conf, conf.optimizerMaxIterations)
   }
 
-  def resolver: Resolver = {
-    if (conf.caseSensitiveAnalysis) {
-      caseSensitiveResolution
-    } else {
-      caseInsensitiveResolution
-    }
-  }
+  def resolver: Resolver = conf.resolver
 
   protected val fixedPoint = FixedPoint(maxIterations)
 

http://git-wip-us.apache.org/repos/asf/spark/blob/9d376ad7/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
index 5ad6ae0..b783d69 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala
@@ -394,13 +394,7 @@ case class DataSource(
             sparkSession, globbedPaths, options, partitionSchema, !checkPathExist)
 
         val dataSchema = userSpecifiedSchema.map { schema =>
-          val equality =
-            if (sparkSession.sessionState.conf.caseSensitiveAnalysis) {
-              org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution
-            } else {
-              org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution
-            }
-
+          val equality = sparkSession.sessionState.conf.resolver
           StructType(schema.filterNot(f => partitionColumns.exists(equality(_, f.name))))
         }.orElse {
           format.inferSchema(
@@ -430,7 +424,7 @@ case class DataSource(
     relation
   }
 
-  /** Writes the give [[DataFrame]] out to this [[DataSource]]. */
+  /** Writes the given [[DataFrame]] out to this [[DataSource]]. */
   def write(
       mode: SaveMode,
       data: DataFrame): BaseRelation = {

http://git-wip-us.apache.org/repos/asf/spark/blob/9d376ad7/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
index 5eba7df..a662105 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceStrategy.scala
@@ -45,13 +45,7 @@ import org.apache.spark.unsafe.types.UTF8String
  */
 case class DataSourceAnalysis(conf: CatalystConf) extends Rule[LogicalPlan] {
 
-  def resolver: Resolver = {
-    if (conf.caseSensitiveAnalysis) {
-      caseSensitiveResolution
-    } else {
-      caseInsensitiveResolution
-    }
-  }
+  def resolver: Resolver = conf.resolver
 
   // Visible for testing.
   def convertStaticPartitions(

http://git-wip-us.apache.org/repos/asf/spark/blob/9d376ad7/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala
index 117d667..0f7d958 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSink.scala
@@ -102,11 +102,7 @@ class FileStreamSinkWriter(
   // Get the actual partition columns as attributes after matching them by name with
   // the given columns names.
   private val partitionColumns = partitionColumnNames.map { col =>
-    val nameEquality = if (data.sparkSession.sessionState.conf.caseSensitiveAnalysis) {
-      org.apache.spark.sql.catalyst.analysis.caseSensitiveResolution
-    } else {
-      org.apache.spark.sql.catalyst.analysis.caseInsensitiveResolution
-    }
+    val nameEquality = data.sparkSession.sessionState.conf.resolver
     data.logicalPlan.output.find(f => nameEquality(f.name, col)).getOrElse {
       throw new RuntimeException(s"Partition column $col not found in schema $dataSchema")
     }
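
To make the effect of the last hunk concrete, here is a hypothetical standalone sketch of how a `Resolver` is applied when matching a requested partition column against a plan's output, mirroring `data.logicalPlan.output.find(f => nameEquality(f.name, col))` above. `findColumn` and the plain `Seq[String]` schema are illustrative simplifications, not Spark APIs.

```scala
object PartitionMatchSketch {
  type Resolver = (String, String) => Boolean
  val caseInsensitiveResolution: Resolver = (a, b) => a.equalsIgnoreCase(b)

  // Plain strings stand in for the plan's output attributes.
  def findColumn(output: Seq[String], col: String, nameEquality: Resolver): String =
    output.find(name => nameEquality(name, col)).getOrElse {
      throw new RuntimeException(s"Partition column $col not found in schema $output")
    }

  def main(args: Array[String]): Unit = {
    // "ID" matches "id" under the case-insensitive resolver.
    println(findColumn(Seq("id", "value"), "ID", caseInsensitiveResolution))
  }
}
```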

