Posted to commits@spark.apache.org by li...@apache.org on 2015/06/07 09:35:31 UTC

spark git commit: [SPARK-8141] [SQL] Precompute datatypes for partition columns and reuse them

Repository: spark
Updated Branches:
  refs/heads/master 081db9479 -> 26d07f1ec


[SPARK-8141] [SQL] Precompute datatypes for partition columns and reuse them

JIRA: https://issues.apache.org/jira/browse/SPARK-8141

Author: Liang-Chi Hsieh <vi...@gmail.com>

Closes #6687 from viirya/reuse_partition_column_types and squashes the following commits:

dab0688 [Liang-Chi Hsieh] Reuse partitionColumnTypes.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/26d07f1e
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/26d07f1e
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/26d07f1e

Branch: refs/heads/master
Commit: 26d07f1ece4174788b0bcdc338a14d0bbc0e3602
Parents: 081db94
Author: Liang-Chi Hsieh <vi...@gmail.com>
Authored: Sun Jun 7 15:33:48 2015 +0800
Committer: Cheng Lian <li...@databricks.com>
Committed: Sun Jun 7 15:33:48 2015 +0800

----------------------------------------------------------------------
 .../src/main/scala/org/apache/spark/sql/sources/interfaces.scala  | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/26d07f1e/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala b/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
index f5bd2d2..25887ba 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/sources/interfaces.scala
@@ -435,8 +435,9 @@ abstract class HadoopFsRelation private[sql](maybePartitionSpec: Option[Partitio
           // partition values.
           userDefinedPartitionColumns.map { partitionSchema =>
             val spec = discoverPartitions()
+            val partitionColumnTypes = spec.partitionColumns.map(_.dataType)
             val castedPartitions = spec.partitions.map { case p @ Partition(values, path) =>
-              val literals = values.toSeq.zip(spec.partitionColumns.map(_.dataType)).map {
+              val literals = values.toSeq.zip(partitionColumnTypes).map {
                 case (value, dataType) => Literal.create(value, dataType)
               }
               val castedValues = partitionSchema.zip(literals).map { case (field, literal) =>

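The change hoists spec.partitionColumns.map(_.dataType) out of the per-partition loop: the partition column data types are identical for every partition, so they can be computed once and reused instead of being recomputed for each Partition. Below is a minimal, self-contained Scala sketch of that pattern, using simplified stand-in types (Column, SimplePartition, and SimpleSpec are hypothetical placeholders, not Spark's actual Partition/PartitionSpec classes).

object ReusePartitionColumnTypesSketch {
  case class Column(name: String, dataType: String)
  case class SimplePartition(values: Seq[Any], path: String)
  case class SimpleSpec(partitionColumns: Seq[Column], partitions: Seq[SimplePartition])

  def castPartitions(spec: SimpleSpec): Seq[Seq[(Any, String)]] = {
    // Hoisted out of the per-partition loop: evaluated once and reused below.
    val partitionColumnTypes = spec.partitionColumns.map(_.dataType)
    spec.partitions.map { p =>
      // Before the change, the dataType sequence was rebuilt for every partition
      // inside this map; now each partition just zips against the cached sequence.
      p.values.zip(partitionColumnTypes)
    }
  }

  def main(args: Array[String]): Unit = {
    val spec = SimpleSpec(
      partitionColumns = Seq(Column("year", "IntegerType"), Column("month", "IntegerType")),
      partitions = Seq(
        SimplePartition(Seq(2015, 6), "hdfs://path/year=2015/month=6"),
        SimplePartition(Seq(2015, 7), "hdfs://path/year=2015/month=7")
      )
    )
    castPartitions(spec).foreach(println)
  }
}

The saving is small per partition, but it avoids an O(columns) allocation for every partition of a heavily partitioned table.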

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org