You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by rx...@apache.org on 2016/04/25 05:48:04 UTC

spark git commit: [SPARK-14885][SQL] When creating a CatalogColumn, we should use the catalogString of a DataType object.

Repository: spark
Updated Branches:
  refs/heads/master d34d65037 -> 35319d326


[SPARK-14885][SQL] When creating a CatalogColumn, we should use the catalogString of a DataType object.

## What changes were proposed in this pull request?

Right now, the data type field of a CatalogColumn uses a string representation. When we create this string from a DataType object, there are places where we use simpleString instead of catalogString. Although catalogString is currently identical to simpleString, it is still better to use catalogString consistently: that way, we will not silently introduce issues if the semantics of simpleString or the implementation of catalogString ever change.

## How was this patch tested?
Existing tests.

Author: Yin Huai <yh...@databricks.com>

Closes #12654 from yhuai/useCatalogString.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/35319d32
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/35319d32
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/35319d32

Branch: refs/heads/master
Commit: 35319d326488b3bf9235dfcf9ac4533ce846f21f
Parents: d34d650
Author: Yin Huai <yh...@databricks.com>
Authored: Sun Apr 24 20:48:01 2016 -0700
Committer: Reynold Xin <rx...@databricks.com>
Committed: Sun Apr 24 20:48:01 2016 -0700

----------------------------------------------------------------------
 .../scala/org/apache/spark/sql/execution/SparkSqlParser.scala    | 2 +-
 .../scala/org/apache/spark/sql/execution/command/views.scala     | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/35319d32/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index 2b301a6..f22ed43 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -1073,7 +1073,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
         // just convert the whole type string to lower case, otherwise the struct field names
         // will no longer be case sensitive. Instead, we rely on our parser to get the proper
         // case before passing it to Hive.
-        CatalystSqlParser.parseDataType(col.dataType.getText).simpleString,
+        CatalystSqlParser.parseDataType(col.dataType.getText).catalogString,
         nullable = true,
         Option(col.STRING).map(string))
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/35319d32/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
index 7542f9d..07cc4a9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
@@ -144,11 +144,11 @@ case class CreateViewCommand(
     val viewSchema: Seq[CatalogColumn] = {
       if (tableDesc.schema.isEmpty) {
         analyzedPlan.output.map { a =>
-          CatalogColumn(a.name, a.dataType.simpleString)
+          CatalogColumn(a.name, a.dataType.catalogString)
         }
       } else {
         analyzedPlan.output.zip(tableDesc.schema).map { case (a, col) =>
-          CatalogColumn(col.name, a.dataType.simpleString, nullable = true, col.comment)
+          CatalogColumn(col.name, a.dataType.catalogString, nullable = true, col.comment)
         }
       }
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org