You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by ma...@apache.org on 2014/10/27 00:36:14 UTC
git commit: [SPARK-3483][SQL] Special chars in column names
Repository: spark
Updated Branches:
refs/heads/master 0481aaa8d -> 974d7b238
[SPARK-3483][SQL] Special chars in column names
Supports special characters in column names by allowing them to be quoted with backticks. Closed https://github.com/apache/spark/pull/2804 and created this PR because that one had merge conflicts.
Author: ravipesala <ra...@huawei.com>
Closes #2927 from ravipesala/SPARK-3483-NEW and squashes the following commits:
f6329f3 [ravipesala] Rebased with master
Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/974d7b23
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/974d7b23
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/974d7b23
Branch: refs/heads/master
Commit: 974d7b238b415791975c60e4e202265d6fd31e2d
Parents: 0481aaa
Author: ravipesala <ra...@huawei.com>
Authored: Sun Oct 26 16:36:11 2014 -0700
Committer: Michael Armbrust <mi...@databricks.com>
Committed: Sun Oct 26 16:36:11 2014 -0700
----------------------------------------------------------------------
.../scala/org/apache/spark/sql/catalyst/SparkSQLParser.scala | 2 ++
.../src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala | 6 ++++++
2 files changed, 8 insertions(+)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/spark/blob/974d7b23/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SparkSQLParser.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SparkSQLParser.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SparkSQLParser.scala
index 0446734..219322c 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SparkSQLParser.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/SparkSQLParser.scala
@@ -75,6 +75,8 @@ class SqlLexical(val keywords: Seq[String]) extends StdLexical {
{ case chars => StringLit(chars mkString "") }
| '"' ~> chrExcept('"', '\n', EofCh).* <~ '"' ^^
{ case chars => StringLit(chars mkString "") }
+ | '`' ~> chrExcept('`', '\n', EofCh).* <~ '`' ^^
+ { case chars => Identifier(chars mkString "") }
| EofCh ^^^ EOF
| '\'' ~> failure("unclosed string literal")
| '"' ~> failure("unclosed string literal")
http://git-wip-us.apache.org/repos/asf/spark/blob/974d7b23/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index 3959925..ffb504b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -748,4 +748,10 @@ class SQLQuerySuite extends QueryTest with BeforeAndAfterAll {
""".stripMargin),
(1 to 100).map(i => Seq(i, i, i)))
}
+
+ test("SPARK-3483 Special chars in column names") {
+ val data = sparkContext.parallelize(Seq("""{"key?number1": "value1", "key.number2": "value2"}"""))
+ jsonRDD(data).registerTempTable("records")
+ sql("SELECT `key?number1` FROM records")
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org