You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@spark.apache.org by do...@apache.org on 2019/05/27 03:48:04 UTC
[spark] branch master updated: [SPARK-27782][SQL] Use '#' to mark
expression id embedded in the name field of SubqueryExec operator
This is an automated email from the ASF dual-hosted git repository.
dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 5060647 [SPARK-27782][SQL] Use '#' to mark expression id embedded in the name field of SubqueryExec operator
5060647 is described below
commit 5060647bb1df864113271e8752c43f2407bca04c
Author: Dilip Biswal <db...@us.ibm.com>
AuthorDate: Sun May 26 20:47:25 2019 -0700
[SPARK-27782][SQL] Use '#' to mark expression id embedded in the name field of SubqueryExec operator
## What changes were proposed in this pull request?
This is a minor PR to use `#` as a marker for the expression id that is embedded in the name field of the SubqueryExec operator.
## How was this patch tested?
Added a small test in SubquerySuite.
Closes #24652 from dilipbiswal/subquery-name.
Authored-by: Dilip Biswal <db...@us.ibm.com>
Signed-off-by: Dongjoon Hyun <dh...@apple.com>
---
.../main/scala/org/apache/spark/sql/execution/subquery.scala | 2 +-
.../src/test/scala/org/apache/spark/sql/SubquerySuite.scala | 12 ++++++++++++
2 files changed, 13 insertions(+), 1 deletion(-)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/subquery.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/subquery.scala
index c70f17f..960d47b 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/subquery.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/subquery.scala
@@ -116,7 +116,7 @@ case class PlanSubqueries(sparkSession: SparkSession) extends Rule[SparkPlan] {
case subquery: expressions.ScalarSubquery =>
val executedPlan = new QueryExecution(sparkSession, subquery.plan).executedPlan
ScalarSubquery(
- SubqueryExec(s"subquery${subquery.exprId.id}", executedPlan),
+ SubqueryExec(s"scalar-subquery#${subquery.exprId.id}", executedPlan),
subquery.exprId)
}
}
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
index 28a9905..fddc4f6 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
@@ -1371,4 +1371,16 @@ class SubquerySuite extends QueryTest with SharedSQLContext {
}
}
}
+
+ test("Scalar subquery name should start with scalar-subquery#") {
+ val df = sql("SELECT a FROM l WHERE a = (SELECT max(c) FROM r WHERE c = 1)".stripMargin)
+ var subqueryExecs: ArrayBuffer[SubqueryExec] = ArrayBuffer.empty
+ df.queryExecution.executedPlan.transformAllExpressions {
+ case s @ ScalarSubquery(p: SubqueryExec, _) =>
+ subqueryExecs += p
+ s
+ }
+ assert(subqueryExecs.forall(_.name.startsWith("scalar-subquery#")),
+ "SubqueryExec name should start with scalar-subquery#")
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org