Posted to commits@spark.apache.org by ru...@apache.org on 2022/11/29 11:20:28 UTC

[spark] branch master updated: [SPARK-41316][SQL] Enable tail-recursion wherever possible

This is an automated email from the ASF dual-hosted git repository.

ruifengz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 99de9879b6a [SPARK-41316][SQL] Enable tail-recursion wherever possible
99de9879b6a is described below

commit 99de9879b6a3942d88c0644dc5fc8c75e682d925
Author: yangjie01 <ya...@baidu.com>
AuthorDate: Tue Nov 29 19:20:05 2022 +0800

    [SPARK-41316][SQL] Enable tail-recursion wherever possible
    
    ### What changes were proposed in this pull request?
    Similar to SPARK-37783, this PR adds `scala.annotation.tailrec` to methods flagged as tail-recursive by IDE (IntelliJ) inspection; these are new cases introduced after Spark 3.3.
    
    ### Why are the changes needed?
    To improve performance: `@tailrec` makes the Scala compiler verify that the recursion is in tail position and compile it into a loop, avoiding per-call stack frames (and potential stack overflow on deeply nested plans).
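    
    As an illustrative sketch (not code from this commit): `@tailrec` asks the compiler to prove that every recursive call is the method's final action and to emit an iterative loop, failing the build otherwise. The names below are hypothetical:
    
    ```scala
    import scala.annotation.tailrec
    
    object TailRecSketch {
      // Compiles to a while-loop: the recursive call is the last action,
      // so no stack frame accumulates per list element.
      @tailrec
      def length(xs: List[Int], acc: Int = 0): Int = xs match {
        case Nil => acc
        case _ :: tail => length(tail, acc + 1)
      }
      // A non-tail variant such as `1 + length(tail)` would be rejected
      // at compile time under @tailrec.
    }
    ```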
    
    ### Does this PR introduce _any_ user-facing change?
    No
    
    ### How was this patch tested?
    Pass GitHub Actions.
    
    Closes #38835 from LuciferYang/SPARK-41316.
    
    Authored-by: yangjie01 <ya...@baidu.com>
    Signed-off-by: Ruifeng Zheng <ru...@apache.org>
---
 .../org/apache/spark/sql/catalyst/optimizer/InjectRuntimeFilter.scala | 4 ++++
 .../scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala     | 1 +
 .../main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala  | 1 +
 .../spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala   | 1 +
 4 files changed, 7 insertions(+)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/InjectRuntimeFilter.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/InjectRuntimeFilter.scala
index 1d5129ff7f0..df04a248a27 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/InjectRuntimeFilter.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/InjectRuntimeFilter.scala
@@ -17,6 +17,8 @@
 
 package org.apache.spark.sql.catalyst.optimizer
 
+import scala.annotation.tailrec
+
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.expressions.aggregate.BloomFilterAggregate
 import org.apache.spark.sql.catalyst.planning.ExtractEquiJoinKeys
@@ -116,6 +118,7 @@ object InjectRuntimeFilter extends Rule[LogicalPlan] with PredicateHelper with J
    * do not add a subquery that might have an expensive computation
    */
   private def isSelectiveFilterOverScan(plan: LogicalPlan): Boolean = {
+    @tailrec
     def isSelective(
         p: LogicalPlan,
         predicateReference: AttributeSet,
@@ -225,6 +228,7 @@ object InjectRuntimeFilter extends Rule[LogicalPlan] with PredicateHelper with J
   }
 
   // This checks if there is already a DPP filter, as this rule is called just after DPP.
+  @tailrec
   def hasDynamicPruningSubquery(
       left: LogicalPlan,
       right: LogicalPlan,
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
index ecb93f6b239..1f0fb667753 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
@@ -1083,6 +1083,7 @@ object CollapseProject extends Rule[LogicalPlan] with AliasHelper {
   }
 
   private object ExtractOnlyRef {
+    @scala.annotation.tailrec
     def unapply(expr: Expression): Option[Attribute] = expr match {
       case a: Alias => unapply(a.child)
       case e: ExtractValue => unapply(e.children.head)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
index bbda9eb76b1..b029a3b0ce9 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/planning/patterns.scala
@@ -157,6 +157,7 @@ object ScanOperation extends OperationHelper {
 }
 
 object NodeWithOnlyDeterministicProjectAndFilter {
+  @scala.annotation.tailrec
   def unapply(plan: LogicalPlan): Option[LogicalPlan] = plan match {
     case Project(projectList, child) if projectList.forall(_.deterministic) => unapply(child)
     case Filter(cond, child) if cond.deterministic => unapply(child)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala
index 87b11da5d5c..49a6c7232ec 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2ScanRelationPushDown.scala
@@ -454,6 +454,7 @@ object V2ScanRelationPushDown extends Rule[LogicalPlan] with PredicateHelper {
     case other => (other, false)
   }
 
+  @scala.annotation.tailrec
   private def pushDownOffset(
       plan: LogicalPlan,
       offset: Int): Boolean = plan match {
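
For context, a minimal model of the recursion shape these hunks annotate (hypothetical names, simplified from `NodeWithOnlyDeterministicProjectAndFilter` above; the fallback case is an assumption, not copied from Spark): each method peels one plan node per call, and the recursive call is the method's final action, so `@tailrec` holds and the compiler turns the descent into a loop.

```scala
import scala.annotation.tailrec

// Hypothetical stand-ins for Spark's LogicalPlan node types.
sealed trait Plan
case class Project(deterministic: Boolean, child: Plan) extends Plan
case class Filter(deterministic: Boolean, child: Plan) extends Plan
case class Leaf(name: String) extends Plan

object DeterministicWrappersStripped {
  // Same shape as the annotated unapply methods: strip one deterministic
  // Project/Filter wrapper per iteration until a non-wrapper node remains.
  @tailrec
  def unapply(plan: Plan): Option[Plan] = plan match {
    case Project(true, child) => unapply(child)
    case Filter(true, child)  => unapply(child)
    case other                => Some(other)
  }
}
```

Used as an extractor, e.g. `case DeterministicWrappersStripped(core) => ...`, this matches any plan and binds `core` to the node beneath the deterministic wrappers.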

