Posted to commits@iceberg.apache.org by bl...@apache.org on 2021/01/11 18:27:15 UTC

[iceberg] branch master updated: Spark: Override simpleString in dynamic file filter nodes (#2066)

This is an automated email from the ASF dual-hosted git repository.

blue pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iceberg.git


The following commit(s) were added to refs/heads/master by this push:
     new 0195044  Spark: Override simpleString in dynamic file filter nodes (#2066)
0195044 is described below

commit 01950440ec2f59594aef3d414f106be438c3e0e3
Author: Anton Okolnychyi <ao...@apple.com>
AuthorDate: Mon Jan 11 20:26:30 2021 +0200

    Spark: Override simpleString in dynamic file filter nodes (#2066)
---
 .../apache/spark/sql/catalyst/plans/logical/DynamicFileFilter.scala  | 5 +++++
 .../spark/sql/execution/datasources/v2/DynamicFileFilterExec.scala   | 5 +++++
 2 files changed, 10 insertions(+)
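
Context for the diff below: simpleString(maxFields) is the one-line label Spark prints for each plan node in explain() output, and org.apache.spark.sql.catalyst.util.truncatedString caps a rendered sequence at maxFields entries. A minimal sketch of what the new overrides produce, using hypothetical attribute names in place of the node's real `output`:

    import org.apache.spark.sql.catalyst.util.truncatedString

    // Hypothetical attribute names standing in for the node's output attributes.
    val output = Seq("id#1", "data#2", "category#3")

    // Same expression as the overrides added below: truncatedString keeps at
    // most maxFields entries and summarizes the rest.
    val label = s"DynamicFileFilter${truncatedString(output, "[", ", ", "]", 2)}"
    // yields something like: DynamicFileFilter[id#1, ... 2 more fields]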

diff --git a/spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/DynamicFileFilter.scala b/spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/DynamicFileFilter.scala
index 75011b5..e41ef35 100644
--- a/spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/DynamicFileFilter.scala
+++ b/spark3-extensions/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/DynamicFileFilter.scala
@@ -20,6 +20,7 @@
 package org.apache.spark.sql.catalyst.plans.logical
 
 import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeSet}
+import org.apache.spark.sql.catalyst.util.truncatedString
 import org.apache.spark.sql.connector.iceberg.read.SupportsFileFilter
 
 // TODO: fix stats (ignore the fact it is a binary node and report only scanRelation stats)
@@ -34,4 +35,8 @@ case class DynamicFileFilter(
   override def left: LogicalPlan = scanPlan
   override def right: LogicalPlan = fileFilterPlan
   override def output: Seq[Attribute] = scanPlan.output
+
+  override def simpleString(maxFields: Int): String = {
+    s"DynamicFileFilter${truncatedString(output, "[", ", ", "]", maxFields)}"
+  }
 }
diff --git a/spark3-extensions/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DynamicFileFilterExec.scala b/spark3-extensions/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DynamicFileFilterExec.scala
index 20590d7..64af711 100644
--- a/spark3-extensions/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DynamicFileFilterExec.scala
+++ b/spark3-extensions/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DynamicFileFilterExec.scala
@@ -24,6 +24,7 @@ import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeSet, SortOrder}
 import org.apache.spark.sql.catalyst.plans.physical
+import org.apache.spark.sql.catalyst.util.truncatedString
 import org.apache.spark.sql.connector.iceberg.read.SupportsFileFilter
 import org.apache.spark.sql.execution.{BinaryExecNode, SparkPlan}
 import org.apache.spark.sql.vectorized.ColumnarBatch
@@ -51,4 +52,8 @@ case class DynamicFileFilterExec(
     val matchedFileLocations = rows.map(_.getString(0))
     filterable.filterFiles(matchedFileLocations.toSet.asJava)
   }
+
+  override def simpleString(maxFields: Int): String = {
+    s"DynamicFileFilterExec${truncatedString(output, "[", ", ", "]", maxFields)}"
+  }
 }
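
Note on the change: without these overrides, TreeNode's default simpleString renders a node's non-child constructor arguments, so both nodes would presumably print the underlying SupportsFileFilter scan's toString in plan trees. Overriding simpleString collapses each node to a compact "NodeName[output attributes]" label in explain() output, matching the compact style of Spark's built-in scan nodes.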