Posted to commits@spark.apache.org by do...@apache.org on 2019/04/26 01:41:22 UTC

[spark] branch master updated: Revert "[SPARK-27439][SQL] Use analyzed plan when explaining Dataset"

This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new d5dbf05  Revert "[SPARK-27439][SQL] Use analyzed plan when explaining Dataset"
d5dbf05 is described below

commit d5dbf053d39d7348c9d552b26328752796dc378c
Author: Dongjoon Hyun <dh...@apple.com>
AuthorDate: Thu Apr 25 18:38:52 2019 -0700

    Revert "[SPARK-27439][SQL] Use analyzed plan when explaining Dataset"
    
    This reverts commit ad60c6d9be3234a0296d1620129d5ca108f0876b.
---
 .../src/main/scala/org/apache/spark/sql/Dataset.scala |  5 +----
 .../scala/org/apache/spark/sql/DataFrameSuite.scala   | 19 +------------------
 2 files changed, 2 insertions(+), 22 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
index e974912..793714f 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -498,10 +498,7 @@ class Dataset[T] private[sql](
    * @since 1.6.0
    */
   def explain(extended: Boolean): Unit = {
-    // Because views are possibly resolved in the analyzed plan of this dataset. We use analyzed
-    // plan in `ExplainCommand`, for consistency. Otherwise, the plans shown by explain command
-    // might be inconsistent with the evaluated data of this dataset.
-    val explain = ExplainCommand(queryExecution.analyzed, extended = extended)
+    val explain = ExplainCommand(queryExecution.logical, extended = extended)
     sparkSession.sessionState.executePlan(explain).executedPlan.executeCollect().foreach {
       // scalastyle:off println
       r => println(r.getString(0))
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index 62fcca4..8a9c526 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -17,7 +17,7 @@
 
 package org.apache.spark.sql
 
-import java.io.{ByteArrayOutputStream, File}
+import java.io.File
 import java.nio.charset.StandardCharsets
 import java.sql.{Date, Timestamp}
 import java.util.UUID
@@ -2133,21 +2133,4 @@ class DataFrameSuite extends QueryTest with SharedSQLContext {
       checkAnswer(res, Row("1-1", 6, 6))
     }
   }
-
-  test("SPARK-27439: Explain result should match collected result after view change") {
-    withTempView("test", "test2", "tmp") {
-      spark.range(10).createOrReplaceTempView("test")
-      spark.range(5).createOrReplaceTempView("test2")
-      spark.sql("select * from test").createOrReplaceTempView("tmp")
-      val df = spark.sql("select * from tmp")
-      spark.sql("select * from test2").createOrReplaceTempView("tmp")
-
-      val captured = new ByteArrayOutputStream()
-      Console.withOut(captured) {
-        df.explain()
-      }
-      checkAnswer(df, spark.range(10).toDF)
-      assert(captured.toString().contains("Range (0, 10, step=1, splits=2)"))
-    }
-  }
 }


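For readers following the change: this revert switches `Dataset.explain()` back to planning from `queryExecution.logical` rather than `queryExecution.analyzed`, which is the behavior the removed SPARK-27439 test exercised. Below is a minimal, hypothetical sketch (not part of the patch) of that behavior, assuming a local SparkSession; view names and the `main` wrapper are illustrative only.

    import org.apache.spark.sql.SparkSession

    object ExplainPlanSketch {
      def main(args: Array[String]): Unit = {
        val spark = SparkSession.builder()
          .master("local[2]")
          .appName("explain-plan-sketch")
          .getOrCreate()

        spark.range(10).createOrReplaceTempView("test")
        spark.range(5).createOrReplaceTempView("test2")

        // `df` is analyzed against the current definition of `tmp` (backed by `test`).
        spark.sql("select * from test").createOrReplaceTempView("tmp")
        val df = spark.sql("select * from tmp")

        // Redefine `tmp` to point at `test2` after `df` has already been analyzed.
        spark.sql("select * from test2").createOrReplaceTempView("tmp")

        // With this revert, explain() re-plans from the logical plan, so it resolves
        // `tmp` to the new view (a 5-row range), while collecting `df` still returns
        // the rows 0..9 from the view that was in place when `df` was analyzed.
        df.explain()
        df.show()

        spark.stop()
      }
    }

This is the "explain result may not match collected result after a view change" situation described in the comment the revert removes from Dataset.scala.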
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@spark.apache.org
For additional commands, e-mail: commits-help@spark.apache.org