Posted to commits@spark.apache.org by hu...@apache.org on 2022/07/29 17:34:30 UTC

[spark] branch master updated: [SPARK-39909] Organize the check of push down information for JDBCV2Suite

This is an automated email from the ASF dual-hosted git repository.

huaxingao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 5eb5077acf4 [SPARK-39909] Organize the check of push down information for JDBCV2Suite
5eb5077acf4 is described below

commit 5eb5077acf47e911f7a5299bb029d4e5a2702f9f
Author: chenliang.lu <ma...@gmail.com>
AuthorDate: Fri Jul 29 10:33:53 2022 -0700

    [SPARK-39909] Organize the check of push down information for JDBCV2Suite
    
    ### What changes were proposed in this pull request?
    This PR changes the check method from `check(one_large_string)` to `check(small_string1, small_string2, ...)`.
    
    ### Why are the changes needed?
    It lets us check the expected results individually and makes the code clearer.
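    
    For illustration, the varargs style can be sketched as below. This is only an approximation of the idea, not the suite's actual helper: the `explainString` rendering and the assertion message are assumptions. The point is that each expected fragment is asserted independently, so a failure names the exact piece of pushed-down information that is missing instead of diffing one large string.
    
    ```scala
    import org.apache.spark.sql.DataFrame
    import org.apache.spark.sql.execution.ExplainMode
    
    // Hypothetical sketch (the real checkPushedInfo may differ): render the
    // plan once, then assert each expected fragment separately so the failure
    // message pinpoints the missing pushed-down info.
    def checkPushedInfo(df: DataFrame, expectedPlanFragments: String*): Unit = {
      val explain = df.queryExecution.explainString(ExplainMode.fromString("extended"))
      expectedPlanFragments.foreach { fragment =>
        assert(explain.contains(fragment),
          s"Expected plan fragment not found: $fragment")
      }
    }
    ```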
    
    ### Does this PR introduce _any_ user-facing change?
    no
    
    ### How was this patch tested?
    existing tests
    
    Closes #37342 from yabola/fix.
    
    Authored-by: chenliang.lu <ma...@gmail.com>
    Signed-off-by: huaxingao <hu...@apple.com>
---
 .../org/apache/spark/sql/jdbc/JDBCV2Suite.scala    | 307 ++++++++++++++-------
 1 file changed, 203 insertions(+), 104 deletions(-)

diff --git a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
index d64b1815007..3b226d60643 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCV2Suite.scala
@@ -210,7 +210,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
   test("TABLESAMPLE (integer_expression ROWS) is the same as LIMIT") {
     val df = sql("SELECT NAME FROM h2.test.employee TABLESAMPLE (3 ROWS)")
     checkSchemaNames(df, Seq("NAME"))
-    checkPushedInfo(df, "PushedFilters: [], PushedLimit: LIMIT 3, ")
+    checkPushedInfo(df,
+      "PushedFilters: []",
+      "PushedLimit: LIMIT 3")
     checkAnswer(df, Seq(Row("amy"), Row("alex"), Row("cathy")))
   }
 
@@ -238,7 +240,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .where($"dept" === 1).limit(1)
     checkLimitRemoved(df1)
     checkPushedInfo(df1,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], PushedLimit: LIMIT 1, ")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "PushedLimit: LIMIT 1")
     checkAnswer(df1, Seq(Row(1, "amy", 10000.00, 1000.0, true)))
 
     val df2 = spark.read
@@ -251,14 +254,16 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkLimitRemoved(df2, false)
     checkPushedInfo(df2,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1], PushedLimit: LIMIT 1, ")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1]",
+      "PushedLimit: LIMIT 1")
     checkAnswer(df2, Seq(Row(2, "alex", 12000.00, 1200.0, false)))
 
     val df3 = sql("SELECT name FROM h2.test.employee WHERE dept > 1 LIMIT 1")
     checkSchemaNames(df3, Seq("NAME"))
     checkLimitRemoved(df3)
     checkPushedInfo(df3,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1], PushedLimit: LIMIT 1, ")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1]",
+      "PushedLimit: LIMIT 1")
     checkAnswer(df3, Seq(Row("alex")))
 
     val df4 = spark.read
@@ -283,7 +288,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkLimitRemoved(df5, false)
     // LIMIT is pushed down only if all the filters are pushed down
-    checkPushedInfo(df5, "PushedFilters: [], ")
+    checkPushedInfo(df5, "PushedFilters: []")
     checkAnswer(df5, Seq(Row(10000.00, 1000.0, "amy")))
   }
 
@@ -305,7 +310,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .offset(1)
     checkOffsetRemoved(df1)
     checkPushedInfo(df1,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], PushedOffset: OFFSET 1,")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "PushedOffset: OFFSET 1")
     checkAnswer(df1, Seq(Row(1, "cathy", 9000.00, 1200.0, false)))
 
     val df2 = spark.read
@@ -315,7 +321,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .offset(1)
     checkOffsetRemoved(df2, false)
     checkPushedInfo(df2,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], ReadSchema:")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "ReadSchema:")
     checkAnswer(df2, Seq(Row(1, "cathy", 9000.00, 1200.0, false)))
 
     val df3 = spark.read
@@ -325,7 +332,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .offset(1)
     checkOffsetRemoved(df3, false)
     checkPushedInfo(df3,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], ReadSchema:")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "ReadSchema:")
     checkAnswer(df3, Seq(Row(1, "amy", 10000.00, 1000.0, true)))
 
     val df4 = spark.read
@@ -337,7 +345,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .filter($"dept" > 1)
       .offset(1)
     checkOffsetRemoved(df4, false)
-    checkPushedInfo(df4, "PushedFilters: [DEPT IS NOT NULL, DEPT > 1], ReadSchema:")
+    checkPushedInfo(df4,
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1]",
+      "ReadSchema:")
     checkAnswer(df4, Seq(Row(2, "david", 10000, 1300, true), Row(6, "jen", 12000, 1200, true)))
 
     val df5 = spark.read
@@ -362,7 +372,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .offset(1)
     checkOffsetRemoved(df6, false)
     // OFFSET is pushed down only if all the filters are pushed down
-    checkPushedInfo(df6, "PushedFilters: [], ")
+    checkPushedInfo(df6, "PushedFilters: []")
     checkAnswer(df6, Seq(Row(10000.00, 1300.0, "dav"), Row(9000.00, 1200.0, "cat")))
   }
 
@@ -375,7 +385,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkLimitRemoved(df1)
     checkOffsetRemoved(df1)
     checkPushedInfo(df1,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], PushedLimit: LIMIT 2, PushedOffset: OFFSET 1,")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "PushedLimit: LIMIT 2",
+      "PushedOffset: OFFSET 1")
     checkAnswer(df1, Seq(Row(1, "cathy", 9000.00, 1200.0, false)))
 
     val df2 = spark.read
@@ -387,7 +399,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkLimitRemoved(df2, false)
     checkOffsetRemoved(df2, false)
     checkPushedInfo(df2,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], ReadSchema:")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "ReadSchema:")
     checkAnswer(df2, Seq(Row(1, "cathy", 9000.00, 1200.0, false)))
 
     val df3 = spark.read
@@ -399,7 +412,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkLimitRemoved(df3)
     checkOffsetRemoved(df3, false)
     checkPushedInfo(df3,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], PushedLimit: LIMIT 2, ReadSchema:")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "PushedLimit: LIMIT 2",
+      "ReadSchema:")
     checkAnswer(df3, Seq(Row(1, "cathy", 9000.00, 1200.0, false)))
 
     val df4 = spark.read
@@ -412,7 +427,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkLimitRemoved(df4, false)
     checkOffsetRemoved(df4, false)
     checkPushedInfo(df4,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], ReadSchema:")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      " ReadSchema:")
     checkAnswer(df4, Seq(Row(1, "cathy", 9000.00, 1200.0, false)))
 
     val df5 = spark.read
@@ -423,8 +439,11 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .offset(1)
     checkLimitRemoved(df5)
     checkOffsetRemoved(df5)
-    checkPushedInfo(df5, "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], " +
-      "PushedOffset: OFFSET 1, PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 2, ReadSchema:")
+    checkPushedInfo(df5,
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "PushedOffset: OFFSET 1",
+      "PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 2",
+      "ReadSchema:")
     checkAnswer(df5, Seq(Row(1, "amy", 10000.00, 1000.0, true)))
 
     val df6 = spark.read
@@ -436,7 +455,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .offset(1)
     checkLimitRemoved(df6, false)
     checkOffsetRemoved(df6, false)
-    checkPushedInfo(df6, "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], ReadSchema:")
+    checkPushedInfo(df6,
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "ReadSchema:")
     checkAnswer(df6, Seq(Row(1, "amy", 10000.00, 1000.0, true)))
 
     val df7 = spark.read
@@ -448,8 +469,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .offset(1)
     checkLimitRemoved(df7)
     checkOffsetRemoved(df7, false)
-    checkPushedInfo(df7, "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]," +
-      " PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 2, ReadSchema:")
+    checkPushedInfo(df7,
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 2",
+      "ReadSchema:")
     checkAnswer(df7, Seq(Row(1, "amy", 10000.00, 1000.0, true)))
 
     val df8 = spark.read
@@ -462,7 +485,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .offset(1)
     checkLimitRemoved(df8, false)
     checkOffsetRemoved(df8, false)
-    checkPushedInfo(df8, "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], ReadSchema:")
+    checkPushedInfo(df8,
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "ReadSchema:")
     checkAnswer(df8, Seq(Row(1, "amy", 10000.00, 1000.0, true)))
 
     val df9 = spark.read
@@ -477,7 +502,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkLimitRemoved(df9, false)
     checkOffsetRemoved(df9, false)
     checkPushedInfo(df9,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1], PushedLimit: LIMIT 2, ReadSchema:")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1]",
+      "PushedLimit: LIMIT 2", " ReadSchema:")
     checkAnswer(df9, Seq(Row(2, "david", 10000.00, 1300.0, true)))
 
     val df10 = spark.read
@@ -506,7 +532,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .offset(1)
     checkLimitRemoved(df11, false)
     checkOffsetRemoved(df11, false)
-    checkPushedInfo(df11, "PushedFilters: [], ")
+    checkPushedInfo(df11, "PushedFilters: []")
     checkAnswer(df11, Seq(Row(9000.00, 1200.0, "cat")))
   }
 
@@ -519,7 +545,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkLimitRemoved(df1)
     checkOffsetRemoved(df1)
     checkPushedInfo(df1,
-      "[DEPT IS NOT NULL, DEPT = 1], PushedLimit: LIMIT 2, PushedOffset: OFFSET 1,")
+      "[DEPT IS NOT NULL, DEPT = 1]",
+      "PushedLimit: LIMIT 2",
+      " PushedOffset: OFFSET 1")
     checkAnswer(df1, Seq(Row(1, "cathy", 9000.00, 1200.0, false)))
 
     val df2 = spark.read
@@ -531,7 +559,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkLimitRemoved(df2)
     checkOffsetRemoved(df2, false)
     checkPushedInfo(df2,
-      "[DEPT IS NOT NULL, DEPT = 1], PushedLimit: LIMIT 2, ReadSchema:")
+      "[DEPT IS NOT NULL, DEPT = 1]",
+      "PushedLimit: LIMIT 2",
+      "ReadSchema:")
     checkAnswer(df2, Seq(Row(1, "cathy", 9000.00, 1200.0, false)))
 
     val df3 = spark.read
@@ -543,7 +573,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkLimitRemoved(df3, false)
     checkOffsetRemoved(df3)
     checkPushedInfo(df3,
-      "[DEPT IS NOT NULL, DEPT = 1], PushedOffset: OFFSET 1, ReadSchema:")
+      "[DEPT IS NOT NULL, DEPT = 1]",
+      "PushedOffset: OFFSET 1",
+      "ReadSchema:")
     checkAnswer(df3, Seq(Row(1, "cathy", 9000.00, 1200.0, false)))
 
     val df4 = spark.read
@@ -556,7 +588,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkLimitRemoved(df4, false)
     checkOffsetRemoved(df4, false)
     checkPushedInfo(df4,
-      "[DEPT IS NOT NULL, DEPT = 1], ReadSchema:")
+      "[DEPT IS NOT NULL, DEPT = 1]",
+      "ReadSchema:")
     checkAnswer(df4, Seq(Row(1, "cathy", 9000.00, 1200.0, false)))
 
     val df5 = spark.read
@@ -567,8 +600,11 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkLimitRemoved(df5)
     checkOffsetRemoved(df5)
-    checkPushedInfo(df5, "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], " +
-      "PushedOffset: OFFSET 1, PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 2, ReadSchema:")
+    checkPushedInfo(df5,
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "PushedOffset: OFFSET 1",
+      "PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 2",
+      "ReadSchema:")
     checkAnswer(df5, Seq(Row(1, "amy", 10000.00, 1000.0, true)))
 
     val df6 = spark.read
@@ -580,8 +616,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkLimitRemoved(df6)
     checkOffsetRemoved(df6, false)
-    checkPushedInfo(df6, "[DEPT IS NOT NULL, DEPT = 1]," +
-      " PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 2, ReadSchema:")
+    checkPushedInfo(df6,
+      "[DEPT IS NOT NULL, DEPT = 1]",
+      "PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 2",
+      "ReadSchema:")
     checkAnswer(df6, Seq(Row(1, "amy", 10000.00, 1000.0, true)))
 
     val df7 = spark.read
@@ -593,7 +631,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkLimitRemoved(df7, false)
     checkOffsetRemoved(df7, false)
-    checkPushedInfo(df7, "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], ReadSchema:")
+    checkPushedInfo(df7,
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "ReadSchema:")
     checkAnswer(df7, Seq(Row(1, "amy", 10000.00, 1000.0, true)))
 
     val df8 = spark.read
@@ -606,7 +646,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkLimitRemoved(df8, false)
     checkOffsetRemoved(df8, false)
-    checkPushedInfo(df8, "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], ReadSchema:")
+    checkPushedInfo(df8,
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "ReadSchema:")
     checkAnswer(df8, Seq(Row(1, "amy", 10000.00, 1000.0, true)))
 
     val df9 = spark.read
@@ -621,7 +663,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkLimitRemoved(df9, false)
     checkOffsetRemoved(df9, false)
     checkPushedInfo(df9,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1], PushedLimit: LIMIT 2, ReadSchema:")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1]",
+      "PushedLimit: LIMIT 2",
+      "ReadSchema:")
     checkAnswer(df9, Seq(Row(2, "david", 10000.00, 1300.0, true)))
 
     val df10 = sql("SELECT dept, sum(salary) FROM h2.test.employee group by dept LIMIT 1 OFFSET 1")
@@ -646,7 +690,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkLimitRemoved(df11, false)
     checkOffsetRemoved(df11, false)
-    checkPushedInfo(df11, "PushedFilters: [], ")
+    checkPushedInfo(df11, "PushedFilters: []")
     checkAnswer(df11, Seq(Row(9000.00, 1200.0, "cat")))
   }
 
@@ -669,7 +713,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkSortRemoved(df1)
     checkLimitRemoved(df1)
     checkPushedInfo(df1,
-      "PushedFilters: [], PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 1, ")
+      "PushedFilters: []",
+      "PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 1")
     checkAnswer(df1, Seq(Row(1, "cathy", 9000.00, 1200.0, false)))
 
     val df2 = spark.read
@@ -683,8 +728,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkSortRemoved(df2)
     checkLimitRemoved(df2)
-    checkPushedInfo(df2, "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], " +
-      "PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 1, ")
+    checkPushedInfo(df2,
+      "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]",
+      "PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 1")
     checkAnswer(df2, Seq(Row(1, "cathy", 9000.00, 1200.0, false)))
 
     val df3 = spark.read
@@ -698,8 +744,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkSortRemoved(df3, false)
     checkLimitRemoved(df3, false)
-    checkPushedInfo(df3, "PushedFilters: [DEPT IS NOT NULL, DEPT > 1], " +
-      "PushedTopN: ORDER BY [SALARY DESC NULLS LAST] LIMIT 1, ")
+    checkPushedInfo(df3,
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1]",
+      "PushedTopN: ORDER BY [SALARY DESC NULLS LAST] LIMIT 1")
     checkAnswer(df3, Seq(Row(2, "alex", 12000.00, 1200.0, false)))
 
     val df4 =
@@ -707,14 +754,15 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     checkSchemaNames(df4, Seq("NAME"))
     checkSortRemoved(df4)
     checkLimitRemoved(df4)
-    checkPushedInfo(df4, "PushedFilters: [DEPT IS NOT NULL, DEPT > 1], " +
-      "PushedTopN: ORDER BY [SALARY ASC NULLS LAST] LIMIT 1, ")
+    checkPushedInfo(df4,
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1]",
+      "PushedTopN: ORDER BY [SALARY ASC NULLS LAST] LIMIT 1")
     checkAnswer(df4, Seq(Row("david")))
 
     val df5 = spark.read.table("h2.test.employee")
       .where($"dept" === 1).orderBy($"salary")
     checkSortRemoved(df5, false)
-    checkPushedInfo(df5, "PushedFilters: [DEPT IS NOT NULL, DEPT = 1], ")
+    checkPushedInfo(df5, "PushedFilters: [DEPT IS NOT NULL, DEPT = 1]")
     checkAnswer(df5,
       Seq(Row(1, "cathy", 9000.00, 1200.0, false), Row(1, "amy", 10000.00, 1000.0, true)))
 
@@ -725,8 +773,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkSortRemoved(df6, false)
     checkLimitRemoved(df6, false)
-    checkPushedInfo(df6, "PushedAggregates: [SUM(SALARY)]," +
-      " PushedFilters: [], PushedGroupByExpressions: [DEPT], ")
+    checkPushedInfo(df6,
+      "PushedAggregates: [SUM(SALARY)]",
+      "PushedFilters: []",
+      "PushedGroupByExpressions: [DEPT]")
     checkAnswer(df6, Seq(Row(1, 19000.00)))
 
     val name = udf { (x: String) => x.matches("cat|dav|amy") }
@@ -740,7 +790,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     // LIMIT is pushed down only if all the filters are pushed down
     checkSortRemoved(df7, false)
     checkLimitRemoved(df7, false)
-    checkPushedInfo(df7, "PushedFilters: [], ")
+    checkPushedInfo(df7, "PushedFilters: []")
     checkAnswer(df7, Seq(Row(10000.00, 1000.0, "amy")))
 
     val df8 = spark.read
@@ -749,7 +799,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkSortRemoved(df8, false)
     checkLimitRemoved(df8, false)
-    checkPushedInfo(df8, "PushedFilters: [], ")
+    checkPushedInfo(df8, "PushedFilters: []")
     checkAnswer(df8, Seq(Row(2, "alex", 12000.00, 1200.0, false)))
 
     val df9 = spark.read
@@ -760,7 +810,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(3)
     checkSortRemoved(df9)
     checkLimitRemoved(df9)
-    checkPushedInfo(df9, "PushedFilters: [], " +
+    checkPushedInfo(df9,
+      "PushedFilters: []",
       "PushedTopN: " +
       "ORDER BY [CASE WHEN (SALARY > 8000.00) AND (SALARY < 10000.00) THEN SALARY ELSE 0.00 END " +
       "ASC NULLS FIRST, DEPT ASC NULLS FIRST, SALARY ASC NULLS FIRST] LIMIT 3,")
@@ -779,7 +830,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(3)
     checkSortRemoved(df10, false)
     checkLimitRemoved(df10, false)
-    checkPushedInfo(df10, "PushedFilters: [], " +
+    checkPushedInfo(df10,
+      "PushedFilters: []",
       "PushedTopN: " +
       "ORDER BY [CASE WHEN (SALARY > 8000.00) AND (SALARY < 10000.00) THEN SALARY ELSE 0.00 END " +
       "ASC NULLS FIRST, DEPT ASC NULLS FIRST, SALARY ASC NULLS FIRST] LIMIT 3,")
@@ -795,7 +847,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkSortRemoved(df1)
     checkPushedInfo(df1,
-      "PushedFilters: [], PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 1, ")
+      "PushedFilters: []",
+      "PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 1")
     checkAnswer(df1, Seq(Row("cathy", 9000.00)))
 
     val df2 = spark.read
@@ -806,15 +859,15 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .limit(1)
     checkSortRemoved(df2)
     checkPushedInfo(df2,
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1], " +
-        "PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 1, ")
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 1]",
+        "PushedTopN: ORDER BY [SALARY ASC NULLS FIRST] LIMIT 1")
     checkAnswer(df2, Seq(Row(2, "david", 10000.00)))
   }
 
   test("scan with filter push-down") {
     val df = spark.table("h2.test.people").filter($"id" > 1)
     checkFiltersRemoved(df)
-    checkPushedInfo(df, "PushedFilters: [ID IS NOT NULL, ID > 1], ")
+    checkPushedInfo(df, "PushedFilters: [ID IS NOT NULL, ID > 1]")
     checkAnswer(df, Row("mary", 2))
 
     val df2 = spark.table("h2.test.employee").filter($"name".isin("amy", "cathy"))
@@ -835,32 +888,34 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
 
     val df5 = spark.table("h2.test.employee").filter($"is_manager".and($"salary" > 10000))
     checkFiltersRemoved(df5)
-    checkPushedInfo(df5, "PushedFilters: [IS_MANAGER IS NOT NULL, SALARY IS NOT NULL, " +
+    checkPushedInfo(df5,
+      "PushedFilters: [IS_MANAGER IS NOT NULL, SALARY IS NOT NULL",
       "IS_MANAGER = true, SALARY > 10000.00]")
     checkAnswer(df5, Seq(Row(6, "jen", 12000, 1200, true)))
 
     val df6 = spark.table("h2.test.employee").filter($"is_manager".or($"salary" > 10000))
     checkFiltersRemoved(df6)
-    checkPushedInfo(df6, "PushedFilters: [(IS_MANAGER = true) OR (SALARY > 10000.00)], ")
+    checkPushedInfo(df6, "PushedFilters: [(IS_MANAGER = true) OR (SALARY > 10000.00)]")
     checkAnswer(df6, Seq(Row(1, "amy", 10000, 1000, true), Row(2, "alex", 12000, 1200, false),
       Row(2, "david", 10000, 1300, true), Row(6, "jen", 12000, 1200, true)))
 
     val df7 = spark.table("h2.test.employee").filter(not($"is_manager") === true)
     checkFiltersRemoved(df7)
-    checkPushedInfo(df7, "PushedFilters: [IS_MANAGER IS NOT NULL, NOT (IS_MANAGER = true)], ")
+    checkPushedInfo(df7, "PushedFilters: [IS_MANAGER IS NOT NULL, NOT (IS_MANAGER = true)]")
     checkAnswer(df7, Seq(Row(1, "cathy", 9000, 1200, false), Row(2, "alex", 12000, 1200, false)))
 
     val df8 = spark.table("h2.test.employee").filter($"is_manager" === true)
     checkFiltersRemoved(df8)
-    checkPushedInfo(df8, "PushedFilters: [IS_MANAGER IS NOT NULL, IS_MANAGER = true], ")
+    checkPushedInfo(df8, "PushedFilters: [IS_MANAGER IS NOT NULL, IS_MANAGER = true]")
     checkAnswer(df8, Seq(Row(1, "amy", 10000, 1000, true),
       Row(2, "david", 10000, 1300, true), Row(6, "jen", 12000, 1200, true)))
 
     val df9 = spark.table("h2.test.employee")
       .filter(when($"dept" > 1, true).when($"is_manager", false).otherwise($"dept" > 3))
     checkFiltersRemoved(df9)
-    checkPushedInfo(df9, "PushedFilters: [CASE WHEN DEPT > 1 THEN TRUE " +
-      "WHEN IS_MANAGER = true THEN FALSE ELSE DEPT > 3 END], ")
+    checkPushedInfo(df9,
+      "PushedFilters: [CASE WHEN DEPT > 1 THEN TRUE",
+      "WHEN IS_MANAGER = true THEN FALSE ELSE DEPT > 3 END]")
     checkAnswer(df9, Seq(Row(2, "alex", 12000, 1200, false),
       Row(2, "david", 10000, 1300, true), Row(6, "jen", 12000, 1200, true)))
 
@@ -868,7 +923,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .select($"NAME".as("myName"), $"ID".as("myID"))
       .filter($"myID" > 1)
     checkFiltersRemoved(df10)
-    checkPushedInfo(df10, "PushedFilters: [ID IS NOT NULL, ID > 1], ")
+    checkPushedInfo(df10, "PushedFilters: [ID IS NOT NULL, ID > 1]")
     checkAnswer(df10, Row("mary", 2))
 
     val df11 = sql(
@@ -1348,9 +1403,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       " GROUP BY DePt")
     checkFiltersRemoved(df)
     checkAggregateRemoved(df)
-    checkPushedInfo(df, "PushedAggregates: [MAX(SALARY), AVG(BONUS)], " +
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0], " +
-      "PushedGroupByExpressions: [DEPT], ")
+    checkPushedInfo(df,
+      "PushedAggregates: [MAX(SALARY), AVG(BONUS)]",
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0]",
+      "PushedGroupByExpressions: [DEPT]")
     checkAnswer(df, Seq(Row(10000, 1100.0), Row(12000, 1250.0), Row(12000, 1200.0)))
   }
 
@@ -1369,9 +1425,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     val df = sql("SELECT MAX(ID), AVG(ID) FROM h2.test.people WHERE id > 0")
     checkFiltersRemoved(df)
     checkAggregateRemoved(df)
-    checkPushedInfo(df, "PushedAggregates: [MAX(ID), AVG(ID)], " +
-      "PushedFilters: [ID IS NOT NULL, ID > 0], " +
-      "PushedGroupByExpressions: [], ")
+    checkPushedInfo(df,
+      "PushedAggregates: [MAX(ID), AVG(ID)]",
+      "PushedFilters: [ID IS NOT NULL, ID > 0]",
+      "PushedGroupByExpressions: []")
     checkAnswer(df, Seq(Row(2, 1.5)))
   }
 
@@ -1416,7 +1473,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     val df = sql("SELECT name FROM h2.test.employee GROUP BY name")
     checkAggregateRemoved(df)
     checkPushedInfo(df,
-      "PushedAggregates: [], PushedFilters: [], PushedGroupByExpressions: [NAME],")
+      "PushedAggregates: []",
+      "PushedFilters: []",
+      "PushedGroupByExpressions: [NAME]")
     checkAnswer(df, Seq(Row("alex"), Row("amy"), Row("cathy"), Row("david"), Row("jen")))
 
     val df2 = spark.read
@@ -1429,7 +1488,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .agg(Map.empty[String, String])
     checkAggregateRemoved(df2, false)
     checkPushedInfo(df2,
-      "PushedAggregates: [], PushedFilters: [], PushedGroupByExpressions: [NAME],")
+      "PushedAggregates: []",
+      "PushedFilters: []",
+      "PushedGroupByExpressions: [NAME]")
     checkAnswer(df2, Seq(Row("alex"), Row("amy"), Row("cathy"), Row("david"), Row("jen")))
 
     val df3 = sql("SELECT CASE WHEN SALARY > 8000 AND SALARY < 10000 THEN SALARY ELSE 0 END as" +
@@ -1506,8 +1567,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
   test("scan with aggregate push-down: SUM with group by") {
     val df1 = sql("SELECT SUM(SALARY) FROM h2.test.employee GROUP BY DEPT")
     checkAggregateRemoved(df1)
-    checkPushedInfo(df1, "PushedAggregates: [SUM(SALARY)], " +
-      "PushedFilters: [], PushedGroupByExpressions: [DEPT], ")
+    checkPushedInfo(df1,
+      "PushedAggregates: [SUM(SALARY)]",
+      "PushedFilters: []",
+      "PushedGroupByExpressions: [DEPT]")
     checkAnswer(df1, Seq(Row(19000), Row(22000), Row(12000)))
 
     val df2 = sql(
@@ -1622,8 +1685,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
   test("scan with aggregate push-down: DISTINCT SUM with group by") {
     val df = sql("SELECT SUM(DISTINCT SALARY) FROM h2.test.employee GROUP BY DEPT")
     checkAggregateRemoved(df)
-    checkPushedInfo(df, "PushedAggregates: [SUM(DISTINCT SALARY)], " +
-      "PushedFilters: [], PushedGroupByExpressions: [DEPT]")
+    checkPushedInfo(df,
+      "PushedAggregates: [SUM(DISTINCT SALARY)]",
+      "PushedFilters: []",
+      "PushedGroupByExpressions: [DEPT]")
     checkAnswer(df, Seq(Row(19000), Row(22000), Row(12000)))
   }
 
@@ -1632,8 +1697,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       " GROUP BY DEPT, NAME")
     checkFiltersRemoved(df)
     checkAggregateRemoved(df)
-    checkPushedInfo(df, "PushedAggregates: [MAX(SALARY), MIN(BONUS)], " +
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0], PushedGroupByExpressions: [DEPT, NAME]")
+    checkPushedInfo(df,
+      "PushedAggregates: [MAX(SALARY), MIN(BONUS)]",
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0]",
+      "PushedGroupByExpressions: [DEPT, NAME]")
     checkAnswer(df, Seq(Row(9000, 1200), Row(12000, 1200), Row(10000, 1300),
       Row(10000, 1000), Row(12000, 1200)))
   }
@@ -1646,8 +1713,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     }
     assert(filters1.isEmpty)
     checkAggregateRemoved(df1)
-    checkPushedInfo(df1, "PushedAggregates: [MAX(SALARY)], " +
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0], PushedGroupByExpressions: [DEPT, NAME]")
+    checkPushedInfo(df1,
+      "PushedAggregates: [MAX(SALARY)]",
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0]",
+      "PushedGroupByExpressions: [DEPT, NAME]")
     checkAnswer(df1, Seq(Row("1#amy", 10000), Row("1#cathy", 9000), Row("2#alex", 12000),
       Row("2#david", 10000), Row("6#jen", 12000)))
 
@@ -1658,8 +1727,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     }
     assert(filters2.isEmpty)
     checkAggregateRemoved(df2)
-    checkPushedInfo(df2, "PushedAggregates: [MAX(SALARY), MIN(BONUS)], " +
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0], PushedGroupByExpressions: [DEPT, NAME]")
+    checkPushedInfo(df2,
+      "PushedAggregates: [MAX(SALARY), MIN(BONUS)]",
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0]",
+      "PushedGroupByExpressions: [DEPT, NAME]")
     checkAnswer(df2, Seq(Row("1#amy", 11000), Row("1#cathy", 10200), Row("2#alex", 13200),
       Row("2#david", 11300), Row("6#jen", 13200)))
 
@@ -1667,7 +1738,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       " FROM h2.test.employee WHERE dept > 0 GROUP BY concat_ws('#', DEPT, NAME)")
     checkFiltersRemoved(df3)
     checkAggregateRemoved(df3, false)
-    checkPushedInfo(df3, "PushedFilters: [DEPT IS NOT NULL, DEPT > 0], ")
+    checkPushedInfo(df3, "PushedFilters: [DEPT IS NOT NULL, DEPT > 0]")
     checkAnswer(df3, Seq(Row("1#amy", 11000), Row("1#cathy", 10200), Row("2#alex", 13200),
       Row("2#david", 11300), Row("6#jen", 13200)))
   }
@@ -1678,8 +1749,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     // filter over aggregate not push down
     checkFiltersRemoved(df, false)
     checkAggregateRemoved(df)
-    checkPushedInfo(df, "PushedAggregates: [MAX(SALARY), MIN(BONUS)], " +
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0], PushedGroupByExpressions: [DEPT]")
+    checkPushedInfo(df,
+      "PushedAggregates: [MAX(SALARY), MIN(BONUS)]",
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0]",
+      "PushedGroupByExpressions: [DEPT]")
     checkAnswer(df, Seq(Row(12000, 1200), Row(12000, 1200)))
   }
 
@@ -1688,8 +1761,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .groupBy($"DEPT")
       .min("SALARY").as("total")
     checkAggregateRemoved(df)
-    checkPushedInfo(df, "PushedAggregates: [MIN(SALARY)], " +
-      "PushedFilters: [], PushedGroupByExpressions: [DEPT]")
+    checkPushedInfo(df,
+      "PushedAggregates: [MIN(SALARY)]",
+      "PushedFilters: []",
+      "PushedGroupByExpressions: [DEPT]")
     checkAnswer(df, Seq(Row(1, 9000), Row(2, 10000), Row(6, 12000)))
   }
 
@@ -1703,8 +1778,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .orderBy($"total")
     checkFiltersRemoved(query, false)// filter over aggregate not pushed down
     checkAggregateRemoved(query)
-    checkPushedInfo(query, "PushedAggregates: [SUM(SALARY)], " +
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0], PushedGroupByExpressions: [DEPT]")
+    checkPushedInfo(query,
+      "PushedAggregates: [SUM(SALARY)]",
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0]",
+      "PushedGroupByExpressions: [DEPT]")
     checkAnswer(query, Seq(Row(6, 12000), Row(1, 19000), Row(2, 22000)))
   }
 
@@ -1713,7 +1790,7 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
     val decrease = udf { (x: Double, y: Double) => x - y }
     val query = df.select(decrease(sum($"SALARY"), sum($"BONUS")).as("value"))
     checkAggregateRemoved(query)
-    checkPushedInfo(query, "PushedAggregates: [SUM(SALARY), SUM(BONUS)], ")
+    checkPushedInfo(query, "PushedAggregates: [SUM(SALARY), SUM(BONUS)]")
     checkAnswer(query, Seq(Row(47100.0)))
   }
 
@@ -1779,8 +1856,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       " FROM h2.test.employee WHERE dept > 0 GROUP BY DePt")
     checkFiltersRemoved(df1)
     checkAggregateRemoved(df1)
-    checkPushedInfo(df1, "PushedAggregates: [COVAR_POP(BONUS, BONUS), COVAR_SAMP(BONUS, BONUS)], " +
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0], PushedGroupByExpressions: [DEPT]")
+    checkPushedInfo(df1,
+      "PushedAggregates: [COVAR_POP(BONUS, BONUS), COVAR_SAMP(BONUS, BONUS)]",
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0]",
+      "PushedGroupByExpressions: [DEPT]")
     checkAnswer(df1, Seq(Row(10000d, 20000d), Row(2500d, 5000d), Row(0d, null)))
 
     val df2 = sql("SELECT COVAR_POP(DISTINCT bonus, bonus), COVAR_SAMP(DISTINCT bonus, bonus)" +
@@ -1796,8 +1875,10 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       " GROUP BY DePt")
     checkFiltersRemoved(df1)
     checkAggregateRemoved(df1)
-    checkPushedInfo(df1, "PushedAggregates: [CORR(BONUS, BONUS)], " +
-      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0], PushedGroupByExpressions: [DEPT]")
+    checkPushedInfo(df1,
+      "PushedAggregates: [CORR(BONUS, BONUS)]",
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0]",
+      "PushedGroupByExpressions: [DEPT]")
     checkAnswer(df1, Seq(Row(1d), Row(1d), Row(null)))
 
     val df2 = sql("SELECT CORR(DISTINCT bonus, bonus) FROM h2.test.employee WHERE dept > 0" +
@@ -1839,7 +1920,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
         |FROM h2.test.employee WHERE dept > 0 GROUP BY DePt""".stripMargin)
     checkFiltersRemoved(df2)
     checkAggregateRemoved(df2, false)
-    checkPushedInfo(df2, "PushedFilters: [DEPT IS NOT NULL, DEPT > 0], ReadSchema:")
+    checkPushedInfo(df2,
+      "PushedFilters: [DEPT IS NOT NULL, DEPT > 0]",
+      "ReadSchema:")
     checkAnswer(df2,
       Seq(Row(0.0, 1.0, 1.0, 20000.0), Row(0.0, 1.0, 1.0, 5000.0), Row(null, null, null, 0.0)))
 
@@ -1945,8 +2028,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
         "MAX(CASE WHEN (SALARY <= 8000.00) OR (SALARY >= 10000.00) THEN 0.00 ELSE SALARY END), " +
         "MIN(CASE WHEN (SALARY <= 8000.00) AND (SALARY IS NOT NULL) THEN SALARY ELSE 0.00 END), " +
         "SUM(CASE WHEN SALARY > 10000.00 THEN 2 WHEN SALARY > 8000.00 THEN 1 END), " +
-        "AVG(CASE WHEN (SALARY <= 8000.00) AND (SALARY IS NULL) THEN SALARY ELSE 0.00 END)], " +
-        "PushedFilters: [], " +
+        "AVG(CASE WHEN (SALARY <= 8000.00) AND (SALARY IS NULL) THEN SALARY ELSE 0.00 END)]",
+        "PushedFilters: []",
         "PushedGroupByExpressions: [DEPT],")
     checkAnswer(df, Seq(Row(1, 1, 1, 1, 1, 0d, 12000d, 0d, 12000d, 0d, 0d, 2, 0d),
       Row(2, 2, 2, 2, 2, 10000d, 12000d, 10000d, 12000d, 0d, 0d, 3, 0d),
@@ -2025,12 +2108,14 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
 
     val df2 = sql("SELECT `dept.id`, COUNT(`dept id`) FROM h2.test.dept GROUP BY `dept.id`")
     checkPushedInfo(df2,
-      "PushedGroupByExpressions: [`dept.id`]", "PushedAggregates: [COUNT(`dept id`)]")
+      "PushedGroupByExpressions: [`dept.id`]",
+      "PushedAggregates: [COUNT(`dept id`)]")
     checkAnswer(df2, Seq(Row(1, 2)))
 
     val df3 = sql("SELECT `dept id`, COUNT(`dept.id`) FROM h2.test.dept GROUP BY `dept id`")
     checkPushedInfo(df3,
-      "PushedGroupByExpressions: [`dept id`]", "PushedAggregates: [COUNT(`dept.id`)]")
+      "PushedGroupByExpressions: [`dept id`]",
+      "PushedAggregates: [COUNT(`dept.id`)]")
     checkAnswer(df3, Seq(Row(1, 1), Row(2, 1)))
   }
 
@@ -2120,7 +2205,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .filter($"total" > 1000)
     checkAggregateRemoved(df)
     checkPushedInfo(df,
-      "PushedAggregates: [SUM(SALARY)], PushedFilters: [], PushedGroupByExpressions: [DEPT]")
+      "PushedAggregates: [SUM(SALARY)]",
+      "PushedFilters: []",
+      "PushedGroupByExpressions: [DEPT]")
     checkAnswer(df, Seq(Row(1, 19000.00), Row(2, 22000.00), Row(6, 12000.00)))
 
     val df2 = spark.table("h2.test.employee")
@@ -2130,7 +2217,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .filter($"total" > 1000)
     checkAggregateRemoved(df2)
     checkPushedInfo(df2,
-      "PushedAggregates: [SUM(SALARY)], PushedFilters: [], PushedGroupByExpressions: [DEPT]")
+      "PushedAggregates: [SUM(SALARY)]",
+      "PushedFilters: []",
+      "PushedGroupByExpressions: [DEPT]")
     checkAnswer(df2, Seq(Row(1, 19000.00), Row(2, 22000.00), Row(6, 12000.00)))
   }
 
@@ -2147,7 +2236,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .filter($"total" > 1000)
     checkAggregateRemoved(df, false)
     checkPushedInfo(df,
-      "PushedAggregates: [SUM(SALARY)], PushedFilters: [], PushedGroupByExpressions: [NAME]")
+      "PushedAggregates: [SUM(SALARY)]",
+      "PushedFilters: []",
+      "PushedGroupByExpressions: [NAME]")
     checkAnswer(df, Seq(Row("alex", 12000.00), Row("amy", 10000.00),
       Row("cathy", 9000.00), Row("david", 10000.00), Row("jen", 12000.00)))
 
@@ -2163,7 +2254,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       .filter($"total" > 1000)
     checkAggregateRemoved(df2, false)
     checkPushedInfo(df2,
-      "PushedAggregates: [SUM(SALARY)], PushedFilters: [], PushedGroupByExpressions: [NAME]")
+      "PushedAggregates: [SUM(SALARY)]",
+      "PushedFilters: []",
+      "PushedGroupByExpressions: [NAME]")
     checkAnswer(df2, Seq(Row("alex", 12000.00), Row("amy", 10000.00),
       Row("cathy", 9000.00), Row("david", 10000.00), Row("jen", 12000.00)))
   }
@@ -2229,13 +2322,17 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
       val df1 = sql("SELECT h2.my_avg(id) FROM h2.test.people")
       checkAggregateRemoved(df1)
       checkPushedInfo(df1,
-        "PushedAggregates: [iavg(ID)], PushedFilters: [], PushedGroupByExpressions: []")
+        "PushedAggregates: [iavg(ID)]",
+        "PushedFilters: []",
+        "PushedGroupByExpressions: []")
       checkAnswer(df1, Seq(Row(1)))
 
       val df2 = sql("SELECT name, h2.my_avg(id) FROM h2.test.people group by name")
       checkAggregateRemoved(df2)
       checkPushedInfo(df2,
-        "PushedAggregates: [iavg(ID)], PushedFilters: [], PushedGroupByExpressions: [NAME]")
+        "PushedAggregates: [iavg(ID)]",
+        "PushedFilters: []",
+        "PushedGroupByExpressions: [NAME]")
       checkAnswer(df2, Seq(Row("fred", 1), Row("mary", 2)))
       withSQLConf(SQLConf.ANSI_ENABLED.key -> "true") {
         val df3 = sql(
@@ -2246,8 +2343,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
           """.stripMargin)
         checkAggregateRemoved(df3)
         checkPushedInfo(df3,
-          "PushedAggregates: [iavg(CASE WHEN NAME = 'fred' THEN ID + 1 ELSE ID END)]," +
-            " PushedFilters: [], PushedGroupByExpressions: []")
+          "PushedAggregates: [iavg(CASE WHEN NAME = 'fred' THEN ID + 1 ELSE ID END)]",
+          "PushedFilters: []",
+          "PushedGroupByExpressions: []")
         checkAnswer(df3, Seq(Row(2)))
 
         val df4 = sql(
@@ -2260,8 +2358,9 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
           """.stripMargin)
         checkAggregateRemoved(df4)
         checkPushedInfo(df4,
-          "PushedAggregates: [iavg(CASE WHEN NAME = 'fred' THEN ID + 1 ELSE ID END)]," +
-            " PushedFilters: [], PushedGroupByExpressions: [NAME]")
+          "PushedAggregates: [iavg(CASE WHEN NAME = 'fred' THEN ID + 1 ELSE ID END)]",
+          "PushedFilters: []",
+          "PushedGroupByExpressions: [NAME]")
         checkAnswer(df4, Seq(Row("fred", 2), Row("mary", 2)))
       }
     } finally {

