Posted to commits@spark.apache.org by we...@apache.org on 2023/03/16 01:04:30 UTC

[spark] branch master updated: [SPARK-42772][SQL] Change the default value of JDBC options about push down to true

This is an automated email from the ASF dual-hosted git repository.

wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new f6d036cef7c [SPARK-42772][SQL] Change the default value of JDBC options about push down to true
f6d036cef7c is described below

commit f6d036cef7c40e9b67e623358415d664ecb14bfb
Author: Jiaan Geng <be...@163.com>
AuthorDate: Thu Mar 16 09:04:10 2023 +0800

    [SPARK-42772][SQL] Change the default value of JDBC options about push down to true
    
    ### What changes were proposed in this pull request?
    Currently, DS V2 pushdown lets the JDBC dialect decide whether to push down `OFFSET`, `LIMIT` and table sample. Because some databases do not support all of them, the default value of these dialect pushdown APIs should be changed to false. If a database supports the syntax, its JDBC dialect should override the value.
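
    As a rough sketch (not part of this commit; the URL prefix and dialect name below are hypothetical), a dialect for a database that supports `LIMIT` but not `OFFSET` would now opt in explicitly:

    ```scala
    import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects}

    // Hypothetical dialect for a database that supports LIMIT but not OFFSET.
    object MyDbDialect extends JdbcDialect {
      override def canHandle(url: String): Boolean = url.startsWith("jdbc:mydb")

      // The base class now returns false for both supportsLimit and supportsOffset,
      // so a dialect opts in only for the syntax its database can actually handle.
      override def supportsLimit: Boolean = true
      // supportsOffset is left at the new default (false), so Spark applies OFFSET itself.
    }

    JdbcDialects.registerDialect(MyDbDialect)
    ```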
    
    We also have a number of JDBC options related to push down, such as `pushDownOffset`. Users can change an option's value to allow or disallow the corresponding push down.
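
    For example (a minimal sketch, not part of this commit; the catalog name, connection URL and table are placeholders), a user reading through a DS V2 JDBC catalog could disallow OFFSET push down while keeping the other new defaults:

    ```scala
    import org.apache.spark.sql.SparkSession

    val spark = SparkSession.builder()
      .appName("jdbc-pushdown-options")
      // Register a catalog backed by the DS V2 JDBC data source.
      .config("spark.sql.catalog.mydb",
        "org.apache.spark.sql.execution.datasources.v2.jdbc.JDBCTableCatalog")
      .config("spark.sql.catalog.mydb.url", "jdbc:postgresql://host:5432/db")
      .config("spark.sql.catalog.mydb.driver", "org.postgresql.Driver")
      // Push-down options now default to true; set one to false to disallow it.
      .config("spark.sql.catalog.mydb.pushDownOffset", "false")
      .getOrCreate()

    // LIMIT is still pushed down (pushDownLimit defaults to true); OFFSET is applied by Spark.
    spark.sql("SELECT * FROM mydb.public.people LIMIT 5 OFFSET 2").show()
    ```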
    
    ### Why are the changes needed?
    This PR changes the default of all JDBC V2 pushdown options to true and changes the default of all the dialects' pushdown APIs to false.
    
    ### Does this PR introduce _any_ user-facing change?
    'Yes'.
    By default, the pushdown framework now pushes supported SQL syntax down to the JDBC data source.
    Users can enable or disable push down with the JDBC push-down options, such as `pushDownOffset`.
    
    ### How was this patch tested?
    Test cases updated.
    
    Closes #40396 from beliefer/SPARK-42415_new.
    
    Authored-by: Jiaan Geng <be...@163.com>
    Signed-off-by: Wenchen Fan <we...@databricks.com>
---
 .../org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala  | 27 +++++++++++-----------
 docs/sql-data-sources-jdbc.md                      | 16 ++++++-------
 docs/sql-migration-guide.md                        |  4 ++++
 .../execution/datasources/jdbc/JDBCOptions.scala   |  8 +++----
 .../org/apache/spark/sql/jdbc/DB2Dialect.scala     |  4 ++++
 .../org/apache/spark/sql/jdbc/H2Dialect.scala      |  4 ++++
 .../org/apache/spark/sql/jdbc/JdbcDialects.scala   |  8 +++++--
 .../apache/spark/sql/jdbc/MsSqlServerDialect.scala |  2 +-
 .../org/apache/spark/sql/jdbc/MySQLDialect.scala   |  4 ++++
 .../org/apache/spark/sql/jdbc/OracleDialect.scala  |  4 ++++
 .../apache/spark/sql/jdbc/PostgresDialect.scala    | 22 ++++++++++--------
 11 files changed, 65 insertions(+), 38 deletions(-)

diff --git a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
index 97ee3385090..14f1088eb0d 100644
--- a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
+++ b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala
@@ -314,14 +314,13 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
     }
   }
 
-  private def limitPushed(df: DataFrame, limit: Int): Boolean = {
+  private def checkLimitPushed(df: DataFrame, limit: Option[Int]): Unit = {
     df.queryExecution.optimizedPlan.collect {
       case relation: DataSourceV2ScanRelation => relation.scan match {
         case v1: V1ScanWrapper =>
-          return v1.pushedDownOperators.limit == Some(limit)
+          assert(v1.pushedDownOperators.limit == limit)
       }
     }
-    false
   }
 
   private def checkColumnPruned(df: DataFrame, col: String): Unit = {
@@ -354,7 +353,7 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
         val df3 = sql(s"SELECT col1 FROM $catalogName.new_table TABLESAMPLE (BUCKET 6 OUT OF 10)" +
           " LIMIT 2")
         checkSamplePushed(df3)
-        assert(limitPushed(df3, 2))
+        checkLimitPushed(df3, Some(2))
         checkColumnPruned(df3, "col1")
         assert(df3.collect().length <= 2)
 
@@ -362,7 +361,7 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
         val df4 = sql(s"SELECT col1 FROM $catalogName.new_table" +
           " TABLESAMPLE (50 PERCENT) REPEATABLE (12345) LIMIT 2")
         checkSamplePushed(df4)
-        assert(limitPushed(df4, 2))
+        checkLimitPushed(df4, Some(2))
         checkColumnPruned(df4, "col1")
         assert(df4.collect().length <= 2)
 
@@ -371,7 +370,7 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
           " TABLESAMPLE (BUCKET 6 OUT OF 10) WHERE col1 > 0 LIMIT 2")
         checkSamplePushed(df5)
         checkFilterPushed(df5)
-        assert(limitPushed(df5, 2))
+        checkLimitPushed(df5, Some(2))
         assert(df5.collect().length <= 2)
 
         // sample + filter + limit + column pruning
@@ -381,7 +380,7 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
           " TABLESAMPLE (BUCKET 6 OUT OF 10) WHERE col1 > 0 LIMIT 2")
         checkSamplePushed(df6)
         checkFilterPushed(df6, false)
-        assert(!limitPushed(df6, 2))
+        checkLimitPushed(df6, None)
         checkColumnPruned(df6, "col1")
         assert(df6.collect().length <= 2)
 
@@ -390,7 +389,7 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
         // only limit is pushed down because in this test sample is after limit
         val df7 = spark.read.table(s"$catalogName.new_table").limit(2).sample(0.5)
         checkSamplePushed(df7, false)
-        assert(limitPushed(df7, 2))
+        checkLimitPushed(df7, Some(2))
 
         // sample + filter
         // Push down order is sample -> filter -> limit
@@ -422,7 +421,7 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
   test("simple scan with LIMIT") {
     val df = sql(s"SELECT name, salary, bonus FROM $catalogAndNamespace." +
       s"${caseConvert("employee")} WHERE dept > 0 LIMIT 1")
-    assert(limitPushed(df, 1))
+    checkLimitPushed(df, Some(1))
     val rows = df.collect()
     assert(rows.length === 1)
     assert(rows(0).getString(0) === "amy")
@@ -434,7 +433,7 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
     Seq(NullOrdering.values()).flatten.foreach { nullOrdering =>
       val df1 = sql(s"SELECT name, salary, bonus FROM $catalogAndNamespace." +
         s"${caseConvert("employee")} WHERE dept > 0 ORDER BY salary $nullOrdering LIMIT 1")
-      assert(limitPushed(df1, 1))
+      checkLimitPushed(df1, Some(1))
       checkSortRemoved(df1)
       val rows1 = df1.collect()
       assert(rows1.length === 1)
@@ -444,7 +443,7 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
 
       val df2 = sql(s"SELECT name, salary, bonus FROM $catalogAndNamespace." +
         s"${caseConvert("employee")} WHERE dept > 0 ORDER BY bonus DESC $nullOrdering LIMIT 1")
-      assert(limitPushed(df2, 1))
+      checkLimitPushed(df2, Some(1))
       checkSortRemoved(df2)
       val rows2 = df2.collect()
       assert(rows2.length === 1)
@@ -471,7 +470,7 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
     test("simple scan with LIMIT and OFFSET") {
       val df = sql(s"SELECT name, salary, bonus FROM $catalogAndNamespace." +
         s"${caseConvert("employee")} WHERE dept > 0 LIMIT 1 OFFSET 2")
-      assert(limitPushed(df, 3))
+      checkLimitPushed(df, Some(3))
       checkOffsetPushed(df, Some(2))
       val rows = df.collect()
       assert(rows.length === 1)
@@ -487,7 +486,7 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
         val df1 = sql(s"SELECT name, salary, bonus FROM $catalogAndNamespace." +
           s"${caseConvert("employee")}" +
           s" WHERE dept > 0 ORDER BY salary $nullOrdering, bonus LIMIT 1 OFFSET 2")
-        assert(limitPushed(df1, 3))
+        checkLimitPushed(df1, Some(3))
         checkOffsetPushed(df1, Some(2))
         checkSortRemoved(df1)
         val rows1 = df1.collect()
@@ -499,7 +498,7 @@ private[v2] trait V2JDBCTest extends SharedSparkSession with DockerIntegrationFu
         val df2 = sql(s"SELECT name, salary, bonus FROM $catalogAndNamespace." +
           s"${caseConvert("employee")}" +
           s" WHERE dept > 0 ORDER BY salary DESC $nullOrdering, bonus LIMIT 1 OFFSET 2")
-        assert(limitPushed(df2, 3))
+        checkLimitPushed(df2, Some(3))
         checkOffsetPushed(df2, Some(2))
         checkSortRemoved(df2)
         val rows2 = df2.collect()
diff --git a/docs/sql-data-sources-jdbc.md b/docs/sql-data-sources-jdbc.md
index 96ce4a966e4..e106da722a9 100644
--- a/docs/sql-data-sources-jdbc.md
+++ b/docs/sql-data-sources-jdbc.md
@@ -281,36 +281,36 @@ logging into the data sources.
 
   <tr>
     <td><code>pushDownAggregate</code></td>
-    <td><code>false</code></td>
+    <td><code>true</code></td>
     <td>
-     The option to enable or disable aggregate push-down in V2 JDBC data source. The default value is false, in which case Spark will not push down aggregates to the JDBC data source. Otherwise, if sets to true, aggregates will be pushed down to the JDBC data source. Aggregate push-down is usually turned off when the aggregate is performed faster by Spark than by the JDBC data source. Please note that aggregates can be pushed down if and only if all the aggregate functions and the relate [...]
+     The option to enable or disable aggregate push-down in V2 JDBC data source. The default value is true, in which case Spark will push down aggregates to the JDBC data source. Otherwise, if set to false, aggregates will not be pushed down to the JDBC data source. Aggregate push-down is usually turned off when the aggregate is performed faster by Spark than by the JDBC data source. Please note that aggregates can be pushed down if and only if all the aggregate functions and the relate [...]
     </td>
     <td>read</td>
   </tr>
 
   <tr>
     <td><code>pushDownLimit</code></td>
-    <td><code>false</code></td>
+    <td><code>true</code></td>
     <td>
-     The option to enable or disable LIMIT push-down into V2 JDBC data source. The LIMIT push-down also includes LIMIT + SORT , a.k.a. the Top N operator. The default value is false, in which case Spark does not push down LIMIT or LIMIT with SORT to the JDBC data source. Otherwise, if sets to true, LIMIT or LIMIT with SORT is pushed down to the JDBC data source. If <code>numPartitions</code> is greater than 1, Spark still applies LIMIT or LIMIT with SORT on the result from data source ev [...]
+     The option to enable or disable LIMIT push-down into V2 JDBC data source. The LIMIT push-down also includes LIMIT + SORT, a.k.a. the Top N operator. The default value is true, in which case Spark pushes down LIMIT or LIMIT with SORT to the JDBC data source. Otherwise, if set to false, LIMIT or LIMIT with SORT is not pushed down to the JDBC data source. If <code>numPartitions</code> is greater than 1, Spark still applies LIMIT or LIMIT with SORT on the result from data source even if [...]
     </td>
     <td>read</td>
   </tr>
 
   <tr>
     <td><code>pushDownOffset</code></td>
-    <td><code>false</code></td>
+    <td><code>true</code></td>
     <td>
-     The option to enable or disable OFFSET push-down into V2 JDBC data source. The default value is false, in which case Spark will not push down OFFSET to the JDBC data source. Otherwise, if sets to true, Spark will try to push down OFFSET to the JDBC data source. If <code>pushDownOffset</code> is true and <code>numPartitions</code> is equal to 1, OFFSET will be pushed down to the JDBC data source. Otherwise, OFFSET will not be pushed down and Spark still applies OFFSET on the result f [...]
+     The option to enable or disable OFFSET push-down into V2 JDBC data source. The default value is true, in which case Spark will push down OFFSET to the JDBC data source. Otherwise, if set to false, Spark will not try to push down OFFSET to the JDBC data source. If <code>pushDownOffset</code> is true and <code>numPartitions</code> is equal to 1, OFFSET will be pushed down to the JDBC data source. Otherwise, OFFSET will not be pushed down and Spark still applies OFFSET on the result f [...]
     </td>
     <td>read</td>
   </tr>
 
   <tr>
     <td><code>pushDownTableSample</code></td>
-    <td><code>false</code></td>
+    <td><code>true</code></td>
     <td>
-     The option to enable or disable TABLESAMPLE push-down into V2 JDBC data source. The default value is false, in which case Spark does not push down TABLESAMPLE to the JDBC data source. Otherwise, if value sets to true, TABLESAMPLE is pushed down to the JDBC data source.
+     The option to enable or disable TABLESAMPLE push-down into V2 JDBC data source. The default value is true, in which case Spark pushes down TABLESAMPLE to the JDBC data source. Otherwise, if the value is set to false, TABLESAMPLE is not pushed down to the JDBC data source.
     </td>
     <td>read</td>
   </tr>
diff --git a/docs/sql-migration-guide.md b/docs/sql-migration-guide.md
index e570981e25c..7b4940f7577 100644
--- a/docs/sql-migration-guide.md
+++ b/docs/sql-migration-guide.md
@@ -22,6 +22,10 @@ license: |
 * Table of contents
 {:toc}
 
+## Upgrading from Spark SQL 3.4 to 3.5
+
+- Since Spark 3.5, the JDBC options related to DS V2 pushdown are `true` by default. These options include: `pushDownAggregate`, `pushDownLimit`, `pushDownOffset` and `pushDownTableSample`. To restore the legacy behavior, please set them to `false`. e.g. set `spark.sql.catalog.your_catalog_name.pushDownAggregate` to `false`.
+
 ## Upgrading from Spark SQL 3.3 to 3.4
   
   - Since Spark 3.4, INSERT INTO commands with explicit column lists comprising fewer columns than the target table will automatically add the corresponding default values for the remaining columns (or NULL for any column lacking an explicitly-assigned default value). In Spark 3.3 or earlier, these commands would have failed returning errors reporting that the number of provided columns does not match the number of columns in the target table. Note that disabling `spark.sql.defaultColumn [...]
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCOptions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCOptions.scala
index 148cd9e9335..268a65b81ff 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCOptions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCOptions.scala
@@ -192,19 +192,19 @@ class JDBCOptions(
 
   // An option to allow/disallow pushing down aggregate into JDBC data source
   // This only applies to Data Source V2 JDBC
-  val pushDownAggregate = parameters.getOrElse(JDBC_PUSHDOWN_AGGREGATE, "false").toBoolean
+  val pushDownAggregate = parameters.getOrElse(JDBC_PUSHDOWN_AGGREGATE, "true").toBoolean
 
   // An option to allow/disallow pushing down LIMIT into V2 JDBC data source
   // This only applies to Data Source V2 JDBC
-  val pushDownLimit = parameters.getOrElse(JDBC_PUSHDOWN_LIMIT, "false").toBoolean
+  val pushDownLimit = parameters.getOrElse(JDBC_PUSHDOWN_LIMIT, "true").toBoolean
 
   // An option to allow/disallow pushing down OFFSET into V2 JDBC data source
   // This only applies to Data Source V2 JDBC
-  val pushDownOffset = parameters.getOrElse(JDBC_PUSHDOWN_OFFSET, "false").toBoolean
+  val pushDownOffset = parameters.getOrElse(JDBC_PUSHDOWN_OFFSET, "true").toBoolean
 
   // An option to allow/disallow pushing down TABLESAMPLE into JDBC data source
   // This only applies to Data Source V2 JDBC
-  val pushDownTableSample = parameters.getOrElse(JDBC_PUSHDOWN_TABLESAMPLE, "false").toBoolean
+  val pushDownTableSample = parameters.getOrElse(JDBC_PUSHDOWN_TABLESAMPLE, "true").toBoolean
 
   // The local path of user's keytab file, which is assumed to be pre-uploaded to all nodes either
   // by --files option of spark-submit or manually
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala
index a6ae5a8abf4..aa7aa6859c9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/DB2Dialect.scala
@@ -185,4 +185,8 @@ private object DB2Dialect extends JdbcDialect {
 
   override def getJdbcSQLQueryBuilder(options: JDBCOptions): JdbcSQLQueryBuilder =
     new DB2SQLQueryBuilder(this, options)
+
+  override def supportsLimit: Boolean = true
+
+  override def supportsOffset: Boolean = true
 }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala
index 5ede793f6d1..c246b50f4e1 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/H2Dialect.scala
@@ -276,4 +276,8 @@ private[sql] object H2Dialect extends JdbcDialect {
       }
     }
   }
+
+  override def supportsLimit: Boolean = true
+
+  override def supportsOffset: Boolean = true
 }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
index bc50ef1ace7..753f14b8d93 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JdbcDialects.scala
@@ -583,12 +583,16 @@ abstract class JdbcDialect extends Serializable with Logging {
    * {@link OracleDialect.OracleSQLQueryBuilder} and
    * {@link MsSqlServerDialect.MsSqlServerSQLQueryBuilder}.
    */
-  def supportsLimit: Boolean = true
+  def supportsLimit: Boolean = false
 
   /**
   * Returns true if dialect supports OFFSET clause.
+   *
+   * Note: Some built-in dialects support the OFFSET clause with some trick, please see:
+   * {@link OracleDialect.OracleSQLQueryBuilder} and
+   * {@link MySQLDialect.MySQLSQLQueryBuilder}.
    */
-  def supportsOffset: Boolean = true
+  def supportsOffset: Boolean = false
 
   def supportsTableSample: Boolean = false
 
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala
index fc0d2d2470a..e986ac63548 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MsSqlServerDialect.scala
@@ -212,5 +212,5 @@ private object MsSqlServerDialect extends JdbcDialect {
   override def getJdbcSQLQueryBuilder(options: JDBCOptions): JdbcSQLQueryBuilder =
     new MsSqlServerSQLQueryBuilder(this, options)
 
-  override def supportsOffset: Boolean = false
+  override def supportsLimit: Boolean = true
 }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
index 7db2237c474..e688af561c4 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/MySQLDialect.scala
@@ -320,4 +320,8 @@ private case object MySQLDialect extends JdbcDialect with SQLConfHelper {
 
   override def getJdbcSQLQueryBuilder(options: JDBCOptions): JdbcSQLQueryBuilder =
     new MySQLSQLQueryBuilder(this, options)
+
+  override def supportsLimit: Boolean = true
+
+  override def supportsOffset: Boolean = true
 }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala
index 55b4f1eb004..95774d38e50 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/OracleDialect.scala
@@ -220,4 +220,8 @@ private case object OracleDialect extends JdbcDialect {
 
   override def getJdbcSQLQueryBuilder(options: JDBCOptions): JdbcSQLQueryBuilder =
     new OracleSQLQueryBuilder(this, options)
+
+  override def supportsLimit: Boolean = true
+
+  override def supportsOffset: Boolean = true
 }
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala
index c2ca45d9143..011375f9371 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/PostgresDialect.scala
@@ -171,15 +171,6 @@ private object PostgresDialect extends JdbcDialect with SQLConfHelper {
     s"ALTER TABLE $tableName ALTER COLUMN ${quoteIdentifier(columnName)} $nullable"
   }
 
-  override def supportsTableSample: Boolean = true
-
-  override def getTableSample(sample: TableSampleInfo): String = {
-    // hard-coded to BERNOULLI for now because Spark doesn't have a way to specify sample
-    // method name
-    "TABLESAMPLE BERNOULLI" +
-      s" (${(sample.upperBound - sample.lowerBound) * 100}) REPEATABLE (${sample.seed})"
-  }
-
   // CREATE INDEX syntax
   // https://www.postgresql.org/docs/14/sql-createindex.html
   override def createIndex(
@@ -243,4 +234,17 @@ private object PostgresDialect extends JdbcDialect with SQLConfHelper {
       case _ => super.classifyException(message, e)
     }
   }
+
+  override def supportsLimit: Boolean = true
+
+  override def supportsOffset: Boolean = true
+
+  override def supportsTableSample: Boolean = true
+
+  override def getTableSample(sample: TableSampleInfo): String = {
+    // hard-coded to BERNOULLI for now because Spark doesn't have a way to specify sample
+    // method name
+    "TABLESAMPLE BERNOULLI" +
+      s" (${(sample.upperBound - sample.lowerBound) * 100}) REPEATABLE (${sample.seed})"
+  }
 }

