Posted to commits@spark.apache.org by do...@apache.org on 2020/03/20 04:57:02 UTC

[spark] branch branch-3.0 updated: [SPARK-31181][SQL][TESTS] Remove the default value assumption on CREATE TABLE test cases

This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new c42f9f6  [SPARK-31181][SQL][TESTS] Remove the default value assumption on CREATE TABLE test cases
c42f9f6 is described below

commit c42f9f61f40d4d796413ab2bfb58fbeff5ceb68b
Author: Dongjoon Hyun <do...@apache.org>
AuthorDate: Fri Mar 20 12:28:57 2020 +0800

    [SPARK-31181][SQL][TESTS] Remove the default value assumption on CREATE TABLE test cases
    
    A few `CREATE TABLE` test cases make assumptions about the default value of `LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED`. This PR (SPARK-31181) makes those assumptions explicit on the test-case side.
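    
    For context: when `LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED` is `true`, a `CREATE TABLE` statement without a `USING` clause creates a Hive serde table, while `false` yields a native data source table with the default provider (`spark.sql.sources.default`). A minimal sketch of the difference, assuming a Spark 3.0 session (the table name `t` is illustrative):
    
        // With spark.sql.legacy.createHiveTableByDefault.enabled = true:
        sql("CREATE TABLE t(a INT)")  // -> Hive serde table
        // With the conf set to "false":
        sql("CREATE TABLE t(a INT)")  // -> data source table (default provider)
        // The updated tests sidestep the ambiguity by being explicit:
        sql("CREATE TABLE t(a INT) USING PARQUET")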
    
    The configuration change was tested via https://github.com/apache/spark/pull/27894 while discussing SPARK-31136. This PR contains only the test-case part of that PR.
    
    This makes our test cases robust against the default value of `LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED`. Even if we switch the conf value later, the change will be a one-liner with no test-case updates.
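    
    The pattern used to pin the value is the `withSQLConf` helper from Spark's test utilities (see the `DDLParserSuite` hunk below); a minimal sketch, assuming a suite that mixes in Spark's `SQLHelper`:
    
        import org.apache.spark.sql.internal.SQLConf
    
        // Override the conf only for the test body; the helper restores the
        // previous value afterwards, so the assertion no longer depends on
        // which default the build ships with.
        withSQLConf(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED.key -> "false") {
          sql("CREATE TABLE t(a INT)")  // resolved as a data source table here
        }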
    
    No user-facing change.
    
    Passes Jenkins with the existing tests.
    
    Closes #27946 from dongjoon-hyun/SPARK-EXPLICIT-TEST.
    
    Authored-by: Dongjoon Hyun <do...@apache.org>
    Signed-off-by: Wenchen Fan <we...@databricks.com>
    (cherry picked from commit f1cc86792f825f76c2689660764d62d7be0d1989)
    Signed-off-by: Dongjoon Hyun <do...@apache.org>
---
 .../spark/sql/catalyst/parser/DDLParserSuite.scala | 27 ++++++++++++----------
 .../sql-tests/inputs/describe-table-column.sql     |  2 +-
 .../sql-tests/inputs/postgreSQL/create_view.sql    |  2 +-
 .../results/describe-table-column.sql.out          |  2 +-
 .../results/postgreSQL/create_view.sql.out         |  2 +-
 .../spark/sql/connector/DataSourceV2SQLSuite.scala |  2 ++
 .../sql/execution/command/DDLParserSuite.scala     |  3 ++-
 .../apache/spark/sql/hive/StatisticsSuite.scala    |  4 +++-
 8 files changed, 26 insertions(+), 18 deletions(-)

diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala
index e3570899..35a54c8 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala
@@ -26,6 +26,7 @@ import org.apache.spark.sql.catalyst.expressions.{EqualTo, Literal}
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.connector.catalog.TableChange.ColumnPosition.{after, first}
 import org.apache.spark.sql.connector.expressions.{ApplyTransform, BucketTransform, DaysTransform, FieldReference, HoursTransform, IdentityTransform, LiteralValue, MonthsTransform, Transform, YearsTransform}
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructType, TimestampType}
 import org.apache.spark.unsafe.types.UTF8String
 
@@ -2117,18 +2118,20 @@ class DDLParserSuite extends AnalysisTest {
   }
 
   test("create table - without using") {
-    val sql = "CREATE TABLE 1m.2g(a INT)"
-    val expectedTableSpec = TableSpec(
-      Seq("1m", "2g"),
-      Some(new StructType().add("a", IntegerType)),
-      Seq.empty[Transform],
-      None,
-      Map.empty[String, String],
-      None,
-      Map.empty[String, String],
-      None,
-      None)
+    withSQLConf(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED.key -> "false") {
+      val sql = "CREATE TABLE 1m.2g(a INT)"
+      val expectedTableSpec = TableSpec(
+        Seq("1m", "2g"),
+        Some(new StructType().add("a", IntegerType)),
+        Seq.empty[Transform],
+        None,
+        Map.empty[String, String],
+        None,
+        Map.empty[String, String],
+        None,
+        None)
 
-    testCreateOrReplaceDdl(sql, expectedTableSpec, expectedIfNotExists = false)
+      testCreateOrReplaceDdl(sql, expectedTableSpec, expectedIfNotExists = false)
+    }
   }
 }
diff --git a/sql/core/src/test/resources/sql-tests/inputs/describe-table-column.sql b/sql/core/src/test/resources/sql-tests/inputs/describe-table-column.sql
index 821cb47..d55e398 100644
--- a/sql/core/src/test/resources/sql-tests/inputs/describe-table-column.sql
+++ b/sql/core/src/test/resources/sql-tests/inputs/describe-table-column.sql
@@ -52,7 +52,7 @@ DROP TABLE desc_complex_col_table;
 
 --Test case insensitive
 
-CREATE TABLE customer(CName STRING);
+CREATE TABLE customer(CName STRING) USING PARQUET;
 
 INSERT INTO customer VALUES('Maria');
 
diff --git a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/create_view.sql b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/create_view.sql
index 39e7084..21ffd85 100644
--- a/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/create_view.sql
+++ b/sql/core/src/test/resources/sql-tests/inputs/postgreSQL/create_view.sql
@@ -41,7 +41,7 @@ DROP TABLE emp;
 -- These views are left around mainly to exercise special cases in pg_dump.
 
 -- [SPARK-19842] Informational Referential Integrity Constraints Support in Spark
-CREATE TABLE view_base_table (key int /* PRIMARY KEY */, data varchar(20));
+CREATE TABLE view_base_table (key int /* PRIMARY KEY */, data varchar(20)) USING PARQUET;
 --
 CREATE VIEW key_dependent_view AS
    SELECT * FROM view_base_table GROUP BY key;
diff --git a/sql/core/src/test/resources/sql-tests/results/describe-table-column.sql.out b/sql/core/src/test/resources/sql-tests/results/describe-table-column.sql.out
index ae9240e..c6d3d45 100644
--- a/sql/core/src/test/resources/sql-tests/results/describe-table-column.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/describe-table-column.sql.out
@@ -267,7 +267,7 @@ struct<>
 
 
 -- !query
-CREATE TABLE customer(CName STRING)
+CREATE TABLE customer(CName STRING) USING PARQUET
 -- !query schema
 struct<>
 -- !query output
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/create_view.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/create_view.sql.out
index 1f2bd57..ae1cb2f 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/create_view.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/create_view.sql.out
@@ -42,7 +42,7 @@ struct<>
 
 
 -- !query
-CREATE TABLE view_base_table (key int /* PRIMARY KEY */, data varchar(20))
+CREATE TABLE view_base_table (key int /* PRIMARY KEY */, data varchar(20)) USING PARQUET
 -- !query schema
 struct<>
 -- !query output
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
index 07e0959..ffaff66 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
@@ -257,6 +257,7 @@ class DataSourceV2SQLSuite
   }
 
   test("CreateTable: without USING clause") {
+    spark.conf.set(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED.key, "false")
     // unset this config to use the default v2 session catalog.
     spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
     val testCatalog = catalog("testcat").asTableCatalog
@@ -681,6 +682,7 @@ class DataSourceV2SQLSuite
   }
 
   test("CreateTableAsSelect: without USING clause") {
+    spark.conf.set(SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED.key, "false")
     // unset this config to use the default v2 session catalog.
     spark.conf.unset(V2_SESSION_CATALOG_IMPLEMENTATION.key)
     val testCatalog = catalog("testcat").asTableCatalog
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala
index bacd64e..28e5082 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala
@@ -40,7 +40,8 @@ import org.apache.spark.sql.test.SharedSparkSession
 import org.apache.spark.sql.types.{IntegerType, StructField, StructType}
 
 class DDLParserSuite extends AnalysisTest with SharedSparkSession {
-  private lazy val parser = new SparkSqlParser(new SQLConf)
+  private lazy val parser = new SparkSqlParser(new SQLConf().copy(
+    SQLConf.LEGACY_CREATE_HIVE_TABLE_BY_DEFAULT_ENABLED -> false))
 
   private def assertUnsupported(sql: String, containsThesePhrases: Seq[String] = Seq()): Unit = {
     val e = intercept[ParseException] {
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
index 488175a..c1eab63 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/StatisticsSuite.scala
@@ -1520,10 +1520,12 @@ class StatisticsSuite extends StatisticsCollectionTestBase with TestHiveSingleto
     val ext_tbl = "SPARK_30269_external"
     withTempDir { dir =>
       withTable(tbl, ext_tbl) {
-        sql(s"CREATE TABLE $tbl (key INT, value STRING, ds STRING) PARTITIONED BY (ds)")
+        sql(s"CREATE TABLE $tbl (key INT, value STRING, ds STRING)" +
+          "USING parquet PARTITIONED BY (ds)")
         sql(
           s"""
              | CREATE TABLE $ext_tbl (key INT, value STRING, ds STRING)
+             | USING PARQUET
              | PARTITIONED BY (ds)
              | LOCATION '${dir.toURI}'
            """.stripMargin)

