Posted to commits@spark.apache.org by ma...@apache.org on 2023/02/19 10:26:40 UTC

[spark] branch master updated: [SPARK-42323][SQL] Assign name to `_LEGACY_ERROR_TEMP_2332`

This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 21a5b416a6a [SPARK-42323][SQL] Assign name to `_LEGACY_ERROR_TEMP_2332`
21a5b416a6a is described below

commit 21a5b416a6a16fe6d960cc04931f5cbf858954ae
Author: itholic <ha...@databricks.com>
AuthorDate: Sun Feb 19 13:25:46 2023 +0300

    [SPARK-42323][SQL] Assign name to `_LEGACY_ERROR_TEMP_2332`
    
    ### What changes were proposed in this pull request?
    
    This PR proposes to assign the name `UNSUPPORTED_DATASOURCE_FOR_DIRECT_QUERY` to the error class `_LEGACY_ERROR_TEMP_2332`.
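    
    For illustration only, a minimal sketch of how the renamed error class surfaces to users, assuming an active `SparkSession` named `spark` and mirroring the updated query in `SQLQuerySuite`; the `getErrorClass`/`getMessageParameters` accessors come from `SparkThrowable`:
    
    ```scala
    import org.apache.spark.sql.AnalysisException
    
    // Sketch: a direct query on files that the analyzer cannot resolve now
    // fails with the stable error class instead of _LEGACY_ERROR_TEMP_2332.
    try {
      spark.sql("select * from json.invalid_file").show()
    } catch {
      case e: AnalysisException =>
        assert(e.getErrorClass == "UNSUPPORTED_DATASOURCE_FOR_DIRECT_QUERY")
        assert(e.getMessageParameters.get("dataSourceType") == "json")
    }
    ```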
    
    ### Why are the changes needed?
    
    We should assign a proper name to each `_LEGACY_ERROR_TEMP_*` error class.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No
    
    ### How was this patch tested?
    
    `./build/sbt "sql/testOnly org.apache.spark.sql.SQLQueryTestSuite*"`
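    
    For a narrower run, the two suites updated by this patch can likely be invoked directly (project/suite paths assumed from the standard Spark sbt layout):
    
    ```
    ./build/sbt "sql/testOnly org.apache.spark.sql.SQLQuerySuite"
    ./build/sbt "hive/testOnly org.apache.spark.sql.hive.execution.SQLQuerySuite"
    ```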
    
    Closes #39977 from itholic/LEGACY_2332.
    
    Authored-by: itholic <ha...@databricks.com>
    Signed-off-by: Max Gekk <ma...@gmail.com>
---
 core/src/main/resources/error/error-classes.json   | 10 ++++-----
 .../spark/sql/execution/datasources/rules.scala    |  7 ++++---
 .../scala/org/apache/spark/sql/SQLQuerySuite.scala | 24 ++++++++++++++--------
 .../spark/sql/hive/execution/SQLQuerySuite.scala   | 24 ++++++++++++++--------
 4 files changed, 41 insertions(+), 24 deletions(-)

diff --git a/core/src/main/resources/error/error-classes.json b/core/src/main/resources/error/error-classes.json
index 8cf46cb7aad..8910ca86de4 100644
--- a/core/src/main/resources/error/error-classes.json
+++ b/core/src/main/resources/error/error-classes.json
@@ -1578,6 +1578,11 @@
     ],
     "sqlState" : "0A000"
   },
+  "UNSUPPORTED_DATASOURCE_FOR_DIRECT_QUERY" : {
+    "message" : [
+      "Unsupported data source type for direct query on files: <dataSourceType>"
+    ]
+  },
   "UNSUPPORTED_DATATYPE" : {
     "message" : [
       "Unsupported data type <typeName>."
@@ -5107,11 +5112,6 @@
       "failed to evaluate expression <sqlExpr>: <msg>"
     ]
   },
-  "_LEGACY_ERROR_TEMP_2332" : {
-    "message" : [
-      "<msg>"
-    ]
-  },
   "_LEGACY_ERROR_TEMP_2400" : {
     "message" : [
       "The <name> expression must evaluate to a constant value, but got <limitExpr>."
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala
index dc4fed49c1c..9255aa2effc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala
@@ -78,9 +78,10 @@ class ResolveSQLOnFile(sparkSession: SparkSession) extends Rule[LogicalPlan] {
         case e: Exception =>
           // the provider is valid, but failed to create a logical plan
           u.failAnalysis(
-            errorClass = "_LEGACY_ERROR_TEMP_2332",
-            messageParameters = Map("msg" -> e.getMessage),
-            cause = e)
+            errorClass = "UNSUPPORTED_DATASOURCE_FOR_DIRECT_QUERY",
+            messageParameters = Map("dataSourceType" -> u.multipartIdentifier.head),
+            cause = e
+          )
       }
   }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index 20fa5fee3aa..377be2e720a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -1634,15 +1634,23 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark
     checkErrorTableNotFound(e, "`no_db`.`no_table`",
       ExpectedContext("no_db.no_table", 14, 13 + "no_db.no_table".length))
 
-    e = intercept[AnalysisException] {
-      sql("select * from json.invalid_file")
-    }
-    assert(e.message.contains("Path does not exist"))
+    checkError(
+      exception = intercept[AnalysisException] {
+        sql("select * from json.invalid_file")
+      },
+      errorClass = "UNSUPPORTED_DATASOURCE_FOR_DIRECT_QUERY",
+      parameters = Map("dataSourceType" -> "json"),
+      context = ExpectedContext("json.invalid_file", 14, 30)
+    )
 
-    e = intercept[AnalysisException] {
-      sql(s"select id from `org.apache.spark.sql.hive.orc`.`file_path`")
-    }
-    assert(e.message.contains("Hive built-in ORC data source must be used with Hive support"))
+    checkError(
+      exception = intercept[AnalysisException] {
+        sql(s"select id from `org.apache.spark.sql.hive.orc`.`file_path`")
+      },
+      errorClass = "UNSUPPORTED_DATASOURCE_FOR_DIRECT_QUERY",
+      parameters = Map("dataSourceType" -> "org.apache.spark.sql.hive.orc"),
+      context = ExpectedContext("`org.apache.spark.sql.hive.orc`.`file_path`", 15, 57)
+    )
 
     e = intercept[AnalysisException] {
       sql(s"select id from `org.apache.spark.sql.sources.HadoopFsRelationProvider`.`file_path`")
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index a902cb3a69e..31dfbedbbb4 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -1401,16 +1401,24 @@ abstract class SQLQuerySuiteBase extends QueryTest with SQLTestUtils with TestHi
     withTempPath(f => {
       spark.range(100).toDF.write.parquet(f.getCanonicalPath)
 
-      var e = intercept[AnalysisException] {
-        sql(s"select id from hive.`${f.getCanonicalPath}`")
-      }
-      assert(e.message.contains("Unsupported data source type for direct query on files: hive"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"select id from hive.`${f.getCanonicalPath}`")
+        },
+        errorClass = "UNSUPPORTED_DATASOURCE_FOR_DIRECT_QUERY",
+        parameters = Map("dataSourceType" -> "hive"),
+        context = ExpectedContext(s"hive.`${f.getCanonicalPath}`", 15, 104)
+      )
 
       // data source type is case insensitive
-      e = intercept[AnalysisException] {
-        sql(s"select id from HIVE.`${f.getCanonicalPath}`")
-      }
-      assert(e.message.contains("Unsupported data source type for direct query on files: HIVE"))
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql(s"select id from HIVE.`${f.getCanonicalPath}`")
+        },
+        errorClass = "UNSUPPORTED_DATASOURCE_FOR_DIRECT_QUERY",
+        parameters = Map("dataSourceType" -> "HIVE"),
+        context = ExpectedContext(s"HIVE.`${f.getCanonicalPath}`", 15, 104)
+      )
     })
   }
 

