Posted to commits@spark.apache.org by we...@apache.org on 2022/01/20 08:26:22 UTC

[spark] branch master updated: [SPARK-37931][SQL] Quote the column name if needed

This is an automated email from the ASF dual-hosted git repository.

wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 15464e3  [SPARK-37931][SQL] Quote the column name if needed
15464e3 is described below

commit 15464e37a19ee99147550bab96d2674fb05d06df
Author: PengLei <pe...@gmail.com>
AuthorDate: Thu Jan 20 16:24:58 2022 +0800

    [SPARK-37931][SQL] Quote the column name if needed
    
    ### What changes were proposed in this pull request?
    Quote the column name only when needed, instead of always quoting it.
    
    ### Why are the changes needed?
    [#comments](https://github.com/apache/spark/pull/35204#discussion_r785725545)
    
    ### Does this PR introduce _any_ user-facing change?
    Yes. It changes the schema string that users get, e.g.:
    ```
    "STRUCT<`_c0`: STRING, `_c1`: INT>"  => "STRUCT<_c0: STRING, _c1: INT>"
    ```
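
    For reference, here is a minimal standalone sketch of the quoting rule (the
    assumed behavior of the `quoteIfNeeded` helper this patch switches to, not
    the actual Spark source): quote only when the name is not a simple
    identifier.
    ```
    object QuotingSketch {
      // Escape embedded backticks by doubling them, then wrap in backticks.
      def quoteIdentifier(name: String): String =
        "`" + name.replace("`", "``") + "`"

      // Quote only when needed: a name made solely of letters, digits and
      // underscores (and not purely digits) is left bare.
      def quoteIfNeeded(name: String): String =
        if (name.matches("[a-zA-Z0-9_]+") && !name.matches("\\d+")) name
        else quoteIdentifier(name)
    }
    // QuotingSketch.quoteIfNeeded("_c0")       == "_c0"
    // QuotingSketch.quoteIfNeeded("<another>") == "`<another>`"
    ```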
    Currently, there are three ways for an end user to get the schema string
    directly (a minimal session exercising all three follows the list):
    1. the schema-inference functions, e.g.:
    ```
    schema_of_json
    schema_of_csv
    ```
    2. the table schema: `df.schema` or `SHOW CREATE TABLE`
    3. calling `toDDL` on a StructType or StructField.
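
    A minimal spark-shell session exercising all three ways (illustrative only;
    the table name `t` and the commented outputs are assumptions based on the
    new unquoted format):
    ```
    import org.apache.spark.sql.functions.{lit, schema_of_json}
    import org.apache.spark.sql.types.{IntegerType, StructField, StructType}

    // 1. schema-inference functions
    spark.range(1).select(schema_of_json(lit("""{"a": 0}"""))).first.getString(0)
    // "STRUCT<a: BIGINT>"

    // 2. table schema
    spark.sql("CREATE TABLE t (a INT, b STRING) USING parquet")
    spark.sql("SHOW CREATE TABLE t").first.getString(0)
    // CREATE TABLE ... (\n  a INT,\n  b STRING)\nUSING parquet

    // 3. toDDL on a StructType / StructField
    StructType(Seq(StructField("a", IntegerType))).toDDL
    // "a INT"
    ```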
    
    ### How was this patch tested?
    Existing test cases.
    
    Closes #35227 from Peng-Lei/Quote-Column.
    
    Authored-by: PengLei <pe...@gmail.com>
    Signed-off-by: Wenchen Fan <we...@databricks.com>
---
 R/pkg/tests/fulltests/test_sparkSQL.R              |  8 ++--
 python/pyspark/sql/functions.py                    |  8 ++--
 .../sql/catalyst/expressions/csvExpressions.scala  |  2 +-
 .../sql/catalyst/expressions/jsonExpressions.scala |  4 +-
 .../org/apache/spark/sql/types/StructField.scala   |  6 +--
 .../catalyst/expressions/CsvExpressionsSuite.scala |  4 +-
 .../expressions/JsonExpressionsSuite.scala         |  8 ++--
 .../apache/spark/sql/types/StructTypeSuite.scala   | 16 +++----
 .../sql-tests/results/charvarchar.sql.out          | 12 ++---
 .../sql-tests/results/csv-functions.sql.out        |  2 +-
 .../sql-tests/results/json-functions.sql.out       |  6 +--
 .../sql-tests/results/show-create-table.sql.out    | 56 +++++++++++-----------
 .../org/apache/spark/sql/CsvFunctionsSuite.scala   |  8 ++--
 .../spark/sql/DataFrameSetOperationsSuite.scala    | 50 +++++++++----------
 .../org/apache/spark/sql/JsonFunctionsSuite.scala  | 10 ++--
 .../command/ShowCreateTableSuiteBase.scala         | 22 ++++-----
 .../command/v1/ShowCreateTableSuite.scala          | 18 +++----
 .../command/v2/ShowCreateTableSuite.scala          | 20 ++++----
 .../execution/command/ShowCreateTableSuite.scala   | 16 +++----
 19 files changed, 138 insertions(+), 138 deletions(-)

diff --git a/R/pkg/tests/fulltests/test_sparkSQL.R b/R/pkg/tests/fulltests/test_sparkSQL.R
index 0e46324e..73b9dcc 100644
--- a/R/pkg/tests/fulltests/test_sparkSQL.R
+++ b/R/pkg/tests/fulltests/test_sparkSQL.R
@@ -1690,9 +1690,9 @@ test_that("column functions", {
 
   df <- as.DataFrame(list(list("col" = "1")))
   c <- collect(select(df, schema_of_csv("Amsterdam,2018")))
-  expect_equal(c[[1]], "STRUCT<`_c0`: STRING, `_c1`: INT>")
+  expect_equal(c[[1]], "STRUCT<_c0: STRING, _c1: INT>")
   c <- collect(select(df, schema_of_csv(lit("Amsterdam,2018"))))
-  expect_equal(c[[1]], "STRUCT<`_c0`: STRING, `_c1`: INT>")
+  expect_equal(c[[1]], "STRUCT<_c0: STRING, _c1: INT>")
 
   # Test to_json(), from_json(), schema_of_json()
   df <- sql("SELECT array(named_struct('name', 'Bob'), named_struct('name', 'Alice')) as people")
@@ -1725,9 +1725,9 @@ test_that("column functions", {
 
   df <- as.DataFrame(list(list("col" = "1")))
   c <- collect(select(df, schema_of_json('{"name":"Bob"}')))
-  expect_equal(c[[1]], "STRUCT<`name`: STRING>")
+  expect_equal(c[[1]], "STRUCT<name: STRING>")
   c <- collect(select(df, schema_of_json(lit('{"name":"Bob"}'))))
-  expect_equal(c[[1]], "STRUCT<`name`: STRING>")
+  expect_equal(c[[1]], "STRUCT<name: STRING>")
 
   # Test to_json() supports arrays of primitive types and arrays
   df <- sql("SELECT array(19, 42, 70) as age")
diff --git a/python/pyspark/sql/functions.py b/python/pyspark/sql/functions.py
index f2bca0b5..e69c37d 100644
--- a/python/pyspark/sql/functions.py
+++ b/python/pyspark/sql/functions.py
@@ -4091,10 +4091,10 @@ def schema_of_json(json: "ColumnOrName", options: Optional[Dict[str, str]] = Non
     --------
     >>> df = spark.range(1)
     >>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
-    [Row(json='STRUCT<`a`: BIGINT>')]
+    [Row(json='STRUCT<a: BIGINT>')]
     >>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
     >>> df.select(schema.alias("json")).collect()
-    [Row(json='STRUCT<`a`: BIGINT>')]
+    [Row(json='STRUCT<a: BIGINT>')]
     """
     if isinstance(json, str):
         col = _create_column_from_literal(json)
@@ -4127,9 +4127,9 @@ def schema_of_csv(csv: "ColumnOrName", options: Optional[Dict[str, str]] = None)
     --------
     >>> df = spark.range(1)
     >>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
-    [Row(csv='STRUCT<`_c0`: INT, `_c1`: STRING>')]
+    [Row(csv='STRUCT<_c0: INT, _c1: STRING>')]
     >>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
-    [Row(csv='STRUCT<`_c0`: INT, `_c1`: STRING>')]
+    [Row(csv='STRUCT<_c0: INT, _c1: STRING>')]
     """
     if isinstance(csv, str):
         col = _create_column_from_literal(csv)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/csvExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/csvExpressions.scala
index 79bbc10..30d992a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/csvExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/csvExpressions.scala
@@ -153,7 +153,7 @@ case class CsvToStructs(
   examples = """
     Examples:
       > SELECT _FUNC_('1,abc');
-       STRUCT<`_c0`: INT, `_c1`: STRING>
+       STRUCT<_c0: INT, _c1: STRING>
   """,
   since = "3.0.0",
   group = "csv_funcs")
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala
index 5b05862..9f00b7c 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/jsonExpressions.scala
@@ -766,9 +766,9 @@ case class StructsToJson(
   examples = """
     Examples:
       > SELECT _FUNC_('[{"col":0}]');
-       ARRAY<STRUCT<`col`: BIGINT>>
+       ARRAY<STRUCT<col: BIGINT>>
       > SELECT _FUNC_('[{"col":01}]', map('allowNumericLeadingZeros', 'true'));
-       ARRAY<STRUCT<`col`: BIGINT>>
+       ARRAY<STRUCT<col: BIGINT>>
   """,
   group = "json_funcs",
   since = "2.4.0")
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructField.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructField.scala
index 93d57a7f..f490f83 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructField.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/types/StructField.scala
@@ -21,7 +21,7 @@ import org.json4s.JsonAST.JValue
 import org.json4s.JsonDSL._
 
 import org.apache.spark.annotation.Stable
-import org.apache.spark.sql.catalyst.util.{escapeSingleQuotedString, quoteIdentifier}
+import org.apache.spark.sql.catalyst.util.{escapeSingleQuotedString, quoteIfNeeded}
 import org.apache.spark.sql.catalyst.util.StringUtils.StringConcat
 import org.apache.spark.sql.util.SchemaUtils
 
@@ -93,7 +93,7 @@ case class StructField(
    * Returns a string containing a schema in SQL format. For example the following value:
    * `StructField("eventId", IntegerType)` will be converted to `eventId`: INT.
    */
-  private[sql] def sql = s"${quoteIdentifier(name)}: ${dataType.sql}$getDDLComment"
+  private[sql] def sql = s"${quoteIfNeeded(name)}: ${dataType.sql}$getDDLComment"
 
   /**
    * Returns a string containing a schema in DDL format. For example, the following value:
@@ -103,6 +103,6 @@ case class StructField(
    */
   def toDDL: String = {
     val nullString = if (nullable) "" else " NOT NULL"
-    s"${quoteIdentifier(name)} ${dataType.sql}${nullString}$getDDLComment"
+    s"${quoteIfNeeded(name)} ${dataType.sql}${nullString}$getDDLComment"
   }
 }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CsvExpressionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CsvExpressionsSuite.scala
index 7945974..1d174ed 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CsvExpressionsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CsvExpressionsSuite.scala
@@ -158,13 +158,13 @@ class CsvExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper with P
   }
 
   test("infer schema of CSV strings") {
-    checkEvaluation(new SchemaOfCsv(Literal.create("1,abc")), "STRUCT<`_c0`: INT, `_c1`: STRING>")
+    checkEvaluation(new SchemaOfCsv(Literal.create("1,abc")), "STRUCT<_c0: INT, _c1: STRING>")
   }
 
   test("infer schema of CSV strings by using options") {
     checkEvaluation(
       new SchemaOfCsv(Literal.create("1|abc"), Map("delimiter" -> "|")),
-      "STRUCT<`_c0`: INT, `_c1`: STRING>")
+      "STRUCT<_c0: INT, _c1: STRING>")
   }
 
   test("to_csv - struct") {
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/JsonExpressionsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/JsonExpressionsSuite.scala
index 2ae7c76..af07172 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/JsonExpressionsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/JsonExpressionsSuite.scala
@@ -736,17 +736,17 @@ class JsonExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper with
 
   test("SPARK-24709: infer schema of json strings") {
     checkEvaluation(new SchemaOfJson(Literal.create("""{"col":0}""")),
-      "STRUCT<`col`: BIGINT>")
+      "STRUCT<col: BIGINT>")
     checkEvaluation(
       new SchemaOfJson(Literal.create("""{"col0":["a"], "col1": {"col2": "b"}}""")),
-      "STRUCT<`col0`: ARRAY<STRING>, `col1`: STRUCT<`col2`: STRING>>")
+      "STRUCT<col0: ARRAY<STRING>, col1: STRUCT<col2: STRING>>")
   }
 
   test("infer schema of JSON strings by using options") {
     checkEvaluation(
       new SchemaOfJson(Literal.create("""{"col":01}"""),
         CreateMap(Seq(Literal.create("allowNumericLeadingZeros"), Literal.create("true")))),
-      "STRUCT<`col`: BIGINT>")
+      "STRUCT<col: BIGINT>")
   }
 
   test("parse date with locale") {
@@ -811,7 +811,7 @@ class JsonExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper with
     }
 
     Seq("en-US", "ko-KR", "ru-RU", "de-DE").foreach {
-        checkDecimalInfer(_, """STRUCT<`d`: DECIMAL(7,3)>""")
+        checkDecimalInfer(_, """STRUCT<d: DECIMAL(7,3)>""")
     }
   }
 
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
index a7e22e9..16f1223 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
@@ -51,7 +51,7 @@ class StructTypeSuite extends SparkFunSuite with SQLHelper {
   test("SPARK-24849: toDDL - simple struct") {
     val struct = StructType(Seq(StructField("a", IntegerType)))
 
-    assert(struct.toDDL == "`a` INT")
+    assert(struct.toDDL == "a INT")
   }
 
   test("SPARK-24849: round trip toDDL - fromDDL") {
@@ -61,7 +61,7 @@ class StructTypeSuite extends SparkFunSuite with SQLHelper {
   }
 
   test("SPARK-24849: round trip fromDDL - toDDL") {
-    val struct = "`a` MAP<INT, STRING>,`b` INT"
+    val struct = "a MAP<INT, STRING>,b INT"
 
     assert(fromDDL(struct).toDDL === struct)
   }
@@ -70,14 +70,14 @@ class StructTypeSuite extends SparkFunSuite with SQLHelper {
     val struct = new StructType()
       .add("metaData", new StructType().add("eventId", StringType))
 
-    assert(struct.toDDL == "`metaData` STRUCT<`eventId`: STRING>")
+    assert(struct.toDDL == "metaData STRUCT<eventId: STRING>")
   }
 
   test("SPARK-24849: toDDL should output field's comment") {
     val struct = StructType(Seq(
       StructField("b", BooleanType).withComment("Field's comment")))
 
-    assert(struct.toDDL == """`b` BOOLEAN COMMENT 'Field\'s comment'""")
+    assert(struct.toDDL == """b BOOLEAN COMMENT 'Field\'s comment'""")
   }
 
   private val nestedStruct = new StructType()
@@ -89,7 +89,7 @@ class StructTypeSuite extends SparkFunSuite with SQLHelper {
     ).withComment("comment"))
 
   test("SPARK-33846: toDDL should output nested field's comment") {
-    val ddl = "`a` STRUCT<`b`: STRUCT<`c`: STRING COMMENT 'Deep Nested comment'> " +
+    val ddl = "a STRUCT<b: STRUCT<c: STRING COMMENT 'Deep Nested comment'> " +
       "COMMENT 'Nested comment'> COMMENT 'comment'"
     assert(nestedStruct.toDDL == ddl)
   }
@@ -153,7 +153,7 @@ class StructTypeSuite extends SparkFunSuite with SQLHelper {
   }
 
   test("interval keyword in schema string") {
-    val interval = "`a` INTERVAL"
+    val interval = "a INTERVAL"
     assert(fromDDL(interval).toDDL === interval)
   }
 
@@ -250,10 +250,10 @@ class StructTypeSuite extends SparkFunSuite with SQLHelper {
   }
 
   test("SPARK-35285: ANSI interval types in schema") {
-    val yearMonthInterval = "`ymi` INTERVAL YEAR TO MONTH"
+    val yearMonthInterval = "ymi INTERVAL YEAR TO MONTH"
     assert(fromDDL(yearMonthInterval).toDDL === yearMonthInterval)
 
-    val dayTimeInterval = "`dti` INTERVAL DAY TO SECOND"
+    val dayTimeInterval = "dti INTERVAL DAY TO SECOND"
     assert(fromDDL(dayTimeInterval).toDDL === dayTimeInterval)
   }
 
diff --git a/sql/core/src/test/resources/sql-tests/results/charvarchar.sql.out b/sql/core/src/test/resources/sql-tests/results/charvarchar.sql.out
index 5c6b1a7..de994d6 100644
--- a/sql/core/src/test/resources/sql-tests/results/charvarchar.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/charvarchar.sql.out
@@ -52,8 +52,8 @@ show create table char_tbl
 struct<createtab_stmt:string>
 -- !query output
 CREATE TABLE default.char_tbl (
-  `c` CHAR(5),
-  `v` VARCHAR(6))
+  c CHAR(5),
+  v VARCHAR(6))
 USING parquet
 
 
@@ -71,8 +71,8 @@ show create table char_tbl2
 struct<createtab_stmt:string>
 -- !query output
 CREATE TABLE default.char_tbl2 (
-  `c` CHAR(5),
-  `v` VARCHAR(6))
+  c CHAR(5),
+  v VARCHAR(6))
 USING parquet
 
 
@@ -162,8 +162,8 @@ show create table char_tbl3
 struct<createtab_stmt:string>
 -- !query output
 CREATE TABLE default.char_tbl3 (
-  `c` CHAR(5),
-  `v` VARCHAR(6))
+  c CHAR(5),
+  v VARCHAR(6))
 USING parquet
 
 
diff --git a/sql/core/src/test/resources/sql-tests/results/csv-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/csv-functions.sql.out
index 2ca44d5..53cae3f 100644
--- a/sql/core/src/test/resources/sql-tests/results/csv-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/csv-functions.sql.out
@@ -89,7 +89,7 @@ select schema_of_csv('1|abc', map('delimiter', '|'))
 -- !query schema
 struct<schema_of_csv(1|abc):string>
 -- !query output
-STRUCT<`_c0`: INT, `_c1`: STRING>
+STRUCT<_c0: INT, _c1: STRING>
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/json-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/json-functions.sql.out
index ff59553..e509d4e 100644
--- a/sql/core/src/test/resources/sql-tests/results/json-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/json-functions.sql.out
@@ -236,7 +236,7 @@ select schema_of_json('{"c1":0, "c2":[1]}')
 -- !query schema
 struct<schema_of_json({"c1":0, "c2":[1]}):string>
 -- !query output
-STRUCT<`c1`: BIGINT, `c2`: ARRAY<BIGINT>>
+STRUCT<c1: BIGINT, c2: ARRAY<BIGINT>>
 
 
 -- !query
@@ -375,7 +375,7 @@ select schema_of_json('{"c1":1}', map('primitivesAsString', 'true'))
 -- !query schema
 struct<schema_of_json({"c1":1}):string>
 -- !query output
-STRUCT<`c1`: STRING>
+STRUCT<c1: STRING>
 
 
 -- !query
@@ -383,7 +383,7 @@ select schema_of_json('{"c1":01, "c2":0.1}', map('allowNumericLeadingZeros', 'tr
 -- !query schema
 struct<schema_of_json({"c1":01, "c2":0.1}):string>
 -- !query output
-STRUCT<`c1`: BIGINT, `c2`: DECIMAL(1,1)>
+STRUCT<c1: BIGINT, c2: DECIMAL(1,1)>
 
 
 -- !query
diff --git a/sql/core/src/test/resources/sql-tests/results/show-create-table.sql.out b/sql/core/src/test/resources/sql-tests/results/show-create-table.sql.out
index ffcbb73..4c7f124 100644
--- a/sql/core/src/test/resources/sql-tests/results/show-create-table.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/show-create-table.sql.out
@@ -16,9 +16,9 @@ SHOW CREATE TABLE tbl
 struct<createtab_stmt:string>
 -- !query output
 CREATE TABLE default.tbl (
-  `a` INT,
-  `b` STRING,
-  `c` INT)
+  a INT,
+  b STRING,
+  c INT)
 USING parquet
 
 
@@ -45,9 +45,9 @@ SHOW CREATE TABLE tbl
 struct<createtab_stmt:string>
 -- !query output
 CREATE TABLE default.tbl (
-  `a` INT,
-  `b` STRING,
-  `c` INT)
+  a INT,
+  b STRING,
+  c INT)
 USING parquet
 OPTIONS (
   'a' = '1')
@@ -76,9 +76,9 @@ SHOW CREATE TABLE tbl
 struct<createtab_stmt:string>
 -- !query output
 CREATE TABLE default.tbl (
-  `a` INT,
-  `b` STRING,
-  `c` INT)
+  a INT,
+  b STRING,
+  c INT)
 USING parquet
 LOCATION 'file:/path/to/table'
 
@@ -106,9 +106,9 @@ SHOW CREATE TABLE tbl
 struct<createtab_stmt:string>
 -- !query output
 CREATE TABLE default.tbl (
-  `a` INT,
-  `b` STRING,
-  `c` INT)
+  a INT,
+  b STRING,
+  c INT)
 USING parquet
 LOCATION 'file:/path/to/table'
 
@@ -136,9 +136,9 @@ SHOW CREATE TABLE tbl
 struct<createtab_stmt:string>
 -- !query output
 CREATE TABLE default.tbl (
-  `b` STRING,
-  `c` INT,
-  `a` INT)
+  b STRING,
+  c INT,
+  a INT)
 USING parquet
 PARTITIONED BY (a)
 
@@ -166,9 +166,9 @@ SHOW CREATE TABLE tbl
 struct<createtab_stmt:string>
 -- !query output
 CREATE TABLE default.tbl (
-  `a` INT,
-  `b` STRING,
-  `c` INT)
+  a INT,
+  b STRING,
+  c INT)
 USING parquet
 CLUSTERED BY (a)
 SORTED BY (b)
@@ -198,9 +198,9 @@ SHOW CREATE TABLE tbl
 struct<createtab_stmt:string>
 -- !query output
 CREATE TABLE default.tbl (
-  `a` INT,
-  `b` STRING,
-  `c` INT)
+  a INT,
+  b STRING,
+  c INT)
 USING parquet
 COMMENT 'This is a comment'
 
@@ -228,9 +228,9 @@ SHOW CREATE TABLE tbl
 struct<createtab_stmt:string>
 -- !query output
 CREATE TABLE default.tbl (
-  `a` INT,
-  `b` STRING,
-  `c` INT)
+  a INT,
+  b STRING,
+  c INT)
 USING parquet
 TBLPROPERTIES (
   'a' = '1')
@@ -258,10 +258,10 @@ SHOW CREATE TABLE tbl
 struct<createtab_stmt:string>
 -- !query output
 CREATE TABLE default.tbl (
-  `a` FLOAT,
-  `b` DECIMAL(10,0),
-  `c` DECIMAL(10,0),
-  `d` DECIMAL(10,1))
+  a FLOAT,
+  b DECIMAL(10,0),
+  c DECIMAL(10,0),
+  d DECIMAL(10,1))
 USING parquet
 
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala
index 2808652..461bbd8 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/CsvFunctionsSuite.scala
@@ -82,16 +82,16 @@ class CsvFunctionsSuite extends QueryTest with SharedSparkSession {
   test("schema_of_csv - infers schemas") {
     checkAnswer(
       spark.range(1).select(schema_of_csv(lit("0.1,1"))),
-      Seq(Row("STRUCT<`_c0`: DOUBLE, `_c1`: INT>")))
+      Seq(Row("STRUCT<_c0: DOUBLE, _c1: INT>")))
     checkAnswer(
       spark.range(1).select(schema_of_csv("0.1,1")),
-      Seq(Row("STRUCT<`_c0`: DOUBLE, `_c1`: INT>")))
+      Seq(Row("STRUCT<_c0: DOUBLE, _c1: INT>")))
   }
 
   test("schema_of_csv - infers schemas using options") {
     val df = spark.range(1)
       .select(schema_of_csv(lit("0.1 1"), Map("sep" -> " ").asJava))
-    checkAnswer(df, Seq(Row("STRUCT<`_c0`: DOUBLE, `_c1`: INT>")))
+    checkAnswer(df, Seq(Row("STRUCT<_c0: DOUBLE, _c1: INT>")))
   }
 
   test("to_csv - struct") {
@@ -220,7 +220,7 @@ class CsvFunctionsSuite extends QueryTest with SharedSparkSession {
     val input = concat_ws(",", lit(0.1), lit(1))
     checkAnswer(
       spark.range(1).select(schema_of_csv(input)),
-      Seq(Row("STRUCT<`_c0`: DOUBLE, `_c1`: INT>")))
+      Seq(Row("STRUCT<_c0: DOUBLE, _c1: INT>")))
   }
 
   test("optional datetime parser does not affect csv time formatting") {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSetOperationsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSetOperationsSuite.scala
index b19e430..19a62c2 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSetOperationsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSetOperationsSuite.scala
@@ -804,7 +804,7 @@ class DataFrameSetOperationsSuite extends QueryTest with SharedSparkSession {
       StructType(Seq(StructField("topLevelCol", nestedStructType2))))
 
     val union = df1.unionByName(df2, allowMissingColumns = true)
-    assert(union.schema.toDDL == "`topLevelCol` STRUCT<`b`: STRING, `a`: STRING>")
+    assert(union.schema.toDDL == "topLevelCol STRUCT<b: STRING, a: STRING>")
     checkAnswer(union, Row(Row("b", null)) :: Row(Row("b", "a")) :: Nil)
   }
 
@@ -836,15 +836,15 @@ class DataFrameSetOperationsSuite extends QueryTest with SharedSparkSession {
       StructType(Seq(StructField("topLevelCol", nestedStructType2))))
 
     var unionDf = df1.unionByName(df2, true)
-    assert(unionDf.schema.toDDL == "`topLevelCol` " +
-      "STRUCT<`b`: STRUCT<`ba`: STRING, `bb`: STRING>, `a`: STRUCT<`aa`: STRING>>")
+    assert(unionDf.schema.toDDL == "topLevelCol " +
+      "STRUCT<b: STRUCT<ba: STRING, bb: STRING>, a: STRUCT<aa: STRING>>")
     checkAnswer(unionDf,
       Row(Row(Row("ba", null), null)) ::
       Row(Row(Row(null, "bb"), Row("aa"))) :: Nil)
 
     unionDf = df2.unionByName(df1, true)
-    assert(unionDf.schema.toDDL == "`topLevelCol` STRUCT<`a`: STRUCT<`aa`: STRING>, " +
-      "`b`: STRUCT<`bb`: STRING, `ba`: STRING>>")
+    assert(unionDf.schema.toDDL == "topLevelCol STRUCT<a: STRUCT<aa: STRING>, " +
+      "b: STRUCT<bb: STRING, ba: STRING>>")
     checkAnswer(unionDf,
       Row(Row(null, Row(null, "ba"))) ::
       Row(Row(Row("aa"), Row("bb", null))) :: Nil)
@@ -1112,13 +1112,13 @@ class DataFrameSetOperationsSuite extends QueryTest with SharedSparkSession {
       StructType(Seq(StructField("arr", arrayType2))))
 
     var unionDf = df1.unionByName(df2)
-    assert(unionDf.schema.toDDL == "`arr` ARRAY<STRUCT<`ba`: STRING, `bb`: STRING>>")
+    assert(unionDf.schema.toDDL == "arr ARRAY<STRUCT<ba: STRING, bb: STRING>>")
     checkAnswer(unionDf,
       Row(Seq(Row("ba", "bb"))) ::
       Row(Seq(Row("ba", "bb"))) :: Nil)
 
     unionDf = df2.unionByName(df1)
-    assert(unionDf.schema.toDDL == "`arr` ARRAY<STRUCT<`bb`: STRING, `ba`: STRING>>")
+    assert(unionDf.schema.toDDL == "arr ARRAY<STRUCT<bb: STRING, ba: STRING>>")
     checkAnswer(unionDf,
       Row(Seq(Row("bb", "ba"))) ::
       Row(Seq(Row("bb", "ba"))) :: Nil)
@@ -1150,7 +1150,7 @@ class DataFrameSetOperationsSuite extends QueryTest with SharedSparkSession {
     }
 
     unionDf = df3.unionByName(df4, true)
-    assert(unionDf.schema.toDDL == "`arr` ARRAY<STRUCT<`ba`: STRING, `bb`: STRING>>")
+    assert(unionDf.schema.toDDL == "arr ARRAY<STRUCT<ba: STRING, bb: STRING>>")
     checkAnswer(unionDf,
       Row(Seq(Row("ba", null))) ::
       Row(Seq(Row(null, "bb"))) :: Nil)
@@ -1160,7 +1160,7 @@ class DataFrameSetOperationsSuite extends QueryTest with SharedSparkSession {
     }
 
     unionDf = df4.unionByName(df3, true)
-    assert(unionDf.schema.toDDL == "`arr` ARRAY<STRUCT<`bb`: STRING, `ba`: STRING>>")
+    assert(unionDf.schema.toDDL == "arr ARRAY<STRUCT<bb: STRING, ba: STRING>>")
     checkAnswer(unionDf,
       Row(Seq(Row("bb", null))) ::
       Row(Seq(Row(null, "ba"))) :: Nil)
@@ -1196,15 +1196,15 @@ class DataFrameSetOperationsSuite extends QueryTest with SharedSparkSession {
       StructType(Seq(StructField("topLevelCol", nestedStructType2))))
 
     var unionDf = df1.unionByName(df2)
-    assert(unionDf.schema.toDDL == "`topLevelCol` " +
-      "STRUCT<`b`: ARRAY<STRUCT<`ba`: STRING, `bb`: STRING>>>")
+    assert(unionDf.schema.toDDL == "topLevelCol " +
+      "STRUCT<b: ARRAY<STRUCT<ba: STRING, bb: STRING>>>")
     checkAnswer(unionDf,
       Row(Row(Seq(Row("ba", "bb")))) ::
       Row(Row(Seq(Row("ba", "bb")))) :: Nil)
 
     unionDf = df2.unionByName(df1)
-    assert(unionDf.schema.toDDL == "`topLevelCol` STRUCT<" +
-      "`b`: ARRAY<STRUCT<`bb`: STRING, `ba`: STRING>>>")
+    assert(unionDf.schema.toDDL == "topLevelCol STRUCT<" +
+      "b: ARRAY<STRUCT<bb: STRING, ba: STRING>>>")
     checkAnswer(unionDf,
       Row(Row(Seq(Row("bb", "ba")))) ::
       Row(Row(Seq(Row("bb", "ba")))) :: Nil)
@@ -1240,8 +1240,8 @@ class DataFrameSetOperationsSuite extends QueryTest with SharedSparkSession {
     }
 
     unionDf = df3.unionByName(df4, true)
-    assert(unionDf.schema.toDDL == "`topLevelCol` " +
-      "STRUCT<`b`: ARRAY<STRUCT<`ba`: STRING, `bb`: STRING>>>")
+    assert(unionDf.schema.toDDL == "topLevelCol " +
+      "STRUCT<b: ARRAY<STRUCT<ba: STRING, bb: STRING>>>")
     checkAnswer(unionDf,
       Row(Row(Seq(Row("ba", null)))) ::
       Row(Row(Seq(Row(null, "bb")))) :: Nil)
@@ -1251,8 +1251,8 @@ class DataFrameSetOperationsSuite extends QueryTest with SharedSparkSession {
     }
 
     unionDf = df4.unionByName(df3, true)
-    assert(unionDf.schema.toDDL == "`topLevelCol` STRUCT<" +
-      "`b`: ARRAY<STRUCT<`bb`: STRING, `ba`: STRING>>>")
+    assert(unionDf.schema.toDDL == "topLevelCol STRUCT<" +
+      "b: ARRAY<STRUCT<bb: STRING, ba: STRING>>>")
     checkAnswer(unionDf,
       Row(Row(Seq(Row("bb", null)))) ::
       Row(Row(Seq(Row(null, "ba")))) :: Nil)
@@ -1292,15 +1292,15 @@ class DataFrameSetOperationsSuite extends QueryTest with SharedSparkSession {
       StructType(Seq(StructField("topLevelCol", nestedStructType2))))
 
     var unionDf = df1.unionByName(df2)
-    assert(unionDf.schema.toDDL == "`topLevelCol` " +
-      "STRUCT<`b`: ARRAY<ARRAY<STRUCT<`ba`: STRING, `bb`: STRING>>>>")
+    assert(unionDf.schema.toDDL == "topLevelCol " +
+      "STRUCT<b: ARRAY<ARRAY<STRUCT<ba: STRING, bb: STRING>>>>")
     checkAnswer(unionDf,
       Row(Row(Seq(Seq(Row("ba", "bb"))))) ::
       Row(Row(Seq(Seq(Row("ba", "bb"))))) :: Nil)
 
     unionDf = df2.unionByName(df1)
-    assert(unionDf.schema.toDDL == "`topLevelCol` STRUCT<" +
-      "`b`: ARRAY<ARRAY<STRUCT<`bb`: STRING, `ba`: STRING>>>>")
+    assert(unionDf.schema.toDDL == "topLevelCol STRUCT<" +
+      "b: ARRAY<ARRAY<STRUCT<bb: STRING, ba: STRING>>>>")
     checkAnswer(unionDf,
       Row(Row(Seq(Seq(Row("bb", "ba"))))) ::
       Row(Row(Seq(Seq(Row("bb", "ba"))))) :: Nil)
@@ -1340,8 +1340,8 @@ class DataFrameSetOperationsSuite extends QueryTest with SharedSparkSession {
     }
 
     unionDf = df3.unionByName(df4, true)
-    assert(unionDf.schema.toDDL == "`topLevelCol` " +
-      "STRUCT<`b`: ARRAY<ARRAY<STRUCT<`ba`: STRING, `bb`: STRING>>>>")
+    assert(unionDf.schema.toDDL == "topLevelCol " +
+      "STRUCT<b: ARRAY<ARRAY<STRUCT<ba: STRING, bb: STRING>>>>")
     checkAnswer(unionDf,
       Row(Row(Seq(Seq(Row("ba", null))))) ::
       Row(Row(Seq(Seq(Row(null, "bb"))))) :: Nil)
@@ -1351,8 +1351,8 @@ class DataFrameSetOperationsSuite extends QueryTest with SharedSparkSession {
     }
 
     unionDf = df4.unionByName(df3, true)
-    assert(unionDf.schema.toDDL == "`topLevelCol` STRUCT<" +
-      "`b`: ARRAY<ARRAY<STRUCT<`bb`: STRING, `ba`: STRING>>>>")
+    assert(unionDf.schema.toDDL == "topLevelCol STRUCT<" +
+      "b: ARRAY<ARRAY<STRUCT<bb: STRING, ba: STRING>>>>")
     checkAnswer(unionDf,
       Row(Row(Seq(Seq(Row("bb", null))))) ::
       Row(Row(Seq(Seq(Row(null, "ba"))))) :: Nil)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
index 06babab..6661b58 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/JsonFunctionsSuite.scala
@@ -417,7 +417,7 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession {
   test("infers schemas using options") {
     val df = spark.range(1)
       .select(schema_of_json(lit("{a:1}"), Map("allowUnquotedFieldNames" -> "true").asJava))
-    checkAnswer(df, Seq(Row("STRUCT<`a`: BIGINT>")))
+    checkAnswer(df, Seq(Row("STRUCT<a: BIGINT>")))
   }
 
   test("from_json - array of primitive types") {
@@ -697,14 +697,14 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession {
     val input = regexp_replace(lit("""{"item_id": 1, "item_price": 0.1}"""), "item_", "")
     checkAnswer(
       spark.range(1).select(schema_of_json(input)),
-      Seq(Row("STRUCT<`id`: BIGINT, `price`: DOUBLE>")))
+      Seq(Row("STRUCT<id: BIGINT, price: DOUBLE>")))
   }
 
   test("SPARK-31065: schema_of_json - null and empty strings as strings") {
     Seq("""{"id": null}""", """{"id": ""}""").foreach { input =>
       checkAnswer(
         spark.range(1).select(schema_of_json(input)),
-        Seq(Row("STRUCT<`id`: STRING>")))
+        Seq(Row("STRUCT<id: STRING>")))
     }
   }
 
@@ -716,7 +716,7 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession {
         schema_of_json(
           lit("""{"id": "a", "drop": {"drop": null}}"""),
           options.asJava)),
-      Seq(Row("STRUCT<`id`: STRING>")))
+      Seq(Row("STRUCT<id: STRING>")))
 
     // Array of structs
     checkAnswer(
@@ -724,7 +724,7 @@ class JsonFunctionsSuite extends QueryTest with SharedSparkSession {
         schema_of_json(
           lit("""[{"id": "a", "drop": {"drop": null}}]"""),
           options.asJava)),
-      Seq(Row("ARRAY<STRUCT<`id`: STRING>>")))
+      Seq(Row("ARRAY<STRUCT<id: STRING>>")))
 
     // Other types are not affected.
     checkAnswer(
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowCreateTableSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowCreateTableSuiteBase.scala
index 53cdec0..7bc0765 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowCreateTableSuiteBase.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/ShowCreateTableSuiteBase.scala
@@ -51,8 +51,8 @@ trait ShowCreateTableSuiteBase extends QueryTest with DDLCommandTestUtils {
         """.stripMargin)
       val showDDL = getShowCreateDDL(t)
       assert(showDDL(0) == s"CREATE TABLE $fullName (")
-      assert(showDDL(1) == "`a` BIGINT NOT NULL,")
-      assert(showDDL(2) == "`b` BIGINT)")
+      assert(showDDL(1) == "a BIGINT NOT NULL,")
+      assert(showDDL(2) == "b BIGINT)")
       assert(showDDL(3) == s"USING ${classOf[SimpleInsertSource].getName}")
     }
   }
@@ -75,10 +75,10 @@ trait ShowCreateTableSuiteBase extends QueryTest with DDLCommandTestUtils {
       )
       val showDDL = getShowCreateDDL(t)
       assert(showDDL(0) == s"CREATE TABLE $fullName (")
-      assert(showDDL(1) == "`a` STRING,")
-      assert(showDDL(2) == "`b` STRING,")
+      assert(showDDL(1) == "a STRING,")
+      assert(showDDL(2) == "b STRING,")
       assert(showDDL(3) == "`extra col` ARRAY<INT>,")
-      assert(showDDL(4) == "`<another>` STRUCT<`x`: INT, `y`: ARRAY<BOOLEAN>>)")
+      assert(showDDL(4) == "`<another>` STRUCT<x: INT, y: ARRAY<BOOLEAN>>)")
       assert(showDDL(5) == "USING json")
       assert(showDDL(6).startsWith("LOCATION 'file:") && showDDL(6).endsWith("sample.json'"))
     }
@@ -95,7 +95,7 @@ trait ShowCreateTableSuiteBase extends QueryTest with DDLCommandTestUtils {
         """.stripMargin)
       val showDDL = getShowCreateDDL(t)
       assert(showDDL(0) == s"CREATE TABLE $fullName (")
-      assert(showDDL(1) == "`a` STRUCT<`b`: STRING>)")
+      assert(showDDL(1) == "a STRUCT<b: STRING>)")
       assert(showDDL(2) == "USING json")
     }
   }
@@ -119,7 +119,7 @@ trait ShowCreateTableSuiteBase extends QueryTest with DDLCommandTestUtils {
            |)
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `a` STRING) USING json" +
+      val expected = s"CREATE TABLE $fullName ( a STRING) USING json" +
         " OPTIONS ( 'k1' = 'v1', 'k2' = 'v2', 'k3' = 'v3', 'k4' = 'v4', 'k5' = 'v5')" +
         " TBLPROPERTIES ( 'a' = '2', 'b' = '1')"
       assert(getShowCreateDDL(t).mkString(" ") == expected)
@@ -134,7 +134,7 @@ trait ShowCreateTableSuiteBase extends QueryTest with DDLCommandTestUtils {
            |AS SELECT 1 AS a, "foo" AS b
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `a` INT, `b` STRING) USING json"
+      val expected = s"CREATE TABLE $fullName ( a INT, b STRING) USING json"
       assert(getShowCreateDDL(t).mkString(" ") == expected)
     }
   }
@@ -148,7 +148,7 @@ trait ShowCreateTableSuiteBase extends QueryTest with DDLCommandTestUtils {
            |AS SELECT 1 AS a, "foo" AS b
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `a` INT, `b` STRING) USING json PARTITIONED BY (b)"
+      val expected = s"CREATE TABLE $fullName ( a INT, b STRING) USING json PARTITIONED BY (b)"
       assert(getShowCreateDDL(t).mkString(" ") == expected)
     }
   }
@@ -162,7 +162,7 @@ trait ShowCreateTableSuiteBase extends QueryTest with DDLCommandTestUtils {
            |AS SELECT 1 AS a, "foo" AS b, 2.5 AS c
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `a` INT, `b` STRING, `c` DECIMAL(2,1)) USING json" +
+      val expected = s"CREATE TABLE $fullName ( a INT, b STRING, c DECIMAL(2,1)) USING json" +
         s" COMMENT 'This is a comment'"
       assert(getShowCreateDDL(t).mkString(" ") == expected)
     }
@@ -177,7 +177,7 @@ trait ShowCreateTableSuiteBase extends QueryTest with DDLCommandTestUtils {
            |AS SELECT 1 AS a, "foo" AS b, 2.5 AS c
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `a` INT, `b` STRING, `c` DECIMAL(2,1)) USING json" +
+      val expected = s"CREATE TABLE $fullName ( a INT, b STRING, c DECIMAL(2,1)) USING json" +
         s" TBLPROPERTIES ( 'a' = '1')"
       assert(getShowCreateDDL(t).mkString(" ") == expected)
     }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowCreateTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowCreateTableSuite.scala
index 023dfce..1dd5e4a 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowCreateTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/ShowCreateTableSuite.scala
@@ -58,11 +58,11 @@ trait ShowCreateTableSuiteBase extends command.ShowCreateTableSuiteBase
       val showDDL = getShowCreateDDL(t)
       assert(showDDL === Array(
         s"CREATE TABLE $fullName (",
-        "`b` BIGINT,",
-        "`c` BIGINT,",
-        "`extraCol` ARRAY<INT>,",
-        "`<another>` STRUCT<`x`: INT, `y`: ARRAY<BOOLEAN>>,",
-        "`a` BIGINT NOT NULL)",
+        "b BIGINT,",
+        "c BIGINT,",
+        "extraCol ARRAY<INT>,",
+        "`<another>` STRUCT<x: INT, y: ARRAY<BOOLEAN>>,",
+        "a BIGINT NOT NULL)",
         "USING parquet",
         "OPTIONS (",
         "'from' = '0',",
@@ -89,7 +89,7 @@ trait ShowCreateTableSuiteBase extends command.ShowCreateTableSuiteBase
            |AS SELECT 1 AS a, "foo" AS b
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `a` INT, `b` STRING) USING json" +
+      val expected = s"CREATE TABLE $fullName ( a INT, b STRING) USING json" +
         s" CLUSTERED BY (a) INTO 2 BUCKETS"
       assert(getShowCreateDDL(t).mkString(" ") == expected)
     }
@@ -104,7 +104,7 @@ trait ShowCreateTableSuiteBase extends command.ShowCreateTableSuiteBase
            |AS SELECT 1 AS a, "foo" AS b
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `a` INT, `b` STRING) USING json" +
+      val expected = s"CREATE TABLE $fullName ( a INT, b STRING) USING json" +
         s" CLUSTERED BY (a) SORTED BY (b) INTO 2 BUCKETS"
       assert(getShowCreateDDL(t).mkString(" ") == expected)
     }
@@ -120,7 +120,7 @@ trait ShowCreateTableSuiteBase extends command.ShowCreateTableSuiteBase
            |AS SELECT 1 AS a, "foo" AS b, 2.5 AS c
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `a` INT, `b` STRING, `c` DECIMAL(2,1)) USING json" +
+      val expected = s"CREATE TABLE $fullName ( a INT, b STRING, c DECIMAL(2,1)) USING json" +
         s" PARTITIONED BY (c) CLUSTERED BY (a) INTO 2 BUCKETS"
       assert(getShowCreateDDL(t).mkString(" ") == expected)
     }
@@ -136,7 +136,7 @@ trait ShowCreateTableSuiteBase extends command.ShowCreateTableSuiteBase
            |AS SELECT 1 AS a, "foo" AS b, 2.5 AS c
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `a` INT, `b` STRING, `c` DECIMAL(2,1)) USING json" +
+      val expected = s"CREATE TABLE $fullName ( a INT, b STRING, c DECIMAL(2,1)) USING json" +
         s" PARTITIONED BY (c) CLUSTERED BY (a) SORTED BY (b) INTO 2 BUCKETS"
       assert(getShowCreateDDL(t).mkString(" ") == expected)
     }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowCreateTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowCreateTableSuite.scala
index 47e59e9..7c50681 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowCreateTableSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/v2/ShowCreateTableSuite.scala
@@ -51,8 +51,8 @@ class ShowCreateTableSuite extends command.ShowCreateTableSuiteBase with Command
       val showDDL = getShowCreateDDL(t, false)
       assert(showDDL === Array(
         s"CREATE TABLE $t (",
-        "`a` INT,",
-        "`b` STRING)",
+        "a INT,",
+        "b STRING)",
         defaultUsing,
         "PARTITIONED BY (a)",
         "COMMENT 'This is a comment'",
@@ -89,11 +89,11 @@ class ShowCreateTableSuite extends command.ShowCreateTableSuiteBase with Command
       val showDDL = getShowCreateDDL(t, false)
       assert(showDDL === Array(
         s"CREATE TABLE $t (",
-        "`a` BIGINT NOT NULL,",
-        "`b` BIGINT,",
-        "`c` BIGINT,",
-        "`extraCol` ARRAY<INT>,",
-        "`<another>` STRUCT<`x`: INT, `y`: ARRAY<BOOLEAN>>)",
+        "a BIGINT NOT NULL,",
+        "b BIGINT,",
+        "c BIGINT,",
+        "extraCol ARRAY<INT>,",
+        "`<another>` STRUCT<x: INT, y: ARRAY<BOOLEAN>>)",
         defaultUsing,
         "OPTIONS (",
         "'from' = '0',",
@@ -128,9 +128,9 @@ class ShowCreateTableSuite extends command.ShowCreateTableSuiteBase with Command
       val showDDL = getShowCreateDDL(t, false)
       assert(showDDL === Array(
         s"CREATE TABLE $t (",
-        "`a` INT,",
-        "`b` STRING,",
-        "`ts` TIMESTAMP)",
+        "a INT,",
+        "b STRING,",
+        "ts TIMESTAMP)",
         defaultUsing,
         "PARTITIONED BY (a, years(ts), months(ts), days(ts), hours(ts))",
         "CLUSTERED BY (b)",
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowCreateTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowCreateTableSuite.scala
index 58145b0..a7d5e7b 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowCreateTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowCreateTableSuite.scala
@@ -48,7 +48,7 @@ class ShowCreateTableSuite extends v1.ShowCreateTableSuiteBase with CommandSuite
            |)
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `c1` INT COMMENT 'bla', `c2` STRING)" +
+      val expected = s"CREATE TABLE $fullName ( c1 INT COMMENT 'bla', c2 STRING)" +
         " ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'" +
         " WITH SERDEPROPERTIES ( 'serialization.format' = '1')" +
         " STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'" +
@@ -73,7 +73,7 @@ class ShowCreateTableSuite extends v1.ShowCreateTableSuiteBase with CommandSuite
              |)
            """.stripMargin
         )
-        val expected = s"CREATE EXTERNAL TABLE $fullName ( `c1` INT COMMENT 'bla', `c2` STRING)" +
+        val expected = s"CREATE EXTERNAL TABLE $fullName ( c1 INT COMMENT 'bla', c2 STRING)" +
           s" ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'" +
           s" WITH SERDEPROPERTIES ( 'serialization.format' = '1')" +
           s" STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'" +
@@ -100,8 +100,8 @@ class ShowCreateTableSuite extends v1.ShowCreateTableSuiteBase with CommandSuite
            |)
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `c1` INT COMMENT 'bla', `c2` STRING)" +
-        " COMMENT 'bla' PARTITIONED BY (`p1` BIGINT COMMENT 'bla', `p2` STRING)" +
+      val expected = s"CREATE TABLE $fullName ( c1 INT COMMENT 'bla', c2 STRING)" +
+        " COMMENT 'bla' PARTITIONED BY (p1 BIGINT COMMENT 'bla', p2 STRING)" +
         " ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'" +
         " WITH SERDEPROPERTIES ( 'serialization.format' = '1')" +
         " STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat'" +
@@ -124,7 +124,7 @@ class ShowCreateTableSuite extends v1.ShowCreateTableSuiteBase with CommandSuite
            |NULL DEFINED AS 'NaN'
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `c1` INT COMMENT 'bla', `c2` STRING)" +
+      val expected = s"CREATE TABLE $fullName ( c1 INT COMMENT 'bla', c2 STRING)" +
         " ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'" +
         " WITH SERDEPROPERTIES (" +
         " 'colelction.delim' = '@'," +
@@ -148,7 +148,7 @@ class ShowCreateTableSuite extends v1.ShowCreateTableSuiteBase with CommandSuite
            |STORED AS PARQUET
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `c1` INT COMMENT 'bla', `c2` STRING)" +
+      val expected = s"CREATE TABLE $fullName ( c1 INT COMMENT 'bla', c2 STRING)" +
         " ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'" +
         " WITH SERDEPROPERTIES ( 'serialization.format' = '1')" +
         " STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat'" +
@@ -175,7 +175,7 @@ class ShowCreateTableSuite extends v1.ShowCreateTableSuiteBase with CommandSuite
            |  OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat'
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `c1` INT COMMENT 'bla', `c2` STRING)" +
+      val expected = s"CREATE TABLE $fullName ( c1 INT COMMENT 'bla', c2 STRING)" +
         " ROW FORMAT SERDE 'org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe'" +
         " WITH SERDEPROPERTIES (" +
         " 'mapkey.delim' = ','," +
@@ -197,7 +197,7 @@ class ShowCreateTableSuite extends v1.ShowCreateTableSuiteBase with CommandSuite
            |INTO 2 BUCKETS
          """.stripMargin
       )
-      val expected = s"CREATE TABLE $fullName ( `a` INT, `b` STRING)" +
+      val expected = s"CREATE TABLE $fullName ( a INT, b STRING)" +
         " CLUSTERED BY (a) SORTED BY (b ASC) INTO 2 BUCKETS" +
         " ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'" +
         " WITH SERDEPROPERTIES ( 'serialization.format' = '1')" +
