Posted to issues@spark.apache.org by "angerszhu (Jira)" <ji...@apache.org> on 2022/06/20 06:01:00 UTC

[jira] [Created] (SPARK-39527) V2Catalog rename does not support newIdent with catalog

angerszhu created SPARK-39527:
---------------------------------

             Summary: V2Catalog rename does not support newIdent with catalog
                 Key: SPARK-39527
                 URL: https://issues.apache.org/jira/browse/SPARK-39527
             Project: Spark
          Issue Type: Bug
          Components: SQL
    Affects Versions: 3.3.0
            Reporter: angerszhu


Renaming a table through a V2 catalog fails when the new identifier is qualified with the catalog name: judging from the error below, the catalog name is carried into the new identifier's namespace instead of being stripped. The following test reproduces the failure:
{code:java}
  // Rename within the h2 catalog, with the new name fully qualified as
  // catalog.namespace.table; the renamed table should then be listed
  // under h2.test.
  test("rename a table") {
    sql("ALTER TABLE h2.test.empty_table RENAME TO h2.test.empty_table2")
    checkAnswer(
      sql("SHOW TABLES IN h2.test"),
      Seq(Row("test", "empty_table2")))
  }
{code}
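
Note, as an assumption based on the summary (rename "does not support newIdent with catalog"): the same rename presumably succeeds when the RENAME TO target omits the catalog qualifier, for example:

{code:java}
// Hypothetical variant, not from the reporter's suite: with the target
// written as namespace.table, the new identifier stays inside the h2
// catalog and the rename is expected to go through.
sql("ALTER TABLE h2.test.empty_table RENAME TO test.empty_table2")
{code}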


{code:java}
[info] - rename a table *** FAILED *** (2 seconds, 358 milliseconds)
[info]   org.apache.spark.sql.catalyst.analysis.NoSuchNamespaceException: Failed table renaming from test.empty_table to h2.test.empty_table2
[info]   at org.apache.spark.sql.jdbc.H2Dialect$.classifyException(H2Dialect.scala:117)
[info]   at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.classifyException(JdbcUtils.scala:1176)
[info]   at org.apache.spark.sql.execution.datasources.v2.jdbc.JDBCTableCatalog.$anonfun$renameTable$1(JDBCTableCatalog.scala:102)
[info]   at org.apache.spark.sql.execution.datasources.v2.jdbc.JDBCTableCatalog.$anonfun$renameTable$1$adapted(JDBCTableCatalog.scala:100)
[info]   at org.apache.spark.sql.execution.datasources.jdbc.JdbcUtils$.withConnection(JdbcUtils.scala:1184)
[info]   at org.apache.spark.sql.execution.datasources.v2.jdbc.JDBCTableCatalog.renameTable(JDBCTableCatalog.scala:100)
[info]   at org.apache.spark.sql.execution.datasources.v2.RenameTableExec.run(RenameTableExec.scala:51)
[info]   at org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result$lzycompute(V2CommandExec.scala:43)
[info]   at org.apache.spark.sql.execution.datasources.v2.V2CommandExec.result(V2CommandExec.scala:43)
[info]   at org.apache.spark.sql.execution.datasources.v2.V2CommandExec.executeCollect(V2CommandExec.scala:49)
[info]   at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.$anonfun$applyOrElse$1(QueryExecution.scala:98)
[info]   at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:111)
[info]   at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:171)
[info]   at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:95)
[info]   at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779)
[info]   at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
[info]   at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:98)
[info]   at org.apache.spark.sql.execution.QueryExecution$$anonfun$eagerlyExecuteCommands$1.applyOrElse(QueryExecution.scala:94)
[info]   at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDownWithPruning$1(TreeNode.scala:584)
[info]   at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:176)
[info]   at org.apache.spark.sql.catalyst.trees.TreeNode.transformDownWithPruning(TreeNode.scala:584)
[info]   at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDownWithPruning(LogicalPlan.scala:30)
[info]   at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning(AnalysisHelper.scala:267)
[info]   at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDownWithPruning$(AnalysisHelper.scala:263)
[info]   at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:30)
[info]   at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDownWithPruning(LogicalPlan.scala:30)
[info]   at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:560)
[info]   at org.apache.spark.sql.execution.QueryExecution.eagerlyExecuteCommands(QueryExecution.scala:94)
[info]   at org.apache.spark.sql.execution.QueryExecution.commandExecuted$lzycompute(QueryExecution.scala:81)
[info]   at org.apache.spark.sql.execution.QueryExecution.commandExecuted(QueryExecution.scala:79)
[info]   at org.apache.spark.sql.Dataset.<init>(Dataset.scala:220)
[info]   at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
{code}
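
The failure appears to come from the catalog name surviving into the new identifier's namespace: the message reports the target as h2.test.empty_table2 rather than test.empty_table2, so H2 is asked for a namespace that does not exist. A minimal sketch of the normalization that seems to be missing, assuming the multi-part name has already been parsed into a Seq[String]; toIdentifier here is a hypothetical helper for illustration, not actual Spark code:

{code:java}
import org.apache.spark.sql.connector.catalog.Identifier

// Hypothetical helper: if the RENAME TO target is qualified with the
// current catalog's own name, drop that leading part before building
// the Identifier handed to TableCatalog.renameTable.
def toIdentifier(catalogName: String, nameParts: Seq[String]): Identifier = {
  val parts =
    if (nameParts.length > 2 && nameParts.head == catalogName) nameParts.tail
    else nameParts
  Identifier.of(parts.init.toArray, parts.last)
}

// toIdentifier("h2", Seq("h2", "test", "empty_table2"))
//   => namespace ["test"], name "empty_table2"
{code}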




