Posted to commits@hudi.apache.org by "dohongdayi (Jira)" <ji...@apache.org> on 2022/09/21 06:32:00 UTC
[jira] [Resolved] (HUDI-4875) NoSuchTableException is thrown while dropping a temporary view after applying HoodieSparkSessionExtension in Spark 3.2
[ https://issues.apache.org/jira/browse/HUDI-4875?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
dohongdayi resolved HUDI-4875.
------------------------------
> NoSuchTableException is thrown while dropping a temporary view after applying HoodieSparkSessionExtension in Spark 3.2
> ----------------------------------------------------------------------------------------------------------------------
>
> Key: HUDI-4875
> URL: https://issues.apache.org/jira/browse/HUDI-4875
> Project: Apache Hudi
> Issue Type: Bug
> Affects Versions: 0.11.1
> Environment: Spark 3.2.2
> Reporter: dohongdayi
> Priority: Major
> Labels: pull-request-available
> Fix For: 0.12.1
>
>
> NoSuchTableException is thrown while dropping a temporary view after applying HoodieSparkSessionExtension in Spark 3.2:
> {code:java}
> org.apache.spark.sql.catalyst.analysis.NoSuchTableException: Table or view 'test_view' not found in database 'default'
> at org.apache.spark.sql.catalyst.catalog.SessionCatalog.requireTableExists(SessionCatalog.scala:225)
> at org.apache.spark.sql.catalyst.catalog.SessionCatalog.getTableRawMetadata(SessionCatalog.scala:516)
> at org.apache.spark.sql.catalyst.catalog.SessionCatalog.getTableMetadata(SessionCatalog.scala:502)
> at org.apache.spark.sql.hudi.SparkAdapter.isHoodieTable(SparkAdapter.scala:160)
> at org.apache.spark.sql.hudi.SparkAdapter.isHoodieTable$(SparkAdapter.scala:159)
> at org.apache.spark.sql.adapter.BaseSpark3Adapter.isHoodieTable(BaseSpark3Adapter.scala:45)
> at org.apache.spark.sql.hudi.analysis.HoodiePostAnalysisRule.apply(HoodieAnalysis.scala:539)
> at org.apache.spark.sql.hudi.analysis.HoodiePostAnalysisRule.apply(HoodieAnalysis.scala:530)
> at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$2(RuleExecutor.scala:211)
> at scala.collection.LinearSeqOptimized.foldLeft(LinearSeqOptimized.scala:126)
> at scala.collection.LinearSeqOptimized.foldLeft$(LinearSeqOptimized.scala:122)
> at scala.collection.immutable.List.foldLeft(List.scala:91)
> at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1(RuleExecutor.scala:208)
> at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$execute$1$adapted(RuleExecutor.scala:200)
> at scala.collection.immutable.List.foreach(List.scala:431)
> at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:200)
> at org.apache.spark.sql.catalyst.analysis.Analyzer.org$apache$spark$sql$catalyst$analysis$Analyzer$$executeSameContext(Analyzer.scala:222)
> at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$execute$1(Analyzer.scala:218)
> at org.apache.spark.sql.catalyst.analysis.AnalysisContext$.withNewAnalysisContext(Analyzer.scala:167)
> at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:218)
> at org.apache.spark.sql.catalyst.analysis.Analyzer.execute(Analyzer.scala:182)
> at org.apache.spark.sql.catalyst.rules.RuleExecutor.$anonfun$executeAndTrack$1(RuleExecutor.scala:179)
> at org.apache.spark.sql.catalyst.QueryPlanningTracker$.withTracker(QueryPlanningTracker.scala:88)
> at org.apache.spark.sql.catalyst.rules.RuleExecutor.executeAndTrack(RuleExecutor.scala:179)
> at org.apache.spark.sql.catalyst.analysis.Analyzer.$anonfun$executeAndCheck$1(Analyzer.scala:203)
> at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper$.markInAnalyzer(AnalysisHelper.scala:330)
> at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:202)
> at org.apache.spark.sql.execution.QueryExecution.$anonfun$analyzed$1(QueryExecution.scala:75)
> at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:111)
> at org.apache.spark.sql.execution.QueryExecution.$anonfun$executePhase$1(QueryExecution.scala:183)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:788)
> at org.apache.spark.sql.execution.QueryExecution.executePhase(QueryExecution.scala:183)
> at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:75)
> at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:73)
> at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:629)
> at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:788)
> at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:620)
> ... 51 elided {code}
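> A minimal reproduction is sketched below. The session setup is assumed (only the view name 'test_view', the extension class, and the affected versions come from this report). The stack trace suggests the post-analysis rule looks the view up as a catalog table via getTableMetadata, which fails because a temporary view is not a table in the 'default' database:
> {code:scala}
> import org.apache.spark.sql.SparkSession
>
> // Spark 3.2.2 session with the Hudi extension enabled (assumed minimal setup).
> val spark = SparkSession.builder()
>   .master("local[*]")
>   .config("spark.sql.extensions", "org.apache.spark.sql.hudi.HoodieSparkSessionExtension")
>   .getOrCreate()
>
> // Register a temporary view; "test_view" matches the name in the stack trace above.
> spark.range(10).createOrReplaceTempView("test_view")
>
> // Expected: the temporary view is dropped.
> // Actual on Hudi 0.11.1 with Spark 3.2.2:
> //   org.apache.spark.sql.catalyst.analysis.NoSuchTableException:
> //   Table or view 'test_view' not found in database 'default'
> spark.sql("DROP VIEW test_view")
> {code}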
--
This message was sent by Atlassian Jira
(v8.20.10#820010)