Posted to issues@carbondata.apache.org by "sridevi kaup (JIRA)" <ji...@apache.org> on 2018/02/09 09:32:00 UTC
[jira] [Updated] (CARBONDATA-2141) Exception is displayed while deleting a table in Spark 2.2
[ https://issues.apache.org/jira/browse/CARBONDATA-2141?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
sridevi kaup updated CARBONDATA-2141:
-------------------------------------
Attachment: table_deletion.png
> Exception is displayed while deleting a table in Spark 2.2
> ----------------------------------------------------------
>
> Key: CARBONDATA-2141
> URL: https://issues.apache.org/jira/browse/CARBONDATA-2141
> Project: CarbonData
> Issue Type: Bug
> Components: data-query
> Affects Versions: 1.3.0
> Environment: Spark 2.2, Hadoop 2.7
> Reporter: Vandana Yadav
> Priority: Minor
> Attachments: table_deletion.png
>
>
> Exception is displayed while deleting a table in Spark 2.2
> Steps to reproduce:
> 1) Start spark-shell
> ./spark-shell --jars /opt/spark/spark-2.2.1/carbonlib/carbondata_2.11-1.3.0-SNAPSHOT-shade-hadoop2.7.2.jar
> 2) Import the required classes and create the Spark/Carbon session:
> a) import org.apache.spark.sql.SparkSession
> b) import org.apache.spark.sql.CarbonSession._
> c) val carbon = SparkSession.builder().config(sc.getConf).getOrCreateCarbonSession("hdfs://localhost:54310/newCarbonStore", "/tmp")
> 3) Execute the drop table query (a consolidated, pasteable version of all three steps follows):
> carbon.sql("drop table if exists stream_table99")
> Expected Result: the table should be deleted if it exists; because IF EXISTS is specified, nothing should be logged when it does not.
> Actual Result: the statement itself completes (see res9 below), but an AnalysisException stack trace is logged at WARN level:
> 18/02/07 14:47:17 WARN DropTableCommand: org.apache.spark.sql.AnalysisException: Table or view not found: stream_table99;
> org.apache.spark.sql.AnalysisException: Table or view not found: stream_table99;
> at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
> at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.org$apache$spark$sql$catalyst$analysis$Analyzer$ResolveRelations$$lookupTableFromCatalog(Analyzer.scala:649)
> at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.resolveRelation(Analyzer.scala:601)
> at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$8.applyOrElse(Analyzer.scala:631)
> at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$$anonfun$apply$8.applyOrElse(Analyzer.scala:624)
> at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:62)
> at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan$$anonfun$resolveOperators$1.apply(LogicalPlan.scala:62)
> at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
> at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.resolveOperators(LogicalPlan.scala:61)
> at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:624)
> at org.apache.spark.sql.catalyst.analysis.Analyzer$ResolveRelations$.apply(Analyzer.scala:570)
> at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:85)
> at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:82)
> at scala.collection.LinearSeqOptimized$class.foldLeft(LinearSeqOptimized.scala:124)
> at scala.collection.immutable.List.foldLeft(List.scala:84)
> at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:82)
> at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:74)
> at scala.collection.immutable.List.foreach(List.scala:381)
> at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:74)
> at org.apache.spark.sql.hive.CarbonAnalyzer.execute(CarbonSessionState.scala:187)
> at org.apache.spark.sql.hive.CarbonAnalyzer.execute(CarbonSessionState.scala:182)
> at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:69)
> at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:67)
> at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:50)
> at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:67)
> at org.apache.spark.sql.SparkSession.table(SparkSession.scala:618)
> at org.apache.spark.sql.execution.command.DropTableCommand.run(ddl.scala:203)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:58)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:56)
> at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:67)
> at org.apache.spark.sql.Dataset.<init>(Dataset.scala:183)
> at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:68)
> at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:632)
> at $line33.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:31)
> at $line33.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:36)
> at $line33.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:38)
> at $line33.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:40)
> at $line33.$read$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:42)
> at $line33.$read$$iw$$iw$$iw$$iw$$iw.<init>(<console>:44)
> at $line33.$read$$iw$$iw$$iw$$iw.<init>(<console>:46)
> at $line33.$read$$iw$$iw$$iw.<init>(<console>:48)
> at $line33.$read$$iw$$iw.<init>(<console>:50)
> at $line33.$read$$iw.<init>(<console>:52)
> at $line33.$read.<init>(<console>:54)
> at $line33.$read$.<init>(<console>:58)
> at $line33.$read$.<clinit>(<console>)
> at $line33.$eval$.$print$lzycompute(<console>:7)
> at $line33.$eval$.$print(<console>:6)
> at $line33.$eval.$print(<console>)
> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> at java.lang.reflect.Method.invoke(Method.java:497)
> at scala.tools.nsc.interpreter.IMain$ReadEvalPrint.call(IMain.scala:786)
> at scala.tools.nsc.interpreter.IMain$Request.loadAndRun(IMain.scala:1047)
> at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:638)
> at scala.tools.nsc.interpreter.IMain$WrappedRequest$$anonfun$loadAndRunReq$1.apply(IMain.scala:637)
> at scala.reflect.internal.util.ScalaClassLoader$class.asContext(ScalaClassLoader.scala:31)
> at scala.reflect.internal.util.AbstractFileClassLoader.asContext(AbstractFileClassLoader.scala:19)
> at scala.tools.nsc.interpreter.IMain$WrappedRequest.loadAndRunReq(IMain.scala:637)
> at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:569)
> at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:565)
> at scala.tools.nsc.interpreter.ILoop.interpretStartingWith(ILoop.scala:807)
> at scala.tools.nsc.interpreter.ILoop.command(ILoop.scala:681)
> at scala.tools.nsc.interpreter.ILoop.processLine(ILoop.scala:395)
> at scala.tools.nsc.interpreter.ILoop.loop(ILoop.scala:415)
> at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply$mcZ$sp(ILoop.scala:923)
> at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply(ILoop.scala:909)
> at scala.tools.nsc.interpreter.ILoop$$anonfun$process$1.apply(ILoop.scala:909)
> at scala.reflect.internal.util.ScalaClassLoader$.savingContextLoader(ScalaClassLoader.scala:97)
> at scala.tools.nsc.interpreter.ILoop.process(ILoop.scala:909)
> at org.apache.spark.repl.Main$.doMain(Main.scala:74)
> at org.apache.spark.repl.Main$.main(Main.scala:54)
> at org.apache.spark.repl.Main.main(Main.scala)
> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> at java.lang.reflect.Method.invoke(Method.java:497)
> at org.apache.spark.deploy.SparkSubmit$.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:775)
> at org.apache.spark.deploy.SparkSubmit$.doRunMain$1(SparkSubmit.scala:180)
> at org.apache.spark.deploy.SparkSubmit$.submit(SparkSubmit.scala:205)
> at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:119)
> at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
> res9: org.apache.spark.sql.DataFrame = []
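> The DROP itself succeeds (res9 above is an empty DataFrame), so the stack trace is noise rather than a failure. Until the warning is fixed, one possible workaround sketch, assuming the carbon session from step 2, is to check the catalog first so DropTableCommand never looks up a missing table:
>
> // tableExists is part of the standard Spark 2.x Catalog API.
> if (carbon.catalog.tableExists("stream_table99")) {
>   carbon.sql("drop table stream_table99")
> }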