Posted to issues@spark.apache.org by "Sean Owen (JIRA)" <ji...@apache.org> on 2019/03/01 20:58:00 UTC

[jira] [Updated] (SPARK-27011) reset command fails after cache table

     [ https://issues.apache.org/jira/browse/SPARK-27011?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Sean Owen updated SPARK-27011:
------------------------------
    Priority: Minor  (was: Critical)

> reset command fails after cache table
> -------------------------------------
>
>                 Key: SPARK-27011
>                 URL: https://issues.apache.org/jira/browse/SPARK-27011
>             Project: Spark
>          Issue Type: Bug
>          Components: Spark Core, SQL
>    Affects Versions: 2.3.3, 2.4.0, 3.0.0
>            Reporter: Ajith S
>            Priority: Minor
>
>  
> h3. Commands to reproduce 
> spark-sql> create table abcde ( a int);
> spark-sql> reset; // works successfully
> spark-sql> cache table abcde;
> spark-sql> reset; // fails with the exception below
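> The same sequence should hit the identical code path through the SparkSession API, since the failure happens inside Dataset construction (Dataset.<init> in the stack below) rather than in anything CLI-specific. A minimal, untested sketch (assuming a running session named spark, e.g. in spark-shell):
>
>   spark.sql("create table abcde (a int)")
>   spark.sql("reset")              // succeeds
>   spark.sql("cache table abcde")  // puts an entry into the CacheManager
>   spark.sql("reset")              // expected to fail once the cache is non-empty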
> h3. Below is the stack trace
> org.apache.spark.sql.catalyst.errors.package$TreeNodeException: makeCopy, tree:
> ResetCommand$
>  at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:56)
>  at org.apache.spark.sql.catalyst.trees.TreeNode.makeCopy(TreeNode.scala:379)
>  at org.apache.spark.sql.catalyst.plans.QueryPlan.canonicalized$lzycompute(QueryPlan.scala:216)
>  at org.apache.spark.sql.catalyst.plans.QueryPlan.canonicalized(QueryPlan.scala:211)
>  at org.apache.spark.sql.catalyst.plans.QueryPlan.sameResult(QueryPlan.scala:259)
>  at org.apache.spark.sql.execution.CacheManager.$anonfun$lookupCachedData$3(CacheManager.scala:236)
>  at org.apache.spark.sql.execution.CacheManager.$anonfun$lookupCachedData$3$adapted(CacheManager.scala:236)
>  at scala.collection.Iterator.find(Iterator.scala:993)
>  at scala.collection.Iterator.find$(Iterator.scala:990)
>  at scala.collection.AbstractIterator.find(Iterator.scala:1429)
>  at scala.collection.IterableLike.find(IterableLike.scala:81)
>  at scala.collection.IterableLike.find$(IterableLike.scala:80)
>  at scala.collection.AbstractIterable.find(Iterable.scala:56)
>  at org.apache.spark.sql.execution.CacheManager.$anonfun$lookupCachedData$2(CacheManager.scala:236)
>  at org.apache.spark.sql.execution.CacheManager.readLock(CacheManager.scala:59)
>  at org.apache.spark.sql.execution.CacheManager.lookupCachedData(CacheManager.scala:236)
>  at org.apache.spark.sql.execution.CacheManager$$anonfun$1.applyOrElse(CacheManager.scala:250)
>  at org.apache.spark.sql.execution.CacheManager$$anonfun$1.applyOrElse(CacheManager.scala:241)
>  at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$transformDown$1(TreeNode.scala:258)
>  at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:72)
>  at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:258)
>  at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.org$apache$spark$sql$catalyst$plans$logical$AnalysisHelper$$super$transformDown(LogicalPlan.scala:29)
>  at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown(AnalysisHelper.scala:149)
>  at org.apache.spark.sql.catalyst.plans.logical.AnalysisHelper.transformDown$(AnalysisHelper.scala:147)
>  at org.apache.spark.sql.catalyst.plans.logical.LogicalPlan.transformDown(LogicalPlan.scala:29)
>  at org.apache.spark.sql.execution.CacheManager.useCachedData(CacheManager.scala:241)
>  at org.apache.spark.sql.execution.QueryExecution.withCachedData$lzycompute(QueryExecution.scala:68)
>  at org.apache.spark.sql.execution.QueryExecution.withCachedData(QueryExecution.scala:65)
>  at org.apache.spark.sql.execution.QueryExecution.$anonfun$optimizedPlan$1(QueryExecution.scala:72)
>  at org.apache.spark.sql.catalyst.QueryPlanningTracker.measurePhase(QueryPlanningTracker.scala:111)
>  at org.apache.spark.sql.execution.QueryExecution.optimizedPlan$lzycompute(QueryExecution.scala:72)
>  at org.apache.spark.sql.execution.QueryExecution.optimizedPlan(QueryExecution.scala:71)
>  at org.apache.spark.sql.execution.QueryExecution.$anonfun$writePlans$4(QueryExecution.scala:139)
>  at org.apache.spark.sql.catalyst.plans.QueryPlan$.append(QueryPlan.scala:316)
>  at org.apache.spark.sql.execution.QueryExecution.org$apache$spark$sql$execution$QueryExecution$$writePlans(QueryExecution.scala:139)
>  at org.apache.spark.sql.execution.QueryExecution.toString(QueryExecution.scala:146)
>  at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:82)
>  at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:147)
>  at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:74)
>  at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3346)
>  at org.apache.spark.sql.Dataset.<init>(Dataset.scala:203)
>  at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:88)
>  at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:656)
>  at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:685)
>  at org.apache.spark.sql.hive.thriftserver.SparkSQLDriver.run(SparkSQLDriver.scala:63)
>  at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.processCmd(SparkSQLCLIDriver.scala:372)
>  at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:376)
>  at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver$.main(SparkSQLCLIDriver.scala:275)
>  at org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver.main(SparkSQLCLIDriver.scala)
>  at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>  at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
>  at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>  at java.lang.reflect.Method.invoke(Method.java:498)
>  at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
>  at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:847)
>  at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:162)
>  at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:185)
>  at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:87)
>  at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:926)
>  at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:935)
>  at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
> Caused by: java.lang.RuntimeException: No valid constructor for ResetCommand$
>  at scala.sys.package$.error(package.scala:30)
>  at org.apache.spark.sql.catalyst.trees.TreeNode.$anonfun$makeCopy$1(TreeNode.scala:383)
>  at org.apache.spark.sql.catalyst.errors.package$.attachTree(package.scala:52)
>  ... 60 more
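> The "Caused by: No valid constructor for ResetCommand$" line points at the mechanism: TreeNode.makeCopy rebuilds a plan node reflectively through a constructor that takes the node's arguments, and ResetCommand appears to be a Scala case object (note the trailing $ in the class name), whose class offers no such constructor. A standalone, illustrative sketch (not Spark code; the object name is invented):
>
>   case object ResetCommandLike  // stands in for ResetCommand$
>   // A case object compiles to a class whose only constructor is private and zero-arg,
>   // so a reflective copy finds nothing it could call with arguments:
>   val usable = ResetCommandLike.getClass.getConstructors.filter(_.getParameterCount != 0)
>   println(usable.length)  // 0
>
> Canonicalization inside CacheManager.lookupCachedData only runs when cachedData has entries to compare against, which is why RESET succeeds before CACHE TABLE and fails after it.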



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)

---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscribe@spark.apache.org
For additional commands, e-mail: issues-help@spark.apache.org