Posted to issues@spark.apache.org by "Patrick Wendell (JIRA)" <ji...@apache.org> on 2014/09/04 04:00:54 UTC

[jira] [Updated] (SPARK-3349) [Spark SQL] Incorrect partitioning after LIMIT operator

     [ https://issues.apache.org/jira/browse/SPARK-3349?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Patrick Wendell updated SPARK-3349:
-----------------------------------
    Component/s: SQL

> [Spark SQL] Incorrect partitioning after LIMIT operator
> -------------------------------------------------------
>
>                 Key: SPARK-3349
>                 URL: https://issues.apache.org/jira/browse/SPARK-3349
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>            Reporter: Eric Liang
>
> Reproduced by the following example (run in a spark-shell with an SQLContext in scope, against a table named "sales"):
> import org.apache.spark.sql.catalyst.plans.Inner
> import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
> import org.apache.spark.sql.catalyst.expressions._
> val series = sql("select distinct year from sales order by year asc limit 10")
> val results = sql("select * from sales")
> series.registerTempTable("series")
> results.registerTempTable("results")
> sql("select * from results inner join series where results.year = series.year").count
> -----------
> java.lang.IllegalArgumentException: Can't zip RDDs with unequal numbers of partitions
> 	at org.apache.spark.rdd.ZippedPartitionsBaseRDD.getPartitions(ZippedPartitionsRDD.scala:56)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:204)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:202)
> 	at scala.Option.getOrElse(Option.scala:120)
> 	at org.apache.spark.rdd.RDD.partitions(RDD.scala:202)
> 	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:204)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:202)
> 	at scala.Option.getOrElse(Option.scala:120)
> 	at org.apache.spark.rdd.RDD.partitions(RDD.scala:202)
> 	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:204)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:202)
> 	at scala.Option.getOrElse(Option.scala:120)
> 	at org.apache.spark.rdd.RDD.partitions(RDD.scala:202)
> 	at org.apache.spark.rdd.MapPartitionsRDD.getPartitions(MapPartitionsRDD.scala:32)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:204)
> 	at org.apache.spark.rdd.RDD$$anonfun$partitions$2.apply(RDD.scala:202)
> 	at scala.Option.getOrElse(Option.scala:120)
> 	at org.apache.spark.rdd.RDD.partitions(RDD.scala:202)
> 	at org.apache.spark.ShuffleDependency.<init>(Dependency.scala:79)
> 	at org.apache.spark.rdd.ShuffledRDD.getDependencies(ShuffledRDD.scala:80)
> 	at org.apache.spark.rdd.RDD$$anonfun$dependencies$2.apply(RDD.scala:191)
> 	at org.apache.spark.rdd.RDD$$anonfun$dependencies$2.apply(RDD.scala:189)
> 	at scala.Option.getOrElse(Option.scala:120)
> 	at org.apache.spark.rdd.RDD.dependencies(RDD.scala:189)
> 	at org.apache.spark.scheduler.DAGScheduler.visit$1(DAGScheduler.scala:298)
> 	at org.apache.spark.scheduler.DAGScheduler.getParentStages(DAGScheduler.scala:310)
> 	at org.apache.spark.scheduler.DAGScheduler.newStage(DAGScheduler.scala:246)
> 	at org.apache.spark.scheduler.DAGScheduler.handleJobSubmitted(DAGScheduler.scala:723)
> 	at org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive$2.applyOrElse(DAGScheduler.scala:1333)
> 	at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
> 	at akka.actor.ActorCell.invoke(ActorCell.scala:456)
> 	at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
> 	at akka.dispatch.Mailbox.run(Mailbox.scala:219)
> 	at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
> 	at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
> 	at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
> 	at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
> 	at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
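>
> Some context on the failure: ZippedPartitionsBaseRDD.getPartitions (the first frame above) requires every zipped RDD to have the same number of partitions, and the shuffled hash join operator zips the partitions of its two children. Per the issue title, the partitioning reported after the LIMIT operator does not match the RDD it actually produces, so the planner can omit a needed exchange and the zip then sees mismatched partition counts. A minimal standalone sketch of just that partition-count check, independent of Spark SQL (variable names here are illustrative):
>
> // In spark-shell, where sc is predefined: two RDDs with different partition counts.
> val left  = sc.parallelize(1 to 10, 4)
> val right = sc.parallelize(1 to 10, 2)
> // Computing the zipped RDD's partitions throws:
> //   java.lang.IllegalArgumentException: Can't zip RDDs with unequal numbers of partitions
> left.zip(right).count()
>
> A possible interim workaround, offered as an untested sketch (assumes a Spark 1.1 SQLContext in scope as sqlContext, with its sql method imported as the repro implies): rebuild the limited result through applySchema, so the planner makes no assumption about its partitioning and inserts its own exchange before the join:
>
> val series = sql("select distinct year from sales order by year asc limit 10")
> // repartition yields a raw RDD[Row]; applySchema re-attaches the schema.
> val rebuilt = sqlContext.applySchema(series.repartition(1), series.schema)
> rebuilt.registerTempTable("series")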



--
This message was sent by Atlassian JIRA
(v6.3.4#6332)
