Posted to issues@spark.apache.org by "Michael Armbrust (JIRA)" <ji...@apache.org> on 2015/08/11 20:22:48 UTC

[jira] [Created] (SPARK-9831) TPCDS Q73 Fails

Michael Armbrust created SPARK-9831:
---------------------------------------

             Summary: TPCDS Q73 Fails
                 Key: SPARK-9831
                 URL: https://issues.apache.org/jira/browse/SPARK-9831
             Project: Spark
          Issue Type: Sub-task
          Components: SQL
            Reporter: Michael Armbrust
            Priority: Blocker


{code}
select c_last_name
      ,c_first_name
      ,c_salutation
      ,c_preferred_cust_flag
      ,ss_ticket_number
      ,cnt
from
  (select ss_ticket_number
         ,ss_customer_sk
         ,count(*) cnt
   from store_sales
   JOIN date_dim ON store_sales.ss_sold_date_sk = date_dim.d_date_sk
   JOIN store ON store_sales.ss_store_sk = store.s_store_sk
   JOIN household_demographics ON store_sales.ss_hdemo_sk = household_demographics.hd_demo_sk
   where date_dim.d_dom between 1 and 2
     and (household_demographics.hd_buy_potential = '>10000' or
          household_demographics.hd_buy_potential = 'unknown')
     and household_demographics.hd_vehicle_count > 0
     and case when household_demographics.hd_vehicle_count > 0
           then household_demographics.hd_dep_count / household_demographics.hd_vehicle_count
           else null end > 1
     and date_dim.d_year in (1998, 1998+1, 1998+2)
     and store.s_county in ('Williamson County','Williamson County','Williamson County','Williamson County')
   group by ss_ticket_number, ss_customer_sk) dj
JOIN customer ON dj.ss_customer_sk = customer.c_customer_sk
where cnt between 5 and 10
order by cnt desc
{code}
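
Executing this query fails while the executors deserialize the broadcast hash relation for one of the joins: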

{code}
Job aborted due to stage failure: Task 63 in stage 57.0 failed 4 times, most recent failure: Lost task 63.3 in stage 57.0 (TID 5103, 10.0.197.102): java.io.IOException: java.lang.IllegalArgumentException: Initial capacity must be greater than 0
	at org.apache.spark.util.Utils$.tryOrIOException(Utils.scala:1264)
	at org.apache.spark.sql.execution.joins.UnsafeHashedRelation.readExternal(HashedRelation.scala:280)
	at java.io.ObjectInputStream.readExternalData(ObjectInputStream.java:1837)
	at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:1796)
	at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1350)
	at java.io.ObjectInputStream.readObject(ObjectInputStream.java:370)
	at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:72)
	at org.apache.spark.broadcast.TorrentBroadcast$.unBlockifyObject(TorrentBroadcast.scala:217)
	at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$readBroadcastBlock$1.apply(TorrentBroadcast.scala:178)
	at org.apache.spark.util.Utils$.tryOrIOException(Utils.scala:1276)
	at org.apache.spark.broadcast.TorrentBroadcast.readBroadcastBlock(TorrentBroadcast.scala:165)
	at org.apache.spark.broadcast.TorrentBroadcast._value$lzycompute(TorrentBroadcast.scala:64)
	at org.apache.spark.broadcast.TorrentBroadcast._value(TorrentBroadcast.scala:64)
	at org.apache.spark.broadcast.TorrentBroadcast.getValue(TorrentBroadcast.scala:88)
	at org.apache.spark.broadcast.Broadcast.value(Broadcast.scala:70)
	at org.apache.spark.sql.execution.joins.BroadcastHashJoin$$anonfun$2.apply(BroadcastHashJoin.scala:91)
	at org.apache.spark.sql.execution.joins.BroadcastHashJoin$$anonfun$2.apply(BroadcastHashJoin.scala:90)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:706)
	at org.apache.spark.rdd.RDD$$anonfun$mapPartitions$1$$anonfun$apply$17.apply(RDD.scala:706)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:297)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
	at org.apache.spark.scheduler.Task.run(Task.scala:88)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.IllegalArgumentException: Initial capacity must be greater than 0
	at org.apache.spark.unsafe.map.BytesToBytesMap.<init>(BytesToBytesMap.java:185)
	at org.apache.spark.unsafe.map.BytesToBytesMap.<init>(BytesToBytesMap.java:203)
	at org.apache.spark.sql.execution.joins.UnsafeHashedRelation$$anonfun$readExternal$1.apply$mcV$sp(HashedRelation.scala:295)
	at org.apache.spark.util.Utils$.tryOrIOException(Utils.scala:1261)
	... 40 more
{code}
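
From the trace, {{UnsafeHashedRelation.readExternal}} ends up constructing a {{BytesToBytesMap}} with an initial capacity of 0, which the map's constructor rejects. One plausible explanation is that the broadcast side of the join serialized zero rows for this query. The sketch below is a stand-in, not the actual Spark code (the class and {{nKeys}} variable are hypothetical); it only illustrates how a zero key count would trip the precondition thrown at BytesToBytesMap.java:185:

{code}
// Hypothetical stand-alone repro of the precondition seen in the stack trace.
// SimplifiedBytesToBytesMap is NOT org.apache.spark.unsafe.map.BytesToBytesMap;
// it only mirrors the capacity check that rejects a capacity of 0.
object InitialCapacityRepro {

  class SimplifiedBytesToBytesMap(initialCapacity: Int) {
    if (initialCapacity <= 0) {
      throw new IllegalArgumentException("Initial capacity must be greater than 0")
    }
  }

  def main(args: Array[String]): Unit = {
    // readExternal appears to size the map from the number of serialized keys.
    // An empty broadcast relation would make that count 0:
    val nKeys = 0
    new SimplifiedBytesToBytesMap(nKeys) // throws the same IllegalArgumentException
  }
}
{code}

If that hypothesis holds, a likely fix would be to handle the empty-relation case (or clamp the capacity to at least 1) in {{readExternal}}. As an unverified workaround, disabling broadcast joins via {{spark.sql.autoBroadcastJoinThreshold=-1}} should avoid this code path.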


