Posted to issues@spark.apache.org by "Will Chen (JIRA)" <ji...@apache.org> on 2015/06/02 12:13:19 UTC

[jira] [Created] (SPARK-8039) OOM when using DataFrame join operation

Will Chen created SPARK-8039:
--------------------------------

             Summary: OOM when using DataFrame join operation
                 Key: SPARK-8039
                 URL: https://issues.apache.org/jira/browse/SPARK-8039
             Project: Spark
          Issue Type: Bug
          Components: Spark Submit
    Affects Versions: 1.3.0
            Reporter: Will Chen


Code:
val sumRdd = sqlContext.sql("select d.id, changeType, sum(p.pointsNum) as total from " +
  "app_driver_point_change p join app_driver_points dp join app_user_basic d join app_user_register_log reg " +
  "on reg.add_time between \"2015-05-30\" and \"2015-06-01\" and p.pointId = dp.Id and dp.driverId = d.id" +
  " and reg.id = d.id group by d.id, changeType")

/*
 * Sample output row and schema of sumRdd:
 * [2135031,DRIVER_ADD_CAR_LENGTH,5]
 * StructType(StructField(id,LongType,false), StructField(changeType,StringType,true), StructField(total,LongType,true))
 */

sumRdd.registerTempTable("sum_rdd")
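
One thing I want to rule out: with every predicate in the single trailing "on" clause, the first two joins may be parsed with no condition at all, which would explain the CartesianRDD frames in the traces below. A rewrite that attaches each condition to its own join (untested sketch; same tables and columns as above, with the date range moved to a where clause, which is equivalent for inner joins):

val sumRdd = sqlContext.sql(
  "select d.id, changeType, sum(p.pointsNum) as total " +
  "from app_driver_point_change p " +
  "join app_driver_points dp on p.pointId = dp.Id " +
  "join app_user_basic d on dp.driverId = d.id " +
  "join app_user_register_log reg on reg.id = d.id " +
  "where reg.add_time between \"2015-05-30\" and \"2015-06-01\" " +
  "group by d.id, changeType")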

val re = sumRdd.as("sr").join(app_user_basic.as("ub"), $"sr.id" === $"ub.id")
  .select("sr.*", "ub.phone").as("d")
  .join(app_vehicles.as("av"), $"d.id" === $"av.belong_uid", "left")
  .selectExpr("d.id", "d.phone", "av.type as car_type", "av.length as car_length", "av.capacity as car_capacity",
    "av.number as car_number", "av.license_auth as car_auth").as("tmp")
  .join(app_user_info.as("ui")) // no join condition here, so this step is a cartesian product
  .select("tmp.*", "ui.id_card", "ui.id_card_auth").as("tmp") // driver + car
  .join(getCarAreaDF(sc, sqlContext), $"tmp.car_number".startsWith($"car_num"), "left")
  .select("tmp.*", "car_area").as("tmp")
  .join(getMobileAreaDF(sc, sqlContext).as("ma"), $"tmp.phone".startsWith($"ma.phonePrefix"), "left")
  .select("tmp.*", "ma.phone_area") // driver + car + driver_area + car_area


Two kinds of OOM exception are thrown by this code. Is the join actually executed in a distributed way across the cluster? I wonder whether the broadcast operation ships too much data. How should I solve this problem?
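
If it is the two startsWith joins that force the BroadcastNestedLoopJoin seen below, one workaround I am considering (untested sketch) is to turn the phone-area join into an equi-join, so the planner can hash-join on a key instead of broadcasting one side and looping. Here driverCar stands for the intermediate DataFrame aliased "tmp" just before that join, and the fixed 7-digit prefix width is my assumption about the phonePrefix data:

// driverCar: the intermediate DataFrame aliased "tmp" above (assumed);
// the 7-digit prefix width is a guess about the phonePrefix values.
val withPrefix = driverCar.withColumn("phone_prefix", $"phone".substr(1, 7))
val withArea = withPrefix.join(
  getMobileAreaDF(sc, sqlContext).as("ma"),
  $"phone_prefix" === $"ma.phonePrefix",
  "left")

The same rewrite would apply to the car_number/car_num join.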
Exceptions:
15/06/02 18:02:44 WARN TaskSetManager: Lost task 17.0 in stage 12.0 (TID 431, eunke-dp-ana-005): java.lang.OutOfMemoryError: GC overhead limit exceeded
        at java.nio.ByteBuffer.wrap(ByteBuffer.java:369)
        at com.mysql.jdbc.StringUtils.toString(StringUtils.java:1871)
        at com.mysql.jdbc.ResultSetRow.getString(ResultSetRow.java:821)
        at com.mysql.jdbc.ByteArrayRow.getString(ByteArrayRow.java:70)
        at com.mysql.jdbc.ResultSetImpl.getStringInternal(ResultSetImpl.java:5816)
        at com.mysql.jdbc.ResultSetImpl.getString(ResultSetImpl.java:5693)
        at org.apache.spark.sql.jdbc.JDBCRDD$$anon$1.getNext(JDBCRDD.scala:346)
        at org.apache.spark.sql.jdbc.JDBCRDD$$anon$1.hasNext(JDBCRDD.scala:399)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
        at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
        at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
        at scala.collection.Iterator$class.foreach(Iterator.scala:727)
        at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
        at org.apache.spark.sql.execution.joins.BroadcastNestedLoopJoin$$anonfun$2.apply(BroadcastNestedLoopJoin.scala:80)
        at org.apache.spark.sql.execution.joins.BroadcastNestedLoopJoin$$anonfun$2.apply(BroadcastNestedLoopJoin.scala:71)
        at org.apache.spark.rdd.RDD$$anonfun$14.apply(RDD.scala:634)
        at org.apache.spark.rdd.RDD$$anonfun$14.apply(RDD.scala:634)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
        at org.apache.spark.scheduler.Task.run(Task.scala:64)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)

15/06/02 18:02:44 ERROR TaskSchedulerImpl: Lost executor 5 on eunke-dp-ana-005: remote Akka client disassociated
15/06/02 18:02:44 INFO TaskSetManager: Re-queueing tasks for 5 from TaskSet 12.0
15/06/02 18:02:44 WARN TaskSetManager: Lost task 30.0 in stage 12.0 (TID 450, eunke-dp-ana-005): ExecutorLostFailure (executor 5 lost)
15/06/02 18:02:44 WARN TaskSetManager: Lost task 8.0 in stage 12.0 (TID 422, eunke-dp-ana-005): ExecutorLostFailure (executor 5 lost)
15/06/02 18:02:44 WARN TaskSetManager: Lost task 26.0 in stage 12.0 (TID 440, eunke-dp-ana-005): ExecutorLostFailure (executor 5 lost)
15/06/02 18:02:44 INFO DAGScheduler: Executor lost: 5 (epoch 26)
15/06/02 18:02:44 INFO AppClient$ClientActor: Executor updated: app-20150602175801-0477/5 is now EXITED (Command exited with code 52)
15/06/02 18:02:44 INFO SparkDeploySchedulerBackend: Executor app-20150602175801-0477/5 removed: Command exited with code 52
15/06/02 18:02:44 INFO BlockManagerMasterActor: Trying to remove executor 5 from BlockManagerMaster.
15/06/02 18:02:44 ERROR SparkDeploySchedulerBackend: Asked to remove non-existent executor 5
15/06/02 18:02:44 INFO BlockManagerMasterActor: Removing block manager BlockManagerId(5, eunke-dp-ana-005, 57455)
15/06/02 18:02:44 INFO BlockManagerMaster: Removed 5 successfully in removeExecutor
15/06/02 18:02:44 INFO AppClient$ClientActor: Executor added: app-20150602175801-0477/11 on worker-20150527101123-eunke-dp-ana-005-7078 (eunke-dp-ana-005:7078) with 3 cores
15/06/02 18:02:44 INFO SparkDeploySchedulerBackend: Granted executor ID app-20150602175801-0477/11 on hostPort eunke-dp-ana-005:7078 with 3 cores, 2.0 GB RAM
15/06/02 18:02:44 INFO Stage: Stage 8 is now unavailable on executor 5 (167/200, false)
15/06/02 18:02:44 INFO Stage: Stage 9 is now unavailable on executor 5 (141/200, false)
15/06/02 18:02:44 INFO AppClient$ClientActor: Executor updated: app-20150602175801-0477/11 is now RUNNING
15/06/02 18:02:44 INFO AppClient$ClientActor: Executor updated: app-20150602175801-0477/11 is now LOADING
15/06/02 18:02:45 INFO MapOutputTrackerMasterActor: Asked to send map output locations for shuffle 9 to sparkExecutor@eunke-dp-ana-002:55246
15/06/02 18:02:45 INFO MapOutputTrackerMaster: Size of output statuses for shuffle 9 is 148 bytes
15/06/02 18:02:45 INFO MapOutputTrackerMasterActor: Asked to send map output locations for shuffle 8 to sparkExecutor@eunke-dp-ana-002:55246
15/06/02 18:02:45 INFO MapOutputTrackerMaster: Size of output statuses for shuffle 8 is 170 bytes
15/06/02 18:02:45 INFO MapOutputTrackerMasterActor: Asked to send map output locations for shuffle 3 to sparkExecutor@eunke-dp-ana-002:55246
15/06/02 18:02:45 INFO MapOutputTrackerMaster: Size of output statuses for shuffle 3 is 1140 bytes
15/06/02 18:02:45 INFO TaskSetManager: Starting task 26.1 in stage 12.0 (TID 451, eunke-dp-ana-002, PROCESS_LOCAL, 1978 bytes)
15/06/02 18:02:45 WARN TaskSetManager: Lost task 18.1 in stage 12.0 (TID 446, eunke-dp-ana-002): FetchFailed(null, shuffleId=3, mapId=-1, reduceId=18, message=
org.apache.spark.shuffle.MetadataFetchFailedException: Missing an output location for shuffle 3
        at org.apache.spark.MapOutputTracker$$anonfun$org$apache$spark$MapOutputTracker$$convertMapStatuses$1.apply(MapOutputTracker.scala:385)
        at org.apache.spark.MapOutputTracker$$anonfun$org$apache$spark$MapOutputTracker$$convertMapStatuses$1.apply(MapOutputTracker.scala:382)
        at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
        at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:244)
        at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
        at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:108)
        at scala.collection.TraversableLike$class.map(TraversableLike.scala:244)
        at scala.collection.mutable.ArrayOps$ofRef.map(ArrayOps.scala:108)
        at org.apache.spark.MapOutputTracker$.org$apache$spark$MapOutputTracker$$convertMapStatuses(MapOutputTracker.scala:381)
        at org.apache.spark.MapOutputTracker.getServerStatuses(MapOutputTracker.scala:177)
        at org.apache.spark.shuffle.hash.BlockStoreShuffleFetcher$.fetch(BlockStoreShuffleFetcher.scala:42)
        at org.apache.spark.shuffle.hash.HashShuffleReader.read(HashShuffleReader.scala:40)
        at org.apache.spark.rdd.ShuffledRDD.compute(ShuffledRDD.scala:92)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.ZippedPartitionsRDD2.compute(ZippedPartitionsRDD.scala:88)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.ZippedPartitionsRDD2.compute(ZippedPartitionsRDD.scala:88)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.ZippedPartitionsRDD2.compute(ZippedPartitionsRDD.scala:88)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.CartesianRDD.compute(CartesianRDD.scala:75)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
        at org.apache.spark.scheduler.Task.run(Task.scala:64)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)

)
15/06/02 18:02:45 INFO DAGScheduler: Marking Stage 12 (count at BroadcastNestedLoopJoin.scala:114) as failed due to a fetch failure from Stage 9 (mapPartitions at Exchange.scala:64)
15/06/02 18:02:45 WARN TaskSetManager: Lost task 9.1 in stage 12.0 (TID 445, eunke-dp-ana-002): FetchFailed(null, shuffleId=3, mapId=-1, reduceId=9, message=
org.apache.spark.shuffle.MetadataFetchFailedException: Missing an output location for shuffle 3
)
15/06/02 18:02:45 WARN TaskSetManager: Lost task 0.1 in stage 12.0 (TID 444, eunke-dp-ana-002): FetchFailed(null, shuffleId=3, mapId=-1, reduceId=0, message=
org.apache.spark.shuffle.MetadataFetchFailedException: Missing an output location for shuffle 3
)
15/06/02 18:02:45 INFO DAGScheduler: Stage 12 (count at BroadcastNestedLoopJoin.scala:114) failed in 237.459 s
15/06/02 18:02:45 INFO DAGScheduler: Resubmitting Stage 9 (mapPartitions at Exchange.scala:64) and Stage 12 (count at BroadcastNestedLoopJoin.scala:114) due to fetch failure
15/06/02 18:02:45 WARN TaskSetManager: Lost task 26.1 in stage 12.0 (TID 451, eunke-dp-ana-002): FetchFailed(null, shuffleId=3, mapId=-1, reduceId=26, message=
org.apache.spark.shuffle.MetadataFetchFailedException: Missing an output location for shuffle 3
)
15/06/02 18:02:45 INFO DAGScheduler: Resubmitting failed stages
15/06/02 18:02:45 INFO DAGScheduler: Submitting Stage 8 (MapPartitionsRDD[60] at mapPartitions at Exchange.scala:64), which has no missing parents
15/06/02 18:02:45 INFO MemoryStore: ensureFreeSpace(9040) called with curMem=45430098, maxMem=278302556
15/06/02 18:02:45 INFO MemoryStore: Block broadcast_19 stored as values in memory (estimated size 8.8 KB, free 222.1 MB)
15/06/02 18:02:45 INFO MemoryStore: ensureFreeSpace(4506) called with curMem=45439138, maxMem=278302556
15/06/02 18:02:45 INFO MemoryStore: Block broadcast_19_piece0 stored as bytes in memory (estimated size 4.4 KB, free 222.1 MB)
15/06/02 18:02:45 INFO BlockManagerInfo: Added broadcast_19_piece0 in memory on eunke-dp-ana-001:48259 (size: 4.4 KB, free: 261.7 MB)
15/06/02 18:02:45 INFO BlockManagerMaster: Updated info of block broadcast_19_piece0
15/06/02 18:02:45 INFO SparkContext: Created broadcast 19 from broadcast at DAGScheduler.scala:839
15/06/02 18:02:45 INFO DAGScheduler: Submitting 33 missing tasks from Stage 8 (MapPartitionsRDD[60] at mapPartitions at Exchange.scala:64)
15/06/02 18:02:45 INFO TaskSchedulerImpl: Adding task set 8.1 with 33 tasks
15/06/02 18:02:45 INFO TaskSetManager: Starting task 0.0 in stage 8.1 (TID 452, eunke-dp-ana-002, PROCESS_LOCAL, 1600 bytes)
15/06/02 18:02:45 INFO TaskSetManager: Starting task 1.0 in stage 8.1 (TID 453, eunke-dp-ana-002, PROCESS_LOCAL, 1600 bytes)
15/06/02 18:02:45 INFO TaskSetManager: Starting task 2.0 in stage 8.1 (TID 454, eunke-dp-ana-002, PROCESS_LOCAL, 1600 bytes)
15/06/02 18:02:45 INFO BlockManagerInfo: Added broadcast_19_piece0 in memory on eunke-dp-ana-002:55132 (size: 4.4 KB, free: 1060.3 MB)
15/06/02 18:02:45 INFO MapOutputTrackerMasterActor: Asked to send map output locations for shuffle 6 to sparkExecutor@eunke-dp-ana-002:55246
15/06/02 18:02:45 INFO MapOutputTrackerMaster: Size of output statuses for shuffle 6 is 148 bytes
15/06/02 18:02:45 INFO MapOutputTrackerMasterActor: Asked to send map output locations for shuffle 5 to sparkExecutor@eunke-dp-ana-002:55246
15/06/02 18:02:45 INFO MapOutputTrackerMaster: Size of output statuses for shuffle 5 is 218 bytes
15/06/02 18:02:47 INFO SparkDeploySchedulerBackend: Registered executor: Actor[akka.tcp://sparkExecutor@eunke-dp-ana-005:35440/user/Executor#292835265] with ID 11
15/06/02 18:02:47 INFO TaskSetManager: Starting task 3.0 in stage 8.1 (TID 455, eunke-dp-ana-005, PROCESS_LOCAL, 1600 bytes)
15/06/02 18:02:47 INFO TaskSetManager: Starting task 4.0 in stage 8.1 (TID 456, eunke-dp-ana-005, PROCESS_LOCAL, 1600 bytes)
15/06/02 18:02:47 INFO TaskSetManager: Starting task 5.0 in stage 8.1 (TID 457, eunke-dp-ana-005, PROCESS_LOCAL, 1600 bytes)
15/06/02 18:02:47 INFO BlockManagerMasterActor: Registering block manager eunke-dp-ana-005:37342 with 1060.3 MB RAM, BlockManagerId(11, eunke-dp-ana-005, 37342)
15/06/02 18:02:48 INFO BlockManagerInfo: Added broadcast_19_piece0 in memory on eunke-dp-ana-005:37342 (size: 4.4 KB, free: 1060.3 MB)
15/06/02 18:02:49 INFO MapOutputTrackerMasterActor: Asked to send map output locations for shuffle 6 to sparkExecutor@eunke-dp-ana-005:35440
15/06/02 18:02:49 INFO MapOutputTrackerMasterActor: Asked to send map output locations for shuffle 5 to sparkExecutor@eunke-dp-ana-005:35440
15/06/02 18:03:00 WARN BlockManagerMasterActor: Removing BlockManager BlockManagerId(2, eunke-dp-ana-003, 51001) with no recent heart beats: 167084ms exceeds 120000ms
15/06/02 18:03:00 WARN BlockManagerMasterActor: Removing BlockManager BlockManagerId(0, eunke-dp-ana-010, 34792) with no recent heart beats: 154521ms exceeds 120000ms
15/06/02 18:03:00 INFO BlockManagerMasterActor: Removing block manager BlockManagerId(0, eunke-dp-ana-010, 34792)
15/06/02 18:03:00 INFO BlockManagerMasterActor: Removing block manager BlockManagerId(2, eunke-dp-ana-003, 51001)
15/06/02 18:03:18 INFO TaskSetManager: Starting task 6.0 in stage 8.1 (TID 458, eunke-dp-ana-007, PROCESS_LOCAL, 1600 bytes)
15/06/02 18:03:18 WARN TaskSetManager: Lost task 13.0 in stage 12.0 (TID 427, eunke-dp-ana-007): java.lang.OutOfMemoryError: GC overhead limit exceeded
        at org.apache.spark.sql.catalyst.expressions.SpecificMutableRow.copy(SpecificMutableRow.scala:230)
        at org.apache.spark.sql.execution.joins.CartesianProduct$$anonfun$2.apply(CartesianProduct.scala:33)
        at org.apache.spark.sql.execution.joins.CartesianProduct$$anonfun$2.apply(CartesianProduct.scala:33)
        at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
        at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
        at scala.collection.Iterator$$anon$13.next(Iterator.scala:372)
        at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
        at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
        at scala.collection.Iterator$class.foreach(Iterator.scala:727)
        at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
        at org.apache.spark.sql.execution.joins.BroadcastNestedLoopJoin$$anonfun$2.apply(BroadcastNestedLoopJoin.scala:80)
        at org.apache.spark.sql.execution.joins.BroadcastNestedLoopJoin$$anonfun$2.apply(BroadcastNestedLoopJoin.scala:71)
        at org.apache.spark.rdd.RDD$$anonfun$14.apply(RDD.scala:634)
        at org.apache.spark.rdd.RDD$$anonfun$14.apply(RDD.scala:634)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
        at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
        at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
        at org.apache.spark.scheduler.Task.run(Task.scala:64)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:203)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
        at java.lang.Thread.run(Thread.java:745)



