Posted to issues@spark.apache.org by "Jungtaek Lim (Jira)" <ji...@apache.org> on 2020/01/03 12:53:00 UTC

[jira] [Commented] (SPARK-29135) Flaky Test: o.a.s.streaming.StreamingContextSuite.SPARK-18560 Receiver data should be deserialized properly

    [ https://issues.apache.org/jira/browse/SPARK-29135?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17007459#comment-17007459 ] 

Jungtaek Lim commented on SPARK-29135:
--------------------------------------

Another occurrence: [https://amplab.cs.berkeley.edu/jenkins/job/SparkPullRequestBuilder/116068/testReport/]
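
For context, the failure quoted below is the block-lookup-or-fail pattern in BlockRDD.compute: the task resolves the receiver block backing its partition from the block store, and if that block has already been removed (e.g. cleaned up before the batch job that reads it runs), the task fails with "Could not compute split, block ... not found". The snippet below is a minimal, self-contained Scala sketch of that pattern, not Spark's actual code; the object and method names are hypothetical and chosen only for illustration.

{code:scala}
import scala.collection.mutable

// Hypothetical stand-in for an executor-side block store; names are illustrative only.
object BlockLookupSketch {
  private val blockStore = mutable.Map.empty[String, Seq[String]]

  def putBlock(blockId: String, data: Seq[String]): Unit = blockStore.update(blockId, data)
  def removeBlock(blockId: String): Unit = blockStore.remove(blockId)

  // Mirrors the lookup-or-fail pattern behind the error in the trace:
  // resolve the partition's backing block, and fail the task if it is gone.
  def compute(blockId: String, rddId: Int): Iterator[String] =
    blockStore.get(blockId) match {
      case Some(data) => data.iterator
      case None =>
        throw new Exception(s"Could not compute split, block $blockId of RDD $rddId not found")
    }

  def main(args: Array[String]): Unit = {
    putBlock("input-0-1568696121600", Seq("a", "b", "c"))
    // If the receiver block is dropped before the batch that reads it runs,
    // the lookup fails with the same message as the task failure reported below.
    removeBlock("input-0-1568696121600")
    compute("input-0-1568696121600", rddId = 250) // throws
  }
}
{code}

In the flaky run, the input block for the batch appears to be gone by the time the task computes the BlockRDD partition, which is why the job aborts after the task retries are exhausted.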

> Flaky Test: o.a.s.streaming.StreamingContextSuite.SPARK-18560 Receiver data should be deserialized properly
> -----------------------------------------------------------------------------------------------------------
>
>                 Key: SPARK-29135
>                 URL: https://issues.apache.org/jira/browse/SPARK-29135
>             Project: Spark
>          Issue Type: Bug
>          Components: DStreams, Tests
>    Affects Versions: 3.0.0
>            Reporter: Jungtaek Lim
>            Priority: Major
>
> [https://amplab.cs.berkeley.edu/jenkins/job/SparkPullRequestBuilder/110735/testReport/]
> {code:java}
> org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 85.0 failed 4 times, most recent failure: Lost task 2.3 in stage 85.0 (TID 205, amp-jenkins-worker-04.amp, executor 0): java.lang.Exception: Could not compute split, block input-0-1568696121600 of RDD 250 not found
>
> Stack Trace:
> sbt.ForkMain$ForkError: org.apache.spark.SparkException: Job aborted due to stage failure: Task 2 in stage 85.0 failed 4 times, most recent failure: Lost task 2.3 in stage 85.0 (TID 205, amp-jenkins-worker-04.amp, executor 0): java.lang.Exception: Could not compute split, block input-0-1568696121600 of RDD 250 not found
> 	at org.apache.spark.rdd.BlockRDD.compute(BlockRDD.scala:50)
> 	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:327)
> 	at org.apache.spark.rdd.RDD.iterator(RDD.scala:291)
> 	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> 	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:327)
> 	at org.apache.spark.rdd.RDD.iterator(RDD.scala:291)
> 	at org.apache.spark.rdd.UnionRDD.compute(UnionRDD.scala:105)
> 	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:327)
> 	at org.apache.spark.rdd.RDD.iterator(RDD.scala:291)
> 	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
> 	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:94)
> 	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
> 	at org.apache.spark.scheduler.Task.run(Task.scala:127)
> 	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:449)
> 	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
> 	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:452)
> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> 	at java.lang.Thread.run(Thread.java:748)
> Driver stacktrace:
> 	at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:1958)
> 	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:1946)
> 	at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:1945)
> 	at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
> 	at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
> 	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
> 	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1945)
> 	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:945)
> 	at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:945)
> 	at scala.Option.foreach(Option.scala:274)
> 	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:945)
> 	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2175)
> 	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2124)
> 	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2113)
> 	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
> 	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:747)
> 	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2079)
> 	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2100)
> 	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2119)
> 	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2144)
> 	at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:982)
> 	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> 	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> 	at org.apache.spark.rdd.RDD.withScope(RDD.scala:366)
> 	at org.apache.spark.rdd.RDD.collect(RDD.scala:981)
> 	at org.apache.spark.streaming.StreamingContextSuite.$anonfun$new$133(StreamingContextSuite.scala:841)
> 	at org.apache.spark.streaming.StreamingContextSuite.$anonfun$new$133$adapted(StreamingContextSuite.scala:839)
> 	at org.apache.spark.streaming.dstream.DStream.$anonfun$foreachRDD$2(DStream.scala:628)
> 	at org.apache.spark.streaming.dstream.DStream.$anonfun$foreachRDD$2$adapted(DStream.scala:628)
> 	at org.apache.spark.streaming.dstream.ForEachDStream.$anonfun$generateJob$2(ForEachDStream.scala:51)
> 	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
> 	at org.apache.spark.streaming.dstream.DStream.createRDDWithLocalProperties(DStream.scala:416)
> 	at org.apache.spark.streaming.dstream.ForEachDStream.$anonfun$generateJob$1(ForEachDStream.scala:51)
> 	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
> 	at scala.util.Try$.apply(Try.scala:213)
> 	at org.apache.spark.streaming.scheduler.Job.run(Job.scala:39)
> 	at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler.$anonfun$run$1(JobScheduler.scala:256)
> 	at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
> 	at scala.util.DynamicVariable.withValue(DynamicVariable.scala:62)
> 	at org.apache.spark.streaming.scheduler.JobScheduler$JobHandler.run(JobScheduler.scala:256)
> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> 	at java.lang.Thread.run(Thread.java:748)
> Caused by: sbt.ForkMain$ForkError: java.lang.Exception: Could not compute split, block input-0-1568696121600 of RDD 250 not found
> 	at org.apache.spark.rdd.BlockRDD.compute(BlockRDD.scala:50)
> 	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:327)
> 	at org.apache.spark.rdd.RDD.iterator(RDD.scala:291)
> 	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
> 	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:327)
> 	at org.apache.spark.rdd.RDD.iterator(RDD.scala:291)
> 	at org.apache.spark.rdd.UnionRDD.compute(UnionRDD.scala:105)
> 	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:327)
> 	at org.apache.spark.rdd.RDD.iterator(RDD.scala:291)
> 	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
> 	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:94)
> 	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:52)
> 	at org.apache.spark.scheduler.Task.run(Task.scala:127)
> 	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:449)
> 	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1377)
> 	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:452)
> 	... 3 more
> {code}



--
This message was sent by Atlassian Jira
(v8.3.4#803005)

---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscribe@spark.apache.org
For additional commands, e-mail: issues-help@spark.apache.org