Posted to issues@spark.apache.org by "Jose Torres (JIRA)" <ji...@apache.org> on 2018/02/27 01:57:00 UTC

[jira] [Comment Edited] (SPARK-23504) Flaky test: RateSourceV2Suite.basic microbatch execution

    [ https://issues.apache.org/jira/browse/SPARK-23504?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16377897#comment-16377897 ] 

Jose Torres edited comment on SPARK-23504 at 2/27/18 1:56 AM:
--------------------------------------------------------------

I noticed this happening too, but I've been unable to reproduce it locally after hundreds of runs.
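
For anyone else who wants to try, here is a minimal stress-loop sketch for re-running just this test until it fails. Everything in it is an assumption rather than part of this report: it presumes a Spark source checkout with build/sbt, that the suite lives in the "sql" sbt module, and that the ScalaTest -z name filter matches the test.

{noformat}
import subprocess

# Hypothetical stress loop (an assumption, not from this report): re-runs the
# single flaky test via sbt until one run fails. The "sql" module name and the
# ScalaTest "-z" name filter are guesses and may need adjusting.
for i in range(500):
    result = subprocess.run(
        ["build/sbt",
         'sql/testOnly org.apache.spark.sql.execution.streaming.RateSourceV2Suite'
         ' -- -z "basic microbatch execution"'],
        capture_output=True,
        text=True,
    )
    if result.returncode != 0:
        print(f"test failed on run {i}")
        print(result.stdout[-4000:])  # tail of the sbt output for context
        break
else:
    print("no failure in 500 runs")
{noformat}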


was (Author: joseph.torres):
I've 

> Flaky test: RateSourceV2Suite.basic microbatch execution
> --------------------------------------------------------
>
>                 Key: SPARK-23504
>                 URL: https://issues.apache.org/jira/browse/SPARK-23504
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL, Tests
>    Affects Versions: 2.4.0
>            Reporter: Marcelo Vanzin
>            Priority: Major
>
> Seen on an unrelated change:
> https://amplab.cs.berkeley.edu/jenkins/job/SparkPullRequestBuilder/87635/testReport/org.apache.spark.sql.execution.streaming/RateSourceV2Suite/basic_microbatch_execution/
> {noformat}
> Stacktrace
> sbt.ForkMain$ForkError: org.scalatest.exceptions.TestFailedException: 
> == Results ==
> !== Correct Answer - 10 ==     == Spark Answer - 0 ==
> !struct<_1:timestamp,_2:int>   struct<>
> ![1969-12-31 16:00:00.0,0]     
> ![1969-12-31 16:00:00.1,1]     
> ![1969-12-31 16:00:00.2,2]     
> ![1969-12-31 16:00:00.3,3]     
> ![1969-12-31 16:00:00.4,4]     
> ![1969-12-31 16:00:00.5,5]     
> ![1969-12-31 16:00:00.6,6]     
> ![1969-12-31 16:00:00.7,7]     
> ![1969-12-31 16:00:00.8,8]     
> ![1969-12-31 16:00:00.9,9]     
>     
> == Progress ==
>    AdvanceRateManualClock(1)
> => CheckLastBatch: [1969-12-31 16:00:00.0,0],[1969-12-31 16:00:00.1,1],[1969-12-31 16:00:00.2,2],[1969-12-31 16:00:00.3,3],[1969-12-31 16:00:00.4,4],[1969-12-31 16:00:00.5,5],[1969-12-31 16:00:00.6,6],[1969-12-31 16:00:00.7,7],[1969-12-31 16:00:00.8,8],[1969-12-31 16:00:00.9,9]
>    StopStream
>    StartStream(ProcessingTime(0),org.apache.spark.util.SystemClock@22bc97a,Map(),null)
>    AdvanceRateManualClock(2)
>    CheckLastBatch: [1969-12-31 16:00:01.0,10],[1969-12-31 16:00:01.1,11],[1969-12-31 16:00:01.2,12],[1969-12-31 16:00:01.3,13],[1969-12-31 16:00:01.4,14],[1969-12-31 16:00:01.5,15],[1969-12-31 16:00:01.6,16],[1969-12-31 16:00:01.7,17],[1969-12-31 16:00:01.8,18],[1969-12-31 16:00:01.9,19]
> == Stream ==
> Output Mode: Append
> Stream state: {org.apache.spark.sql.execution.streaming.sources.RateStreamMicroBatchReader@75b88292: {"0":{"value":-1,"runTimeMs":0}}}
> Thread state: alive
> Thread stack trace: sun.misc.Unsafe.park(Native Method)
> java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
> java.util.concurrent.locks.AbstractQueuedSynchronizer.parkAndCheckInterrupt(AbstractQueuedSynchronizer.java:836)
> java.util.concurrent.locks.AbstractQueuedSynchronizer.doAcquireSharedInterruptibly(AbstractQueuedSynchronizer.java:997)
> java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1304)
> scala.concurrent.impl.Promise$DefaultPromise.tryAwait(Promise.scala:202)
> scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:218)
> scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:153)
> org.apache.spark.util.ThreadUtils$.awaitReady(ThreadUtils.scala:222)
> org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:633)
> org.apache.spark.SparkContext.runJob(SparkContext.scala:2030)
> org.apache.spark.sql.execution.datasources.v2.WriteToDataSourceV2Exec.doExecute(WriteToDataSourceV2.scala:84)
> org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:131)
> org.apache.spark.sql.execution.SparkPlan$$anonfun$execute$1.apply(SparkPlan.scala:127)
> org.apache.spark.sql.execution.SparkPlan$$anonfun$executeQuery$1.apply(SparkPlan.scala:155)
> org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:152)
> org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:127)
> org.apache.spark.sql.execution.SparkPlan.getByteArrayRdd(SparkPlan.scala:247)
> org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:294)
> org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:3272)
> org.apache.spark.sql.Dataset$$anonfun$collect$1.apply(Dataset.scala:2722)
> org.apache.spark.sql.Dataset$$anonfun$collect$1.apply(Dataset.scala:2722)
> org.apache.spark.sql.Dataset$$anonfun$52.apply(Dataset.scala:3253)
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:77)
> org.apache.spark.sql.Dataset.withAction(Dataset.scala:3252)
> org.apache.spark.sql.Dataset.collect(Dataset.scala:2722)
> org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$org$apache$spark$sql$execution$streaming$MicroBatchExecution$$runBatch$3$$anonfun$apply$18.apply(MicroBatchExecution.scala:493)
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:77)
> org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$org$apache$spark$sql$execution$streaming$MicroBatchExecution$$runBatch$3.apply(MicroBatchExecution.scala:488)
> org.apache.spark.sql.execution.streaming.ProgressReporter$class.reportTimeTaken(ProgressReporter.scala:271)
> org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:58)
> org.apache.spark.sql.execution.streaming.MicroBatchExecution.org$apache$spark$sql$execution$streaming$MicroBatchExecution$$runBatch(MicroBatchExecution.scala:487)
> org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$runActivatedStream$1$$anonfun$apply$mcZ$sp$1.apply$mcV$sp(MicroBatchExecution.scala:143)
> org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$runActivatedStream$1$$anonfun$apply$mcZ$sp$1.apply(MicroBatchExecution.scala:131)
> org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$runActivatedStream$1$$anonfun$apply$mcZ$sp$1.apply(MicroBatchExecution.scala:131)
> org.apache.spark.sql.execution.streaming.ProgressReporter$class.reportTimeTaken(ProgressReporter.scala:271)
> org.apache.spark.sql.execution.streaming.StreamExecution.reportTimeTaken(StreamExecution.scala:58)
> org.apache.spark.sql.execution.streaming.MicroBatchExecution$$anonfun$runActivatedStream$1.apply$mcZ$sp(MicroBatchExecution.scala:131)
> org.apache.spark.sql.execution.streaming.ProcessingTimeExecutor.execute(TriggerExecutor.scala:56)
> org.apache.spark.sql.execution.streaming.MicroBatchExecution.runActivatedStream(MicroBatchExecution.scala:127)
> org.apache.spark.sql.execution.streaming.StreamExecution.org$apache$spark$sql$execution$streaming$StreamExecution$$runStream(StreamExecution.scala:279)
> org.apache.spark.sql.execution.streaming.StreamExecution$$anon$1.run(StreamExecution.scala:189)
> == Sink ==
> 0: 
> == Plan ==
> == Parsed Logical Plan ==
> WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWriter@36f7e778
> +- Project [timestamp#421856 AS timestamp#421823, value#421857L AS value#421824L]
>    +- StreamingDataSourceV2Relation [timestamp#421856, value#421857L], org.apache.spark.sql.execution.streaming.sources.RateStreamMicroBatchReader@75b88292
> == Analyzed Logical Plan ==
> WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWriter@36f7e778
> +- Project [timestamp#421856 AS timestamp#421823, value#421857L AS value#421824L]
>    +- StreamingDataSourceV2Relation [timestamp#421856, value#421857L], org.apache.spark.sql.execution.streaming.sources.RateStreamMicroBatchReader@75b88292
> == Optimized Logical Plan ==
> WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWriter@36f7e778
> +- StreamingDataSourceV2Relation [timestamp#421856, value#421857L], org.apache.spark.sql.execution.streaming.sources.RateStreamMicroBatchReader@75b88292
> == Physical Plan ==
> WriteToDataSourceV2 org.apache.spark.sql.execution.streaming.sources.MicroBatchWriter@36f7e778
> +- *(1) DataSourceV2Scan [timestamp#421856, value#421857L], org.apache.spark.sql.execution.streaming.sources.RateStreamMicroBatchReader@75b88292
>          
>          
> 	at org.scalatest.Assertions$class.newAssertionFailedException(Assertions.scala:528)
> 	at org.scalatest.FunSuite.newAssertionFailedException(FunSuite.scala:1560)
> 	at org.scalatest.Assertions$class.fail(Assertions.scala:1089)
> 	at org.scalatest.FunSuite.fail(FunSuite.scala:1560)
> 	at org.apache.spark.sql.streaming.StreamTest$class.failTest$1(StreamTest.scala:430)
> 	at org.apache.spark.sql.streaming.StreamTest$$anonfun$executeAction$1$18.apply(StreamTest.scala:669)
> 	at org.apache.spark.sql.streaming.StreamTest$$anonfun$executeAction$1$18.apply(StreamTest.scala:669)
> 	at scala.Option.foreach(Option.scala:257)
> 	at org.apache.spark.sql.streaming.StreamTest$class.executeAction$1(StreamTest.scala:668)
> 	at org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:704)
> 	at org.apache.spark.sql.streaming.StreamTest$$anonfun$liftedTree1$1$1.apply(StreamTest.scala:693)
> 	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
> 	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
> 	at org.apache.spark.sql.streaming.StreamTest$class.liftedTree1$1(StreamTest.scala:693)
> 	at org.apache.spark.sql.streaming.StreamTest$class.testStream(StreamTest.scala:692)
> 	at org.apache.spark.sql.execution.streaming.RateSourceV2Suite.testStream(RateSourceV2Suite.scala:34)
> 	at org.apache.spark.sql.execution.streaming.RateSourceV2Suite$$anonfun$1.apply$mcV$sp(RateSourceV2Suite.scala:66)
> 	at org.apache.spark.sql.execution.streaming.RateSourceV2Suite$$anonfun$1.apply(RateSourceV2Suite.scala:59)
> 	at org.apache.spark.sql.execution.streaming.RateSourceV2Suite$$anonfun$1.apply(RateSourceV2Suite.scala:59)
> {noformat}


