Posted to issues@spark.apache.org by "Sean R. Owen (Jira)" <ji...@apache.org> on 2020/12/07 15:50:00 UTC
[jira] [Resolved] (SPARK-33280) Spark 3.0 serialization issue
[ https://issues.apache.org/jira/browse/SPARK-33280?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Sean R. Owen resolved SPARK-33280.
----------------------------------
Resolution: Not A Problem
Agreed; you would have to ensure you don't carry sc around. Access it in your code without storing it in a val, or make the val lazy + @transient.
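For example, a minimal sketch of the lazy + transient option applied to the Stage trait quoted below (the initCheckpointDir helper is illustrative and not part of the original code; the eager trait-body statements have to move into it, since touching sc during class initialization would force the lazy val anyway):
{noformat}
import org.apache.spark.SparkContext

trait Stage {
  // lazy + @transient: the context is neither serialized with the
  // enclosing object nor created when the object's class is initialized.
  @transient lazy val sc: SparkContext = SparkContext.getOrCreate()
  @transient lazy val app: App = new App(sc)

  // Illustrative helper: driver-side setup is called explicitly from
  // main() instead of running in the trait body during class init.
  def initCheckpointDir(): Unit =
    sc.setCheckpointDir(sc.getConf.get("spark.checkpoint.dir", "/tmp/checkpoints"))
}
{noformat}
The other option, accessing the context without a val, amounts to calling SparkContext.getOrCreate() through a def at each use site, so there is no field to serialize at all.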
> Spark 3.0 serialization issue
> -----------------------------
>
> Key: SPARK-33280
> URL: https://issues.apache.org/jira/browse/SPARK-33280
> Project: Spark
> Issue Type: Bug
> Components: Spark Core
> Affects Versions: 3.0.1
> Environment: OS: macOS Catalina 10.15.7
> +Environment 1:+
> Spark: spark-3.0.1-bin-hadoop2.7
> code compiled with : scala 2.12
> +Environment 2:+
> spark-2.4.7-bin-hadoop2.7
> code compiled with: scala 2.12
> +Environment 3 (This succeeds):+
> spark-2.4.7-bin-hadoop2.7
> code compiled with: scala 2.11
> Reproducible on a YARN cluster with 3.0.1
> Reporter: Kiran Kumar Joseph
> Priority: Major
>
> This code, which does a simple RDD map operation and saves the data, fails on the executor with a deserialization error.
> {noformat}
> package test.pkg
>
> import org.apache.spark.SparkContext
> import org.apache.spark.rdd.RDD
>
> trait Stage {
>   val sc = SparkContext.getOrCreate()
>   sc.setCheckpointDir(sc.getConf.get("spark.checkpoint.dir", "/tmp/checkpoints"))
>   val app = new App(sc)
> }
>
> class App(sc: SparkContext) {
>   def write(data: RDD[String]): Unit = {
>     if (!data.isEmpty())
>       data.saveAsTextFile("/tmp/test-output")
>   }
> }
>
> object SampleStage extends Stage {
>   def main(args: Array[String]): Unit = {
>     val sampleList = Seq("a", "b", "c")
>     writeList(sampleList)
>     sc.stop()
>   }
>
>   def writeList(data: Seq[String]) = {
>     val dataRDD = sc.parallelize(data).map(x => x.toLowerCase())
>     app.write(dataRDD)
>   }
> } {noformat}
>
> If I change the line
> {noformat}
> val dataRDD = sc.parallelize(data).map(x => x.toLowerCase()) {noformat}
> to
> {noformat}
> val dataRDD = sc.parallelize(data) {noformat}
> there is no exception and the job succeeds.
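> The executor trace below points at the mechanism: SerializedLambda.readResolve forces class initialization of test.pkg.SampleStage$, whose Stage constructor calls SparkContext.getOrCreate() on the executor, where no master URL is set. That is also consistent with the Scala 2.11 build succeeding, since 2.11 compiles closures to anonymous inner classes rather than Java 8 lambdas. Under that reading, a minimal workaround sketch (the Transforms object is illustrative, not part of the original code) is to keep the function value in an object that never touches a SparkContext:
> {noformat}
> object Transforms {
>   // A plain function value: deserializing it on an executor only
>   // initializes Transforms, which holds no SparkContext.
>   val lower: String => String = _.toLowerCase()
> }
>
> def writeList(data: Seq[String]) = {
>   val dataRDD = sc.parallelize(data).map(Transforms.lower)
>   app.write(dataRDD)
> }
> {noformat}
>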
> spark-submit command:
> {noformat}
> $SPARK_HOME/bin/spark-submit --master spark://masterhost:port --conf spark.app.name=SampleStage --executor-memory 2g --num-executors 2 --class test.pkg.SampleStage --deploy-mode client --conf spark.driver.cores=4 sampleStage.jar {noformat}
>
> Executor stacktrace:
> {noformat}
> Driver stacktrace:
> 20/10/28 22:55:34 INFO DAGScheduler: Job 0 failed: isEmpty at App.scala:13, took 1.931078 s
> Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 0.0 failed 4 times, most recent failure: Lost task 0.3 in stage 0.0 (TID 3, 192.168.86.20, executor 0): java.lang.NoClassDefFoundError: test.pkg.SampleStage$ (initialization failure)
> at java.lang.J9VMInternals.initializationAlreadyFailed(J9VMInternals.java:98)
> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> at java.lang.reflect.Method.invoke(Method.java:498)
> at java.lang.invoke.SerializedLambda.readResolve(SerializedLambda.java:230)
> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> at java.lang.reflect.Method.invoke(Method.java:498)
> at java.io.ObjectStreamClass.invokeReadResolve(ObjectStreamClass.java:1274)
> at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2258)
> at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1746)
> at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2472)
> at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2394)
> at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2247)
> at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1746)
> at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2472)
> at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2394)
> at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2247)
> at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1746)
> at java.io.ObjectInputStream.defaultReadFields(ObjectInputStream.java:2472)
> at java.io.ObjectInputStream.readSerialData(ObjectInputStream.java:2394)
> at java.io.ObjectInputStream.readOrdinaryObject(ObjectInputStream.java:2247)
> at java.io.ObjectInputStream.readObject0(ObjectInputStream.java:1746)
> at java.io.ObjectInputStream.readObject(ObjectInputStream.java:548)
> at java.io.ObjectInputStream.readObject(ObjectInputStream.java:458)
> at org.apache.spark.serializer.JavaDeserializationStream.readObject(JavaSerializer.scala:75)
> at org.apache.spark.serializer.JavaSerializerInstance.deserialize(JavaSerializer.scala:114)
> at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:83)
> at org.apache.spark.scheduler.Task.run(Task.scala:123)
> at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:408)
> at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:821)
> Caused by: org.apache.spark.SparkException: A master URL must be set in your configuration
> at org.apache.spark.SparkContext.<init>(SparkContext.scala:368)
> at org.apache.spark.SparkContext.<init>(SparkContext.scala:117)
> at org.apache.spark.SparkContext$.getOrCreate(SparkContext.scala:2544)
> at test.pkg.Stage.$init$(App.scala:6)
> at test.pkg.SampleStage$.<init>(App.scala:17)
> at test.pkg.SampleStage$.<clinit>(App.scala)
> ... 36 more
> {noformat}
> Driver stacktrace:
> {noformat}
> at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1925)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1913)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1912)
> at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
> at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
> at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1912)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:948)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:948)
> at scala.Option.foreach(Option.scala:257)
> at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:948)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2146)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2095)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2084)
> at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
> at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:759)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2082)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
> at org.apache.spark.rdd.RDD$$anonfun$take$1.apply(RDD.scala:1409)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> at org.apache.spark.rdd.RDD.withScope(RDD.scala:385)
> at org.apache.spark.rdd.RDD.take(RDD.scala:1382)
> at org.apache.spark.rdd.RDD$$anonfun$isEmpty$1.apply$mcZ$sp(RDD.scala:1517)
> at org.apache.spark.rdd.RDD$$anonfun$isEmpty$1.apply(RDD.scala:1517)
> at org.apache.spark.rdd.RDD$$anonfun$isEmpty$1.apply(RDD.scala:1517)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
> at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
> at org.apache.spark.rdd.RDD.withScope(RDD.scala:385)
> at org.apache.spark.rdd.RDD.isEmpty(RDD.scala:1516)
> at test.pkg.App.write(App.scala:13)
> at test.pkg.SampleStage$.writeList(App.scala:25)
> at test.pkg.SampleStage$.main(App.scala:20)
> at test.pkg.SampleStage.main(App.scala)
> at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
> at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
> at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
> at java.lang.reflect.Method.invoke(Method.java:498)
> at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
> at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:845)
> at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:161)
> at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:184)
> at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:86)
> at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:920)
> at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:929)
> at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala) {noformat}
--
This message was sent by Atlassian Jira
(v8.3.4#803005)