Posted to issues@spark.apache.org by "Vlad Frolov (JIRA)" <ji...@apache.org> on 2014/06/18 00:57:01 UTC
[jira] [Updated] (SPARK-2172) PySpark cannot import mllib modules in YARN-client mode
[ https://issues.apache.org/jira/browse/SPARK-2172?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Vlad Frolov updated SPARK-2172:
-------------------------------
Description:
Here is a simple reproduction:
{noformat}
$ HADOOP_CONF_DIR=/etc/hadoop/conf MASTER=yarn-client ./bin/pyspark
{noformat}
{code:title=issue.py|borderStyle=solid}
>>> from pyspark.mllib.regression import LabeledPoint
>>> sc.parallelize([1,2,3]).map(lambda x: LabeledPoint(1, [2])).count()
{code}
Note: the same issue occurs with .collect() in place of .count().
{code:title=Traceback|borderStyle=solid}
Py4JJavaError: An error occurred while calling o110.collect.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 8.0:0 failed 4 times, most recent failure: Exception failure in TID 52 on host ares: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/mnt/storage/bigisle/yarn/1/yarn/local/usercache/blb/filecache/18/spark-assembly-1.0.0-hadoop2.2.0.jar/pyspark/worker.py", line 73, in main
command = pickleSer._read_with_length(infile)
File "/mnt/storage/bigisle/yarn/1/yarn/local/usercache/blb/filecache/18/spark-assembly-1.0.0-hadoop2.2.0.jar/pyspark/serializers.py", line 146, in _read_with_length
return self.loads(obj)
ImportError: No module named mllib.regression
org.apache.spark.api.python.PythonRDD$$anon$1.read(PythonRDD.scala:115)
org.apache.spark.api.python.PythonRDD$$anon$1.<init>(PythonRDD.scala:145)
org.apache.spark.api.python.PythonRDD.compute(PythonRDD.scala:78)
org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:262)
org.apache.spark.rdd.RDD.iterator(RDD.scala:229)
org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:111)
org.apache.spark.scheduler.Task.run(Task.scala:51)
org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:187)
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
java.lang.Thread.run(Thread.java:745)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1033)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1017)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1015)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1015)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:633)
at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:633)
at scala.Option.foreach(Option.scala:236)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:633)
at org.apache.spark.scheduler.DAGSchedulerEventProcessActor$$anonfun$receive$2.applyOrElse(DAGScheduler.scala:1207)
at akka.actor.ActorCell.receiveMessage(ActorCell.scala:498)
at akka.actor.ActorCell.invoke(ActorCell.scala:456)
at akka.dispatch.Mailbox.processMailbox(Mailbox.scala:237)
at akka.dispatch.Mailbox.run(Mailbox.scala:219)
at akka.dispatch.ForkJoinExecutorConfigurator$AkkaForkJoinTask.exec(AbstractDispatcher.scala:386)
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
{code}
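To narrow down where the import breaks, a quick diagnostic can be run from the same yarn-client shell. This is only a sketch (it assumes the shell-provided sc; probe is a hypothetical helper name, not part of the report): it attempts the failing import inside a task and returns the executor's sys.path alongside the result.
{code:title=probe.py|borderStyle=solid}
>>> def probe(iterator):
...     # this runs on the executor; try the same import that fails above
...     import sys
...     try:
...         from pyspark.mllib.regression import LabeledPoint
...         ok = True
...     except ImportError:
...         ok = False
...     return [(ok, sys.path)]
...
>>> # collect() forces the task onto a YARN executor, i.e. the failing path
>>> sc.parallelize([0], 1).mapPartitions(probe).collect()
{code}
On an affected cluster the tuple should come back with False and a sys.path rooted in the assembly jar, suggesting the failure happens while importing on the worker rather than while pickling on the driver.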
However, this code works as expected:
{code:title=noissue.py|borderStyle=solid}
>>> from pyspark.mllib.regression import LabeledPoint
>>> sc.parallelize([1,2,3]).map(lambda x: LabeledPoint(1, [2])).first()
>>> sc.parallelize([1,2,3]).map(lambda x: LabeledPoint(1, [2])).take(3)
{code}
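A plausible explanation for the difference (an assumption based on Spark 1.0 internals, not verified in this report): first() and take() request the initial partition through runJob(..., allowLocal = true), so the job may be evaluated locally in the driver, whose Python process imports pyspark.mllib from SPARK_HOME/python. count() and collect() always ship tasks to the YARN executors, where the worker unpickles the closure against the copy of pyspark packaged inside the assembly jar, and there the mllib subpackage is not importable. A commonly documented cause at the time was assembly jars built with Java 7, whose zip64 entries Python 2's zipimport cannot always read. The contrast in one snippet:
{code:title=contrast.py|borderStyle=solid}
>>> rdd = sc.parallelize([1,2,3]).map(lambda x: LabeledPoint(1, [2]))
>>> rdd.first()    # OK: may be evaluated locally in the driver (allowLocal)
>>> rdd.take(3)    # OK: same driver-local evaluation path
>>> rdd.count()    # fails: tasks always run on the YARN executors
>>> rdd.collect()  # fails the same way
{code}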
was: (previous description; identical to the updated description above, except that it did not include the {noformat} launch command)
> PySpark cannot import mllib modules in YARN-client mode
> -------------------------------------------------------
>
> Key: SPARK-2172
> URL: https://issues.apache.org/jira/browse/SPARK-2172
> Project: Spark
> Issue Type: Bug
> Components: MLlib, PySpark, Spark Core, YARN
> Affects Versions: 1.0.0, 1.1.0
> Environment: Ubuntu 14.04
> Java 7
> Python 2.7
> CDH 5.0.2 (Hadoop 2.3.0): HDFS, YARN
> Spark 1.0.0 and git master
> Reporter: Vlad Frolov
> Labels: mllib, python
>
> (issue description identical to the updated description above)
--
This message was sent by Atlassian JIRA
(v6.2#6252)