Posted to users@zeppelin.apache.org by Adamantios Corais <ad...@gmail.com> on 2018/07/02 16:31:17 UTC

org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()

Hi,

I have downloaded the latest binary package of Zeppelin (ver. 0.8.0),
extracted it, and started it as follows: `./bin/zeppelin.sh`

Next, I tried a very simple example:

`spark.read.parquet("./bin/userdata1.parquet").show()`

Unfortunately, it returns the following error. Note that the same example
works fine with the official Docker image of Zeppelin (ver. 0.7.3). Any
ideas?

org.apache.spark.SparkException: Job aborted due to stage failure: Task 0
> in stage 7.0 failed 1 times, most recent failure: Lost task 0.0 in stage
> 7.0 (TID 7, localhost, executor driver): java.lang.NoSuchMethodError:
> org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()Lorg/apache/hadoop/fs/FileSystem$Statistics$StatisticsData;
> at
> org.apache.spark.deploy.SparkHadoopUtil$$anonfun$1$$anonfun$apply$mcJ$sp$1.apply(SparkHadoopUtil.scala:149)
> at
> org.apache.spark.deploy.SparkHadoopUtil$$anonfun$1$$anonfun$apply$mcJ$sp$1.apply(SparkHadoopUtil.scala:149)
> at
> scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
> at
> scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
> at scala.collection.Iterator$class.foreach(Iterator.scala:893)
> at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
> at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
> at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
> at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
> at scala.collection.AbstractTraversable.map(Traversable.scala:104)
> at
> org.apache.spark.deploy.SparkHadoopUtil$$anonfun$1.apply$mcJ$sp(SparkHadoopUtil.scala:149)
> at
> org.apache.spark.deploy.SparkHadoopUtil.getFSBytesReadOnThreadCallback(SparkHadoopUtil.scala:150)
> at
> org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.<init>(FileScanRDD.scala:78)
> at
> org.apache.spark.sql.execution.datasources.FileScanRDD.compute(FileScanRDD.scala:71)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
> at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
> at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
> at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
> at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
> at org.apache.spark.scheduler.Task.run(Task.scala:108)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:335)
> at
> java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at
> java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:748)
> Driver stacktrace:
>   at org.apache.spark.scheduler.DAGScheduler.org
> $apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1499)
>   at
> org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1487)
>   at
> org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1486)
>   at
> scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
>   at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
>   at
> org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1486)
>   at
> org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
>   at
> org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
>   at scala.Option.foreach(Option.scala:257)
>   at
> org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:814)
>   at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1714)
>   at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1669)
>   at
> org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1658)
>   at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
>   at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:630)
>   at org.apache.spark.SparkContext.runJob(SparkContext.scala:2022)
>   at org.apache.spark.SparkContext.runJob(SparkContext.scala:2043)
>   at org.apache.spark.SparkContext.runJob(SparkContext.scala:2062)
>   at
> org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:336)
>   at
> org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:38)
>   at org.apache.spark.sql.Dataset.org
> $apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:2853)
>   at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2153)
>   at org.apache.spark.sql.Dataset$$anonfun$head$1.apply(Dataset.scala:2153)
>   at org.apache.spark.sql.Dataset$$anonfun$55.apply(Dataset.scala:2837)
>   at
> org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:65)
>   at org.apache.spark.sql.Dataset.withAction(Dataset.scala:2836)
>   at org.apache.spark.sql.Dataset.head(Dataset.scala:2153)
>   at org.apache.spark.sql.Dataset.take(Dataset.scala:2366)
>   at org.apache.spark.sql.Dataset.showString(Dataset.scala:245)
>   at org.apache.spark.sql.Dataset.show(Dataset.scala:644)
>   at org.apache.spark.sql.Dataset.show(Dataset.scala:603)
>   at org.apache.spark.sql.Dataset.show(Dataset.scala:612)
>   ... 52 elided
> Caused by: java.lang.NoSuchMethodError:
> org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()Lorg/apache/hadoop/fs/FileSystem$Statistics$StatisticsData;
>   at
> org.apache.spark.deploy.SparkHadoopUtil$$anonfun$1$$anonfun$apply$mcJ$sp$1.apply(SparkHadoopUtil.scala:149)
>   at
> org.apache.spark.deploy.SparkHadoopUtil$$anonfun$1$$anonfun$apply$mcJ$sp$1.apply(SparkHadoopUtil.scala:149)
>   at
> scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
>   at
> scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
>   at scala.collection.Iterator$class.foreach(Iterator.scala:893)
>   at scala.collection.AbstractIterator.foreach(Iterator.scala:1336)
>   at scala.collection.IterableLike$class.foreach(IterableLike.scala:72)
>   at scala.collection.AbstractIterable.foreach(Iterable.scala:54)
>   at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
>   at scala.collection.AbstractTraversable.map(Traversable.scala:104)
>   at
> org.apache.spark.deploy.SparkHadoopUtil$$anonfun$1.apply$mcJ$sp(SparkHadoopUtil.scala:149)
>   at
> org.apache.spark.deploy.SparkHadoopUtil.getFSBytesReadOnThreadCallback(SparkHadoopUtil.scala:150)
>   at
> org.apache.spark.sql.execution.datasources.FileScanRDD$$anon$1.<init>(FileScanRDD.scala:78)
>   at
> org.apache.spark.sql.execution.datasources.FileScanRDD.compute(FileScanRDD.scala:71)
>   at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
>   at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
>   at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
>   at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
>   at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
>   at
> org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
>   at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
>   at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
>   at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
>   at org.apache.spark.scheduler.Task.run(Task.scala:108)
>   at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:335)
>   ... 3 more

Re: org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()

Posted by Andrea Santurbano <sa...@gmail.com>.
Thanks Adamantios,
I created a Dockerfile to automate the process, feel free to use it:

https://gist.github.com/conker84/4ffc9a2f0125c808b4dfcf3b7d70b043
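
For anyone who wants to try it, a minimal usage sketch (the image tag and the
port mapping are just examples of mine, not something the gist defines):

  # copy the Dockerfile from the gist into an empty directory, then:
  docker build -t zeppelin-custom .
  docker run --rm -p 127.0.0.1:8080:8080 zeppelin-custom
  # Zeppelin should then be reachable at http://localhost:8080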



On Thursday, July 5, 2018 at 13:00, Adamantios Corais <
adamantios.corais@gmail.com> wrote:

> Hi Andrea,
>
> The following workaround works for me (but maybe there are other
> alternatives too):
>
> - downloaded Spark (spark-2.3.1-bin-hadoop2.7)
> - renamed the zeppelin-env.sh.template to zeppelin-env.sh
> - appended the following line to the above file: export
> SPARK_HOME=../../spark-2.3.1-bin-hadoop2.7/
>
> Hope this helps,
>
>
>
>
> *// **Adamantios Corais*
>
> On Thu, Jul 5, 2018 at 1:51 PM, Andrea Santurbano <sa...@gmail.com>
> wrote:
>
>> Thanks Jeff,
>> is there a workaround in order to make it work now?
>>
>> On Thursday, July 5, 2018 at 12:42, Jeff Zhang <zj...@gmail.com> wrote:
>>
>>>
>>> This is because the Hadoop version used in the embedded Spark is 2.3, which
>>> is too old. I created https://issues.apache.org/jira/browse/ZEPPELIN-3586 for
>>> this issue. I expect it will be fixed in 0.8.1.
>>>
>>>
>>>
>>> Andrea Santurbano <sa...@gmail.com> wrote on Thursday, July 5, 2018 at 3:35 PM:
>>>
>>>> I agree that it is not for production, but if I want to write a simple blog
>>>> post (and that's what I'm doing) I think it's a well-suited solution.
>>>> Is it possible to fix this?
>>>> Thanks
>>>> Andrea
>>>>
>>>> On Thursday, July 5, 2018 at 02:29, Jeff Zhang <zj...@gmail.com>
>>>> wrote:
>>>>
>>>>>
>>>>> This might be due to the embedded Spark version. I would recommend
>>>>> specifying SPARK_HOME instead of using the embedded Spark; the embedded
>>>>> Spark is not for production.
>>>>>
>>>>>
>>>>> Andrea Santurbano <sa...@gmail.com> wrote on Thursday, July 5, 2018 at 12:07 AM:
>>>>>
>>>>>> I have the same issue...
>>>>>> On Tuesday, July 3, 2018 at 23:18, Adamantios Corais <
>>>>>> adamantios.corais@gmail.com> wrote:
>>>>>>
>>>>>>> Hi Jeff, I am using the embedded Spark.
>>>>>>>
>>>>>>> FYI, this is how I start the dockerized (yet old) version of
>>>>>>> Zeppelin that works as expected.
>>>>>>>
>>>>>>> #!/bin/bash
>>>>>>>> docker run --rm \
>>>>>>>> --name zepelin \
>>>>>>>> -p 127.0.0.1:9090:8080 \
>>>>>>>> -p 127.0.0.1:5050:4040 \
>>>>>>>> -v $(pwd):/zeppelin/notebook \
>>>>>>>> apache/zeppelin:0.7.3
>>>>>>>
>>>>>>>
>>>>>>> And this is how I start the binarized (yet stable) version of
>>>>>>> Zeppelin that is supposed to work (but it doesn't).
>>>>>>>
>>>>>>> #!/bin/bash
>>>>>>>> wget
>>>>>>>> http://www-eu.apache.org/dist/zeppelin/zeppelin-0.8.0/zeppelin-0.8.0-bin-all.tgz
>>>>>>>> tar  zxvf zeppelin-0.8.0-bin-all.tgz
>>>>>>>> cd   ./zeppelin-0.8.0-bin-all/
>>>>>>>> bash ./bin/zeppelin.sh
>>>>>>>
>>>>>>>
>>>>>>> Thanks.
>>>>>>>
>>>>>>>
>>>>>>>
>>>>>>>
>>>>>>> *// **Adamantios Corais*
>>>>>>>
>>>>>>> On Tue, Jul 3, 2018 at 2:24 AM, Jeff Zhang <zj...@gmail.com> wrote:
>>>>>>>
>>>>>>>>
>>>>>>>> Do you use the embedded Spark or specify SPARK_HOME? If you set
>>>>>>>> SPARK_HOME, which Spark version and Hadoop version do you use?
>>>>>>>>
>>>>>>>>
>>>>>>>>
>>>>>>>> Adamantios Corais <ad...@gmail.com> wrote on Tuesday, July 3, 2018
>>>>>>>> at 12:32 AM:
>>>>>>>>
>>>>>>>>> Hi,
>>>>>>>>>
>>>>>>>>> I have downloaded the latest binary package of Zeppelin (ver.
>>>>>>>>> 0.8.0), extracted, and started as follows: `./bin/zeppelin.sh`
>>>>>>>>>
>>>>>>>>> Next, I tried a very simple example:
>>>>>>>>>
>>>>>>>>> `spark.read.parquet("./bin/userdata1.parquet").show()`
>>>>>>>>>
>>>>>>>>> Which unfortunately returns the following error. Note that the
>>>>>>>>> same example works fine with the official docker version of Zeppelin (ver.
>>>>>>>>> 0.7.3). Any ideas?
>>>>>>>>>
>>>>>>>>>> [full stack trace elided; it is identical to the one in the first message of this thread]
>>>>>>>>>
>>>>>>>>>
>>>>>>>>>
>>>>>>>
>

Re: org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()

Posted by Adamantios Corais <ad...@gmail.com>.
Hi Andrea,

The following workaround works for me (but maybe there are other
alternatives too); the same steps are sketched as a script below:

- downloaded Spark (spark-2.3.1-bin-hadoop2.7)
- renamed the zeppelin-env.sh.template to zeppelin-env.sh
- appended the following line to the above file: export
SPARK_HOME=../../spark-2.3.1-bin-hadoop2.7/
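
A rough script version of the same steps (the download URL and the conf/
location of zeppelin-env.sh.template are my assumptions; the relative
SPARK_HOME path is copied from the list above, but an absolute path is safer):

  #!/bin/bash
  # run this next to the zeppelin-0.8.0-bin-all/ directory
  wget https://archive.apache.org/dist/spark/spark-2.3.1/spark-2.3.1-bin-hadoop2.7.tgz
  tar zxvf spark-2.3.1-bin-hadoop2.7.tgz
  cd zeppelin-0.8.0-bin-all/conf
  cp zeppelin-env.sh.template zeppelin-env.sh
  echo 'export SPARK_HOME=../../spark-2.3.1-bin-hadoop2.7/' >> zeppelin-env.sh
  cd .. && ./bin/zeppelin-daemon.sh restart   # or stop and re-run ./bin/zeppelin.sh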

Hope this helps,




*// **Adamantios Corais*

On Thu, Jul 5, 2018 at 1:51 PM, Andrea Santurbano <sa...@gmail.com> wrote:

> Thanks Jeff,
> is there a workaround in order to make it work now?
>
> On Thursday, July 5, 2018 at 12:42, Jeff Zhang <zj...@gmail.com> wrote:
>
>>
>> This is because the Hadoop version used in the embedded Spark is 2.3, which
>> is too old. I created https://issues.apache.org/jira/browse/ZEPPELIN-3586 for
>> this issue. I expect it will be fixed in 0.8.1.
>>
>>
>>
>> Andrea Santurbano <sa...@gmail.com> wrote on Thursday, July 5, 2018 at 3:35 PM:
>>
>>> I agree that it is not for production, but if I want to write a simple blog post
>>> (and that's what I'm doing) I think it's a well-suited solution.
>>> Is it possible to fix this?
>>> Thanks
>>> Andrea
>>>
>>> On Thursday, July 5, 2018 at 02:29, Jeff Zhang <zj...@gmail.com>
>>> wrote:
>>>
>>>>
>>>> This might be due to the embedded Spark version. I would recommend
>>>> specifying SPARK_HOME instead of using the embedded Spark; the embedded
>>>> Spark is not for production.
>>>>
>>>>
>>>> Andrea Santurbano <sa...@gmail.com> wrote on Thursday, July 5, 2018 at 12:07 AM:
>>>>
>>>>> I have the same issue...
>>>>> On Tuesday, July 3, 2018 at 23:18, Adamantios Corais <
>>>>> adamantios.corais@gmail.com> wrote:
>>>>>
>>>>>> Hi Jeff, I am using the embedded Spark.
>>>>>>
>>>>>> FYI, this is how I start the dockerized (yet old) version of Zeppelin
>>>>>> that works as expected.
>>>>>>
>>>>>> #!/bin/bash
>>>>>>> docker run --rm \
>>>>>>> --name zepelin \
>>>>>>> -p 127.0.0.1:9090:8080 \
>>>>>>> -p 127.0.0.1:5050:4040 \
>>>>>>> -v $(pwd):/zeppelin/notebook \
>>>>>>> apache/zeppelin:0.7.3
>>>>>>
>>>>>>
>>>>>> And this is how I start the binarized (yet stable) version of
>>>>>> Zeppelin that is supposed to work (but it doesn't).
>>>>>>
>>>>>> #!/bin/bash
>>>>>>> wget http://www-eu.apache.org/dist/zeppelin/zeppelin-0.8.0/
>>>>>>> zeppelin-0.8.0-bin-all.tgz
>>>>>>> tar  zxvf zeppelin-0.8.0-bin-all.tgz
>>>>>>> cd   ./zeppelin-0.8.0-bin-all/
>>>>>>> bash ./bin/zeppelin.sh
>>>>>>
>>>>>>
>>>>>> Thanks.
>>>>>>
>>>>>>
>>>>>>
>>>>>>
>>>>>> *// **Adamantios Corais*
>>>>>>
>>>>>> On Tue, Jul 3, 2018 at 2:24 AM, Jeff Zhang <zj...@gmail.com> wrote:
>>>>>>
>>>>>>>
>>>>>>> Do you use the embedded Spark or specify SPARK_HOME? If you set
>>>>>>> SPARK_HOME, which Spark version and Hadoop version do you use?
>>>>>>>
>>>>>>>
>>>>>>>
>>>>>>> Adamantios Corais <ad...@gmail.com> wrote on Tuesday, July 3, 2018
>>>>>>> at 12:32 AM:
>>>>>>>
>>>>>>>> Hi,
>>>>>>>>
>>>>>>>> I have downloaded the latest binary package of Zeppelin (ver.
>>>>>>>> 0.8.0), extracted, and started as follows: `./bin/zeppelin.sh`
>>>>>>>>
>>>>>>>> Next, I tried a very simple example:
>>>>>>>>
>>>>>>>> `spark.read.parquet("./bin/userdata1.parquet").show()`
>>>>>>>>
>>>>>>>> Which unfortunately returns the following error. Note that the same
>>>>>>>> example works fine with the official docker version of Zeppelin (ver.
>>>>>>>> 0.7.3). Any ideas?
>>>>>>>>
>>>>>>>>> [full stack trace elided; it is identical to the one in the first message of this thread]
>>>>>>>>
>>>>>>>>
>>>>>>>>
>>>>>>

Re: org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()

Posted by Andrea Santurbano <sa...@gmail.com>.
Thanks Jeff,
is there a workaround in order to make it work now?

On Thursday, July 5, 2018 at 12:42, Jeff Zhang <zj...@gmail.com> wrote:

>
> This is because the Hadoop version used in the embedded Spark is 2.3, which
> is too old. I created https://issues.apache.org/jira/browse/ZEPPELIN-3586 for
> this issue. I expect it will be fixed in 0.8.1.
>
>
>
> Andrea Santurbano <sa...@gmail.com> wrote on Thursday, July 5, 2018 at 3:35 PM:
>
>> I agree that it is not for production, but if I want to write a simple blog post
>> (and that's what I'm doing) I think it's a well-suited solution.
>> Is it possible to fix this?
>> Thanks
>> Andrea
>>
>> On Thursday, July 5, 2018 at 02:29, Jeff Zhang <zj...@gmail.com> wrote:
>>
>>>
>>> This might be due to the embedded Spark version. I would recommend
>>> specifying SPARK_HOME instead of using the embedded Spark; the embedded
>>> Spark is not for production.
>>>
>>>
>>> Andrea Santurbano <sa...@gmail.com> wrote on Thursday, July 5, 2018 at 12:07 AM:
>>>
>>>> I have the same issue...
>>>> On Tuesday, July 3, 2018 at 23:18, Adamantios Corais <
>>>> adamantios.corais@gmail.com> wrote:
>>>>
>>>>> Hi Jeff, I am using the embedded Spark.
>>>>>
>>>>> FYI, this is how I start the dockerized (yet old) version of Zeppelin
>>>>> that works as expected.
>>>>>
>>>>> #!/bin/bash
>>>>>> docker run --rm \
>>>>>> --name zepelin \
>>>>>> -p 127.0.0.1:9090:8080 \
>>>>>> -p 127.0.0.1:5050:4040 \
>>>>>> -v $(pwd):/zeppelin/notebook \
>>>>>> apache/zeppelin:0.7.3
>>>>>
>>>>>
>>>>> And this is how I start the binarized (yet stable) version of Zeppelin that
>>>>> is supposed to work (but it doesn't).
>>>>>
>>>>> #!/bin/bash
>>>>>> wget
>>>>>> http://www-eu.apache.org/dist/zeppelin/zeppelin-0.8.0/zeppelin-0.8.0-bin-all.tgz
>>>>>> tar  zxvf zeppelin-0.8.0-bin-all.tgz
>>>>>> cd   ./zeppelin-0.8.0-bin-all/
>>>>>> bash ./bin/zeppelin.sh
>>>>>
>>>>>
>>>>> Thanks.
>>>>>
>>>>>
>>>>>
>>>>>
>>>>> *// **Adamantios Corais*
>>>>>
>>>>> On Tue, Jul 3, 2018 at 2:24 AM, Jeff Zhang <zj...@gmail.com> wrote:
>>>>>
>>>>>>
>>>>>> Do you use the embedded Spark or specify SPARK_HOME? If you set
>>>>>> SPARK_HOME, which Spark version and Hadoop version do you use?
>>>>>>
>>>>>>
>>>>>>
>>>>>> Adamantios Corais <ad...@gmail.com> wrote on Tuesday, July 3, 2018
>>>>>> at 12:32 AM:
>>>>>>
>>>>>>> Hi,
>>>>>>>
>>>>>>> I have downloaded the latest binary package of Zeppelin (ver.
>>>>>>> 0.8.0), extracted, and started as follows: `./bin/zeppelin.sh`
>>>>>>>
>>>>>>> Next, I tried a very simple example:
>>>>>>>
>>>>>>> `spark.read.parquet("./bin/userdata1.parquet").show()`
>>>>>>>
>>>>>>> Which unfortunately returns the following error. Note that the same
>>>>>>> example works fine with the official docker version of Zeppelin (ver.
>>>>>>> 0.7.3). Any ideas?
>>>>>>>
>>>>>>>> [full stack trace elided; it is identical to the one in the first message of this thread]
>>>>>>>
>>>>>>>
>>>>>>>
>>>>>

Re: org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()

Posted by Jeff Zhang <zj...@gmail.com>.
This is because the Hadoop version used in the embedded Spark is 2.3, which is
too old. I created https://issues.apache.org/jira/browse/ZEPPELIN-3586 for
this issue. I expect it will be fixed in 0.8.1.
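
If you want to double-check what the embedded Spark bundles, a quick sketch (the
jar location and name below are my assumptions about the 0.8.0 binary layout, so
adjust them to whatever find reports):

  cd zeppelin-0.8.0-bin-all
  find . -iname '*hadoop*'    # list the Hadoop artifacts that ship with the embedded Spark
  # if Hadoop is shaded into the Spark dependencies jar, read its version info from the jar:
  unzip -p interpreter/spark/dep/zeppelin-spark-dependencies-*.jar common-version-info.properties | grep '^version'

A 2.3.x version there matches the NoSuchMethodError above, since
FileSystem$Statistics.getThreadStatistics() only exists in later Hadoop 2.x
releases.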



Andrea Santurbano <sa...@gmail.com> wrote on Thursday, July 5, 2018 at 3:35 PM:

> I agree that it is not for production, but if I want to write a simple blog post
> (and that's what I'm doing) I think it's a well-suited solution.
> Is it possible to fix this?
> Thanks
> Andrea
>
> On Thursday, July 5, 2018 at 02:29, Jeff Zhang <zj...@gmail.com> wrote:
>
>>
>> This might be due to the embedded Spark version. I would recommend
>> specifying SPARK_HOME instead of using the embedded Spark; the embedded
>> Spark is not for production.
>>
>>
>> Andrea Santurbano <sa...@gmail.com> wrote on Thursday, July 5, 2018 at 12:07 AM:
>>
>>> I have the same issue...
>>> On Tuesday, July 3, 2018 at 23:18, Adamantios Corais <
>>> adamantios.corais@gmail.com> wrote:
>>>
>>>> Hi Jeff, I am using the embedded Spark.
>>>>
>>>> FYI, this is how I start the dockerized (yet old) version of Zeppelin
>>>> that works as expected.
>>>>
>>>> #!/bin/bash
>>>>> docker run --rm \
>>>>> --name zepelin \
>>>>> -p 127.0.0.1:9090:8080 \
>>>>> -p 127.0.0.1:5050:4040 \
>>>>> -v $(pwd):/zeppelin/notebook \
>>>>> apache/zeppelin:0.7.3
>>>>
>>>>
>>>> And this is how I start the binarized (yet stable) version of Zeppelin that
>>>> is supposed to work (but it doesn't).
>>>>
>>>> #!/bin/bash
>>>>> wget
>>>>> http://www-eu.apache.org/dist/zeppelin/zeppelin-0.8.0/zeppelin-0.8.0-bin-all.tgz
>>>>> tar  zxvf zeppelin-0.8.0-bin-all.tgz
>>>>> cd   ./zeppelin-0.8.0-bin-all/
>>>>> bash ./bin/zeppelin.sh
>>>>
>>>>
>>>> Thanks.
>>>>
>>>>
>>>>
>>>>
>>>> *// **Adamantios Corais*
>>>>
>>>> On Tue, Jul 3, 2018 at 2:24 AM, Jeff Zhang <zj...@gmail.com> wrote:
>>>>
>>>>>
>>>>> Do you use the embedded Spark or specify SPARK_HOME? If you set
>>>>> SPARK_HOME, which Spark version and Hadoop version do you use?
>>>>>
>>>>>
>>>>>
>>>>> Adamantios Corais <ad...@gmail.com> wrote on Tuesday, July 3, 2018 at 12:32 AM:
>>>>>
>>>>>> Hi,
>>>>>>
>>>>>> I have downloaded the latest binary package of Zeppelin (ver. 0.8.0),
>>>>>> extracted, and started as follows: `./bin/zeppelin.sh`
>>>>>>
>>>>>> Next, I tried a very simple example:
>>>>>>
>>>>>> `spark.read.parquet("./bin/userdata1.parquet").show()`
>>>>>>
>>>>>> Which unfortunately returns the following error. Note that the same
>>>>>> example works fine with the official docker version of Zeppelin (ver.
>>>>>> 0.7.3). Any ideas?
>>>>>>
>>>>>>> [full stack trace elided; it is identical to the one in the first message of this thread]
>>>>>>
>>>>>>
>>>>>>
>>>>

Re: org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()

Posted by Andrea Santurbano <sa...@gmail.com>.
I agree that it is not for production, but if I want to write a simple blog post
(and that's what I'm doing) I think it's a well-suited solution.
Is it possible to fix this?
Thanks
Andrea

On Thursday, July 5, 2018 at 02:29, Jeff Zhang <zj...@gmail.com> wrote:

>
> This might be due to the embedded Spark version. I would recommend
> specifying SPARK_HOME instead of using the embedded Spark; the embedded
> Spark is not for production.
>
>
> Andrea Santurbano <sa...@gmail.com> wrote on Thursday, July 5, 2018 at 12:07 AM:
>
>> I have the same issue...
>> On Tuesday, July 3, 2018 at 23:18, Adamantios Corais <
>> adamantios.corais@gmail.com> wrote:
>>
>>> Hi Jeff, I am using the embedded Spark.
>>>
>>> FYI, this is how I start the dockerized (yet old) version of Zeppelin
>>> that works as expected.
>>>
>>> #!/bin/bash
>>>> docker run --rm \
>>>> --name zepelin \
>>>> -p 127.0.0.1:9090:8080 \
>>>> -p 127.0.0.1:5050:4040 \
>>>> -v $(pwd):/zeppelin/notebook \
>>>> apache/zeppelin:0.7.3
>>>
>>>
>>> And this is how I start the binarized (yet stable) version of Zeppelin that
>>> is supposed to work (but it doesn't).
>>>
>>> #!/bin/bash
>>>> wget
>>>> http://www-eu.apache.org/dist/zeppelin/zeppelin-0.8.0/zeppelin-0.8.0-bin-all.tgz
>>>> tar  zxvf zeppelin-0.8.0-bin-all.tgz
>>>> cd   ./zeppelin-0.8.0-bin-all/
>>>> bash ./bin/zeppelin.sh
>>>
>>>
>>> Thanks.
>>>
>>>
>>>
>>>
>>> *// **Adamantios Corais*
>>>
>>> On Tue, Jul 3, 2018 at 2:24 AM, Jeff Zhang <zj...@gmail.com> wrote:
>>>
>>>>
>>>> Do you use the embedded Spark or specify SPARK_HOME? If you set
>>>> SPARK_HOME, which Spark version and Hadoop version do you use?
>>>>
>>>>
>>>>
>>>> Adamantios Corais <ad...@gmail.com> wrote on Tuesday, July 3, 2018 at 12:32 AM:
>>>>
>>>>> Hi,
>>>>>
>>>>> I have downloaded the latest binary package of Zeppelin (ver. 0.8.0),
>>>>> extracted, and started as follows: `./bin/zeppelin.sh`
>>>>>
>>>>> Next, I tried a very simple example:
>>>>>
>>>>> `spark.read.parquet("./bin/userdata1.parquet").show()`
>>>>>
>>>>> Which unfortunately returns the following error. Note that the same
>>>>> example works fine with the official docker version of Zeppelin (ver.
>>>>> 0.7.3). Any ideas?
>>>>>
>>>>> org.apache.spark.SparkException: Job aborted due to stage failure:
>>>>>> Task 0 in stage 7.0 failed 1 times, most recent failure: Lost task 0.0 in
>>>>>> stage 7.0 (TID 7, localhost, executor driver): java.lang.NoSuchMethodError:
>>>>>> org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()Lorg/apache/hadoop/fs/FileSystem$Statistics$StatisticsData;
>>>>>
>>>>>
>>>>>
>>>

Re: org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()

Posted by Jeff Zhang <zj...@gmail.com>.
This might be due to the embedded Spark version. I would recommend specifying
SPARK_HOME instead of using the embedded Spark, since the embedded Spark
is not intended for production use.
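
For reference, a minimal sketch of what "specify SPARK_HOME" could look like for
the 0.8.0 binary distribution. The Spark release, download URL, and paths are
illustrative assumptions, not values taken from this thread; pick whichever
Spark build matches your environment.

# run from the zeppelin-0.8.0-bin-all/ directory
wget https://archive.apache.org/dist/spark/spark-2.3.1/spark-2.3.1-bin-hadoop2.7.tgz
tar zxvf spark-2.3.1-bin-hadoop2.7.tgz
cp conf/zeppelin-env.sh.template conf/zeppelin-env.sh
# point the Spark interpreter at the downloaded Spark instead of the embedded one
echo "export SPARK_HOME=$(pwd)/spark-2.3.1-bin-hadoop2.7" >> conf/zeppelin-env.sh
./bin/zeppelin-daemon.sh restart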


On Thu, Jul 5, 2018 at 12:07 AM, Andrea Santurbano <sa...@gmail.com> wrote:

> I have the same issue...
> On Tue, Jul 3, 2018 at 23:18, Adamantios Corais <
> adamantios.corais@gmail.com> wrote:
>
>> Hi Jeff, I am using the embedded Spark.
>>
>> FYI, this is how I start the dockerized (yet old) version of Zeppelin
>> that works as expected.
>>
>> #!/bin/bash
>>> docker run --rm \
>>> --name zepelin \
>>> -p 127.0.0.1:9090:8080 \
>>> -p 127.0.0.1:5050:4040 \
>>> -v $(pwd):/zeppelin/notebook \
>>> apache/zeppelin:0.7.3
>>
>>
>> And this is how I start the binarized (yet stable) version of Zeppelin that
>> is supposed to work (but it doesn't).
>>
>> #!/bin/bash
>>> wget
>>> http://www-eu.apache.org/dist/zeppelin/zeppelin-0.8.0/zeppelin-0.8.0-bin-all.tgz
>>> tar  zxvf zeppelin-0.8.0-bin-all.tgz
>>> cd   ./zeppelin-0.8.0-bin-all/
>>> bash ./bin/zeppelin.sh
>>
>>
>> Thanks.
>>
>>
>>
>>
>> *// **Adamantios Corais*
>>
>> On Tue, Jul 3, 2018 at 2:24 AM, Jeff Zhang <zj...@gmail.com> wrote:
>>
>>>
>>> Do you use the embedded Spark or specify SPARK_HOME? If you set
>>> SPARK_HOME, which Spark version and Hadoop version do you use?
>>>
>>>
>>>
>>> On Tue, Jul 3, 2018 at 12:32 AM, Adamantios Corais <ad...@gmail.com> wrote:
>>>
>>>> Hi,
>>>>
>>>> I have downloaded the latest binary package of Zeppelin (ver. 0.8.0),
>>>> extracted, and started as follows: `./bin/zeppelin.sh`
>>>>
>>>> Next, I tried a very simple example:
>>>>
>>>> `spark.read.parquet("./bin/userdata1.parquet").show()`
>>>>
>>>> Which unfortunately returns the following error. Note that the same
>>>> example works fine with the official docker version of Zeppelin (ver.
>>>> 0.7.3). Any ideas?
>>>>
>>>> org.apache.spark.SparkException: Job aborted due to stage failure: Task
>>>>> 0 in stage 7.0 failed 1 times, most recent failure: Lost task 0.0 in stage
>>>>> 7.0 (TID 7, localhost, executor driver): java.lang.NoSuchMethodError:
>>>>> org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()Lorg/apache/hadoop/fs/FileSystem$Statistics$StatisticsData;
>>>>
>>>>
>>>>
>>

Re: org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()

Posted by Andrea Santurbano <sa...@gmail.com>.
I have the same issue...
On Tue, Jul 3, 2018 at 23:18, Adamantios Corais <
adamantios.corais@gmail.com> wrote:

> Hi Jeff, I am using the embedded Spark.
>
> FYI, this is how I start the dockerized (yet old) version of Zeppelin that
> works as expected.
>
> #!/bin/bash
>> docker run --rm \
>> --name zepelin \
>> -p 127.0.0.1:9090:8080 \
>> -p 127.0.0.1:5050:4040 \
>> -v $(pwd):/zeppelin/notebook \
>> apache/zeppelin:0.7.3
>
>
> And this is how I start the binarized (yet stable) version of Zeppelin that
> is supposed to work (but it doesn't).
>
> #!/bin/bash
>> wget
>> http://www-eu.apache.org/dist/zeppelin/zeppelin-0.8.0/zeppelin-0.8.0-bin-all.tgz
>> tar  zxvf zeppelin-0.8.0-bin-all.tgz
>> cd   ./zeppelin-0.8.0-bin-all/
>> bash ./bin/zeppelin.sh
>
>
> Thanks.
>
>
>
>
> *// **Adamantios Corais*
>
> On Tue, Jul 3, 2018 at 2:24 AM, Jeff Zhang <zj...@gmail.com> wrote:
>
>>
>> Do you use the embedded Spark or specify SPARK_HOME? If you set
>> SPARK_HOME, which Spark version and Hadoop version do you use?
>>
>>
>>
>> On Tue, Jul 3, 2018 at 12:32 AM, Adamantios Corais <ad...@gmail.com> wrote:
>>
>>> Hi,
>>>
>>> I have downloaded the latest binary package of Zeppelin (ver. 0.8.0),
>>> extracted, and started as follows: `./bin/zeppelin.sh`
>>>
>>> Next, I tried a very simple example:
>>>
>>> `spark.read.parquet("./bin/userdata1.parquet").show()`
>>>
>>> Which unfortunately returns the following error. Note that the same
>>> example works fine with the official docker version of Zeppelin (ver.
>>> 0.7.3). Any ideas?
>>>
>>> org.apache.spark.SparkException: Job aborted due to stage failure: Task
>>>> 0 in stage 7.0 failed 1 times, most recent failure: Lost task 0.0 in stage
>>>> 7.0 (TID 7, localhost, executor driver): java.lang.NoSuchMethodError:
>>>> org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()Lorg/apache/hadoop/fs/FileSystem$Statistics$StatisticsData;
>>>
>>>
>>>
>

Re: org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()

Posted by Adamantios Corais <ad...@gmail.com>.
Hi Jeff, I am using the embedded Spark.

FYI, this is how I start the dockerized (yet old) version of Zeppelin that
works as expected.

#!/bin/bash
> docker run --rm \
> --name zepelin \
> -p 127.0.0.1:9090:8080 \
> -p 127.0.0.1:5050:4040 \
> -v $(pwd):/zeppelin/notebook \
> apache/zeppelin:0.7.3


And this is how I start the binarized (yet stable) version of Zeppelin that
is supposed to work (but it doesn't).

#!/bin/bash
> wget http://www-eu.apache.org/dist/zeppelin/zeppelin-0.8.0/zeppelin-0.8.0-bin-all.tgz
> tar  zxvf zeppelin-0.8.0-bin-all.tgz
> cd   ./zeppelin-0.8.0-bin-all/
> bash ./bin/zeppelin.sh
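
(A sketch of the same startup, modified along the lines suggested elsewhere in
the thread: download a standalone Spark and point SPARK_HOME at it instead of
relying on the embedded Spark. The Spark release and URL are illustrative
assumptions; setting SPARK_HOME in conf/zeppelin-env.sh works as well.)

#!/bin/bash
wget https://archive.apache.org/dist/spark/spark-2.3.1/spark-2.3.1-bin-hadoop2.7.tgz
tar  zxvf spark-2.3.1-bin-hadoop2.7.tgz
# intended to make Zeppelin use this Spark instead of its embedded one
export SPARK_HOME="$(pwd)/spark-2.3.1-bin-hadoop2.7"
cd   ./zeppelin-0.8.0-bin-all/
bash ./bin/zeppelin.sh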


Thanks.




*// **Adamantios Corais*

On Tue, Jul 3, 2018 at 2:24 AM, Jeff Zhang <zj...@gmail.com> wrote:

>
> Do you use the embedded Spark or specify SPARK_HOME? If you set
> SPARK_HOME, which Spark version and Hadoop version do you use?
>
>
>
> On Tue, Jul 3, 2018 at 12:32 AM, Adamantios Corais <ad...@gmail.com> wrote:
>
>> Hi,
>>
>> I have downloaded the latest binary package of Zeppelin (ver. 0.8.0),
>> extracted, and started as follows: `./bin/zeppelin.sh`
>>
>> Next, I tried a very simple example:
>>
>> `spark.read.parquet("./bin/userdata1.parquet").show()`
>>
>> Which unfortunately returns the following error. Note that the same
>> example works fine with the official docker version of Zeppelin (ver.
>> 0.7.3). Any ideas?
>>
>> org.apache.spark.SparkException: Job aborted due to stage failure: Task
>>> 0 in stage 7.0 failed 1 times, most recent failure: Lost task 0.0 in stage
>>> 7.0 (TID 7, localhost, executor driver): java.lang.NoSuchMethodError:
>>> org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()Lorg/apache/hadoop/fs/FileSystem$Statistics$StatisticsData;
>>
>>
>>

Re: org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()

Posted by Jeff Zhang <zj...@gmail.com>.
Do you use the embedded Spark or specify SPARK_HOME? If you set SPARK_HOME,
which Spark version and Hadoop version do you use?
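
(A quick sketch of how that information can be gathered, assuming SPARK_HOME
points at a standalone Spark distribution; the commands are illustrative and
not part of the original exchange.)

"$SPARK_HOME"/bin/spark-submit --version   # prints the Spark and Scala versions
ls "$SPARK_HOME"/jars/ | grep '^hadoop-'   # Hadoop client jars this Spark build ships with
hadoop version                             # only if a separate Hadoop installation is on the PATH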



On Tue, Jul 3, 2018 at 12:32 AM, Adamantios Corais <ad...@gmail.com> wrote:

> Hi,
>
> I have downloaded the latest binary package of Zeppelin (ver. 0.8.0),
> extracted, and started as follows: `./bin/zeppelin.sh`
>
> Next, I tried a very simple example:
>
> `spark.read.parquet("./bin/userdata1.parquet").show()`
>
> Which unfortunately returns the following error. Note that the same
> example works fine with the official docker version of Zeppelin (ver.
> 0.7.3). Any ideas?
>
> org.apache.spark.SparkException: Job aborted due to stage failure: Task 0
>> in stage 7.0 failed 1 times, most recent failure: Lost task 0.0 in stage
>> 7.0 (TID 7, localhost, executor driver): java.lang.NoSuchMethodError:
>> org.apache.hadoop.fs.FileSystem$Statistics.getThreadStatistics()Lorg/apache/hadoop/fs/FileSystem$Statistics$StatisticsData;
>
>
>