Posted to user@spark.apache.org by Yadid Ayzenberg <ya...@media.mit.edu> on 2015/08/22 21:57:32 UTC

spark 1.4.1 - LZFException


Hi All,

We have a Spark standalone cluster running 1.4.1 and we are setting 
spark.io.compression.codec to lzf.
I have a long-running interactive application which behaves normally, 
but after a few days I get the following exception in multiple jobs. Any 
ideas on what could be causing this?
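
For reference, this is roughly how that codec gets selected (a minimal 
sketch in Scala, assuming the setting is applied through SparkConf when 
the application starts; the app name is hypothetical, and 
spark-defaults.conf or --conf work the same way):

import org.apache.spark.{SparkConf, SparkContext}

// spark.io.compression.codec controls how shuffle, spill and broadcast
// blocks are compressed. "lzf" selects the ning compress-lzf codec that
// appears in the stack trace below; "snappy" (the 1.4 default) and "lz4"
// are the other built-in values.
val conf = new SparkConf()
  .setAppName("long-running-interactive-app")   // hypothetical name
  .set("spark.io.compression.codec", "lzf")
val sc = new SparkContext(conf)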

Yadid



Job aborted due to stage failure: Task 27 in stage 286.0 failed 4 times, most recent failure: Lost task 27.3 in stage 286.0 (TID 516817, xx.yy.zz.ww): com.esotericsoftware.kryo.KryoException: com.ning.compress.lzf.LZFException: Corrupt input data, block did not start with 2 byte signature ('ZV') followed by type byte, 2-byte length)
	at com.esotericsoftware.kryo.io.Input.fill(Input.java:142)
	at com.esotericsoftware.kryo.io.Input.require(Input.java:155)
	at com.esotericsoftware.kryo.io.Input.readInt(Input.java:337)
	at com.esotericsoftware.kryo.util.DefaultClassResolver.readClass(DefaultClassResolver.java:109)
	at com.esotericsoftware.kryo.Kryo.readClass(Kryo.java:610)
	at com.esotericsoftware.kryo.Kryo.readClassAndObject(Kryo.java:721)
	at org.apache.spark.serializer.KryoDeserializationStream.readObject(KryoSerializer.scala:182)
	at org.apache.spark.serializer.DeserializationStream.readKey(Serializer.scala:169)
	at org.apache.spark.serializer.DeserializationStream$$anon$2.getNext(Serializer.scala:200)
	at org.apache.spark.serializer.DeserializationStream$$anon$2.getNext(Serializer.scala:197)
	at org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:71)
	at org.apache.spark.util.CompletionIterator.hasNext(CompletionIterator.scala:32)
	at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
	at org.apache.spark.util.CompletionIterator.hasNext(CompletionIterator.scala:32)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:39)
	at org.apache.spark.util.collection.ExternalAppendOnlyMap.insertAll(ExternalAppendOnlyMap.scala:127)
	at org.apache.spark.Aggregator.combineValuesByKey(Aggregator.scala:60)
	at org.apache.spark.shuffle.hash.HashShuffleReader.read(HashShuffleReader.scala:46)
	at org.apache.spark.rdd.ShuffledRDD.compute(ShuffledRDD.scala:90)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
	at org.apache.spark.CacheManager.getOrCompute(CacheManager.scala:69)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:242)
	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
	at org.apache.spark.CacheManager.getOrCompute(CacheManager.scala:69)
	at org.apache.spark.rdd.RDD.iterator(RDD.scala:242)
	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:63)
	at org.apache.spark.scheduler.Task.run(Task.scala:70)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
	at java.lang.Thread.run(Thread.java:745)
Caused by: com.ning.compress.lzf.LZFException: Corrupt input data, block did not start with 2 byte signature ('ZV') followed by type byte, 2-byte length)
	at com.ning.compress.lzf.ChunkDecoder._reportCorruptHeader(ChunkDecoder.java:267)
	at com.ning.compress.lzf.impl.UnsafeChunkDecoder.decodeChunk(UnsafeChunkDecoder.java:55)
	at com.ning.compress.lzf.LZFInputStream.readyBuffer(LZFInputStream.java:363)
	at com.ning.compress.lzf.LZFInputStream.read(LZFInputStream.java:193)
	at com.esotericsoftware.kryo.io.Input.fill(Input.java:140)
	... 37 more




Re: spark 1.4.1 - LZFException

Posted by Yadid Ayzenberg <ya...@media.mit.edu>.
Also, I'm seeing a new type of error:

com.esotericsoftware.kryo.KryoException: com.ning.compress.lzf.LZFException: EOF in 29 byte (compressed) block: could only read 27 bytes
         at com.esotericsoftware.kryo.io.Input.fill(Input.java:142)
         at com.esotericsoftware.kryo.io.Input.require(Input.java:155)
         at com.esotericsoftware.kryo.io.Input.readInt(Input.java:337)
         at com.esotericsoftware.kryo.util.DefaultClassResolver.readClass(DefaultClassResolver.java:109)
         at com.esotericsoftware.kryo.Kryo.readClass(Kryo.java:610)
         at com.esotericsoftware.kryo.Kryo.readClassAndObject(Kryo.java:721)
         at org.apache.spark.serializer.KryoDeserializationStream.readObject(KryoSerializer.scala:182)
         at org.apache.spark.serializer.DeserializationStream.readKey(Serializer.scala:169)
         at org.apache.spark.serializer.DeserializationStream$$anon$2.getNext(Serializer.scala:200)
         at org.apache.spark.serializer.DeserializationStream$$anon$2.getNext(Serializer.scala:197)
         at org.apache.spark.util.NextIterator.hasNext(NextIterator.scala:71)
         at org.apache.spark.util.CompletionIterator.hasNext(CompletionIterator.scala:32)
         at scala.collection.Iterator$$anon$13.hasNext(Iterator.scala:371)
         at org.apache.spark.util.CompletionIterator.hasNext(CompletionIterator.scala:32)
         at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:39)
         at org.apache.spark.util.collection.ExternalAppendOnlyMap.insertAll(ExternalAppendOnlyMap.scala:127)
         at org.apache.spark.Aggregator.combineValuesByKey(Aggregator.scala:60)
         at org.apache.spark.shuffle.hash.HashShuffleReader.read(HashShuffleReader.scala:46)
         at org.apache.spark.rdd.ShuffledRDD.compute(ShuffledRDD.scala:90)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
         at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:244)
         at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:35)
         at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:277)
         at org.apache.spark.CacheManager.getOrCompute(CacheManager.scala:69)
         at org.apache.spark.rdd.RDD.iterator(RDD.scala:242)
         at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:63)
         at org.apache.spark.scheduler.Task.run(Task.scala:70)
         at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:213)
         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
         at java.lang.Thread.run(Thread.java:745)

On 9/3/15 2:25 PM, Yadid Ayzenberg wrote:
> Hi Akhil,
>
> No, it seems I have plenty of disk space available on that node.
> I looked at the logs, and one minute before that exception I see the
> following errors:
>
> 15/09/03 12:51:39 ERROR TransportChannelHandler: Connection to /x.y.z.w:44892 has been quiet for 120000 ms while there are outstanding requests. Assuming connection is dead; please adjust spark.network.timeout if this is wrong.
> 15/09/03 12:51:39 ERROR TransportResponseHandler: Still have 8 requests outstanding when connection from /18.85.28.197:44892 is closed
> 15/09/03 12:51:39 ERROR OneForOneBlockFetcher: Failed while starting block fetches
> java.io.IOException: Connection from /x.y.z.w:44892 closed
>         at org.apache.spark.network.client.TransportResponseHandler.channelUnregistered(TransportResponseHandler.java:104)
>         at org.apache.spark.network.server.TransportChannelHandler.channelUnregistered(TransportChannelHandler.java:91)
>         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelUnregistered(AbstractChannelHandlerContext.java:183)
>         at io.netty.channel.AbstractChannelHandlerContext.fireChannelUnregistered(AbstractChannelHandlerContext.java:169)
>         at io.netty.channel.ChannelInboundHandlerAdapter.channelUnregistered(ChannelInboundHandlerAdapter.java:53)
>         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelUnregistered(AbstractChannelHandlerContext.java:183)
>         at io.netty.channel.AbstractChannelHandlerContext.fireChannelUnregistered(AbstractChannelHandlerContext.java:169)
>         at io.netty.channel.ChannelInboundHandlerAdapter.channelUnregistered(ChannelInboundHandlerAdapter.java:53)
>         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelUnregistered(AbstractChannelHandlerContext.java:183)
>         at io.netty.channel.AbstractChannelHandlerContext.fireChannelUnregistered(AbstractChannelHandlerContext.java:169)
>         at io.netty.channel.ChannelInboundHandlerAdapter.channelUnregistered(ChannelInboundHandlerAdapter.java:53)
>         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelUnregistered(AbstractChannelHandlerContext.java:183)
>         at io.netty.channel.AbstractChannelHandlerContext.fireChannelUnregistered(AbstractChannelHandlerContext.java:169)
>         at io.netty.channel.DefaultChannelPipeline.fireChannelUnregistered(DefaultChannelPipeline.java:738)
>         at io.netty.channel.AbstractChannel$AbstractUnsafe$6.run(AbstractChannel.java:606)
>         at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:380)
>         at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:357)
>         at io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:116)
>         at java.lang.Thread.run(Thread.java:745)
>
> Do you think that is related to the problem?
>
> Yadid
>
> On 8/28/15 1:31 AM, Akhil Das wrote:
>> Is it filling up your disk space? Can you look a bit more in the
>> executor logs to see what's going on?
>>
>> Thanks
>> Best Regards
>>
>> On Sun, Aug 23, 2015 at 1:27 AM, Yadid Ayzenberg <yadid@media.mit.edu 
>> <ma...@media.mit.edu>> wrote:
>>
>>
>>
>>     Hi All,
>>
>>     We have a Spark standalone cluster running 1.4.1 and we are
>>     setting spark.io.compression.codec to lzf.
>>     I have a long-running interactive application which behaves
>>     normally, but after a few days I get the following exception in
>>     multiple jobs. Any ideas on what could be causing this?
>>
>>     Yadid
>>
>


Re: spark 1.4.1 - LZFException

Posted by Yadid Ayzenberg <ya...@media.mit.edu>.
Hi Akhil,

No, it seems I have plenty of disk space available on that node.
I looked at the logs, and one minute before that exception I see the 
following errors:

15/09/03 12:51:39 ERROR TransportChannelHandler: Connection to /x.y.z.w:44892 has been quiet for 120000 ms while there are outstanding requests. Assuming connection is dead; please adjust spark.network.timeout if this is wrong.
15/09/03 12:51:39 ERROR TransportResponseHandler: Still have 8 requests outstanding when connection from /18.85.28.197:44892 is closed
15/09/03 12:51:39 ERROR OneForOneBlockFetcher: Failed while starting block fetches
java.io.IOException: Connection from /x.y.z.w:44892 closed
         at org.apache.spark.network.client.TransportResponseHandler.channelUnregistered(TransportResponseHandler.java:104)
         at org.apache.spark.network.server.TransportChannelHandler.channelUnregistered(TransportChannelHandler.java:91)
         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelUnregistered(AbstractChannelHandlerContext.java:183)
         at io.netty.channel.AbstractChannelHandlerContext.fireChannelUnregistered(AbstractChannelHandlerContext.java:169)
         at io.netty.channel.ChannelInboundHandlerAdapter.channelUnregistered(ChannelInboundHandlerAdapter.java:53)
         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelUnregistered(AbstractChannelHandlerContext.java:183)
         at io.netty.channel.AbstractChannelHandlerContext.fireChannelUnregistered(AbstractChannelHandlerContext.java:169)
         at io.netty.channel.ChannelInboundHandlerAdapter.channelUnregistered(ChannelInboundHandlerAdapter.java:53)
         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelUnregistered(AbstractChannelHandlerContext.java:183)
         at io.netty.channel.AbstractChannelHandlerContext.fireChannelUnregistered(AbstractChannelHandlerContext.java:169)
         at io.netty.channel.ChannelInboundHandlerAdapter.channelUnregistered(ChannelInboundHandlerAdapter.java:53)
         at io.netty.channel.AbstractChannelHandlerContext.invokeChannelUnregistered(AbstractChannelHandlerContext.java:183)
         at io.netty.channel.AbstractChannelHandlerContext.fireChannelUnregistered(AbstractChannelHandlerContext.java:169)
         at io.netty.channel.DefaultChannelPipeline.fireChannelUnregistered(DefaultChannelPipeline.java:738)
         at io.netty.channel.AbstractChannel$AbstractUnsafe$6.run(AbstractChannel.java:606)
         at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:380)
         at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:357)
         at io.netty.util.concurrent.SingleThreadEventExecutor$2.run(SingleThreadEventExecutor.java:116)
         at java.lang.Thread.run(Thread.java:745)
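
For reference, the "quiet for 120000 ms" message corresponds to 
spark.network.timeout, whose default is 120s in 1.4.x. A minimal sketch of 
raising it (the 600s value is only an illustration, and the same key can 
equally be passed with --conf at submit time):

import org.apache.spark.SparkConf

// Raise the idle-connection timeout that TransportChannelHandler enforces
// before it declares a connection dead and drops outstanding block fetches.
val conf = new SparkConf()
  .set("spark.network.timeout", "600s")   // illustrative value; default is 120s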

Do you think that is related to the problem?

Yadid

On 8/28/15 1:31 AM, Akhil Das wrote:
> Is it filling up your disk space? Can you look a bit more in the
> executor logs to see what's going on?
>
> Thanks
> Best Regards
>
> On Sun, Aug 23, 2015 at 1:27 AM, Yadid Ayzenberg <yadid@media.mit.edu 
> <ma...@media.mit.edu>> wrote:
>
>
>
>     Hi All,
>
>     We have a Spark standalone cluster running 1.4.1 and we are
>     setting spark.io.compression.codec to lzf.
>     I have a long-running interactive application which behaves
>     normally, but after a few days I get the following exception in
>     multiple jobs. Any ideas on what could be causing this?
>
>     Yadid
>


Re: spark 1.4.1 - LZFException

Posted by Akhil Das <ak...@sigmoidanalytics.com>.
Is it filling up your disk space? Can you look a bit more in the executor
logs to see what's going on?

Thanks
Best Regards

On Sun, Aug 23, 2015 at 1:27 AM, Yadid Ayzenberg <ya...@media.mit.edu>
wrote:

>
>
> Hi All,
>
> We have a Spark standalone cluster running 1.4.1 and we are setting
> spark.io.compression.codec to lzf.
> I have a long-running interactive application which behaves normally, but
> after a few days I get the following exception in multiple jobs. Any ideas
> on what could be causing this?
>
> Yadid
>