Posted to issues@carbondata.apache.org by "Jacky Li (JIRA)" <ji...@apache.org> on 2018/01/19 13:08:00 UTC

[jira] [Resolved] (CARBONDATA-2058) Streaming throws NullPointerException after batch loading

     [ https://issues.apache.org/jira/browse/CARBONDATA-2058?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Jacky Li resolved CARBONDATA-2058.
----------------------------------
       Resolution: Fixed
    Fix Version/s: 1.3.0

> Streaming throws NullPointerException after batch loading
> ---------------------------------------------------------
>
>                 Key: CARBONDATA-2058
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-2058
>             Project: CarbonData
>          Issue Type: Bug
>            Reporter: QiangCai
>            Priority: Critical
>             Fix For: 1.3.0
>
>          Time Spent: 1h
>  Remaining Estimate: 0h
>
> Driver stacktrace:
> at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1478)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1466)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1465)
> at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
> at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
> at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1465)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:813)
> at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:813)
> at scala.Option.foreach(Option.scala:257)
> at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:813)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1693)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1648)
> at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1637)
> at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
> at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:639)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:1949)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:1962)
> at org.apache.spark.SparkContext.runJob(SparkContext.scala:1982)
> at org.apache.spark.sql.execution.streaming.CarbonAppendableStreamSink$$anonfun$writeDataFileJob$1.apply$mcV$sp(CarbonAppendableStreamSink.scala:197)
> ... 20 more
> Caused by: org.apache.carbondata.streaming.CarbonStreamException: Task failed while writing rows
> at org.apache.spark.sql.execution.streaming.CarbonAppendableStreamSink$.writeDataFileTask(CarbonAppendableStreamSink.scala:295)
> at org.apache.spark.sql.execution.streaming.CarbonAppendableStreamSink$$anonfun$writeDataFileJob$1$$anonfun$apply$mcV$sp$1.apply(CarbonAppendableStreamSink.scala:199)
> at org.apache.spark.sql.execution.streaming.CarbonAppendableStreamSink$$anonfun$writeDataFileJob$1$$anonfun$apply$mcV$sp$1.apply(CarbonAppendableStreamSink.scala:198)
> at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
> at org.apache.spark.scheduler.Task.run(Task.scala:99)
> at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:322)
> at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> at java.lang.Thread.run(Thread.java:748)
> Caused by: java.lang.NullPointerException
> at org.apache.carbondata.hadoop.streaming.CarbonStreamRecordWriter.appendBlockletToDataFile(CarbonStreamRecordWriter.java:287)
> at org.apache.carbondata.hadoop.streaming.CarbonStreamRecordWriter.close(CarbonStreamRecordWriter.java:300)
> at org.apache.carbondata.streaming.segment.StreamSegment.appendBatchData(StreamSegment.java:276)
> at org.apache.spark.sql.execution.streaming.CarbonAppendableStreamSink$$anonfun$writeDataFileTask$1.apply$mcV$sp(CarbonAppendableStreamSink.scala:286)
> at org.apache.spark.sql.execution.streaming.CarbonAppendableStreamSink$$anonfun$writeDataFileTask$1.apply(CarbonAppendableStreamSink.scala:276)
> at org.apache.spark.sql.execution.streaming.CarbonAppendableStreamSink$$anonfun$writeDataFileTask$1.apply(CarbonAppendableStreamSink.scala:276)
> at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1388)
> at org.apache.spark.sql.execution.streaming.CarbonAppendableStreamSink$.writeDataFileTask(CarbonAppendableStreamSink.scala:288)
> ... 8 more
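
For context, the failure scenario named in the title (streaming ingestion started on a table that already contains a batch-loaded segment) can be sketched roughly as below. This is only an illustrative outline, not taken from the JIRA: the 'carbondata' sink format name, the dbName/tableName/checkpointLocation options and the CREATE TABLE/LOAD DATA syntax follow the CarbonData 1.3 streaming documentation, and the table name, schema, host/port and paths are assumptions made for the example.

import org.apache.spark.sql.SparkSession

// Rough reproduction sketch (assumption, not from the JIRA): do a batch load
// into a streaming table first, then start streaming ingestion into the same
// table.
object StreamingAfterBatchLoadSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("carbondata-streaming-npe-sketch")
      .getOrCreate()

    // 1) A streaming table plus an ordinary batch load, so the table already
    //    holds a columnar (batch) segment before any streaming segment exists.
    spark.sql(
      """CREATE TABLE IF NOT EXISTS stream_table (value STRING)
        |STORED BY 'carbondata'
        |TBLPROPERTIES ('streaming' = 'true')""".stripMargin)
    spark.sql(
      "LOAD DATA LOCAL INPATH '/tmp/batch_data.csv' INTO TABLE stream_table")

    // 2) Streaming ingestion into the same table. Per the stack trace above,
    //    the NPE was raised when the stream record writer was closed and tried
    //    to append its last blocklet to the streaming data file
    //    (CarbonStreamRecordWriter.appendBlockletToDataFile).
    val lines = spark.readStream
      .format("socket")
      .option("host", "localhost")
      .option("port", "9099")
      .load()

    val query = lines.writeStream
      .format("carbondata") // handled by CarbonAppendableStreamSink
      .option("checkpointLocation", "/tmp/stream_table_ckpt")
      .option("dbName", "default")
      .option("tableName", "stream_table")
      .start()

    query.awaitTermination()
  }
}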



--
This message was sent by Atlassian JIRA
(v7.6.3#76005)