Posted to users@zeppelin.apache.org by Mark Mikolajczak - 07855 306 064 <ma...@flayranalytics.co.uk> on 2016/10/16 11:03:27 UTC

Dataframe in apache spark Error java.lang.ArrayIndexOutOfBoundsException:


Hi,

I have set up a Spark DataFrame, but I run into issues when querying some of the data. Queries on META_ID work, but any query that touches the customerEvent fields fails. I am running Apache Zeppelin on EMR with Spark 1.6.2. I am not sure why this error is happening, so please let me know if you have seen it before. I was thinking that the automatically inferred schema may not be correct; should the schema be defined manually?


Schema
root
 |-- META_ID: string (nullable = true)
 |-- businessEventType: string (nullable = true)
 |-- customerEvent: struct (nullable = true)
 |    |-- customerContext: struct (nullable = true)
 |    |    |-- anonymousCustomerChanges: struct (nullable = true)
 |    |    |    |-- analyticsId: struct (nullable = true)
 |    |    |    |    |-- id: string (nullable = true)
 |    |    |    |    |-- system: string (nullable = true)

Error
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 92.0 failed 4 times, most recent failure: Lost task 0.3 in stage 92.0 (TID 137, ip-10-90-200-51.eu-west-1.compute.internal): java.lang.ArrayIndexOutOfBoundsException: 1
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$StructConverter.toCatalystImpl(CatalystTypeConverters.scala:260)
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$StructConverter.toCatalystImpl(CatalystTypeConverters.scala:250)
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:102)
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$$anonfun$createToCatalystConverter$2.apply(CatalystTypeConverters.scala:401)
    at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply(ExistingRDD.scala:59)
    at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply(ExistingRDD.scala:56)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$$anon$10.next(Iterator.scala:312)
    at scala.collection.Iterator$class.foreach(Iterator.scala:727)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
    at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
    at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
    at scala.collection.AbstractIterator.to(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
    at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
    at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$5.apply(SparkPlan.scala:212)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$5.apply(SparkPlan.scala:212)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
    at org.apache.spark.scheduler.Task.run(Task.scala:89)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:227)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1431)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1419)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1418)
    at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1418)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
    at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
    at scala.Option.foreach(Option.scala:236)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:799)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1640)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1599)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1588)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:620)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1832)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1845)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:1858)
    at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:212)
    at org.apache.spark.sql.execution.Limit.executeCollect(basicOperators.scala:165)
    at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)
    at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
    at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
    at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2086)
    at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1498)
    at org.apache.spark.sql.DataFrame.org$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1505)
    at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1375)
    at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1374)
    at org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2099)
    at org.apache.spark.sql.DataFrame.head(DataFrame.scala:1374)
    at org.apache.spark.sql.DataFrame.take(DataFrame.scala:1456)
    at org.apache.spark.sql.DataFrame.showString(DataFrame.scala:170)
    at org.apache.spark.sql.DataFrame.show(DataFrame.scala:350)
    at org.apache.spark.sql.DataFrame.show(DataFrame.scala:311)
    at org.apache.spark.sql.DataFrame.show(DataFrame.scala:319)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:43)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:48)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:50)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:52)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:54)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:56)
    at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:58)
    at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:60)
    at $iwC$$iwC$$iwC$$iwC.<init>(<console>:62)
    at $iwC$$iwC$$iwC.<init>(<console>:64)
    at $iwC$$iwC.<init>(<console>:66)
    at $iwC.<init>(<console>:68)
    at <init>(<console>:70)
    at .<init>(<console>:74)
    at .<clinit>(<console>)
    at .<init>(<console>:7)
    at .<clinit>(<console>)
    at $print(<console>)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:606)
    at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
    at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1346)
    at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
    at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
    at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
    at org.apache.zeppelin.spark.SparkInterpreter.interpretInput(SparkInterpreter.java:709)
    at org.apache.zeppelin.spark.SparkInterpreter.interpret(SparkInterpreter.java:674)
    at org.apache.zeppelin.spark.SparkInterpreter.interpret(SparkInterpreter.java:667)
    at org.apache.zeppelin.interpreter.ClassloaderInterpreter.interpret(ClassloaderInterpreter.java:57)
    at org.apache.zeppelin.interpreter.LazyOpenInterpreter.interpret(LazyOpenInterpreter.java:93)
    at org.apache.zeppelin.interpreter.remote.RemoteInterpreterServer$InterpretJob.jobRun(RemoteInterpreterServer.java:300)
    at org.apache.zeppelin.scheduler.Job.run(Job.java:169)
at org.apache.zeppelin.scheduler.FIFOScheduler$1.run(FIFOScheduler.java:134)
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
    at java.util.concurrent.FutureTask.run(FutureTask.java:262)
    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:178)
    at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:292)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
    at java.lang.Thread.run(Thread.java:745)
Caused by: java.lang.ArrayIndexOutOfBoundsException: 1
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$StructConverter.toCatalystImpl(CatalystTypeConverters.scala:260)
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$StructConverter.toCatalystImpl(CatalystTypeConverters.scala:250)
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:102)
    at org.apache.spark.sql.catalyst.CatalystTypeConverters$$anonfun$createToCatalystConverter$2.apply(CatalystTypeConverters.scala:401)
    at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply(ExistingRDD.scala:59)
    at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply(ExistingRDD.scala:56)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
    at scala.collection.Iterator$$anon$10.next(Iterator.scala:312)
    at scala.collection.Iterator$class.foreach(Iterator.scala:727)
    at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
    at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
    at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
    at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
    at scala.collection.AbstractIterator.to(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
    at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
    at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
    at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$5.apply(SparkPlan.scala:212)
    at org.apache.spark.sql.execution.SparkPlan$$anonfun$5.apply(SparkPlan.scala:212)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
    at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
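
For reference, in case the automatically inferred schema turns out to be the culprit: the nested layout from the printSchema output above can also be declared by hand with StructType. A minimal sketch covering only the fields printed above (the real documents clearly contain more fields):

import org.apache.spark.sql.types._

// Sketch of an explicit schema for just the fields shown in the printSchema output.
val analyticsIdType = StructType(Seq(
  StructField("id", StringType),
  StructField("system", StringType)))

val anonymousChangesType = StructType(Seq(
  StructField("analyticsId", analyticsIdType)))

val customerContextType = StructType(Seq(
  StructField("anonymousCustomerChanges", anonymousChangesType)))

val manualSchema = StructType(Seq(
  StructField("META_ID", StringType),
  StructField("businessEventType", StringType),
  StructField("customerEvent", StructType(Seq(
    StructField("customerContext", customerContextType))))))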





Re: Dataframe in apache spark Error java.lang.ArrayIndexOutOfBoundsException:

Posted by Mark Mikolajczak <ma...@flayranalytics.co.uk>.
Thanks Divya,

I tried that and still get the same error: array index out of bounds.

Full code:

import org.apache.spark.sql.sources.EqualTo
import com.couchbase.spark.sql._
import com.couchbase.spark.sql.N1QLRelation

val bkEvent = Map("bucket" -> "events")
val eve = sqlc.read.couchbase(schemaFilter = EqualTo("doctype","BusinessEvent"), bkEvent)

*RESULT*
bkEvent: scala.collection.immutable.Map[String,String] = Map(bucket -> events)

eve: org.apache.spark.sql.DataFrame = [META_ID: string, businessEventType:
string, customerEvent:
struct<customerContext:struct<anonymousCustomerChanges:struct<analyticsId:struct<id:string,system:string>,device:array<struct<browser:string,browserVersion:string,dateAdded:string,id:string,model:string,os:string,osVersion:string,source:string,type:string>>,doctype:string,id:string,referral:array<struct<accountOperator:string,mappedAccountId:string,referralDate:string,referralSource:string,
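
As a side note on the array fields visible in that schema string (device, referral, ...): an array-of-struct column is normally read via explode rather than a plain dotted path. A rough sketch, not tested against this data, using field names taken from the schema string above:

import org.apache.spark.sql.functions.explode

// One output row per entry of the device array (sketch only).
eve.select($"META_ID",
  explode($"customerEvent.customerContext.anonymousCustomerChanges.device").as("device"))
  .select($"META_ID", $"device.browser", $"device.os")
  .show()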

I am wondering if it is because of the way I am loading the schema. Should I be
doing this manually?
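
One way to take schema inference out of the picture entirely would be to apply an explicit schema at read time. A rough sketch only: manualSchema is assumed to be an explicit StructType like the one sketched under the first message, jsonDocs is a placeholder for an RDD[String] of the raw JSON documents, and whether the Couchbase reader itself honours a user-specified schema is not checked here:

val jsonDocs: org.apache.spark.rdd.RDD[String] = ??? // placeholder: raw JSON documents from the bucket
val eveManual = sqlc.read.schema(manualSchema).json(jsonDocs)
eveManual.select("META_ID",
  "customerEvent.customerContext.anonymousCustomerChanges.analyticsId.id").show()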

Thanks,
Mark

On Mon, Oct 17, 2016 at 7:10 AM, Divya Gehlot <di...@gmail.com>
wrote:

> Try with
> eve.select(col("id"),col("customerEvent.customerContext.anon
> ymousCustomerChanges.analyticsId.id
> <http://customerevent.customercontext.anonymouscustomerchanges.analyticsid.id/>
> ")).show
>
>
>
> This should work
>
>
> Thanks,
> Divya
>
> On 16 October 2016 at 19:59, Mark Mikolajczak - 07855 306 064 <
> mark@flayranalytics.co.uk> wrote:
>
>> Thanks Kim.
>>
>> Sorry, it was a typo as I was doing the second part from my phone (laptop battery died).
>>
>>
>> The code I was using in Zeppelin is:
>> eve.select($"id", $"customerEvent.customerContext.anonymousCustomerChanges.analyticsId.id").show
>>
>> I tried with and without the $ sign but still get the same error.
>>
>> Any advice?
>>
>>
>> On 16 Oct 2016, at 12:41, Jun Kim <i2...@gmail.com> wrote:
>>
>> Hi, Mark Mikolajczak
>>
>> I think it should be
>>
>> eve.select("id", "customerEvent.customerContext
>> .anonymousCustomerChanges.analyticsId.id
>> <http://customerevent.customercontext.anonymouscustomerchanges.analyticsid.id/>
>> ")
>>
>> You missed 'customerEvent'!
>>
>> :-)
>>
>> On 16 October 2016 (Sun) at 8:32 PM, Flayranalytics <ma...@flayranalytics.co.uk> wrote:
>>
>> Hi,
>> Sorry forgot to include the code.
>>
>> eve.select($"id",$"customerContext.anonymousCustomerChanges.
>> analyticsId.id
>> <http://customercontext.anonymouscustomerchanges.analyticsid.id/>
>> ").show()
>>
>> If I just use Id it works. I am seeing this happen when the data is an
>> array within my data.
>>
>>
>>
>> Sent from my 📲
>>
>> --
>> Taejun Kim
>>
>> Data Mining Lab.
>> School of Electrical and Computer Engineering
>> University of Seoul
>>
>>
>>
>

Re: Dataframe in apache spark Error java.lang.ArrayIndexOutOfBoundsException:

Posted by Divya Gehlot <di...@gmail.com>.
Try with
eve.select(col("id"),col("customerEvent.customerContext.
anonymousCustomerChanges.analyticsId.id
<http://customerevent.customercontext.anonymouscustomerchanges.analyticsid.id/>
")).show



This should work


Thanks,
Divya

On 16 October 2016 at 19:59, Mark Mikolajczak - 07855 306 064 <
mark@flayranalytics.co.uk> wrote:

> Thanks Kim.
>
> Sorry, it was a typo as I was doing the second part from my phone (laptop battery died).
>
>
> The code I was using in Zeppelin is:
> eve.select($"id", $"customerEvent.customerContext.anonymousCustomerChanges.analyticsId.id").show
>
> I tried with and without the $ sign but still get the same error.
>
> Any advice?
>
>
> On 16 Oct 2016, at 12:41, Jun Kim <i2...@gmail.com> wrote:
>
> Hi, Mark Mikolajczak
>
> I think it should be
>
> eve.select("id", "customerEvent.customerContext.anonymousCustomerChanges.
> analyticsId.id
> <http://customerevent.customercontext.anonymouscustomerchanges.analyticsid.id/>
> ")
>
> You missed 'customerEvent'!
>
> :-)
>
> On 16 October 2016 (Sun) at 8:32 PM, Flayranalytics <ma...@flayranalytics.co.uk> wrote:
>
> Hi,
> Sorry forgot to include the code.
>
> eve.select($"id",$"customerContext.anonymousCustomerChanges.analyticsId.id
> <http://customercontext.anonymouscustomerchanges.analyticsid.id/>").show()
>
> If I just use Id it works. I am seeing this happen when the data is an
> array within my data.
>
>
>
> Sent from my 📲
>
>
>
>
>
>
> --
> Taejun Kim
>
> Data Mining Lab.
> School of Electrical and Computer Engineering
> University of Seoul
>
>
>

Re: Dataframe in apache spark Error java.lang.ArrayIndexOutOfBoundsException:

Posted by Mark Mikolajczak - 07855 306 064 <ma...@flayranalytics.co.uk>.
Thanks Kim.

Sorry, it was a typo as I was doing the second part from my phone (laptop battery died).


The code I was using in Zeppelin is:
eve.select($"id",$"customerEvent.customerContext.anonymousCustomerChanges.analyticsId.id").show

I tried with and without the $ sign but still get the same error.

Any advice?


> On 16 Oct 2016, at 12:41, Jun Kim <i2...@gmail.com> wrote:
> 
> Hi, Mark Mikolajczak
> 
> I think it should be 
> 
> eve.select("id", "customerEvent.customerContext.anonymousCustomerChanges.analyticsId.id <http://customerevent.customercontext.anonymouscustomerchanges.analyticsid.id/>")
> 
> You missed 'customerEvent'!
> 
> :-)
> 
> On 16 October 2016 (Sun) at 8:32 PM, Flayranalytics <mark@flayranalytics.co.uk> wrote:
> Hi,
> Sorry forgot to include the code. 
> 
> eve.select($"id",$"customerContext.anonymousCustomerChanges.analyticsId.id <http://customercontext.anonymouscustomerchanges.analyticsid.id/>").show()
> 
> If I just use Id it works. I am seeing this happen when the data is an array within my data. 
> 
> 
> 
> Sent from my 📲 
> 
> On 16 Oct 2016, at 12:03, Mark Mikolajczak - 07855 306 064 <mark@flayranalytics.co.uk <ma...@flayranalytics.co.uk>> wrote:
> 
>> 
>> 
>> down vote
>>  <>favorite
>>  <http://stackoverflow.com/questions/40064621/dataframe-in-apache-spark-error-java-lang-arrayindexoutofboundsexception#>
>>  <http://stackoverflow.com/questions/40064621/dataframe-in-apache-spark-error-java-lang-arrayindexoutofboundsexception#>	
>> Hi,
>> 
>> I have setup a spark dataframe but I have issues when trying to run query on some of the data. If I run on META_ID it will work but when I try to run on any of the customerEvent it fails.  I am running apache Zeppelin in EMR with Spark 1.6.2. I not sure why this error is happening so let me know if you have seen this before? I was thinking that its possibly the automatic data frame not creating the correct schema so should this be done manually?
>> 
>> 
>> Scheme
>> root
>>  |-- META_ID: string (nullable = true)
>>  |-- businessEventType: string (nullable = true)
>>  |-- customerEvent: struct (nullable = true)
>>  |    |-- customerContext: struct (nullable = true)
>>  |    |    |-- anonymousCustomerChanges: struct (nullable = true)
>>  |    |    |    |-- analyticsId: struct (nullable = true)
>>  |    |    |    |    |-- id: string (nullable = true)
>>  |    |    |    |    |-- system: string (nullable = true)
>> 
>> Error
>> org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 92.0 failed 4 times, most recent failure: Lost task 0.3 in stage 92.0 (TID 137, ip-10-90-200-51.eu <http://ip-10-90-200-51.eu/>-west-1.compute.internal): java.lang.ArrayIndexOutOfBoundsException: 1
>>     at org.apache.spark.sql.catalyst.CatalystTypeConverters$StructConverter.toCatalystImpl(CatalystTypeConverters.scala:260)
>>     at org.apache.spark.sql.catalyst.CatalystTypeConverters$StructConverter.toCatalystImpl(CatalystTypeConverters.scala:250)
>>     at org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:102)
>>     at org.apache.spark.sql.catalyst.CatalystTypeConverters$$anonfun$createToCatalystConverter$2.apply(CatalystTypeConverters.scala:401)
>>     at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply(ExistingRDD.scala:59)
>>     at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply(ExistingRDD.scala:56)
>>     at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
>>     at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
>>     at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
>>     at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
>>     at scala.collection.Iterator$$anon$10.next(Iterator.scala:312)
>>     at scala.collection.Iterator$class.foreach(Iterator.scala:727)
>>     at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
>>     at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
>>     at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
>>     at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
>>     at scala.collection.TraversableOnce$class.to <http://class.to/>(TraversableOnce.scala:273)
>>     at scala.collection.AbstractIterator.to <http://scala.collection.abstractiterator.to/>(Iterator.scala:1157)
>>     at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
>>     at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
>>     at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
>>     at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
>>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$5.apply(SparkPlan.scala:212)
>>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$5.apply(SparkPlan.scala:212)
>>     at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
>>     at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
>>     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
>>     at org.apache.spark.scheduler.Task.run(Task.scala:89)
>>     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:227)
>>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>>     at java.lang.Thread.run(Thread.java:745)
>> Driver stacktrace:
>>     at org.apache.spark.scheduler.DAGScheduler.org <http://org.apache.spark.scheduler.dagscheduler.org/>$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1431)
>>     at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1419)
>>     at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1418)
>>     at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
>>     at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:47)
>>     at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1418)
>>     at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
>>     at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:799)
>>     at scala.Option.foreach(Option.scala:236)
>>     at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:799)
>>     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1640)
>>     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1599)
>>     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1588)
>>     at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
>>     at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:620)
>>     at org.apache.spark.SparkContext.runJob(SparkContext.scala:1832)
>>     at org.apache.spark.SparkContext.runJob(SparkContext.scala:1845)
>>     at org.apache.spark.SparkContext.runJob(SparkContext.scala:1858)
>>     at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:212)
>>     at org.apache.spark.sql.execution.Limit.executeCollect(basicOperators.scala:165)
>>     at org.apache.spark.sql.execution.SparkPlan.executeCollectPublic(SparkPlan.scala:174)
>>     at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
>>     at org.apache.spark.sql.DataFrame$$anonfun$org$apache$spark$sql$DataFrame$$execute$1$1.apply(DataFrame.scala:1499)
>>     at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:56)
>>     at org.apache.spark.sql.DataFrame.withNewExecutionId(DataFrame.scala:2086)
>>     at org.apache.spark.sql.DataFrame.org <http://org.apache.spark.sql.dataframe.org/>$apache$spark$sql$DataFrame$$execute$1(DataFrame.scala:1498)
>>     at org.apache.spark.sql.DataFrame.org <http://org.apache.spark.sql.dataframe.org/>$apache$spark$sql$DataFrame$$collect(DataFrame.scala:1505)
>>     at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1375)
>>     at org.apache.spark.sql.DataFrame$$anonfun$head$1.apply(DataFrame.scala:1374)
>>     at org.apache.spark.sql.DataFrame.withCallback(DataFrame.scala:2099)
>>     at org.apache.spark.sql.DataFrame.head(DataFrame.scala:1374)
>>     at org.apache.spark.sql.DataFrame.take(DataFrame.scala:1456)
>>     at org.apache.spark.sql.DataFrame.showString(DataFrame.scala:170)
>>     at org.apache.spark.sql.DataFrame.show(DataFrame.scala:350)
>>     at org.apache.spark.sql.DataFrame.show(DataFrame.scala:311)
>>     at org.apache.spark.sql.DataFrame.show(DataFrame.scala:319)
>>     at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:43)
>>     at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:48)
>>     at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:50)
>>     at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:52)
>>     at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:54)
>>     at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:56)
>>     at $iwC$$iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:58)
>>     at $iwC$$iwC$$iwC$$iwC$$iwC.<init>(<console>:60)
>>     at $iwC$$iwC$$iwC$$iwC.<init>(<console>:62)
>>     at $iwC$$iwC$$iwC.<init>(<console>:64)
>>     at $iwC$$iwC.<init>(<console>:66)
>>     at $iwC.<init>(<console>:68)
>>     at <init>(<console>:70)
>>     at .<init>(<console>:74)
>>     at .<clinit>(<console>)
>>     at .<init>(<console>:7)
>>     at .<clinit>(<console>)
>>     at $print(<console>)
>>     at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
>>     at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
>>     at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
>>     at java.lang.reflect.Method.invoke(Method.java:606)
>>     at org.apache.spark.repl.SparkIMain$ReadEvalPrint.call(SparkIMain.scala:1065)
>>     at org.apache.spark.repl.SparkIMain$Request.loadAndRun(SparkIMain.scala:1346)
>>     at org.apache.spark.repl.SparkIMain.loadAndRunReq$1(SparkIMain.scala:840)
>>     at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:871)
>>     at org.apache.spark.repl.SparkIMain.interpret(SparkIMain.scala:819)
>>     at org.apache.zeppelin.spark.SparkInterpreter.interpretInput(SparkInterpreter.java:709)
>>     at org.apache.zeppelin.spark.SparkInterpreter.interpret(SparkInterpreter.java:674)
>>     at org.apache.zeppelin.spark.SparkInterpreter.interpret(SparkInterpreter.java:667)
>>     at org.apache.zeppelin.interpreter.ClassloaderInterpreter.interpret(ClassloaderInterpreter.java:57)
>>     at org.apache.zeppelin.interpreter.LazyOpenInterpreter.interpret(LazyOpenInterpreter.java:93)
>>     at org.apache.zeppelin.interpreter.remote.RemoteInterpreterServer$InterpretJob.jobRun(RemoteInterpreterServer.java:300)
>>     at org.apache.zeppelin.scheduler.Job.run(Job.java:169)
>> at org.apache.zeppelin.scheduler.FIFOScheduler$1.run(FIFOScheduler.java:134)
>>     at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471)
>>     at java.util.concurrent.FutureTask.run(FutureTask.java:262)
>>     at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$201(ScheduledThreadPoolExecutor.java:178)
>>     at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:292)
>>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>>     at java.lang.Thread.run(Thread.java:745)
>> Caused by: java.lang.ArrayIndexOutOfBoundsException: 1
>>     at org.apache.spark.sql.catalyst.CatalystTypeConverters$StructConverter.toCatalystImpl(CatalystTypeConverters.scala:260)
>>     at org.apache.spark.sql.catalyst.CatalystTypeConverters$StructConverter.toCatalystImpl(CatalystTypeConverters.scala:250)
>>     at org.apache.spark.sql.catalyst.CatalystTypeConverters$CatalystTypeConverter.toCatalyst(CatalystTypeConverters.scala:102)
>>     at org.apache.spark.sql.catalyst.CatalystTypeConverters$$anonfun$createToCatalystConverter$2.apply(CatalystTypeConverters.scala:401)
>>     at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply(ExistingRDD.scala:59)
>>     at org.apache.spark.sql.execution.RDDConversions$$anonfun$rowToRowRdd$1$$anonfun$apply$2.apply(ExistingRDD.scala:56)
>>     at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
>>     at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
>>     at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
>>     at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
>>     at scala.collection.Iterator$$anon$10.next(Iterator.scala:312)
>>     at scala.collection.Iterator$class.foreach(Iterator.scala:727)
>>     at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
>>     at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
>>     at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
>>     at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
>>     at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
>>     at scala.collection.AbstractIterator.to(Iterator.scala:1157)
>>     at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
>>     at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
>>     at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
>>     at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
>>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$5.apply(SparkPlan.scala:212)
>>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$5.apply(SparkPlan.scala:212)
>>     at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
>>     at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:1858)
>>     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:66)
>> 
>> 
>> 
>> 
> 
> -- 
> Taejun Kim
> 
> Data Mining Lab.
> School of Electrical and Computer Engineering
> University of Seoul


Re: Dataframe in apache spark Error java.lang.ArrayIndexOutOfBoundsException:

Posted by Jun Kim <i2...@gmail.com>.
Hi, Mark Mikolajczak

I think it should be

eve.select("id", "
customerEvent.customerContext.anonymousCustomerChanges.analyticsId.id")

You missed 'customerEvent'!

:-)
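
For what it's worth, here is that fix spelled out against the snippet you sent, as a minimal sketch (it assumes the DataFrame is still called eve and uses META_ID as the top-level id column from the schema you printed):

import sqlContext.implicits._   // normally already in scope in Zeppelin / spark-shell

// Spell out the full path from the root of the row:
// customerEvent -> customerContext -> anonymousCustomerChanges -> analyticsId -> id
eve.select(
  $"META_ID",
  $"customerEvent.customerContext.anonymousCustomerChanges.analyticsId.id".as("analyticsId")
).show()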

On Sun, 16 Oct 2016 at 20:32, Flayranalytics <ma...@flayranalytics.co.uk> wrote:

Hi,
Sorry, I forgot to include the code.

eve.select($"id",$"customerContext.anonymousCustomerChanges.analyticsId.id
").show()

If I just use the id it works. I am seeing this happen when the field is an
array within my data.



Sent from my 📲


Re: Dataframe in apache spark Error java.lang.ArrayIndexOutOfBoundsException:

Posted by Flayranalytics <ma...@flayranalytics.co.uk>.
Hi,
Sorry, I forgot to include the code.

eve.select($"id",$"customerContext.anonymousCustomerChanges.analyticsId.id").show()

If I just use the id it works. I am seeing this happen when the field is an array within my data.
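
For what it's worth, if one of those levels really is an array of structs in the raw records (printSchema showed plain structs, so this is only a guess), I believe the usual pattern is to explode the array first and then select the nested fields from each element. A rough sketch, assuming purely for illustration that anonymousCustomerChanges is the array-typed level:

import sqlContext.implicits._                    // normally already in scope in Zeppelin
import org.apache.spark.sql.functions.explode

// One row per array element, keeping the top-level id alongside it
val changes = eve.select(
  $"META_ID",
  explode($"customerEvent.customerContext.anonymousCustomerChanges").as("change"))

// Nested fields can then be read off the exploded struct
changes.select($"META_ID", $"change.analyticsId.id").show()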



Sent from my 📲 


Re: Dataframe in apache spark Error java.lang.ArrayIndexOutOfBoundsException:

Posted by Divya Gehlot <di...@gmail.com>.
http://stackoverflow.com/questions/33864389/how-can-i-create-a-spark-dataframe-from-a-nested-array-of-struct-element
This is similar to your use case
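
If you do end up defining the schema by hand instead of relying on inference, a rough sketch that just mirrors the printSchema output from your mail would be (only a sketch; if some level is really an array in the raw JSON, wrap that StructType in ArrayType):

import org.apache.spark.sql.types._

val analyticsId = StructType(Seq(
  StructField("id", StringType, nullable = true),
  StructField("system", StringType, nullable = true)))

val anonymousCustomerChanges = StructType(Seq(
  StructField("analyticsId", analyticsId, nullable = true)))

val customerContext = StructType(Seq(
  StructField("anonymousCustomerChanges", anonymousCustomerChanges, nullable = true)))

val customerEvent = StructType(Seq(
  StructField("customerContext", customerContext, nullable = true)))

val schema = StructType(Seq(
  StructField("META_ID", StringType, nullable = true),
  StructField("businessEventType", StringType, nullable = true),
  StructField("customerEvent", customerEvent, nullable = true)))

// The schema can then be applied explicitly instead of inferred
// ("path/to/events" below is just a placeholder):
// val eve = sqlContext.read.schema(schema).json("path/to/events")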

Hope this helps


Thanks,
Divya
