Posted to issues@spark.apache.org by "Hyukjin Kwon (JIRA)" <ji...@apache.org> on 2016/03/09 02:33:40 UTC

[jira] [Commented] (SPARK-13719) Bad JSON record raises java.lang.ClassCastException

    [ https://issues.apache.org/jira/browse/SPARK-13719?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15186276#comment-15186276 ] 

Hyukjin Kwon commented on SPARK-13719:
--------------------------------------

[~rxin] Actually, shouldn't we also support modes such as {{DROPMALFORMED}} or {{PERMISSIVE}} for the JSON data source?
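
For instance, a parse-mode option on the JSON reader could let users drop or null-out records whose structure does not match the schema, instead of failing the whole job. A minimal sketch of what that could look like for the snippet below, assuming a hypothetical {{mode}} option (the name and values are borrowed from the CSV data source's parse modes and do not exist for JSON as of 1.6):

{noformat}
// Hypothetical API sketch: a "mode" option on the JSON reader, analogous
// to the CSV data source's parse modes. Not an existing option for JSON.
val df = sqlContext.read
  .schema(schema)
  .option("mode", "DROPMALFORMED") // drop records that do not match the schema
  .json(rdd)

// Under DROPMALFORMED semantics the bad record would simply be skipped,
// so the query below would return 0 instead of throwing ClassCastException.
df.select("request.user.id")
  .filter($"id".isNotNull)
  .count()
{noformat}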

> Bad JSON record raises java.lang.ClassCastException
> ----------------------------------------------------
>
>                 Key: SPARK-13719
>                 URL: https://issues.apache.org/jira/browse/SPARK-13719
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>    Affects Versions: 1.5.2, 1.6.0
>         Environment: OS X, Linux
>            Reporter: dmtran
>            Priority: Minor
>
> I have defined a JSON schema, using org.apache.spark.sql.types.StructType, that expects this kind of record:
> {noformat}
> {
>   "request": {
>     "user": {
>       "id": 123
>     }
>   }
> }
> {noformat}
> There's a bad record in my dataset that defines the field "user" as an array instead of a JSON object:
> {noformat}
> {
>   "request": {
>     "user": []
>   }
> }
> {noformat}
> The following exception is raised because of that bad record:
> {noformat}
> Exception in thread "main" org.apache.spark.SparkException: Job aborted due to stage failure: Task 7 in stage 0.0 failed 4 times, most recent failure: Lost task 7.3 in stage 0.0 (TID 10, 192.168.1.170): java.lang.ClassCastException: org.apache.spark.sql.types.GenericArrayData cannot be cast to org.apache.spark.sql.catalyst.InternalRow
> 	at org.apache.spark.sql.catalyst.expressions.BaseGenericInternalRow$class.getStruct(rows.scala:50)
> 	at org.apache.spark.sql.catalyst.expressions.GenericMutableRow.getStruct(rows.scala:247)
> 	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$SpecificPredicate.eval(Unknown Source)
> 	at org.apache.spark.sql.catalyst.expressions.codegen.GeneratePredicate$$anonfun$create$2.apply(GeneratePredicate.scala:67)
> 	at org.apache.spark.sql.catalyst.expressions.codegen.GeneratePredicate$$anonfun$create$2.apply(GeneratePredicate.scala:67)
> 	at org.apache.spark.sql.execution.Filter$$anonfun$4$$anonfun$apply$4.apply(basicOperators.scala:117)
> 	at org.apache.spark.sql.execution.Filter$$anonfun$4$$anonfun$apply$4.apply(basicOperators.scala:115)
> 	at scala.collection.Iterator$$anon$14.hasNext(Iterator.scala:390)
> 	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
> 	at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1.org$apache$spark$sql$execution$aggregate$TungstenAggregate$$anonfun$$executePartition$1(TungstenAggregate.scala:97)
> 	at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1$$anonfun$2.apply(TungstenAggregate.scala:119)
> 	at org.apache.spark.sql.execution.aggregate.TungstenAggregate$$anonfun$doExecute$1$$anonfun$2.apply(TungstenAggregate.scala:119)
> 	at org.apache.spark.rdd.MapPartitionsWithPreparationRDD.compute(MapPartitionsWithPreparationRDD.scala:64)
> 	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:300)
> 	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
> 	at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
> 	at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:300)
> 	at org.apache.spark.rdd.RDD.iterator(RDD.scala:264)
> 	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
> 	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
> 	at org.apache.spark.scheduler.Task.run(Task.scala:88)
> 	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:214)
> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
> 	at java.lang.Thread.run(Thread.java:745)
> {noformat}
> Here's a code snippet that reproduces the exception:
> {noformat}
> import org.apache.spark.SparkContext
> import org.apache.spark.rdd.RDD
> import org.apache.spark.sql.{SQLContext, DataFrame}
> import org.apache.spark.sql.hive.HiveContext
> import org.apache.spark.sql.types.{StringType, StructField, StructType}
>
> object Snippet {
>   def main(args: Array[String]): Unit = {
>     val sc = new SparkContext()
>     implicit val sqlContext = new HiveContext(sc)
>
>     // Parse the single bad record using the expected schema
>     val rdd: RDD[String] = sc.parallelize(Seq(badRecord))
>     val df: DataFrame = sqlContext.read.schema(schema).json(rdd)
>
>     import sqlContext.implicits._
>
>     // Running an action on the nested field raises the ClassCastException
>     df.select("request.user.id")
>       .filter($"id".isNotNull)
>       .count()
>   }
>
>   val badRecord =
>     s"""{
>         |  "request": {
>         |    "user": []
>         |  }
>         |}""".stripMargin.replaceAll("\n", " ") // Convert the multiline string to a signe line string
>   val schema =
>     StructType(
>       StructField("request", StructType(
>         StructField("user", StructType(
>           StructField("id", StringType) :: Nil
>         )) :: Nil
>       )) :: Nil
>     )
> }
> {noformat}


