Posted to issues@spark.apache.org by "Jorge Machado (JIRA)" <ji...@apache.org> on 2017/03/22 12:41:42 UTC

[jira] [Commented] (SPARK-5236) java.lang.ClassCastException: org.apache.spark.sql.catalyst.expressions.MutableAny cannot be cast to org.apache.spark.sql.catalyst.expressions.MutableInt

    [ https://issues.apache.org/jira/browse/SPARK-5236?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=15936222#comment-15936222 ] 

Jorge Machado commented on SPARK-5236:
--------------------------------------

[~marmbrus] Hi Michael, I'm experiencing the same issue. I'm building a data source for HBase with a custom schema.

{noformat}

/**
  * Iterates over the tag values of one HBase region and exposes them as Catalyst rows.
  *
  * @param hBaseRelation      the HBase relation this iterator reads for
  * @param schema             how each returned row has to look; the value returned by next() must match this schema
  * @param repositoryHistory  repository used to scan the tag values
  * @param timeZoneId         time zone id used to interpret the timestamps
  * @param tablePartitionInfo the table partition (region) to scan
  * @param from               lower bound of the scan range
  * @param to                 upper bound of the scan range
  */
class TagValueSparkIterator(val hBaseRelation: HBaseRelation,
							val schema: StructType,
							val repositoryHistory: DeviceHistoryRepository,
							val timeZoneId: String,
							val tablePartitionInfo: TablePartitionInfo,
							val from: Long,
							val to: Long) extends Iterator[InternalRow] {

	private val internalItr: ClosableIterator[TagValue[Double]] = repositoryHistory.scanTagValues(from, to, tablePartitionInfo)

	override def hasNext: Boolean = internalItr.hasNext

	override def next(): InternalRow = {
		val tagValue = internalItr.next()
		// Allocate a mutable row whose per-column holders are derived from the schema's data types.
		val mutableRow = new SpecificMutableRow(schema.fields.map(f => f.dataType))
		for (i <- schema.fields.indices) {
			updateMutableRow(i, tagValue, mutableRow, schema(i))
		}
		mutableRow
	}

	def updateMutableRow(i: Int, tagValue: TagValue[Double], row: SpecificMutableRow, field: StructField): Unit = {
		// TODO: this is ugly.
		field.name match {
			case "Date" => row.setLong(i, tagValue.getTimestamp.toLong)
			case "Device" => row.update(i, UTF8String.fromString(tagValue.getGuid))
			case "Tag" => row.update(i, UTF8String.fromString(tagValue.getTagName))
			case "TagValue" => row.setDouble(i, tagValue.getValue)
		}
	}

	override def toString(): String = {
		"Iterator for Region Name " + tablePartitionInfo.getRegionName + " Range: " + from + " until " + to
	}
}
{noformat}
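
For reference, here is a minimal sketch of an alternative next() that skips the typed setters and hands Catalyst already-converted values. It assumes the schema is (Date: TimestampType, Device: StringType, Tag: StringType, TagValue: DoubleType) and that getTimestamp returns epoch seconds; those types are an assumption on my side:

{noformat}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.unsafe.types.UTF8String

// Sketch only, under the assumptions above: TimestampType is stored
// internally as microseconds since the epoch in a Long, strings as UTF8String.
override def next(): InternalRow = {
	val tagValue = internalItr.next()
	InternalRow.fromSeq(Seq(
		tagValue.getTimestamp * 1000000L,           // epoch seconds -> microseconds
		UTF8String.fromString(tagValue.getGuid),    // Device
		UTF8String.fromString(tagValue.getTagName), // Tag
		tagValue.getValue                           // Double value
	))
}
{noformat}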

Then I get:

{noformat}
Driver stacktrace:
org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 0.0 failed 1 times, most recent failure: Lost task 0.0 in stage 0.0 (TID 0, localhost): java.lang.ClassCastException: org.apache.spark.sql.catalyst.expressions.MutableAny cannot be cast to org.apache.spark.sql.catalyst.expressions.MutableLong
	at org.apache.spark.sql.catalyst.expressions.SpecificMutableRow.getLong(SpecificMutableRow.scala:301)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$SpecificPredicate.eval(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.codegen.GeneratePredicate$$anonfun$create$2.apply(GeneratePredicate.scala:68)
	at org.apache.spark.sql.catalyst.expressions.codegen.GeneratePredicate$$anonfun$create$2.apply(GeneratePredicate.scala:68)
	at org.apache.spark.sql.execution.Filter$$anonfun$2$$anonfun$apply$2.apply(basicOperators.scala:74)
	at org.apache.spark.sql.execution.Filter$$anonfun$2$$anonfun$apply$2.apply(basicOperators.scala:72)
	at scala.collection.Iterator$$anon$14.hasNext(Iterator.scala:390)
	at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)

{noformat}
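
If I read the Spark 1.x SpecificMutableRow correctly, every column gets a specialized holder chosen from the data type passed to the constructor, and getLong/setLong simply cast that holder to MutableLong; a column whose type has no specialized holder is backed by a MutableAny, so the cast fails the moment generated code reads it as a long. A hypothetical snippet just to illustrate the cast, not taken from my data source:

{noformat}
import org.apache.spark.sql.catalyst.expressions.SpecificMutableRow
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String

// StringType has no specialized holder, so slot 0 is backed by a MutableAny;
// LongType gets a MutableLong.
val row = new SpecificMutableRow(Seq(StringType, LongType))
row.update(0, UTF8String.fromString("device-1")) // ok: MutableAny accepts any value
row.setLong(1, 42L)                              // ok: MutableLong
row.getLong(0) // throws: MutableAny cannot be cast to MutableLong
{noformat}

So it looks like the predicate generated for the filter expects a long at an ordinal where my row holds a MutableAny, i.e. the declared schema and the row I build disagree at that position.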

> java.lang.ClassCastException: org.apache.spark.sql.catalyst.expressions.MutableAny cannot be cast to org.apache.spark.sql.catalyst.expressions.MutableInt
> ---------------------------------------------------------------------------------------------------------------------------------------------------------
>
>                 Key: SPARK-5236
>                 URL: https://issues.apache.org/jira/browse/SPARK-5236
>             Project: Spark
>          Issue Type: Bug
>          Components: SQL
>            Reporter: Alex Baretta
>
> {code}
> 15/01/14 05:39:27 WARN scheduler.TaskSetManager: Lost task 0.0 in stage 18.0 (TID 28, localhost): parquet.io.ParquetDecodingException: Can not read value at 0 in block 0 in file gs://pa-truven/20141205/parquet/P/part-r-00001.parquet
>         at parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:213)
>         at parquet.hadoop.ParquetRecordReader.nextKeyValue(ParquetRecordReader.java:204)
>         at org.apache.spark.rdd.NewHadoopRDD$$anon$1.hasNext(NewHadoopRDD.scala:145)
>         at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:39)
>         at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
>         at scala.collection.Iterator$$anon$11.hasNext(Iterator.scala:327)
>         at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:308)
>         at scala.collection.Iterator$class.foreach(Iterator.scala:727)
>         at scala.collection.AbstractIterator.foreach(Iterator.scala:1157)
>         at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:48)
>         at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:103)
>         at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:47)
>         at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:273)
>         at scala.collection.AbstractIterator.to(Iterator.scala:1157)
>         at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:265)
>         at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1157)
>         at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:252)
>         at scala.collection.AbstractIterator.toArray(Iterator.scala:1157)
>         at org.apache.spark.sql.execution.Limit$$anonfun$4.apply(basicOperators.scala:141)
>         at org.apache.spark.sql.execution.Limit$$anonfun$4.apply(basicOperators.scala:141)
>         at org.apache.spark.SparkContext$$anonfun$runJob$4.apply(SparkContext.scala:1331)
>         at org.apache.spark.SparkContext$$anonfun$runJob$4.apply(SparkContext.scala:1331)
>         at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:61)
>         at org.apache.spark.scheduler.Task.run(Task.scala:56)
>         at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:183)
>         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
>         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
>         at java.lang.Thread.run(Thread.java:745)
> Caused by: java.lang.ClassCastException: org.apache.spark.sql.catalyst.expressions.MutableAny cannot be cast to org.apache.spark.sql.catalyst.expressions.MutableInt
>         at org.apache.spark.sql.catalyst.expressions.SpecificMutableRow.setInt(SpecificMutableRow.scala:241)
>         at org.apache.spark.sql.parquet.CatalystPrimitiveRowConverter.updateInt(ParquetConverter.scala:375)
>         at org.apache.spark.sql.parquet.CatalystPrimitiveConverter.addInt(ParquetConverter.scala:434)
>         at parquet.column.impl.ColumnReaderImpl$2$3.writeValue(ColumnReaderImpl.java:237)
>         at parquet.column.impl.ColumnReaderImpl.writeCurrentValueToConverter(ColumnReaderImpl.java:353)
>         at parquet.io.RecordReaderImplementation.read(RecordReaderImplementation.java:402)
>         at parquet.hadoop.InternalParquetRecordReader.nextKeyValue(InternalParquetRecordReader.java:194)
>         ... 27 more
> {code}


