Posted to issues@spark.apache.org by "Yimin Yang (Jira)" <ji...@apache.org> on 2021/12/23 12:00:00 UTC
[jira] [Updated] (SPARK-37728) reading nested columns with ORC vectorized reader can cause ArrayIndexOutOfBoundsException
[ https://issues.apache.org/jira/browse/SPARK-37728?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]
Yimin Yang updated SPARK-37728:
-------------------------------
Description:
When spark.sql.orc.enableNestedColumnVectorizedReader is set to true, reading nested columns of ORC files can throw an ArrayIndexOutOfBoundsException. Here is a simple reproduction:
1) Create an ORC file containing records of type Array<Array<String>>:
{code:java}
./bin/spark-shell {code}
{code:java}
case class Item(record: Array[Array[String]])

// Build 100 records, each holding 50 inner arrays of 1,000 strings.
val data = new Array[Array[Array[String]]](100)
for (i <- 0 to 99) {
  val temp = new Array[Array[String]](50)
  for (j <- 0 to 49) {
    temp(j) = new Array[String](1000)
    for (k <- 0 to 999) {
      temp(j)(k) = k.toString
    }
  }
  data(i) = temp
}

val rdd = spark.sparkContext.parallelize(data, 1)
val df = rdd.map(x => Item(x)).toDF
df.write.orc("file:///home/user_name/data") {code}
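For reference, the written file carries a doubly nested array schema. A quick check (the output below is what Spark's default schema inference for Array[Array[String]] should print, shown here as an expectation rather than a captured run):
{code:java}
scala> spark.read.orc("file:///home/user_name/data").printSchema()
root
 |-- record: array (nullable = true)
 |    |-- element: array (containsNull = true)
 |    |    |-- element: string (containsNull = true)
{code}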
2) Read the ORC file with spark.sql.orc.enableNestedColumnVectorizedReader=true:
{code:java}
./bin/spark-shell --conf spark.sql.orc.enableVectorizedReader=true --conf spark.sql.codegen.wholeStage=true --conf spark.sql.orc.enableNestedColumnVectorizedReader=true --conf spark.sql.orc.columnarReaderBatchSize=4096 {code}
{code:java}
val df = spark.read.orc("file:///home/user_name/data")
df.show(100) {code}
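For contrast, the same read with only the nested-column vectorized reader disabled is expected to succeed; this is a hedged sketch implied by the fact that the bug is gated on that flag, not a captured run:
{code:java}
// Sketch: disable only the nested-column vectorized reader in-session.
spark.conf.set("spark.sql.orc.enableNestedColumnVectorizedReader", "false")
spark.read.orc("file:///home/user_name/data").show(100)  // expected to succeed
{code}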
With the nested vectorized reader enabled, Spark instead throws an ArrayIndexOutOfBoundsException:
{code:java}
Driver stacktrace:
    at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2455)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2404)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2403)
    at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
    at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
    at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2403)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1162)
    at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1162)
    at scala.Option.foreach(Option.scala:407)
    at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1162)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2643)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2585)
    at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2574)
    at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
    at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:940)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2227)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2248)
    at org.apache.spark.SparkContext.runJob(SparkContext.scala:2267)
    at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:490)
    at org.apache.spark.sql.execution.SparkPlan.executeTake(SparkPlan.scala:443)
    at org.apache.spark.sql.execution.CollectLimitExec.executeCollect(limit.scala:48)
    at org.apache.spark.sql.Dataset.collectFromPlan(Dataset.scala:3833)
    at org.apache.spark.sql.Dataset.$anonfun$head$1(Dataset.scala:2832)
    at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3824)
    at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$6(SQLExecution.scala:109)
    at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:169)
    at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:95)
    at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:779)
    at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:64)
    at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3822)
    at org.apache.spark.sql.Dataset.head(Dataset.scala:2832)
    at org.apache.spark.sql.Dataset.take(Dataset.scala:3053)
    at org.apache.spark.sql.Dataset.getRows(Dataset.scala:288)
    at org.apache.spark.sql.Dataset.showString(Dataset.scala:327)
    at org.apache.spark.sql.Dataset.show(Dataset.scala:807)
    at org.apache.spark.sql.Dataset.show(Dataset.scala:766)
    ... 47 elided
Caused by: java.lang.ArrayIndexOutOfBoundsException: 4096
    at org.apache.spark.sql.execution.datasources.orc.OrcArrayColumnVector.getArray(OrcArrayColumnVector.java:53)
    at org.apache.spark.sql.vectorized.ColumnarArray.getArray(ColumnarArray.java:170)
    at org.apache.spark.sql.vectorized.ColumnarArray.getArray(ColumnarArray.java:31)
    at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
    at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
    at org.apache.spark.sql.execution.WholeStageCodegenExec$$anon$1.hasNext(WholeStageCodegenExec.scala:760)
    at org.apache.spark.sql.execution.SparkPlan.$anonfun$getByteArrayRdd$1(SparkPlan.scala:363)
    at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2(RDD.scala:890)
    at org.apache.spark.rdd.RDD.$anonfun$mapPartitionsInternal$2$adapted(RDD.scala:890)
    at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:52)
    at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:365)
    at org.apache.spark.rdd.RDD.iterator(RDD.scala:329)
    at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
    at org.apache.spark.scheduler.Task.run(Task.scala:136)
    at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:507)
    at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1468)
    at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:510)
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
    at java.lang.Thread.run(Thread.java:745) {code}
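Note that the failing index (4096) equals the configured spark.sql.orc.columnarReaderBatchSize. One plausible mechanism, sketched below as an illustration only (an assumption, not a confirmed root-cause analysis): in a flattened list layout, the child vector holds every inner array of the batch, so child offsets grow past the batch size, and any per-batch structure indexed by a child offset overflows:
{code:java}
// Hypothetical offset arithmetic for the repro above; not Spark source code.
val batchSize = 4096     // spark.sql.orc.columnarReaderBatchSize
val rows = 100           // top-level records in the file
val innerPerRow = 50     // inner arrays per record
// Offset of each row's first inner array inside the flattened child vector:
val offsets = Array.tabulate(rows)(row => row * innerPerRow)
println(offsets(82))           // 4100 -- already past the batch size
println(rows * innerPerRow)    // 5000 child entries in total, > 4096
{code}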
> reading nested columns with ORC vectorized reader can cause ArrayIndexOutOfBoundsException
> ------------------------------------------------------------------------------------------
>
> Key: SPARK-37728
> URL: https://issues.apache.org/jira/browse/SPARK-37728
> Project: Spark
> Issue Type: Bug
> Components: SQL
> Affects Versions: 3.0.3, 3.1.2, 3.2.0
> Reporter: Yimin Yang
> Priority: Major
>
>
--
This message was sent by Atlassian Jira
(v8.20.1#820001)
---------------------------------------------------------------------
To unsubscribe, e-mail: issues-unsubscribe@spark.apache.org
For additional commands, e-mail: issues-help@spark.apache.org