Posted to issues@spark.apache.org by "Wenchen Fan (Jira)" <ji...@apache.org> on 2022/02/09 14:30:00 UTC

[jira] [Assigned] (SPARK-37969) Hive Serde insert should check schema before execution

     [ https://issues.apache.org/jira/browse/SPARK-37969?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Wenchen Fan reassigned SPARK-37969:
-----------------------------------

    Assignee: angerszhu

> Hive Serde insert should check schema before execution
> ------------------------------------------------------
>
>                 Key: SPARK-37969
>                 URL: https://issues.apache.org/jira/browse/SPARK-37969
>             Project: Spark
>          Issue Type: Task
>          Components: SQL
>    Affects Versions: 3.2.0
>            Reporter: angerszhu
>            Assignee: angerszhu
>            Priority: Major
>
> {code:java}
> [info]   Cause: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 0.0 failed 1 times, most recent failure: Lost task 0.0 in stage 0.0 (TID 0) (10.12.188.15 executor driver): java.lang.IllegalArgumentException: Error: : expected at the position 19 of 'struct<ID:bigint,IF(ID=1,ID,0):bigint,B:bigint>' but '(' is found.
> [info] 	at org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils$TypeInfoParser.expect(TypeInfoUtils.java:384)
> [info] 	at org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils$TypeInfoParser.expect(TypeInfoUtils.java:355)
> [info] 	at org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils$TypeInfoParser.parseType(TypeInfoUtils.java:507)
> [info] 	at org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils$TypeInfoParser.parseTypeInfos(TypeInfoUtils.java:329)
> [info] 	at org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils.getTypeInfosFromTypeString(TypeInfoUtils.java:814)
> [info] 	at org.apache.hadoop.hive.ql.io.orc.OrcSerde.initialize(OrcSerde.java:112)
> [info] 	at org.apache.spark.sql.hive.execution.HiveOutputWriter.<init>(HiveFileFormat.scala:122)
> [info] 	at org.apache.spark.sql.hive.execution.HiveFileFormat$$anon$1.newInstance(HiveFileFormat.scala:105)
> [info] 	at org.apache.spark.sql.execution.datasources.SingleDirectoryDataWriter.newOutputWriter(FileFormatDataWriter.scala:161)
> [info] 	at org.apache.spark.sql.execution.datasources.SingleDirectoryDataWriter.<init>(FileFormatDataWriter.scala:146)
> [info] 	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:313)
> [info] 	at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$20(FileFormatWriter.scala:252)
> [info] 	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
> [info] 	at org.apache.spark.scheduler.Task.run(Task.scala:136)
> [info] 	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:507)
> [info] 	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1475)
> [info] 	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:510)
> [info] 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> [info] 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> [info] 	at java.lang.Thread.run(Thread.java:748)
> [info]
> [info]   Cause: java.lang.IllegalArgumentException: field ended by ';': expected ';' but got 'IF' at line 2:   optional int32 (IF
> [info]   at org.apache.parquet.schema.MessageTypeParser.check(MessageTypeParser.java:239)
> [info]   at org.apache.parquet.schema.MessageTypeParser.addPrimitiveType(MessageTypeParser.java:208)
> [info]   at org.apache.parquet.schema.MessageTypeParser.addType(MessageTypeParser.java:113)
> [info]   at org.apache.parquet.schema.MessageTypeParser.addGroupTypeFields(MessageTypeParser.java:101)
> [info]   at org.apache.parquet.schema.MessageTypeParser.parse(MessageTypeParser.java:94)
> [info]   at org.apache.parquet.schema.MessageTypeParser.parseMessageType(MessageTypeParser.java:84)
> [info]   at org.apache.hadoop.hive.ql.io.parquet.write.DataWritableWriteSupport.getSchema(DataWritableWriteSupport.java:43)
> [info]   at org.apache.hadoop.hive.ql.io.parquet.write.DataWritableWriteSupport.init(DataWritableWriteSupport.java:48)
> [info]   at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:476)
> [info]   at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:430)
> [info]   at org.apache.parquet.hadoop.ParquetOutputFormat.getRecordWriter(ParquetOutputFormat.java:425)
> [info]   at org.apache.hadoop.hive.ql.io.parquet.write.ParquetRecordWriterWrapper.<init>(ParquetRecordWriterWrapper.java:70)
> [info]   at org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat.getParquerRecordWriterWrapper(MapredParquetOutputFormat.java:137)
> [info]   at org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat.getHiveRecordWriter(MapredParquetOutputFormat.java:126)
> [info]   at org.apache.hadoop.hive.ql.io.HiveFileFormatUtils.getRecordWriter(HiveFileFormatUtils.java:286)
> [info]   at org.apache.hadoop.hive.ql.io.HiveFileFormatUtils.getHiveRecordWriter(HiveFileFormatUtils.java:271)
> [info]   at org.apache.spark.sql.hive.execution.HiveOutputWriter.<init>(HiveFileFormat.scala:132)
> [info]   at org.apache.spark.sql.hive.execution.HiveFileFormat$$anon$1.newInstance(HiveFileFormat.scala:105)
> [info]   at org.apache.spark.sql.execution.datasources.SingleDirectoryDataWriter.newOutputWriter(FileFormatDataWriter.scala:161)
> [info]   at org.apache.spark.sql.execution.datasources.SingleDirectoryDataWriter.<init>(FileFormatDataWriter.scala:146)
> [info]   at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:313)
> [info]   at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$20(FileFormatWriter.scala:252)
> [info]   at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
> [info]   at org.apache.spark.scheduler.Task.run(Task.scala:136)
> [info]   at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:507)
> [info]   at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1475)
> [info]   at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:510)
> [info]   at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
> [info]   at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
> [info]   at java.lang.Thread.run(Thread.java:748)
> {code}
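>
> A minimal reproduction sketch (the table names and setup below are illustrative assumptions, not from the original report): both traces arise when a Hive serde table (ORC in the first trace, Parquet in the second) is written from a query whose output contains an unaliased expression such as {{IF(ID=1, ID, 0)}}. The expression-derived column name ends up in the Hive type string ({{struct<ID:bigint,IF(ID=1,ID,0):bigint,B:bigint>}}), which the serde's type parser rejects only at task execution time. Checking the schema before execution would surface this as an analysis-time error instead of a stage failure.
> {code:scala}
> // Hypothetical reproduction; assumes a SparkSession built with .enableHiveSupport().
> // Table names t_src and t_orc are made up for illustration.
> spark.sql("CREATE TABLE t_src (ID bigint, B bigint) USING parquet")
> spark.sql("CREATE TABLE t_orc (ID bigint, C bigint, B bigint) STORED AS ORC")
>
> // The unaliased IF(...) gives the second output column a name containing
> // parentheses. At write time Hive's TypeInfoParser fails on '(' while parsing
> // the struct type string, producing the IllegalArgumentException shown above.
> spark.sql("INSERT OVERWRITE TABLE t_orc SELECT ID, IF(ID = 1, ID, 0), B FROM t_src")
> {code}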



