Posted to commits@hudi.apache.org by "Balaji Balasubramaniam (Jira)" <ji...@apache.org> on 2020/11/20 23:54:00 UTC

[jira] [Commented] (HUDI-874) Schema evolution does not work with AWS Glue catalog

    [ https://issues.apache.org/jira/browse/HUDI-874?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17236528#comment-17236528 ] 

Balaji Balasubramaniam commented on HUDI-874:
---------------------------------------------

[~uditme] [~vbalaji]

We are using AWS EMR 6.1.0, and I am able to reproduce the same issue as well. Any time a new partition is created, the write fails during Hive sync.
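
For context, our write path looks roughly like the sketch below (database, table, field, and path names are placeholders, not our real ones, and df is the incoming DataFrame):

    // Rough sketch of the Hudi upsert that triggers the Hive sync; all names are placeholders
    import org.apache.spark.sql.SaveMode

    df.write.format("hudi").
      option("hoodie.table.name", "xxxx").
      option("hoodie.datasource.write.recordkey.field", "record_key").
      option("hoodie.datasource.write.precombine.field", "ts").
      option("hoodie.datasource.write.partitionpath.field", "partition_col").
      option("hoodie.datasource.write.operation", "upsert").
      option("hoodie.datasource.hive_sync.enable", "true").
      option("hoodie.datasource.hive_sync.database", "xxxx").
      option("hoodie.datasource.hive_sync.table", "xxxx").
      option("hoodie.datasource.hive_sync.partition_fields", "partition_col").
      mode(SaveMode.Append).
      save("s3://xxxx/xxxx")

The sync step then fails with: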

 

org.apache.hudi.hive.HoodieHiveSyncException: Failed in executing SQL ALTER TABLE `xxxx`.`xxxx` REPLACE COLUMNS(`_hoodie_commit_time` string, `_hoodie_commit_seqno` string, `_hoodie_record_key` string, `_hoodie_partition_path` string, `_hoodie_file_name` string, `xx` string, `xxxx` int, `xxxx` int, `xxxx` string, `xxxx` bigint ) cascade
 at org.apache.hudi.hive.HoodieHiveClient.updateHiveSQL(HoodieHiveClient.java:482)
 at org.apache.hudi.hive.HoodieHiveClient.updateTableDefinition(HoodieHiveClient.java:261)
 at org.apache.hudi.hive.HiveSyncTool.syncSchema(HiveSyncTool.java:164)
 at org.apache.hudi.hive.HiveSyncTool.syncHoodieTable(HiveSyncTool.java:114)
 at org.apache.hudi.hive.HiveSyncTool.syncHoodieTable(HiveSyncTool.java:87)
 at org.apache.hudi.HoodieSparkSqlWriter$.syncHive(HoodieSparkSqlWriter.scala:229)
 at org.apache.hudi.HoodieSparkSqlWriter$.checkWriteStatus(HoodieSparkSqlWriter.scala:279)
 at org.apache.hudi.HoodieSparkSqlWriter$.write(HoodieSparkSqlWriter.scala:184)
 at org.apache.hudi.DefaultSource.createRelation(DefaultSource.scala:108)
 at org.apache.spark.sql.execution.datasources.SaveIntoDataSourceCommand.run(SaveIntoDataSourceCommand.scala:46)
 at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:70)
 at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:68)
 at org.apache.spark.sql.execution.command.ExecutedCommandExec.doExecute(commands.scala:90)
 at org.apache.spark.sql.execution.SparkPlan.$anonfun$execute$1(SparkPlan.scala:180)
 at org.apache.spark.sql.execution.SparkPlan.$anonfun$executeQuery$1(SparkPlan.scala:218)
 at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
 at org.apache.spark.sql.execution.SparkPlan.executeQuery(SparkPlan.scala:215)
 at org.apache.spark.sql.execution.SparkPlan.execute(SparkPlan.scala:176)
 at org.apache.spark.sql.execution.QueryExecution.toRdd$lzycompute(QueryExecution.scala:124)
 at org.apache.spark.sql.execution.QueryExecution.toRdd(QueryExecution.scala:123)
 at org.apache.spark.sql.DataFrameWriter.$anonfun$runCommand$1(DataFrameWriter.scala:944)
 at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:106)
 at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:207)
 at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:88)
 at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:763)
 at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:65)
 at org.apache.spark.sql.DataFrameWriter.runCommand(DataFrameWriter.scala:944)
 at org.apache.spark.sql.DataFrameWriter.saveToV1Source(DataFrameWriter.scala:396)
 at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:380)
 at org.apache.spark.sql.DataFrameWriter.save(DataFrameWriter.scala:269)
 at $line39.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:37)
 at $line39.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:41)
 at $line39.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:43)
 at $line39.$read$$iw$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:45)
 at $line39.$read$$iw$$iw$$iw$$iw$$iw$$iw.<init>(<console>:47)
 at $line39.$read$$iw$$iw$$iw$$iw$$iw.<init>(<console>:49)
 at $line39.$read$$iw$$iw$$iw$$iw.<init>(<console>:51)
 at $line39.$read$$iw$$iw$$iw.<init>(<console>:53)
 at $line39.$read$$iw$$iw.<init>(<console>:55)
 at $line39.$read$$iw.<init>(<console>:57)
 at $line39.$read.<init>(<console>:59)
 at $line39.$read$.<init>(<console>:63)
 at $line39.$read$.<clinit>(<console>)
 at $line39.$eval$.$print$lzycompute(<console>:7)
 at $line39.$eval$.$print(<console>:6)
 at $line39.$eval.$print(<console>)
 at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
 at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
 at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
 at java.lang.reflect.Method.invoke(Method.java:498)
 at scala.tools.nsc.interpreter.IMain$ReadEvalPrint.call(IMain.scala:745)
 at scala.tools.nsc.interpreter.IMain$Request.loadAndRun(IMain.scala:1021)
 at scala.tools.nsc.interpreter.IMain.$anonfun$interpret$1(IMain.scala:574)
 at scala.reflect.internal.util.ScalaClassLoader.asContext(ScalaClassLoader.scala:41)
 at scala.reflect.internal.util.ScalaClassLoader.asContext$(ScalaClassLoader.scala:37)
 at scala.reflect.internal.util.AbstractFileClassLoader.asContext(AbstractFileClassLoader.scala:41)
 at scala.tools.nsc.interpreter.IMain.loadAndRunReq$1(IMain.scala:573)
 at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:600)
 at scala.tools.nsc.interpreter.IMain.interpret(IMain.scala:570)
 at scala.tools.nsc.interpreter.ILoop.interpretStartingWith(ILoop.scala:894)
 at scala.tools.nsc.interpreter.ILoop.command(ILoop.scala:762)
 at scala.tools.nsc.interpreter.ILoop.processLine(ILoop.scala:464)
 at scala.tools.nsc.interpreter.ILoop.loop(ILoop.scala:485)
 at org.apache.spark.repl.SparkILoop.process(SparkILoop.scala:239)
 at org.apache.spark.repl.Main$.doMain(Main.scala:78)
 at org.apache.spark.repl.Main$.main(Main.scala:58)
 at org.apache.spark.repl.Main.main(Main.scala)
 at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
 at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
 at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
 at java.lang.reflect.Method.invoke(Method.java:498)
 at org.apache.spark.deploy.JavaMainApplication.start(SparkApplication.scala:52)
 at org.apache.spark.deploy.SparkSubmit.org$apache$spark$deploy$SparkSubmit$$runMain(SparkSubmit.scala:936)
 at org.apache.spark.deploy.SparkSubmit.doRunMain$1(SparkSubmit.scala:180)
 at org.apache.spark.deploy.SparkSubmit.submit(SparkSubmit.scala:203)
 at org.apache.spark.deploy.SparkSubmit.doSubmit(SparkSubmit.scala:90)
 at org.apache.spark.deploy.SparkSubmit$$anon$2.doSubmit(SparkSubmit.scala:1015)
 at org.apache.spark.deploy.SparkSubmit$.main(SparkSubmit.scala:1024)
 at org.apache.spark.deploy.SparkSubmit.main(SparkSubmit.scala)
Caused by: java.sql.SQLException: Error while processing statement: FAILED: Hive Internal Error: org.apache.hadoop.hive.ql.metadata.HiveException(Error while invoking FailureHook. hooks: java.lang.NullPointerException
 at org.apache.hadoop.hive.ql.reexec.ReExecutionOverlayPlugin$LocalHook.run(ReExecutionOverlayPlugin.java:45)
 at org.apache.hadoop.hive.ql.HookRunner.invokeGeneralHook(HookRunner.java:296)
 at org.apache.hadoop.hive.ql.HookRunner.runFailureHooks(HookRunner.java:283)
 at org.apache.hadoop.hive.ql.Driver.invokeFailureHooks(Driver.java:2616)
 at org.apache.hadoop.hive.ql.Driver.execute(Driver.java:2386)
 at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:2011)
 at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1709)
 at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1703)
 at org.apache.hadoop.hive.ql.reexec.ReExecDriver.run(ReExecDriver.java:157)
 at org.apache.hive.service.cli.operation.SQLOperation.runQuery(SQLOperation.java:224)
 at org.apache.hive.service.cli.operation.SQLOperation.access$700(SQLOperation.java:87)
 at org.apache.hive.service.cli.operation.SQLOperation$BackgroundWork$1.run(SQLOperation.java:316)
 at java.security.AccessController.doPrivileged(Native Method)
 at javax.security.auth.Subject.doAs(Subject.java:422)
 at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1730)
 at org.apache.hive.service.cli.operation.SQLOperation$BackgroundWork.run(SQLOperation.java:330)
 at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
 at java.util.concurrent.FutureTask.run(FutureTask.java:266)
 at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
 at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
 at java.lang.Thread.run(Thread.java:748)
)
 at org.apache.hive.jdbc.HiveStatement.waitForOperationToComplete(HiveStatement.java:385)
 at org.apache.hive.jdbc.HiveStatement.execute(HiveStatement.java:254)
 at org.apache.hudi.hive.HoodieHiveClient.updateHiveSQL(HoodieHiveClient.java:480)
 ... 78 more
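
Since it is specifically the cascade clause that Glue rejects, one workaround we are considering (just a sketch, not verified against Glue) is to apply the column addition to the catalog table manually with a plain ALTER TABLE ... ADD COLUMNS, which does not involve cascade, and then re-run the write:

    // Untested sketch: apply the schema change to the Glue table ourselves (no cascade);
    // database, table, and column names below are placeholders
    spark.sql("ALTER TABLE xxxx.xxxx ADD COLUMNS (new_col string)")

The idea is that if the catalog schema already matches the incoming data, HiveSyncTool.syncSchema should have no difference to reconcile and would skip the REPLACE COLUMNS ... cascade statement, but we have not verified this end to end.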

> Schema evolution does not work with AWS Glue catalog
> ----------------------------------------------------
>
>                 Key: HUDI-874
>                 URL: https://issues.apache.org/jira/browse/HUDI-874
>             Project: Apache Hudi
>          Issue Type: Improvement
>          Components: Hive Integration
>            Reporter: Udit Mehrotra
>            Priority: Major
>
> This issue has been discussed here [https://github.com/apache/incubator-hudi/issues/1581] and in other places as well. The Glue catalog currently does not support *cascade* for *ALTER TABLE* statements. As a result, features like adding new columns to an existing table do not work with the Glue catalog.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)