Posted to issues@carbondata.apache.org by "PURUJIT CHAUGULE (Jira)" <ji...@apache.org> on 2022/06/24 12:10:00 UTC

[jira] [Created] (CARBONDATA-4343) Insert & Insert Overwrite from another Partition Table Fail for Partition Table after Alter Table Set Table Properties with SORT_COLUMNS as partition column AND SORT_SCOPE as LOCAL_SORT.

PURUJIT CHAUGULE created CARBONDATA-4343:
--------------------------------------------

             Summary: Insert & Insert Overwrite from another Partition Table Fail for Partition Table after Alter Table Set Table Properties with SORT_COLUMNS as partition column AND SORT_SCOPE as LOCAL_SORT.
                 Key: CARBONDATA-4343
                 URL: https://issues.apache.org/jira/browse/CARBONDATA-4343
             Project: CarbonData
          Issue Type: Bug
          Components: data-load
    Affects Versions: 2.1.1, 2.2.0, 2.1.0
            Reporter: PURUJIT CHAUGULE


*Scenario 1:* Insert into a partition table from another partition table fails after Alter Table Set Table Properties _with SORT_COLUMNS as the partition column and SORT_SCOPE as LOCAL_SORT._

*Steps:*

drop table if exists uniqdata_part;

CREATE TABLE uniqdata_part(CUST_NAME string,ACTIVE_EMUI_VERSION string, DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10), DECIMAL_COLUMN2 decimal(36,10),Double_COLUMN1 double, Double_COLUMN2 double, INTEGER_COLUMN1 int) PARTITIONED BY(CUST_ID int) STORED AS carbondata;

drop table if exists uniqdata_part1;

CREATE TABLE uniqdata_part1(CUST_NAME string, ACTIVE_EMUI_VERSION string, DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(36,10), DECIMAL_COLUMN2 decimal(36,10),Double_COLUMN1 double, Double_COLUMN2 double, INTEGER_COLUMN1 int) PARTITIONED BY (CUST_ID int) STORED AS carbondata;

LOAD DATA INPATH 'hdfs://hacluster/chetan/2000_UniqData.csv' into table uniqdata_part1 PARTITION (CUST_ID='9001') OPTIONS ('FILEHEADER'='CUST_ID,CUST_NAME,ACTIVE_EMUI_VERSION,DOB,DOJ, BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1, Double_COLUMN2,INTEGER_COLUMN1','BAD_RECORDS_ACTION'='FORCE');

alter table uniqdata_part SET TBLPROPERTIES('SORT_COLUMNS'='CUST_ID','SORT_SCOPE'='LOCAL_SORT');

insert into uniqdata_part PARTITION (CUST_ID='9001') select CUST_NAME, ACTIVE_EMUI_VERSION, DOB, DOJ, BIGINT_COLUMN1, BIGINT_COLUMN2, DECIMAL_COLUMN1, DECIMAL_COLUMN2, Double_COLUMN1, Double_COLUMN2, INTEGER_COLUMN1 from uniqdata_part1;

*Error:* 

Error: org.apache.hive.service.cli.HiveSQLException: Error running query: org.apache.spark.SparkException: Job aborted.
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:387)
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.$anonfun$run$3(SparkExecuteStatementOperation.scala:276)
        at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
        at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
        at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:276)
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:263)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1761)
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2.run(SparkExecuteStatementOperation.scala:290)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.spark.SparkException: Job aborted.
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:273)
        at org.apache.spark.sql.execution.command.management.CarbonInsertIntoHadoopFsRelationCommand.run(CarbonInsertIntoHadoopFsRelationCommand.scala:168)
        at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:109)
        at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:107)
        at org.apache.spark.sql.execution.command.DataWritingCommandExec.executeCollect(commands.scala:121)
        at org.apache.spark.sql.Dataset.$anonfun$logicalPlan$1(Dataset.scala:232)
        at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3709)
        at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:111)
        at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:173)
        at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:94)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:780)
        at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:68)
        at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3707)
        at org.apache.spark.sql.Dataset.<init>(Dataset.scala:232)
        at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:92)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:780)
        at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:89)
        at org.apache.spark.sql.execution.command.management.CommonLoadUtils$.loadDataWithPartition(CommonLoadUtils.scala:1088)
        at org.apache.spark.sql.execution.command.management.CarbonInsertIntoCommand.insertData(CarbonInsertIntoCommand.scala:479)
        at org.apache.spark.sql.execution.command.management.CarbonInsertIntoCommand.processData(CarbonInsertIntoCommand.scala:266)
        at org.apache.spark.sql.execution.command.AtomicRunnableCommand.$anonfun$run$3(package.scala:162)
        at org.apache.spark.sql.execution.command.Auditable.runWithAudit(package.scala:118)
        at org.apache.spark.sql.execution.command.Auditable.runWithAudit$(package.scala:114)
        at org.apache.spark.sql.execution.command.AtomicRunnableCommand.runWithAudit(package.scala:155)
        at org.apache.spark.sql.execution.command.AtomicRunnableCommand.run(package.scala:168)
        at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:71)
        at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:69)
        at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:80)
        at org.apache.spark.sql.Dataset.$anonfun$logicalPlan$1(Dataset.scala:232)
        at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3709)
        at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:111)
        at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:173)
        at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:94)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:780)
        at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:68)
        at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3707)
        at org.apache.spark.sql.Dataset.<init>(Dataset.scala:232)
        at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:780)
        at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:97)
        at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:618)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:780)
        at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:613)
        at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:650)
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:347)
        ... 16 more
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 565.0 failed 4 times, most recent failure: Lost task 0.3 in stage 565.0 (TID 2004) (linux-pokl executor 4): org.apache.spark.SparkException: Task failed while writing rows.
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:368)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$17(FileFormatWriter.scala:241)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
        at org.apache.spark.scheduler.Task.run(Task.scala:131)
        at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:500)
        at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1575)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:503)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:750)
Caused by: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat$CarbonRecordWriter.close(CarbonTableOutputFormat.java:482)
        at org.apache.spark.sql.execution.datasources.CarbonOutputWriter.close(SparkCarbonTableFormat.scala:553)
        at org.apache.spark.sql.execution.datasources.FileFormatDataWriter.releaseResources(FileFormatDataWriter.scala:62)
        at org.apache.spark.sql.execution.datasources.FileFormatDataWriter.commit(FileFormatDataWriter.scala:80)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:350)
        at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1609)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:358)
        ... 9 more
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at java.util.concurrent.FutureTask.report(FutureTask.java:122)
        at java.util.concurrent.FutureTask.get(FutureTask.java:192)
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat$CarbonRecordWriter.close(CarbonTableOutputFormat.java:479)
        ... 15 more
Caused by: java.lang.RuntimeException: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat.lambda$getRecordWriter$0(CarbonTableOutputFormat.java:290)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        ... 3 more
Caused by: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at org.apache.carbondata.processing.loading.DataLoadExecutor.execute(DataLoadExecutor.java:65)
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat.lambda$getRecordWriter$0(CarbonTableOutputFormat.java:280)
        ... 5 more
Caused by: java.lang.ArrayIndexOutOfBoundsException: 7
        at org.apache.carbondata.processing.sort.sortdata.TableFieldStat.<init>(TableFieldStat.java:166)
        at org.apache.carbondata.processing.loading.sort.SortStepRowHandler.<init>(SortStepRowHandler.java:122)
        at org.apache.carbondata.processing.loading.sort.unsafe.merger.UnsafeSingleThreadFinalSortFilesMerger.<init>(UnsafeSingleThreadFinalSortFilesMerger.java:75)
        at org.apache.carbondata.processing.loading.sort.impl.UnsafeParallelReadMergeSorterImpl.initialize(UnsafeParallelReadMergeSorterImpl.java:77)
        at org.apache.carbondata.processing.loading.steps.SortProcessorStepImpl.initialize(SortProcessorStepImpl.java:50)
        at org.apache.carbondata.processing.loading.steps.DataWriterProcessorStepImpl.initialize(DataWriterProcessorStepImpl.java:89)
        at org.apache.carbondata.processing.loading.DataLoadExecutor.execute(DataLoadExecutor.java:48)
        ... 6 more

Driver stacktrace:
        at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2298)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2247)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2246)
        at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
        at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2246)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1119)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1119)
        at scala.Option.foreach(Option.scala:407)
        at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1119)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2485)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2427)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2416)
        at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
        at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:906)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:2327)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:230)
        ... 60 more
Caused by: org.apache.spark.SparkException: Task failed while writing rows.
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:368)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$17(FileFormatWriter.scala:241)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
        at org.apache.spark.scheduler.Task.run(Task.scala:131)
        at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:500)
        at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1575)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:503)
        ... 3 more
Caused by: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat$CarbonRecordWriter.close(CarbonTableOutputFormat.java:482)
        at org.apache.spark.sql.execution.datasources.CarbonOutputWriter.close(SparkCarbonTableFormat.scala:553)
        at org.apache.spark.sql.execution.datasources.FileFormatDataWriter.releaseResources(FileFormatDataWriter.scala:62)
        at org.apache.spark.sql.execution.datasources.FileFormatDataWriter.commit(FileFormatDataWriter.scala:80)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:350)
        at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1609)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:358)
        ... 9 more
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at java.util.concurrent.FutureTask.report(FutureTask.java:122)
        at java.util.concurrent.FutureTask.get(FutureTask.java:192)
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat$CarbonRecordWriter.close(CarbonTableOutputFormat.java:479)
        ... 15 more
Caused by: java.lang.RuntimeException: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat.lambda$getRecordWriter$0(CarbonTableOutputFormat.java:290)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        ... 3 more
Caused by: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at org.apache.carbondata.processing.loading.DataLoadExecutor.execute(DataLoadExecutor.java:65)
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat.lambda$getRecordWriter$0(CarbonTableOutputFormat.java:280)
        ... 5 more
Caused by: java.lang.ArrayIndexOutOfBoundsException: 7
        at org.apache.carbondata.processing.sort.sortdata.TableFieldStat.<init>(TableFieldStat.java:166)
        at org.apache.carbondata.processing.loading.sort.SortStepRowHandler.<init>(SortStepRowHandler.java:122)
        at org.apache.carbondata.processing.loading.sort.unsafe.merger.UnsafeSingleThreadFinalSortFilesMerger.<init>(UnsafeSingleThreadFinalSortFilesMerger.java:75)
        at org.apache.carbondata.processing.loading.sort.impl.UnsafeParallelReadMergeSorterImpl.initialize(UnsafeParallelReadMergeSorterImpl.java:77)
        at org.apache.carbondata.processing.loading.steps.SortProcessorStepImpl.initialize(SortProcessorStepImpl.java:50)
        at org.apache.carbondata.processing.loading.steps.DataWriterProcessorStepImpl.initialize(DataWriterProcessorStepImpl.java:89)
        at org.apache.carbondata.processing.loading.DataLoadExecutor.execute(DataLoadExecutor.java:48)
        ... 6 more (state=,code=0)
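
For reference, two illustrative sanity checks (not part of the original reproduction steps) can be run in the same session before the failing insert: DESCRIBE FORMATTED should show whether the altered SORT_COLUMNS and SORT_SCOPE were applied to the target table, and a count on the source partition confirms that the load populated it. Table names follow the steps above.

-- illustrative checks only
describe formatted uniqdata_part;

select count(*) from uniqdata_part1 where CUST_ID = 9001;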

 

 

*Scenario 2:* Insert Overwrite into a partition table from another partition table fails after Alter Table Set Table Properties _with SORT_COLUMNS as the partition column and SORT_SCOPE as LOCAL_SORT._

*Steps:*

drop table if exists uniqdata_part;

CREATE TABLE uniqdata_part(CUST_NAME string,ACTIVE_EMUI_VERSION string, DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10), DECIMAL_COLUMN2 decimal(36,10),Double_COLUMN1 double, Double_COLUMN2 double, INTEGER_COLUMN1 int) PARTITIONED BY(CUST_ID int) STORED AS carbondata;

drop table if exists uniqdata_part1;

CREATE TABLE uniqdata_part1(CUST_NAME string, ACTIVE_EMUI_VERSION string, DOB timestamp, DOJ timestamp, BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(36,10), DECIMAL_COLUMN2 decimal(36,10),Double_COLUMN1 double, Double_COLUMN2 double, INTEGER_COLUMN1 int) PARTITIONED BY (CUST_ID int) STORED AS carbondata;

LOAD DATA INPATH 'hdfs://hacluster/chetan/2000_UniqData.csv' into table uniqdata_part1 PARTITION (CUST_ID='9001') OPTIONS ('FILEHEADER'='CUST_ID,CUST_NAME,ACTIVE_EMUI_VERSION,DOB,DOJ, BIGINT_COLUMN1,BIGINT_COLUMN2,DECIMAL_COLUMN1,DECIMAL_COLUMN2,Double_COLUMN1, Double_COLUMN2,INTEGER_COLUMN1','BAD_RECORDS_ACTION'='FORCE');

alter table uniqdata_part SET TBLPROPERTIES('SORT_COLUMNS'='CUST_ID','SORT_SCOPE'='LOCAL_SORT');

insert overwrite table uniqdata_part PARTITION (CUST_ID='9001') select CUST_NAME, ACTIVE_EMUI_VERSION, DOB, DOJ, BIGINT_COLUMN1, BIGINT_COLUMN2, DECIMAL_COLUMN1, DECIMAL_COLUMN2, Double_COLUMN1, Double_COLUMN2, INTEGER_COLUMN1 from uniqdata_part1;

*Error:*

Error: org.apache.hive.service.cli.HiveSQLException: Error running query: org.apache.spark.SparkException: Job aborted.
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:387)
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.$anonfun$run$3(SparkExecuteStatementOperation.scala:276)
        at scala.runtime.java8.JFunction0$mcV$sp.apply(JFunction0$mcV$sp.java:23)
        at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties(SparkOperation.scala:78)
        at org.apache.spark.sql.hive.thriftserver.SparkOperation.withLocalProperties$(SparkOperation.scala:62)
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.withLocalProperties(SparkExecuteStatementOperation.scala:46)
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:276)
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2$$anon$3.run(SparkExecuteStatementOperation.scala:263)
        at java.security.AccessController.doPrivileged(Native Method)
        at javax.security.auth.Subject.doAs(Subject.java:422)
        at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1761)
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation$$anon$2.run(SparkExecuteStatementOperation.scala:290)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:750)
Caused by: org.apache.spark.SparkException: Job aborted.
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:273)
        at org.apache.spark.sql.execution.command.management.CarbonInsertIntoHadoopFsRelationCommand.run(CarbonInsertIntoHadoopFsRelationCommand.scala:168)
        at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult$lzycompute(commands.scala:109)
        at org.apache.spark.sql.execution.command.DataWritingCommandExec.sideEffectResult(commands.scala:107)
        at org.apache.spark.sql.execution.command.DataWritingCommandExec.executeCollect(commands.scala:121)
        at org.apache.spark.sql.Dataset.$anonfun$logicalPlan$1(Dataset.scala:232)
        at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3709)
        at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:111)
        at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:173)
        at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:94)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:780)
        at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:68)
        at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3707)
        at org.apache.spark.sql.Dataset.<init>(Dataset.scala:232)
        at org.apache.spark.sql.Dataset$.$anonfun$ofRows$1(Dataset.scala:92)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:780)
        at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:89)
        at org.apache.spark.sql.execution.command.management.CommonLoadUtils$.loadDataWithPartition(CommonLoadUtils.scala:1088)
        at org.apache.spark.sql.execution.command.management.CarbonInsertIntoCommand.insertData(CarbonInsertIntoCommand.scala:479)
        at org.apache.spark.sql.execution.command.management.CarbonInsertIntoCommand.processData(CarbonInsertIntoCommand.scala:266)
        at org.apache.spark.sql.execution.command.AtomicRunnableCommand.$anonfun$run$3(package.scala:162)
        at org.apache.spark.sql.execution.command.Auditable.runWithAudit(package.scala:118)
        at org.apache.spark.sql.execution.command.Auditable.runWithAudit$(package.scala:114)
        at org.apache.spark.sql.execution.command.AtomicRunnableCommand.runWithAudit(package.scala:155)
        at org.apache.spark.sql.execution.command.AtomicRunnableCommand.run(package.scala:168)
        at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult$lzycompute(commands.scala:71)
        at org.apache.spark.sql.execution.command.ExecutedCommandExec.sideEffectResult(commands.scala:69)
        at org.apache.spark.sql.execution.command.ExecutedCommandExec.executeCollect(commands.scala:80)
        at org.apache.spark.sql.Dataset.$anonfun$logicalPlan$1(Dataset.scala:232)
        at org.apache.spark.sql.Dataset.$anonfun$withAction$1(Dataset.scala:3709)
        at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$5(SQLExecution.scala:111)
        at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:173)
        at org.apache.spark.sql.execution.SQLExecution$.$anonfun$withNewExecutionId$1(SQLExecution.scala:94)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:780)
        at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:68)
        at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3707)
        at org.apache.spark.sql.Dataset.<init>(Dataset.scala:232)
        at org.apache.spark.sql.Dataset$.$anonfun$ofRows$2(Dataset.scala:100)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:780)
        at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:97)
        at org.apache.spark.sql.SparkSession.$anonfun$sql$1(SparkSession.scala:618)
        at org.apache.spark.sql.SparkSession.withActive(SparkSession.scala:780)
        at org.apache.spark.sql.SparkSession.sql(SparkSession.scala:613)
        at org.apache.spark.sql.SQLContext.sql(SQLContext.scala:650)
        at org.apache.spark.sql.hive.thriftserver.SparkExecuteStatementOperation.org$apache$spark$sql$hive$thriftserver$SparkExecuteStatementOperation$$execute(SparkExecuteStatementOperation.scala:347)
        ... 16 more
Caused by: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 568.0 failed 4 times, most recent failure: Lost task 0.3 in stage 568.0 (TID 2010) (linux-hbk4 executor 14): org.apache.spark.SparkException: Task failed while writing rows.
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:368)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$17(FileFormatWriter.scala:241)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
        at org.apache.spark.scheduler.Task.run(Task.scala:131)
        at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:500)
        at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1575)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:503)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:750)
Caused by: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat$CarbonRecordWriter.close(CarbonTableOutputFormat.java:482)
        at org.apache.spark.sql.execution.datasources.CarbonOutputWriter.close(SparkCarbonTableFormat.scala:553)
        at org.apache.spark.sql.execution.datasources.FileFormatDataWriter.releaseResources(FileFormatDataWriter.scala:62)
        at org.apache.spark.sql.execution.datasources.FileFormatDataWriter.commit(FileFormatDataWriter.scala:80)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:350)
        at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1609)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:358)
        ... 9 more
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at java.util.concurrent.FutureTask.report(FutureTask.java:122)
        at java.util.concurrent.FutureTask.get(FutureTask.java:192)
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat$CarbonRecordWriter.close(CarbonTableOutputFormat.java:479)
        ... 15 more
Caused by: java.lang.RuntimeException: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat.lambda$getRecordWriter$0(CarbonTableOutputFormat.java:290)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        ... 3 more
Caused by: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at org.apache.carbondata.processing.loading.DataLoadExecutor.execute(DataLoadExecutor.java:65)
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat.lambda$getRecordWriter$0(CarbonTableOutputFormat.java:280)
        ... 5 more
Caused by: java.lang.ArrayIndexOutOfBoundsException: 7
        at org.apache.carbondata.processing.sort.sortdata.TableFieldStat.<init>(TableFieldStat.java:166)
        at org.apache.carbondata.processing.loading.sort.SortStepRowHandler.<init>(SortStepRowHandler.java:122)
        at org.apache.carbondata.processing.loading.sort.unsafe.merger.UnsafeSingleThreadFinalSortFilesMerger.<init>(UnsafeSingleThreadFinalSortFilesMerger.java:75)
        at org.apache.carbondata.processing.loading.sort.impl.UnsafeParallelReadMergeSorterImpl.initialize(UnsafeParallelReadMergeSorterImpl.java:77)
        at org.apache.carbondata.processing.loading.steps.SortProcessorStepImpl.initialize(SortProcessorStepImpl.java:50)
        at org.apache.carbondata.processing.loading.steps.DataWriterProcessorStepImpl.initialize(DataWriterProcessorStepImpl.java:89)
        at org.apache.carbondata.processing.loading.DataLoadExecutor.execute(DataLoadExecutor.java:48)
        ... 6 more

Driver stacktrace:
        at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2298)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2247)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2246)
        at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
        at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
        at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
        at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2246)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1119)
        at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1119)
        at scala.Option.foreach(Option.scala:407)
        at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1119)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2485)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2427)
        at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2416)
        at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
        at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:906)
        at org.apache.spark.SparkContext.runJob(SparkContext.scala:2327)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.write(FileFormatWriter.scala:230)
        ... 60 more
Caused by: org.apache.spark.SparkException: Task failed while writing rows.
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:368)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$write$17(FileFormatWriter.scala:241)
        at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
        at org.apache.spark.scheduler.Task.run(Task.scala:131)
        at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$3(Executor.scala:500)
        at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1575)
        at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:503)
        ... 3 more
Caused by: java.lang.RuntimeException: java.util.concurrent.ExecutionException: java.lang.RuntimeException: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat$CarbonRecordWriter.close(CarbonTableOutputFormat.java:482)
        at org.apache.spark.sql.execution.datasources.CarbonOutputWriter.close(SparkCarbonTableFormat.scala:553)
        at org.apache.spark.sql.execution.datasources.FileFormatDataWriter.releaseResources(FileFormatDataWriter.scala:62)
        at org.apache.spark.sql.execution.datasources.FileFormatDataWriter.commit(FileFormatDataWriter.scala:80)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.$anonfun$executeTask$1(FileFormatWriter.scala:350)
        at org.apache.spark.util.Utils$.tryWithSafeFinallyAndFailureCallbacks(Utils.scala:1609)
        at org.apache.spark.sql.execution.datasources.FileFormatWriter$.executeTask(FileFormatWriter.scala:358)
        ... 9 more
Caused by: java.util.concurrent.ExecutionException: java.lang.RuntimeException: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at java.util.concurrent.FutureTask.report(FutureTask.java:122)
        at java.util.concurrent.FutureTask.get(FutureTask.java:192)
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat$CarbonRecordWriter.close(CarbonTableOutputFormat.java:479)
        ... 15 more
Caused by: java.lang.RuntimeException: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat.lambda$getRecordWriter$0(CarbonTableOutputFormat.java:290)
        at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        ... 3 more
Caused by: org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException: Data Loading failed for table uniqdata_part
        at org.apache.carbondata.processing.loading.DataLoadExecutor.execute(DataLoadExecutor.java:65)
        at org.apache.carbondata.hadoop.api.CarbonTableOutputFormat.lambda$getRecordWriter$0(CarbonTableOutputFormat.java:280)
        ... 5 more
Caused by: java.lang.ArrayIndexOutOfBoundsException: 7
        at org.apache.carbondata.processing.sort.sortdata.TableFieldStat.<init>(TableFieldStat.java:166)
        at org.apache.carbondata.processing.loading.sort.SortStepRowHandler.<init>(SortStepRowHandler.java:122)
        at org.apache.carbondata.processing.loading.sort.unsafe.merger.UnsafeSingleThreadFinalSortFilesMerger.<init>(UnsafeSingleThreadFinalSortFilesMerger.java:75)
        at org.apache.carbondata.processing.loading.sort.impl.UnsafeParallelReadMergeSorterImpl.initialize(UnsafeParallelReadMergeSorterImpl.java:77)
        at org.apache.carbondata.processing.loading.steps.SortProcessorStepImpl.initialize(SortProcessorStepImpl.java:50)
        at org.apache.carbondata.processing.loading.steps.DataWriterProcessorStepImpl.initialize(DataWriterProcessorStepImpl.java:89)
        at org.apache.carbondata.processing.loading.DataLoadExecutor.execute(DataLoadExecutor.java:48)
        ... 6 more (state=,code=0)
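
As a possible isolation step (illustrative only, not something executed for this report), the insert / insert overwrite can be retried after pointing SORT_COLUMNS at a non-partition column such as CUST_NAME; if that variant succeeds, the failure is specific to the partition column being listed in SORT_COLUMNS.

-- illustrative variant of the steps above
alter table uniqdata_part SET TBLPROPERTIES('SORT_COLUMNS'='CUST_NAME','SORT_SCOPE'='LOCAL_SORT');

insert into uniqdata_part PARTITION (CUST_ID='9001') select CUST_NAME, ACTIVE_EMUI_VERSION, DOB, DOJ, BIGINT_COLUMN1, BIGINT_COLUMN2, DECIMAL_COLUMN1, DECIMAL_COLUMN2, Double_COLUMN1, Double_COLUMN2, INTEGER_COLUMN1 from uniqdata_part1;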

 



--
This message was sent by Atlassian Jira
(v8.20.7#820007)