Posted to issues@carbondata.apache.org by "xuchuanyin (JIRA)" <ji...@apache.org> on 2019/03/24 08:22:00 UTC

[jira] [Commented] (CARBONDATA-3327) Errors occur in queries with small blocklet size

    [ https://issues.apache.org/jira/browse/CARBONDATA-3327?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16799944#comment-16799944 ] 

xuchuanyin commented on CARBONDATA-3327:
----------------------------------------

Besides, I noticed that the problem also appears if we do not filter on the sort_columns.
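To illustrate what I mean (a sketch only: the table and column names are hypothetical, `sql(...)` is the helper used throughout the test suite, and the table is assumed to have been created with TBLPROPERTIES ('SORT_COLUMNS' = 'name')):

```scala
// Hypothetical table with SORT_COLUMNS = 'name'.
// Filter on the sort column -- the case the existing tests cover:
sql("SELECT * FROM sorttable WHERE name = 'xx'")
// Filter on a non-sort column (or no filter at all) -- the case described above:
sql("SELECT * FROM sorttable WHERE city = 'wuhan'")
```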

> Errors occur in queries with small blocklet size
> ------------------------------------------------
>
>                 Key: CARBONDATA-3327
>                 URL: https://issues.apache.org/jira/browse/CARBONDATA-3327
>             Project: CarbonData
>          Issue Type: Bug
>            Reporter: xuchuanyin
>            Priority: Major
>
> While applying the following patch:
> ```diff
> diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
> index 69374ad..c6b63a4 100644
> --- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
> +++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
> @@ -54,7 +54,7 @@ public final class CarbonCommonConstants {
>    /**
>     * min blocklet size
>     */
> -  public static final int BLOCKLET_SIZE_MIN_VAL = 2000;
> +  public static final int BLOCKLET_SIZE_MIN_VAL = 1;
>  
>    /**
>     * max blocklet size
> diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala
> index df97d0f..ace9fd5 100644
> --- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala
> +++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala
> @@ -29,6 +29,7 @@ import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandExcepti
>  class TestSortColumns extends QueryTest with BeforeAndAfterAll {
>  
>    override def beforeAll {
> +    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.BLOCKLET_SIZE, "2")
>      CarbonProperties.getInstance().addProperty(
>        CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
>  
> ```
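> For reference, a rough reproduction sketch in the style of the test suite (the table, column and CSV names below are hypothetical; `sql(...)` and `resourcesPath` are the helpers used throughout the test suite; the constant change above is assumed to be applied):
> ```scala
> import org.apache.carbondata.core.constants.CarbonCommonConstants
> import org.apache.carbondata.core.util.CarbonProperties
>
> // Force a tiny blocklet size; this only makes sense together with the
> // BLOCKLET_SIZE_MIN_VAL change above, since 2 is far below the original minimum of 2000.
> CarbonProperties.getInstance()
>   .addProperty(CarbonCommonConstants.BLOCKLET_SIZE, "2")
>
> // Hypothetical table and data file.
> sql("DROP TABLE IF EXISTS small_blocklet_tab")
> sql(
>   """
>     | CREATE TABLE small_blocklet_tab (id INT, name STRING, city STRING, age INT)
>     | STORED BY 'carbondata'
>     | TBLPROPERTIES ('SORT_COLUMNS' = 'name')
>   """.stripMargin)
> sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/sortcolumns.csv' INTO TABLE small_blocklet_tab")
>
> // With the tiny blocklet size, queries of this kind are what fail with the
> // NullPointerException shown in the logs below.
> sql("SELECT name, city FROM small_blocklet_tab WHERE age > 20").show()
> ```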
> I found that some of the tests in `TestSortColumns` fail with a NullPointerException, and the error logs show:
> ```
> 19/03/23 20:54:30 ERROR Executor: Exception in task 0.0 in stage 104.0 (TID 173)
> java.lang.NullPointerException
>     at org.apache.parquet.io.api.Binary$ByteArrayBackedBinary.getBytes(Binary.java:294)
>     at org.apache.spark.sql.execution.vectorized.ColumnVector.getUTF8String(ColumnVector.java:646)
>     at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
>     at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
>     at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:395)
>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:234)
>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:228)
>     at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
>     at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
>     at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
>     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
>     at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
>     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
>     at org.apache.spark.scheduler.Task.run(Task.scala:108)
>     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>     at java.lang.Thread.run(Thread.java:745)
> 19/03/23 20:54:30 ERROR TaskSetManager: Task 0 in stage 104.0 failed 1 times; aborting job
> 19/03/23 20:54:30 INFO TestSortColumns: 
> ===== FINISHED org.apache.carbondata.spark.testsuite.sortcolumns.TestSortColumns: 'filter on sort_columns include no-dictionary, direct-dictionary and dictioanry' =====
> 19/03/23 20:54:30 INFO TestSortColumns: 
> ===== TEST OUTPUT FOR org.apache.carbondata.spark.testsuite.sortcolumns.TestSortColumns: 'unsorted table creation, query data loading with heap and safe sort config' =====
> Job aborted due to stage failure: Task 0 in stage 104.0 failed 1 times, most recent failure: Lost task 0.0 in stage 104.0 (TID 173, localhost, executor driver): java.lang.NullPointerException
>     at org.apache.parquet.io.api.Binary$ByteArrayBackedBinary.getBytes(Binary.java:294)
>     at org.apache.spark.sql.execution.vectorized.ColumnVector.getUTF8String(ColumnVector.java:646)
>     at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
>     at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
>     at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:395)
>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:234)
>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:228)
>     at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
>     at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
>     at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
>     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
>     at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
>     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
>     at org.apache.spark.scheduler.Task.run(Task.scala:108)
>     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>     at java.lang.Thread.run(Thread.java:745)
> Driver stacktrace:
> org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 104.0 failed 1 times, most recent failure: Lost task 0.0 in stage 104.0 (TID 173, localhost, executor driver): java.lang.NullPointerException
>     at org.apache.parquet.io.api.Binary$ByteArrayBackedBinary.getBytes(Binary.java:294)
>     at org.apache.spark.sql.execution.vectorized.ColumnVector.getUTF8String(ColumnVector.java:646)
>     at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
>     at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
>     at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:395)
>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:234)
>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:228)
>     at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
>     at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
>     at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
>     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
>     at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
>     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
>     at org.apache.spark.scheduler.Task.run(Task.scala:108)
>     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>     at java.lang.Thread.run(Thread.java:745)
> Driver stacktrace:
>     at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1517)
>     at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1505)
>     at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1504)
>     at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
>     at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
>     at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1504)
>     at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
>     at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:814)
>     at scala.Option.foreach(Option.scala:257)
>     at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:814)
>     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:1732)
>     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1687)
>     at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:1676)
>     at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:48)
>     at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:630)
>     at org.apache.spark.SparkContext.runJob(SparkContext.scala:2029)
>     at org.apache.spark.SparkContext.runJob(SparkContext.scala:2050)
>     at org.apache.spark.SparkContext.runJob(SparkContext.scala:2069)
>     at org.apache.spark.SparkContext.runJob(SparkContext.scala:2094)
>     at org.apache.spark.rdd.RDD$$anonfun$collect$1.apply(RDD.scala:936)
>     at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
>     at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
>     at org.apache.spark.rdd.RDD.withScope(RDD.scala:362)
>     at org.apache.spark.rdd.RDD.collect(RDD.scala:935)
>     at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:278)
>     at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$collectFromPlan(Dataset.scala:2861)
>     at org.apache.spark.sql.Dataset$$anonfun$collect$1.apply(Dataset.scala:2387)
>     at org.apache.spark.sql.Dataset$$anonfun$collect$1.apply(Dataset.scala:2387)
>     at org.apache.spark.sql.Dataset$$anonfun$55.apply(Dataset.scala:2842)
>     at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:65)
>     at org.apache.spark.sql.Dataset.withAction(Dataset.scala:2841)
>     at org.apache.spark.sql.Dataset.collect(Dataset.scala:2387)
>     at org.apache.spark.sql.test.util.QueryTest.checkAnswer(QueryTest.scala:100)
>     at org.apache.carbondata.spark.testsuite.sortcolumns.TestSortColumns$$anonfun$17.apply$mcV$sp(TestSortColumns.scala:237)
>     at org.apache.carbondata.spark.testsuite.sortcolumns.TestSortColumns$$anonfun$17.apply(TestSortColumns.scala:229)
>     at org.apache.carbondata.spark.testsuite.sortcolumns.TestSortColumns$$anonfun$17.apply(TestSortColumns.scala:229)
>     at org.scalatest.Transformer$$anonfun$apply$1.apply$mcV$sp(Transformer.scala:22)
>     at org.scalatest.OutcomeOf$class.outcomeOf(OutcomeOf.scala:85)
>     at org.scalatest.OutcomeOf$.outcomeOf(OutcomeOf.scala:104)
>     at org.scalatest.Transformer.apply(Transformer.scala:22)
>     at org.scalatest.Transformer.apply(Transformer.scala:20)
>     at org.scalatest.FunSuiteLike$$anon$1.apply(FunSuiteLike.scala:166)
>     at org.apache.spark.sql.test.util.CarbonFunSuite.withFixture(CarbonFunSuite.scala:41)
>     at org.scalatest.FunSuiteLike$class.invokeWithFixture$1(FunSuiteLike.scala:163)
>     at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
>     at org.scalatest.FunSuiteLike$$anonfun$runTest$1.apply(FunSuiteLike.scala:175)
>     at org.scalatest.SuperEngine.runTestImpl(Engine.scala:306)
>     at org.scalatest.FunSuiteLike$class.runTest(FunSuiteLike.scala:175)
>     at org.scalatest.FunSuite.runTest(FunSuite.scala:1555)
>     at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
>     at org.scalatest.FunSuiteLike$$anonfun$runTests$1.apply(FunSuiteLike.scala:208)
>     at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:413)
>     at org.scalatest.SuperEngine$$anonfun$traverseSubNodes$1$1.apply(Engine.scala:401)
>     at scala.collection.immutable.List.foreach(List.scala:381)
>     at org.scalatest.SuperEngine.traverseSubNodes$1(Engine.scala:401)
>     at org.scalatest.SuperEngine.org$scalatest$SuperEngine$$runTestsInBranch(Engine.scala:396)
>     at org.scalatest.SuperEngine.runTestsImpl(Engine.scala:483)
>     at org.scalatest.FunSuiteLike$class.runTests(FunSuiteLike.scala:208)
>     at org.scalatest.FunSuite.runTests(FunSuite.scala:1555)
>     at org.scalatest.Suite$class.run(Suite.scala:1424)
>     at org.scalatest.FunSuite.org$scalatest$FunSuiteLike$$super$run(FunSuite.scala:1555)
>     at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
>     at org.scalatest.FunSuiteLike$$anonfun$run$1.apply(FunSuiteLike.scala:212)
>     at org.scalatest.SuperEngine.runImpl(Engine.scala:545)
>     at org.scalatest.FunSuiteLike$class.run(FunSuiteLike.scala:212)
>     at org.apache.carbondata.spark.testsuite.sortcolumns.TestSortColumns.org$scalatest$BeforeAndAfterAll$$super$run(TestSortColumns.scala:29)
>     at org.scalatest.BeforeAndAfterAll$class.liftedTree1$1(BeforeAndAfterAll.scala:257)
>     at org.scalatest.BeforeAndAfterAll$class.run(BeforeAndAfterAll.scala:256)
>     at org.apache.carbondata.spark.testsuite.sortcolumns.TestSortColumns.run(TestSortColumns.scala:29)
>     at org.scalatest.tools.SuiteRunner.run(SuiteRunner.scala:55)
>     at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2563)
>     at org.scalatest.tools.Runner$$anonfun$doRunRunRunDaDoRunRun$3.apply(Runner.scala:2557)
>     at scala.collection.immutable.List.foreach(List.scala:381)
>     at org.scalatest.tools.Runner$.doRunRunRunDaDoRunRun(Runner.scala:2557)
>     at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1044)
>     at org.scalatest.tools.Runner$$anonfun$runOptionallyWithPassFailReporter$2.apply(Runner.scala:1043)
>     at org.scalatest.tools.Runner$.withClassLoaderAndDispatchReporter(Runner.scala:2722)
>     at org.scalatest.tools.Runner$.runOptionallyWithPassFailReporter(Runner.scala:1043)
>     at org.scalatest.tools.Runner$.run(Runner.scala:883)
>     at org.scalatest.tools.Runner.run(Runner.scala)
>     at org.jetbrains.plugins.scala.testingSupport.scalaTest.ScalaTestRunner.runScalaTest2(ScalaTestRunner.java:138)
>     at org.jetbrains.plugins.scala.testingSupport.scalaTest.ScalaTestRunner.main(ScalaTestRunner.java:28)
> Caused by: java.lang.NullPointerException
>     at org.apache.parquet.io.api.Binary$ByteArrayBackedBinary.getBytes(Binary.java:294)
>     at org.apache.spark.sql.execution.vectorized.ColumnVector.getUTF8String(ColumnVector.java:646)
>     at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIterator.processNext(Unknown Source)
>     at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
>     at org.apache.spark.sql.execution.WholeStageCodegenExec$$anonfun$8$$anon$1.hasNext(WholeStageCodegenExec.scala:395)
>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:234)
>     at org.apache.spark.sql.execution.SparkPlan$$anonfun$2.apply(SparkPlan.scala:228)
>     at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
>     at org.apache.spark.rdd.RDD$$anonfun$mapPartitionsInternal$1$$anonfun$apply$25.apply(RDD.scala:827)
>     at org.apache.spark.rdd.MapPartitionsRDD.compute(MapPartitionsRDD.scala:38)
>     at org.apache.spark.rdd.RDD.computeOrReadCheckpoint(RDD.scala:323)
>     at org.apache.spark.rdd.RDD.iterator(RDD.scala:287)
>     at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:87)
>     at org.apache.spark.scheduler.Task.run(Task.scala:108)
>     at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:338)
>     at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
>     at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
>     at java.lang.Thread.run(Thread.java:745)
> ```
> I think there may be problems in querying when the blocklet size is relatively small compared with the size of the loaded data.
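> For intuition, a back-of-the-envelope sketch of what "small compared with the loading size" means (the row count is hypothetical, not measured from the test data):
> ```scala
> // Illustrative arithmetic only: a blocklet size of 2 rows splits even a small
> // load into many blocklets, so scans cross blocklet boundaries constantly,
> // while a much larger blocklet size would keep the load in very few blocklets.
> val blockletSize     = 2      // carbon.blocklet.size set in beforeAll above
> val rowsInOneLoad    = 1000   // hypothetical number of rows in one load
> val blockletsPerLoad = math.ceil(rowsInOneLoad.toDouble / blockletSize).toInt  // = 500
> ```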


