Posted to commits@carbondata.apache.org by xu...@apache.org on 2018/11/06 11:59:49 UTC

carbondata git commit: [CARBONDATA-3078] Disable explain collector for count star query without filter

Repository: carbondata
Updated Branches:
  refs/heads/master a3a83dcad -> b6ff4672b


[CARBONDATA-3078] Disable explain collector for count star query without filter

An issue was found with the EXPLAIN command for a count star query without a filter. This is a
special case that uses a different query plan. Since there is no useful information about
block/blocklet pruning for such a query, disable the explain collector to avoid the exception
reported in https://issues.apache.org/jira/browse/CARBONDATA-3078
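
For illustration only (not part of the commit): a minimal Java sketch of the pattern this fix
applies, namely clearing the explain collector state before entering a code path that produces no
pruning information. Apart from ExplainCollector.remove(), which appears in the diff below, the
class and method names here are hypothetical.

    import org.apache.carbondata.core.profiler.ExplainCollector;

    public class CountStarExplainSketch {

      // Sketch of a row-count path that bypasses block/blocklet pruning.
      // Because nothing is pruned, EXPLAIN has nothing useful to report for it,
      // so the collector is removed up front to avoid touching uninitialized
      // pruning info later in the flow.
      public long countAllRows() {
        ExplainCollector.remove();

        long rowCount = 0L;
        // ... sum up per-segment row counts from table metadata here ...
        return rowCount;
      }
    }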

This closes #2900


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/b6ff4672
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/b6ff4672
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/b6ff4672

Branch: refs/heads/master
Commit: b6ff4672be7bd25ab40144feb801be9e20069244
Parents: a3a83dc
Author: Manhua <ke...@qq.com>
Authored: Mon Nov 5 20:17:59 2018 +0800
Committer: xuchuanyin <xu...@hust.edu.cn>
Committed: Tue Nov 6 19:58:24 2018 +0800

----------------------------------------------------------------------
 .../carbondata/hadoop/api/CarbonTableInputFormat.java    |  9 +++++++++
 .../spark/testsuite/filterexpr/CountStarTestCase.scala   | 11 +++++++++++
 2 files changed, 20 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/b6ff4672/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
index ba3accf..86cbfec 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
@@ -43,6 +43,7 @@ import org.apache.carbondata.core.mutate.CarbonUpdateUtil;
 import org.apache.carbondata.core.mutate.SegmentUpdateDetails;
 import org.apache.carbondata.core.mutate.UpdateVO;
 import org.apache.carbondata.core.mutate.data.BlockMappingVO;
+import org.apache.carbondata.core.profiler.ExplainCollector;
 import org.apache.carbondata.core.readcommitter.LatestFilesReadCommittedScope;
 import org.apache.carbondata.core.readcommitter.ReadCommittedScope;
 import org.apache.carbondata.core.readcommitter.TableStatusReadCommittedScope;
@@ -575,6 +576,14 @@ public class CarbonTableInputFormat<T> extends CarbonInputFormat<T> {
    */
   public BlockMappingVO getBlockRowCount(Job job, CarbonTable table,
       List<PartitionSpec> partitions) throws IOException {
+    // The normal query flow goes through CarbonInputFormat#getPrunedBlocklets, which initializes
+    // the pruning info for the queried table. A count star query without filter uses a different
+    // query plan, so no pruning info is initialized here. When the default data map is asked to
+    // prune (with a null filter), an exception occurs while setting the pruning info.
+    // Since there is no useful block/blocklet pruning information for such a query
+    // (no pruning actually happens), disable the explain collector here.
+    ExplainCollector.remove();
+
     AbsoluteTableIdentifier identifier = table.getAbsoluteTableIdentifier();
     TableDataMap blockletMap = DataMapStoreManager.getInstance().getDefaultDataMap(table);
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b6ff4672/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/CountStarTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/CountStarTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/CountStarTestCase.scala
index f26d0e7..18ad1d7 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/CountStarTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/CountStarTestCase.scala
@@ -54,6 +54,17 @@ class CountStarTestCase extends QueryTest with BeforeAndAfterAll {
     )
   }
 
+  test("explain select count star without filter") {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_QUERY_STATISTICS, "true")
+
+    sql("explain select count(*) from filterTimestampDataType").collect()
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_QUERY_STATISTICS,
+        CarbonCommonConstants.ENABLE_QUERY_STATISTICS_DEFAULT)
+  }
+
   override def afterAll {
     sql("drop table if exists filtertestTables")
     sql("drop table if exists filterTimestampDataType")