You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@carbondata.apache.org by ra...@apache.org on 2017/11/26 11:49:24 UTC
carbondata git commit: [CARBONDATA-1713] Fixed Aggregate query on
main table fails after creating pre-aggregate table
Repository: carbondata
Updated Branches:
refs/heads/master 520ee50a7 -> ed8236781
[CARBONDATA-1713] Fixed Aggregate query on main table fails after creating pre-aggregate table
Problem: when the select query's columns are in upper case, pre-aggregate table selection fails
Solution: Convert column names to lower case, as the table's columns are stored in lower case
This closes #1501
Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/ed823678
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/ed823678
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/ed823678
Branch: refs/heads/master
Commit: ed8236781adcea30e4c043849b0f2e4ce3f1bb41
Parents: 520ee50
Author: kumarvishal <ku...@gmail.com>
Authored: Wed Nov 15 15:19:05 2017 +0530
Committer: Ravindra Pesala <ra...@gmail.com>
Committed: Sun Nov 26 17:19:02 2017 +0530
----------------------------------------------------------------------
.../TestPreAggregateTableSelection.scala | 7 +++++++
.../spark/sql/hive/CarbonPreAggregateRules.scala | 15 +++++++++------
2 files changed, 16 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ed823678/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateTableSelection.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateTableSelection.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateTableSelection.scala
index 1d41664..9a71d6e 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateTableSelection.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateTableSelection.scala
@@ -46,6 +46,8 @@ class TestPreAggregateTableSelection extends QueryTest with BeforeAndAfterAll {
sql("create datamap agg6 on table mainTable using 'preaggregate' as select name,min(age) from mainTable group by name")
sql("create datamap agg7 on table mainTable using 'preaggregate' as select name,max(age) from mainTable group by name")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/measureinsertintotest.csv' into table mainTable")
+ sql("create table if not exists lineitem(L_SHIPDATE string,L_SHIPMODE string,L_SHIPINSTRUCT string,L_RETURNFLAG string,L_RECEIPTDATE string,L_ORDERKEY string,L_PARTKEY string,L_SUPPKEY string,L_LINENUMBER int,L_QUANTITY double,L_EXTENDEDPRICE double,L_DISCOUNT double,L_TAX double,L_LINESTATUS string,L_COMMITDATE string,L_COMMENT string) STORED BY 'org.apache.carbondata.format'TBLPROPERTIES ('table_blocksize'='128','NO_INVERTED_INDEX'='L_SHIPDATE,L_SHIPMODE,L_SHIPINSTRUCT,L_RETURNFLAG,L_RECEIPTDATE,L_ORDERKEY,L_PARTKEY,L_SUPPKEY','sort_columns'='')")
+ sql("create datamap agr_lineitem ON TABLE lineitem USING 'preaggregate' as select L_RETURNFLAG,L_LINESTATUS,sum (L_QUANTITY),sum(L_EXTENDEDPRICE) from lineitem group by L_RETURNFLAG, L_LINESTATUS")
}
@@ -139,6 +141,11 @@ class TestPreAggregateTableSelection extends QueryTest with BeforeAndAfterAll {
preAggTableValidator(df.queryExecution.analyzed, "maintable_agg3")
}
+ test("test PreAggregate table selection 19") {
+ val df = sql("select L_RETURNFLAG,L_LINESTATUS,sum(L_QUANTITY),sum(L_EXTENDEDPRICE) from lineitem group by L_RETURNFLAG, L_LINESTATUS")
+ preAggTableValidator(df.queryExecution.analyzed, "lineitem_agr_lineitem")
+ }
+
def preAggTableValidator(plan: LogicalPlan, actualTableName: String) : Unit ={
var isValidPlan = false
plan.transform {
http://git-wip-us.apache.org/repos/asf/carbondata/blob/ed823678/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
index 20bdb41..7c23e5e 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
@@ -229,14 +229,15 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
aggFunction: String = ""): AttributeReference = {
val aggregationDataMapSchema = dataMapSchema.asInstanceOf[AggregationDataMapSchema];
val columnSchema = if (aggFunction.isEmpty) {
- aggregationDataMapSchema.getChildColByParentColName(attributeReference.name)
+ aggregationDataMapSchema.getChildColByParentColName(attributeReference.name.toLowerCase)
} else {
- aggregationDataMapSchema.getAggChildColByParent(attributeReference.name, aggFunction)
+ aggregationDataMapSchema.getAggChildColByParent(attributeReference.name.toLowerCase,
+ aggFunction.toLowerCase)
}
// here column schema cannot be null, if it is null then aggregate table selection
// logic has some problem
if (null == columnSchema) {
- throw new AnalysisException("Column doesnot exists in Pre Aggregate table")
+ throw new AnalysisException("Column does not exists in Pre Aggregate table")
}
// finding the child attribute from child logical relation
childCarbonRelation.attributeMap.find(p => p._2.name.equals(columnSchema.getColumnName)).get._2
@@ -725,13 +726,15 @@ case class CarbonPreAggregateQueryRules(sparkSession: SparkSession) extends Rule
dataType: String = "",
isChangedDataType: Boolean = false,
isFilterColumn: Boolean = false): QueryColumn = {
- val columnSchema = carbonTable.getColumnByName(tableName, columnName).getColumnSchema
+ val columnSchema = carbonTable.getColumnByName(tableName,
+ columnName.toLowerCase).getColumnSchema
if (isChangedDataType) {
- new QueryColumn(columnSchema, columnSchema.getDataType.getName, aggFunction, isFilterColumn)
+ new QueryColumn(columnSchema, columnSchema.getDataType.getName,
+ aggFunction.toLowerCase, isFilterColumn)
} else {
new QueryColumn(columnSchema,
CarbonScalaUtil.convertSparkToCarbonSchemaDataType(dataType),
- aggFunction, isFilterColumn)
+ aggFunction.toLowerCase, isFilterColumn)
}
}
}