Posted to commits@carbondata.apache.org by ku...@apache.org on 2017/12/26 16:16:16 UTC

carbondata git commit: [CARBONDATA-1931]DataLoad failed for Aggregate table when measure is …

Repository: carbondata
Updated Branches:
  refs/heads/master 7dcc2e755 -> 525920c25


[CARBONDATA-1931]DataLoad failed for Aggregate table when measure is …

Root cause: during data loading, the DataFrame columns are sorted based on selection order, but in RowConverter the field values are compared based on the column ordinal. Solution: while building the CarbonLoadModel, set the header sorted by column ordinal instead of selection order.
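
For context, the change in PreAggregateUtil (second diff hunk below) amounts to sorting the child table's columns by schema ordinal before joining them into the CSV header for the load. A minimal, self-contained Scala sketch of that idea follows; ColumnSchemaStub and the sample ordinals are hypothetical stand-ins, not CarbonData's real ColumnSchema class or actual table metadata:

    // Hypothetical stand-in for a column schema: just a name and its schema ordinal.
    case class ColumnSchemaStub(columnName: String, schemaOrdinal: Int)

    object HeaderOrderSketch {
      // Columns as they appear in the datamap's SELECT (selection order),
      // which may differ from the child table's schema order.
      val selectedColumns = Seq(
        ColumnSchemaStub("name", 1),
        ColumnSchemaStub("year", 0),
        ColumnSchemaStub("salary_sum", 2))

      // Before the fix: header follows selection order -> "name,year,salary_sum"
      val headerBySelection: String =
        selectedColumns.map(_.columnName).mkString(",")

      // After the fix: header follows schema ordinal -> "year,name,salary_sum",
      // so RowConverter's ordinal-based comparison lines up with the header.
      val headerByOrdinal: String =
        selectedColumns.sortBy(_.schemaOrdinal).map(_.columnName).mkString(",")

      def main(args: Array[String]): Unit = {
        println(headerBySelection)
        println(headerByOrdinal)
      }
    }
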

This closes 1712


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/525920c2
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/525920c2
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/525920c2

Branch: refs/heads/master
Commit: 525920c2561efab6390c21db358b0ea6dddf5854
Parents: 7dcc2e7
Author: BJangir <ba...@gmail.com>
Authored: Fri Dec 22 14:10:48 2017 +0530
Committer: kumarvishal <ku...@gmail.com>
Committed: Tue Dec 26 21:46:02 2017 +0530

----------------------------------------------------------------------
 .../testsuite/preaggregate/TestPreAggregateLoad.scala  | 13 +++++++++++++
 .../command/preaaggregate/PreAggregateUtil.scala       |  3 ++-
 2 files changed, 15 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/525920c2/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala
index 6a5f221..ff1c330 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala
@@ -21,6 +21,9 @@ import org.apache.spark.sql.Row
 import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.{BeforeAndAfterAll, Ignore}
 
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
 @Ignore
 class TestPreAggregateLoad extends QueryTest with BeforeAndAfterAll {
 
@@ -203,4 +206,14 @@ class TestPreAggregateLoad extends QueryTest with BeforeAndAfterAll {
     checkAnswer(sql("select * from maintable_preagg_sum"), Row(1, 29))
   }
 
+  test("test load in aggregate table with Measure col") {
+    val originalBadRecordsAction = CarbonProperties.getInstance().getProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION)
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FAIL")
+    sql("drop table if exists y ")
+    sql("create table y(year int,month int,name string,salary int) stored by 'carbondata'")
+    sql("insert into y select 10,11,'babu',12")
+    sql("create datamap y1_sum1 on table y using 'preaggregate' as select year,name,sum(salary) from y group by year,name")
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, originalBadRecordsAction)
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/525920c2/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/preaaggregate/PreAggregateUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/preaaggregate/PreAggregateUtil.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/preaaggregate/PreAggregateUtil.scala
index 8614d66..1f5bd41 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/preaaggregate/PreAggregateUtil.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/preaaggregate/PreAggregateUtil.scala
@@ -508,7 +508,8 @@ object PreAggregateUtil {
     val headers = dataMapSchemas.find(_.getChildSchema.getTableName.equalsIgnoreCase(
       dataMapIdentifier.table)) match {
       case Some(dataMapSchema) =>
-        dataMapSchema.getChildSchema.getListOfColumns.asScala.map(_.getColumnName).mkString(",")
+        dataMapSchema.getChildSchema.getListOfColumns.asScala.sortBy(_.getSchemaOrdinal).map(
+          _.getColumnName).mkString(",")
       case None =>
         throw new RuntimeException(
           s"${ dataMapIdentifier.table} datamap not found in DataMapSchema list: ${