You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@carbondata.apache.org by ja...@apache.org on 2017/09/21 00:27:15 UTC

[05/23] carbondata git commit: [CARBONDATA-1348] Sort_Column should not supported for no-dictionary

[CARBONDATA-1348] Sort_Columns should not be supported for no-dictionary

Sort_Columns should not be supported for no-dictionary columns with numeric data types, nor for measure columns.

This closes #1354


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/302ef2f5
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/302ef2f5
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/302ef2f5

Branch: refs/heads/streaming_ingest
Commit: 302ef2f56f9c0b39a5b1d29fe25af3236a0ddb29
Parents: 8791eab
Author: rahulforallp <ra...@knoldus.in>
Authored: Wed Sep 13 15:43:33 2017 +0530
Committer: Ravindra Pesala <ra...@gmail.com>
Committed: Fri Sep 15 14:00:25 2017 +0530

----------------------------------------------------------------------
 .../testsuite/sortcolumns/TestSortColumns.scala | 20 ++++++++++++++++++--
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala | 12 ++++++++++++
 2 files changed, 30 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/302ef2f5/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala
index 2704d23..bd1264a 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala
@@ -53,7 +53,7 @@ class TestSortColumns extends QueryTest with BeforeAndAfterAll {
   }
 
   test(
-    "create table with no dictionary sort_columns where NumberOfNoDictSortColumns < " +
+    "create table with no dictionary sort_columns where NumberOfNoDictSortColumns is less than " +
     "NoDictionaryCount")
   {
     sql(
@@ -283,7 +283,7 @@ class TestSortColumns extends QueryTest with BeforeAndAfterAll {
     checkExistence(sql("describe formatted sorttableDesc"),true,"SORT_COLUMNS")
     checkExistence(sql("describe formatted sorttableDesc"),true,"empno,empname")
   }
-  
+
   test("duplicate columns in sort_columns") {
     val exceptionCaught = intercept[MalformedCarbonCommandException]{
       sql("CREATE TABLE sorttable1 (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int) STORED BY 'org.apache.carbondata.format' tblproperties('sort_columns'='empno,empname,empno')")
@@ -291,6 +291,21 @@ class TestSortColumns extends QueryTest with BeforeAndAfterAll {
   assert(exceptionCaught.getMessage.equals("SORT_COLUMNS Either having duplicate columns : empno or it contains illegal argumnet."))
   }
 
+  test("Measure columns in sort_columns") {
+    val exceptionCaught = intercept[MalformedCarbonCommandException] {
+      sql(
+        "CREATE TABLE sorttable1 (empno Double, empname String, designation String, doj Timestamp, " +
+        "workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, " +
+        "projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int," +
+        "utilization int,salary int) STORED BY 'org.apache.carbondata.format' tblproperties" +
+        "('sort_columns'='empno')")
+    }
+    println(exceptionCaught.getMessage)
+    assert(exceptionCaught.getMessage
+      .equals(
+        "sort_columns is unsupported for double datatype column: empno"))
+  }
+
   override def afterAll = {
     dropTable
   }
@@ -319,6 +334,7 @@ class TestSortColumns extends QueryTest with BeforeAndAfterAll {
     sql("drop table if exists unsortedtable_heap_inmemory")
     sql("drop table if exists test_sort_col")
     sql("drop table if exists test_sort_col_hive")
+    sql("drop table if exists sorttable1b")
   }
 
   def setLoadingProperties(offheap: String, unsafe: String, useBatch: String): Unit = {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/302ef2f5/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index 03aac20..661f724 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -625,6 +625,13 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
         if (DataType.STRING.getName.equalsIgnoreCase(field.dataType.get)) {
           noDictionaryDims :+= field.column
         }
+      } else if (sortKeyDimsTmp.exists(x => x.equalsIgnoreCase(field.column)) &&
+                 (dictExcludeCols.exists(x => x.equalsIgnoreCase(field.column)) ||
+                  isDefaultMeasure(field.dataType)) &&
+                 (!field.dataType.get.equalsIgnoreCase("STRING"))) {
+        throw new MalformedCarbonCommandException(s"Illegal argument in sort_column.Check if you " +
+                                                  s"have included UNSUPPORTED DataType column{${
+                                                  field.column}}in sort_columns.")
       } else if (sortKeyDimsTmp.exists(x => x.equalsIgnoreCase(field.column))) {
         noDictionaryDims :+= field.column
         dimFields += field
@@ -651,6 +658,11 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
     (dimFields.toSeq, msrFields, noDictionaryDims, sortKeyDims)
   }
 
+  def isDefaultMeasure(dataType: Option[String]): Boolean = {
+    val measureList = Array("DOUBLE", "DECIMAL", "FLOAT")
+    measureList.exists(dataType.get.equalsIgnoreCase(_))
+  }
+
   /**
    * It fills non string dimensions in dimFields
    */