Posted to commits@carbondata.apache.org by ja...@apache.org on 2017/09/21 00:27:23 UTC

[13/23] carbondata git commit: [CARBONDATA-1425] Inappropriate Exception displays while creating a new partition with incorrect partition type

[CARBONDATA-1425] Inappropriate Exception displays while creating a new partition with incorrect partition type

Change the error message shown when the range info data does not match the partition field's data type. The new message reads: Data in range info must be the same type with the partition field's type

This closes #1336
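As a rough, standalone sketch (not CarbonData's actual code) of the behavior this change targets: when a value supplied for a range partition cannot be converted to the partition column's data type, the conversion yields null and the operation now stops with the explicit message above instead of failing later during the range comparison. The convert helper below is hypothetical, standing in for PartitionUtil.getDataBasedOnDataType.

    import scala.util.Try

    // Hypothetical stand-in for PartitionUtil.getDataBasedOnDataType:
    // returns null when the raw value does not match the column's data type.
    def convert(raw: String, columnDataType: String): Any = columnDataType match {
      case "INT"    => Try(raw.trim.toInt).getOrElse(null)
      case "DOUBLE" => Try(raw.trim.toDouble).getOrElse(null)
      case _        => raw
    }

    val newRangeValue = "abc"   // user-supplied value for an INT range column
    val converted = convert(newRangeValue, "INT")
    if (converted == null) {
      sys.error("Data in range info must be the same type with the partition field's type INT")
    }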


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/7e5e29c7
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/7e5e29c7
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/7e5e29c7

Branch: refs/heads/streaming_ingest
Commit: 7e5e29c740e1b1a253379ed969b260accc73b09f
Parents: d3cb6f6
Author: mayun <si...@163.com>
Authored: Wed Sep 6 22:52:39 2017 +0800
Committer: chenliang613 <ch...@apache.org>
Committed: Mon Sep 18 14:48:25 2017 +0800

----------------------------------------------------------------------
 .../carbondata/spark/util/CommonUtil.scala      |   7 +-
 .../partition/TestAlterPartitionTable.scala     | 118 ++++++++++++++++++-
 2 files changed, 123 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/7e5e29c7/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
index ed4d784..fd265a8 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
@@ -334,12 +334,17 @@ object CommonUtil {
         timestampFormatter, dateFormatter)
     }
     val iterator = rangeInfo.tail.toIterator
-    while(iterator.hasNext) {
+    while (iterator.hasNext) {
       val next = columnDataType match {
         case DataType.STRING => ByteUtil.toBytes(iterator.next())
         case _ => PartitionUtil.getDataBasedOnDataType(iterator.next(), columnDataType,
           timestampFormatter, dateFormatter)
       }
+      if (next == null) {
+        sys.error(
+          "Data in range info must be the same type with the partition field's type "
+            + columnDataType)
+      }
       if (comparator.compare(head, next) < 0) {
         head = next
       } else {
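
With the null check above in place, the failure surfaces directly through the SQL layer. A hedged sketch of the user-facing flow, assuming a Spark session with CarbonData support bound to sql() (as exercised by the new tests below):

    sql(
      """
        | CREATE TABLE test_range_int(col1 INT, col2 STRING)
        | PARTITIONED BY (col3 INT) STORED BY 'carbondata'
        | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='11,12')
      """.stripMargin)
    // 'abc' cannot be parsed as an INT, so the command now fails with:
    // "Data in range info must be the same type with the partition field's type ..."
    sql("ALTER TABLE test_range_int ADD PARTITION ('abc')")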

http://git-wip-us.apache.org/repos/asf/carbondata/blob/7e5e29c7/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestAlterPartitionTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestAlterPartitionTable.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestAlterPartitionTable.scala
index 9de2ef5..0c59bd9 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestAlterPartitionTable.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestAlterPartitionTable.scala
@@ -36,7 +36,8 @@ class TestAlterPartitionTable extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
     dropTable
-
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
     /**
@@ -569,6 +570,111 @@ class TestAlterPartitionTable extends QueryTest with BeforeAndAfterAll {
     checkAnswer(result_after5, result_origin5)
   }
 
+   test("test exception when alter partition and the values"
+       + " in range_info can not match partition column type") {
+     val exception_test_range_int: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_int(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 INT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='11,12')
+        """.stripMargin)
+       sql("ALTER TABLE test_range_int ADD PARTITION ('abc')")
+    }
+     assert(exception_test_range_int.getMessage
+         .contains("Data in range info must be the same type with the partition field's type"))
+
+    sql("DROP TABLE IF EXISTS test_range_smallint")
+    val exception_test_range_smallint: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_smallint(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 SMALLINT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='11,12')
+        """.stripMargin)
+      sql("ALTER TABLE test_range_smallint ADD PARTITION ('abc')")
+    }
+     assert(exception_test_range_smallint.getMessage
+         .contains("Data in range info must be the same type with the partition field's type"))
+
+    sql("DROP TABLE IF EXISTS test_range_float")
+    val exception_test_range_float: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_float(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 FLOAT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='1.1,2.1')
+        """.stripMargin)
+      sql("ALTER TABLE test_range_float ADD PARTITION ('abc')")
+    }
+     assert(exception_test_range_float.getMessage
+         .contains("Data in range info must be the same type with the partition field's type"))
+
+    sql("DROP TABLE IF EXISTS test_range_double")
+    val exception_test_range_double: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_double(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 DOUBLE) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='1000.005,2000.005')
+        """.stripMargin)
+      sql("ALTER TABLE test_range_double ADD PARTITION ('abc')")
+    }
+     assert(exception_test_range_double.getMessage
+         .contains("Data in range info must be the same type with the partition field's type"))
+
+    sql("DROP TABLE IF EXISTS test_range_bigint")
+    val exception_test_range_bigint: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_bigint(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 BIGINT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='123456789,223456789')
+        """.stripMargin)
+       sql("ALTER TABLE test_range_bigint ADD PARTITION ('abc')")
+    }
+     assert(exception_test_range_bigint.getMessage
+         .contains("Data in range info must be the same type with the partition field's type"))
+
+    sql("DROP TABLE IF EXISTS test_range_date")
+    val exception_test_range_date: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_date(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 DATE) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='2017-06-11, 2017-06-13')
+        """.stripMargin)
+      sql("ALTER TABLE test_range_date ADD PARTITION ('abc')")
+    }
+    assert(exception_test_range_date.getMessage
+      .contains("Data in range info must be the same type with the partition field's type"))
+
+    sql("DROP TABLE IF EXISTS test_range_timestamp")
+    val exception_test_range_timestamp: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_timestamp(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 TIMESTAMP) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='2017/06/11, 2017/06/13')
+        """.stripMargin)
+      sql("ALTER TABLE test_range_timestamp ADD PARTITION ('abc')")
+    }
+    assert(exception_test_range_timestamp.getMessage
+      .contains("Data in range info must be the same type with the partition field's type"))
+    sql("DROP TABLE IF EXISTS test_range_decimal")
+    val exception_test_range_decimal: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_decimal(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 DECIMAL(25, 4)) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='22.22,33.33')
+        """.stripMargin)
+      sql("ALTER TABLE test_range_decimal ADD PARTITION ('abc')")
+    }
+    assert(exception_test_range_decimal.getMessage
+         .contains("Data in range info must be the same type with the partition field's type"))
+  }
+
   def validateDataFiles(tableUniqueName: String, segmentId: String, partitions: Seq[Int]): Unit = {
     val carbonTable = CarbonMetadata.getInstance().getCarbonTable(tableUniqueName)
     val dataFiles = getDataFiles(carbonTable, segmentId)
@@ -606,6 +712,8 @@ class TestAlterPartitionTable extends QueryTest with BeforeAndAfterAll {
   override def afterAll = {
     dropTable
     CarbonProperties.getInstance()
+    .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
+    CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
   }
 
@@ -620,6 +728,14 @@ class TestAlterPartitionTable extends QueryTest with BeforeAndAfterAll {
     sql("DROP TABLE IF EXISTS list_table_country")
     sql("DROP TABLE IF EXISTS range_table_logdate_split")
     sql("DROP TABLE IF EXISTS range_table_bucket")
+    sql("DROP TABLE IF EXISTS test_range_int")
+    sql("DROP TABLE IF EXISTS test_range_smallint")
+    sql("DROP TABLE IF EXISTS test_range_bigint")
+    sql("DROP TABLE IF EXISTS test_range_float")
+    sql("DROP TABLE IF EXISTS test_range_double")
+    sql("DROP TABLE IF EXISTS test_range_date")
+    sql("DROP TABLE IF EXISTS test_range_timestamp")
+    sql("DROP TABLE IF EXISTS test_range_decimal")
   }