Posted to commits@carbondata.apache.org by gv...@apache.org on 2017/06/22 10:03:31 UTC

[1/2] carbondata git commit: [CARBONDATA-1149] Fix mismatched partition column type when partition info is specified

Repository: carbondata
Updated Branches:
  refs/heads/master 52968cddd -> aff201487


[CARBONDATA-1149] Fix mismatched partition column type when partition info is specified


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/ef77313f
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/ef77313f
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/ef77313f

Branch: refs/heads/master
Commit: ef77313f11d90bec30ed5e0ae0bf10a674d58b05
Parents: 52968cd
Author: chenerlu <ch...@huawei.com>
Authored: Fri Jun 9 17:33:30 2017 +0800
Committer: Venkata Ramana G <ra...@huawei.com>
Committed: Thu Jun 22 15:29:01 2017 +0530

----------------------------------------------------------------------
 .../carbondata/core/util/DataTypeUtil.java      |   5 +
 .../partition/TestDDLForPartitionTable.scala    | 253 ++++++++++++++++++-
 .../carbondata/spark/util/CommonUtil.scala      | 119 ++++++++-
 .../org/apache/spark/sql/CarbonSqlParser.scala  |   2 +-
 4 files changed, 366 insertions(+), 13 deletions(-)
----------------------------------------------------------------------
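
In short: before this change, the values supplied in NUM_PARTITIONS, LIST_INFO
and RANGE_INFO were never checked against the declared type of the partition
column, so a definition like the sketch below passed DDL validation. With this
patch it is rejected at CREATE TABLE time with "Invalid partition definition"
(table name is illustrative only; the DDL mirrors the new tests further down):

    sql(
      """
        | CREATE TABLE sketch_bad_list(col1 INT, col2 STRING)
        | PARTITIONED BY (col3 INT) STORED BY 'carbondata'
        | TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
      """.stripMargin)  // now throws MalformedCarbonCommandException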


http://git-wip-us.apache.org/repos/asf/carbondata/blob/ef77313f/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java b/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
index af3ff2f..1b1884e 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
@@ -269,6 +269,11 @@ public final class DataTypeUtil {
             return null;
           }
           return Short.parseShort(data);
+        case FLOAT:
+          if (data.isEmpty()) {
+            return null;
+          }
+          return Float.parseFloat(data);
         case DOUBLE:
           if (data.isEmpty()) {
             return null;
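
The new FLOAT branch mirrors the existing SHORT and DOUBLE handling: an empty
string maps to null, anything else goes through Float.parseFloat. A minimal
sketch of the expected behaviour, assuming the enclosing method in this hunk
is DataTypeUtil.getDataBasedOnDataType(String, DataType) (the method name is
not visible in the hunk):

    import org.apache.carbondata.core.metadata.datatype.DataType
    import org.apache.carbondata.core.util.DataTypeUtil

    // Sketch: empty input yields null, matching the SHORT and DOUBLE branches.
    assert(DataTypeUtil.getDataBasedOnDataType("", DataType.FLOAT) == null)
    // A well-formed literal is parsed with Float.parseFloat.
    assert(DataTypeUtil.getDataBasedOnDataType("1.5", DataType.FLOAT) == 1.5f)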

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ef77313f/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
index 6aa259e..9ad5959 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestDDLForPartitionTable.scala
@@ -31,6 +31,9 @@ import org.apache.carbondata.core.util.CarbonProperties
 class TestDDLForPartitionTable  extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll = {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy-MM-dd HH:mm:ss")
+      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
     dropTable
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
@@ -68,7 +71,7 @@ class TestDDLForPartitionTable  extends QueryTest with BeforeAndAfterAll {
         | PARTITIONED BY (doj Timestamp)
         | STORED BY 'org.apache.carbondata.format'
         | TBLPROPERTIES('PARTITION_TYPE'='RANGE',
-        |  'RANGE_INFO'='01-01-2010, 01-01-2015, 01-04-2015, 01-07-2015')
+        |  'RANGE_INFO'='2017-06-11 00:00:02, 2017-06-13 23:59:59')
       """.stripMargin)
 
     val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_rangeTable")
@@ -81,11 +84,9 @@ class TestDDLForPartitionTable  extends QueryTest with BeforeAndAfterAll {
     assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(1) == Encoding.DIRECT_DICTIONARY)
     assert(partitionInfo.getColumnSchemaList.get(0).getEncodingList.get(2) == Encoding.INVERTED_INDEX)
     assert(partitionInfo.getPartitionType == PartitionType.RANGE)
-    assert(partitionInfo.getRangeInfo.size == 4)
-    assert(partitionInfo.getRangeInfo.get(0).equals("01-01-2010"))
-    assert(partitionInfo.getRangeInfo.get(1).equals("01-01-2015"))
-    assert(partitionInfo.getRangeInfo.get(2).equals("01-04-2015"))
-    assert(partitionInfo.getRangeInfo.get(3).equals("01-07-2015"))
+    assert(partitionInfo.getRangeInfo.size == 2)
+    assert(partitionInfo.getRangeInfo.get(0).equals("2017-06-11 00:00:02"))
+    assert(partitionInfo.getRangeInfo.get(1).equals("2017-06-13 23:59:59"))
   }
 
   test("create partition table: list partition") {
@@ -131,10 +132,229 @@ class TestDDLForPartitionTable  extends QueryTest with BeforeAndAfterAll {
     sql(
       """create table des(a int, b string) partitioned by (c string) stored by 'carbondata'
         |tblproperties ('partition_type'='list','list_info'='1,2')""".stripMargin)
-    checkExistence(sql("describe formatted des"),true, "Partition Columns")
+    checkExistence(sql("describe formatted des"), true, "Partition Columns")
     sql("drop table if exists des")
   }
 
+  test("test exception if hash number is invalid") {
+    sql("DROP TABLE IF EXISTS test_hash_1")
+    val exception_test_hash_1: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_hash_1(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 INT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='HASH', 'NUM_PARTITIONS'='2.1')
+        """.stripMargin
+      )
+    }
+    assert(exception_test_hash_1.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_hash_2")
+    val exception_test_hash_2: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_hash_2(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 INT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='HASH', 'NUM_PARTITIONS'='abc')
+        """.stripMargin
+      )
+    }
+    assert(exception_test_hash_2.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_hash_3")
+    val exception_test_hash_3: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_hash_3(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 INT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='HASH', 'NUM_PARTITIONS'='-2.1')
+        """.stripMargin
+      )
+    }
+    assert(exception_test_hash_3.getMessage.contains("Invalid partition definition"))
+  }
+
+
+  test("test exception when values in list_info can not match partition column type") {
+    sql("DROP TABLE IF EXISTS test_list_int")
+    val exception_test_list_int: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_list_int(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 INT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_list_int.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_list_small")
+    val exception_test_list_small: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_list_small(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 SMALLINT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_list_small.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_list_float")
+    val exception_test_list_float: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_list_float(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 FLOAT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_list_float.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_list_double")
+    val exception_test_list_double: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_list_double(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 DOUBLE) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_list_double.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_list_bigint")
+    val exception_test_list_bigint: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_list_bigint(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 BIGINT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_list_bigint.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_list_date")
+    val exception_test_list_date: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_list_date(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 DATE) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_list_date.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_list_timestamp")
+    val exception_test_list_timestamp: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_list_timestamp(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 TIMESTAMP) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_list_timestamp.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_list_decimal")
+    val exception_test_list_decimal: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_list_decimal(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 DECIMAL(25, 4)) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='LIST', 'LIST_INFO'='23.23111,2.32')
+        """.stripMargin)
+    }
+    assert(exception_test_list_decimal.getMessage.contains("Invalid partition definition"))
+  }
+
+  test("test exception when values in range_info can not match partition column type") {
+    sql("DROP TABLE IF EXISTS test_range_int")
+    val exception_test_range_int: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_int(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 INT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_range_int.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_range_smallint")
+    val exception_test_range_smallint: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_smallint(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 SMALLINT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_range_smallint.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_range_float")
+    val exception_test_range_float: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_float(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 FLOAT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_range_float.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_range_double")
+    val exception_test_range_double: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_double(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 DOUBLE) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_range_double.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_range_bigint")
+    val exception_test_range_bigint: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_bigint(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 BIGINT) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_range_bigint.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_range_date")
+    val exception_test_range_date: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_date(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 DATE) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_range_date.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_range_timestamp")
+    val exception_test_range_timestamp: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_timestamp(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 TIMESTAMP) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_range_timestamp.getMessage.contains("Invalid partition definition"))
+
+    sql("DROP TABLE IF EXISTS test_range_decimal")
+    val exception_test_range_decimal: Exception = intercept[Exception] {
+      sql(
+        """
+          | CREATE TABLE test_range_decimal(col1 INT, col2 STRING)
+          | PARTITIONED BY (col3 DECIMAL(25, 4)) STORED BY 'carbondata'
+          | TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='abc,def')
+        """.stripMargin)
+    }
+    assert(exception_test_range_decimal.getMessage.contains("Invalid partition definition"))
+  }
+
   override def afterAll = {
     dropTable
     CarbonProperties.getInstance()
@@ -146,6 +366,25 @@ class TestDDLForPartitionTable  extends QueryTest with BeforeAndAfterAll {
     sql("drop table if exists rangeTable")
     sql("drop table if exists listTable")
     sql("drop table if exists test")
+    sql("DROP TABLE IF EXISTS test_hash_1")
+    sql("DROP TABLE IF EXISTS test_hash_2")
+    sql("DROP TABLE IF EXISTS test_hash_3")
+    sql("DROP TABLE IF EXISTS test_list_int")
+    sql("DROP TABLE IF EXISTS test_list_smallint")
+    sql("DROP TABLE IF EXISTS test_list_bigint")
+    sql("DROP TABLE IF EXISTS test_list_float")
+    sql("DROP TABLE IF EXISTS test_list_double")
+    sql("DROP TABLE IF EXISTS test_list_date")
+    sql("DROP TABLE IF EXISTS test_list_timestamp")
+    sql("DROP TABLE IF EXISTS test_list_decimal")
+    sql("DROP TABLE IF EXISTS test_range_int")
+    sql("DROP TABLE IF EXISTS test_range_smallint")
+    sql("DROP TABLE IF EXISTS test_range_bigint")
+    sql("DROP TABLE IF EXISTS test_range_float")
+    sql("DROP TABLE IF EXISTS test_range_double")
+    sql("DROP TABLE IF EXISTS test_range_date")
+    sql("DROP TABLE IF EXISTS test_range_timestamp")
+    sql("DROP TABLE IF EXISTS test_range_decimal")
   }
 
 }
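
For contrast with the negative cases above, a definition whose values do parse
as the declared partition column type still passes validation. A sketch
(illustrative table name; assumes a timestamp format under which these
literals parse, e.g. "yyyy-MM-dd HH:mm:ss"):

    sql(
      """
        | CREATE TABLE sketch_range_ok(col1 INT, col2 STRING)
        | PARTITIONED BY (col3 TIMESTAMP) STORED BY 'carbondata'
        | TBLPROPERTIES('PARTITION_TYPE'='RANGE',
        |  'RANGE_INFO'='2017-06-11 00:00:02, 2017-06-13 23:59:59')
      """.stripMargin)  // no exception: both bounds convert to TIMESTAMP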

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ef77313f/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
index 5314a15..d3b6f8d 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
@@ -17,6 +17,7 @@
 
 package org.apache.carbondata.spark.util
 
+import java.text.SimpleDateFormat
 import java.util
 
 import scala.collection.JavaConverters._
@@ -27,13 +28,13 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
 import org.apache.spark.SparkContext
 import org.apache.spark.sql.execution.command.{ColumnProperty, Field, PartitionerField}
-import org.apache.spark.sql.types.StructField
 import org.apache.spark.util.FileUtils
 
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.metadata.datatype.DataType
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager
-import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil}
+import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil, DataTypeUtil}
 import org.apache.carbondata.processing.csvload.CSVInputFormat
 import org.apache.carbondata.processing.model.CarbonLoadModel
 import org.apache.carbondata.processing.newflow.exception.CarbonDataLoadingException
@@ -43,6 +44,9 @@ import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
 object CommonUtil {
   private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
 
+  val FIXED_DECIMAL = """decimal\(\s*(\d+)\s*,\s*(\-?\d+)\s*\)""".r
+  val FIXED_DECIMALTYPE = """decimaltype\(\s*(\d+)\s*,\s*(\-?\d+)\s*\)""".r
+
   def validateColumnGroup(colGroup: String, noDictionaryDims: Seq[String],
       msrs: Seq[Field], retrievedColGrps: Seq[String], dims: Seq[Field]) {
     val colGrpCols = colGroup.split(',').map(_.trim)
@@ -167,9 +171,23 @@ object CommonUtil {
       isValid = false
     } else {
       partitionType.get.toUpperCase() match {
-        case "HASH" => if (!numPartitions.isDefined) isValid = false
-        case "LIST" => if (!listInfo.isDefined) isValid = false
-        case "RANGE" => if (!rangeInfo.isDefined) isValid = false
+        case "HASH" => if (!numPartitions.isDefined
+        || scala.util.Try(numPartitions.get.toInt).isFailure
+        || numPartitions.get.toInt <= 0) {
+          isValid = false
+        }
+        case "LIST" => if (!listInfo.isDefined) {
+          isValid = false
+        } else {
+          listInfo.get.replace("(", "").replace(")", "").split(",").map(_.trim).foreach(
+            isValid &= validateTypeConvert(partitionerFields(0), _))
+        }
+        case "RANGE" => if (!rangeInfo.isDefined) {
+          isValid = false
+        } else {
+          rangeInfo.get.split(",").map(_.trim).foreach(
+            isValid &= validateTypeConvert(partitionerFields(0), _))
+        }
         case "RANGE_INTERVAL" => isValid = false
         case _ => isValid = false
       }
@@ -179,6 +197,97 @@ object CommonUtil {
     isValid
   }
 
+  def validateTypeConvertForSpark2(partitionerField: PartitionerField, value: String): Boolean = {
+    val result = partitionerField.dataType.get.toLowerCase match {
+      case "integertype" =>
+        scala.util.Try(value.toInt).isSuccess
+      case "stringtype" =>
+        scala.util.Try(value.toString).isSuccess
+      case "longtype" =>
+        scala.util.Try(value.toLong).isSuccess
+      case "floattype" =>
+        scala.util.Try(value.toFloat).isSuccess
+      case "doubletype" =>
+        scala.util.Try(value.toDouble).isSuccess
+      case "numerictype" =>
+        scala.util.Try(value.toDouble).isSuccess
+      case "smallinttype" =>
+        scala.util.Try(value.toShort).isSuccess
+      case "tinyinttype" =>
+        scala.util.Try(value.toShort).isSuccess
+      case "shorttype" =>
+        scala.util.Try(value.toShort).isSuccess
+      case FIXED_DECIMALTYPE(_, _) =>
+        val parField = partitionerField.dataType.get.split(",")
+        val precision = parField(0).substring(12).toInt
+        val scale = parField(1).substring(0, parField(1).length - 1).toInt
+        val pattern = "^([-]?[0-9]{0," + (precision - scale) +
+                      "})([.][0-9]{1," + scale + "})?$"
+        value.matches(pattern)
+      case "timestamptype" =>
+        val timeStampFormat = new SimpleDateFormat(CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT))
+        scala.util.Try(timeStampFormat.parse(value)).isSuccess
+      case "datetype" =>
+        val dateFormat = new SimpleDateFormat(CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.CARBON_DATE_FORMAT))
+        scala.util.Try(dateFormat.parse(value)).isSuccess
+      case others =>
+       if (others != null && others.startsWith("char")) {
+         scala.util.Try(value.toString).isSuccess
+        } else if (others != null && others.startsWith("varchar")) {
+         scala.util.Try(value.toString).isSuccess
+        } else {
+          throw new MalformedCarbonCommandException(
+            "UnSupported partition type: " + partitionerField.dataType)
+        }
+    }
+    result
+  }
+
+  def validateTypeConvert(partitionerField: PartitionerField, value: String): Boolean = {
+    val result = partitionerField.dataType.get.toLowerCase() match {
+      case "int" =>
+        scala.util.Try(value.toInt).isSuccess
+      case "string" =>
+        scala.util.Try(value.toString).isSuccess
+      case "bigint" =>
+        scala.util.Try(value.toLong).isSuccess
+      case "long" =>
+        scala.util.Try(value.toLong).isSuccess
+      case "float" =>
+        scala.util.Try(value.toFloat).isSuccess
+      case "double" =>
+        scala.util.Try(value.toDouble).isSuccess
+      case "numeric" =>
+        scala.util.Try(value.toDouble).isSuccess
+      case "smallint" =>
+        scala.util.Try(value.toShort).isSuccess
+      case "tinyint" =>
+        scala.util.Try(value.toShort).isSuccess
+      case "boolean" =>
+        scala.util.Try(value.toBoolean).isSuccess
+      case FIXED_DECIMAL(_, _) =>
+        val parField = partitionerField.dataType.get.split(",")
+        val precision = parField(0).substring(8).toInt
+        val scale = parField(1).substring(0, parField(1).length - 1).toInt
+        val pattern = "^([-]?[0-9]{0," + (precision - scale) +
+                      "})([.][0-9]{1," + scale + "})?$"
+        value.matches(pattern)
+      case "timestamp" =>
+        val timeStampFormat = new SimpleDateFormat(CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT))
+        scala.util.Try(timeStampFormat.parse(value)).isSuccess
+      case "date" =>
+        val dateFormat = new SimpleDateFormat(CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.CARBON_DATE_FORMAT))
+        scala.util.Try(dateFormat.parse(value)).isSuccess
+      case _ =>
+        validateTypeConvertForSpark2(partitionerField, value)
+    }
+    result
+  }
+
   def validateFields(key: String, fields: Seq[Field]): Boolean = {
     var isValid: Boolean = false
     fields.foreach { field =>
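
Both validators are plain predicates over the declared column type, so they
can be exercised directly. A minimal sketch (the PartitionerField constructor
arguments shown here are illustrative, not authoritative):

    import org.apache.spark.sql.execution.command.PartitionerField
    import org.apache.carbondata.spark.util.CommonUtil

    // Hypothetical field describing an INT partition column.
    val intField = PartitionerField("col3", Some("int"), null)
    assert(CommonUtil.validateTypeConvert(intField, "42"))   // Int parses
    assert(!CommonUtil.validateTypeConvert(intField, "abc")) // rejected

    // Timestamp values are checked against the configured
    // CARBON_TIMESTAMP_FORMAT via SimpleDateFormat.parse.
    val tsField = PartitionerField("doj", Some("timestamp"), null)
    CommonUtil.validateTypeConvert(tsField, "2017-06-11 00:00:02")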

http://git-wip-us.apache.org/repos/asf/carbondata/blob/ef77313f/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
index 99a20b4..f12e54b 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
@@ -294,7 +294,7 @@ class CarbonSqlParser() extends CarbonDDLSqlParser {
 
           if (partitionCols.nonEmpty) {
             if (!CommonUtil.validatePartitionColumns(tableProperties, partitionCols)) {
-              throw new MalformedCarbonCommandException("Invalid table properties")
+              throw new MalformedCarbonCommandException("Invalid partition definition")
             }
             // partition columns should not be part of the schema
             val colNames = fields.map(_.column)


[2/2] carbondata git commit: [CARBONDATA-1149] Fix mismatched partition column type when partition info is specified. This closes #1046

Posted by gv...@apache.org.
[CARBONDATA-1149] Fix mismatched partition column type when partition info is specified. This closes #1046


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/aff20148
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/aff20148
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/aff20148

Branch: refs/heads/master
Commit: aff201487f84cb3965438cf4cc5c6a3b729b785b
Parents: 52968cd ef77313
Author: Venkata Ramana G <ra...@huawei.com>
Authored: Thu Jun 22 15:33:17 2017 +0530
Committer: Venkata Ramana G <ra...@huawei.com>
Committed: Thu Jun 22 15:33:17 2017 +0530

----------------------------------------------------------------------
 .../carbondata/core/util/DataTypeUtil.java      |   5 +
 .../partition/TestDDLForPartitionTable.scala    | 253 ++++++++++++++++++-
 .../carbondata/spark/util/CommonUtil.scala      | 119 ++++++++-
 .../org/apache/spark/sql/CarbonSqlParser.scala  |   2 +-
 4 files changed, 366 insertions(+), 13 deletions(-)
----------------------------------------------------------------------