You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@carbondata.apache.org by gv...@apache.org on 2017/04/21 11:36:35 UTC

[1/2] incubator-carbondata git commit: supported char and varchar in alter add columns

Repository: incubator-carbondata
Updated Branches:
  refs/heads/master 3de1e6313 -> c0bf7945b


supported char and varchar in alter add columns


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/860fa7dc
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/860fa7dc
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/860fa7dc

Branch: refs/heads/master
Commit: 860fa7dc830406d08d323967ab12f2ee2acb4555
Parents: 3de1e63
Author: dhatchayani <dh...@gmail.com>
Authored: Thu Apr 20 20:21:29 2017 +0530
Committer: Venkata Ramana G <ra...@huawei.com>
Committed: Fri Apr 21 17:02:48 2017 +0530

----------------------------------------------------------------------
 .../spark/util/DataTypeConverterUtil.scala      |  6 ++-
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala |  5 +-
 .../rowreader/AddColumnTestCases.scala          | 54 ++++++++++++++++++++
 3 files changed, 62 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/860fa7dc/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
index 6a43440..566347f 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
@@ -23,7 +23,7 @@ import org.apache.carbondata.format.{DataType => ThriftDataType}
 object DataTypeConverterUtil {
   def convertToCarbonType(dataType: String): DataType = {
     dataType.toLowerCase match {
-      case "string" | "char" => DataType.STRING
+      case "string" => DataType.STRING
       case "int" => DataType.INT
       case "integer" => DataType.INT
       case "tinyint" => DataType.SHORT
@@ -62,6 +62,10 @@ object DataTypeConverterUtil {
           DataType.ARRAY
         } else if (others != null && others.startsWith("structtype")) {
           DataType.STRUCT
+        } else if (others != null && others.startsWith("char")) {
+          DataType.STRING
+        } else if (others != null && others.startsWith("varchar")) {
+          DataType.STRING
         } else {
           sys.error(s"Unsupported data type: $dataType")
         }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/860fa7dc/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index 9ca9163..dad5716 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -146,6 +146,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   protected val TIMESTAMP = carbonKeyWord("TIMESTAMP")
   protected val DATE = carbonKeyWord("DATE")
   protected val CHAR = carbonKeyWord("CHAR")
+  protected val VARCHAR = carbonKeyWord("VARCHAR")
   protected val NUMERIC = carbonKeyWord("NUMERIC")
   protected val DECIMAL = carbonKeyWord("DECIMAL")
   protected val DOUBLE = carbonKeyWord("DOUBLE")
@@ -582,7 +583,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
    */
   def isDetectAsDimentionDatatype(dimensionDatatype: String): Boolean = {
     val dimensionType = Array("string", "array", "struct", "timestamp", "date", "char")
-    dimensionType.exists(x => x.equalsIgnoreCase(dimensionDatatype))
+    dimensionType.exists(x => dimensionDatatype.toLowerCase.contains(x))
   }
 
   /**
@@ -864,7 +865,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
    * Matching the decimal(10,0) data type and returning the same.
    */
   private lazy val charType =
-    CHAR ~ ("(" ~>numericLit <~ ")").? ^^ {
+    (CHAR | VARCHAR ) ~ ("(" ~>numericLit <~ ")") ^^ {
       case char ~ digit =>
         s"$char($digit)"
     }

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/860fa7dc/integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/rowreader/AddColumnTestCases.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/rowreader/AddColumnTestCases.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/rowreader/AddColumnTestCases.scala
index 06f480b..f994083 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/rowreader/AddColumnTestCases.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/rowreader/AddColumnTestCases.scala
@@ -153,6 +153,60 @@ class AddColumnTestCases extends QueryTest with BeforeAndAfterAll {
     sql("DROP TABLE IF EXISTS carbon_table")
   }
 
+  test("test to add column with char datatype") {
+    sql("DROP TABLE IF EXISTS carbon_table")
+    sql(
+      "CREATE TABLE carbon_table(intField int,stringField string,charField string,timestampField " +
+      "timestamp)STORED BY 'carbondata' TBLPROPERTIES" +
+      "('DICTIONARY_EXCLUDE'='charField')")
+    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE carbon_table " +
+        s"options('FILEHEADER'='intField,stringField,charField,timestampField,decimalField')")
+    sql("Alter table carbon_table add columns(newfield char(10)) TBLPROPERTIES ('DEFAULT.VALUE.newfield'='char')")
+    checkAnswer(sql("select distinct(newfield) from carbon_table"),Row("char"))
+    sql("DROP TABLE IF EXISTS carbon_table")
+  }
+
+  test("test to check if exception is thrown with wrong char syntax") {
+    intercept[Exception] {
+      sql("DROP TABLE IF EXISTS carbon_table")
+      sql(
+        "CREATE TABLE carbon_table(intField int,stringField string,charField string,timestampField " +
+
+        "timestamp)STORED BY 'carbondata' TBLPROPERTIES" +
+        "('DICTIONARY_EXCLUDE'='charField')")
+      sql(
+        "Alter table carbon_table add columns(newfield char) TBLPROPERTIES ('DEFAULT.VALUE.newfield'='c')")
+      sql("DROP TABLE IF EXISTS carbon_table")
+    }
+  }
+
+  test("test to add column with varchar datatype") {
+    sql("DROP TABLE IF EXISTS carbon_table")
+    sql(
+      "CREATE TABLE carbon_table(intField int,stringField string,charField string,timestampField " +
+      "timestamp)STORED BY 'carbondata' TBLPROPERTIES" +
+      "('DICTIONARY_EXCLUDE'='charField')")
+    sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE carbon_table " +
+        s"options('FILEHEADER'='intField,stringField,charField,timestampField,decimalField')")
+    sql("Alter table carbon_table add columns(newfield varchar(10)) TBLPROPERTIES ('DEFAULT.VALUE.newfield'='char')")
+    checkAnswer(sql("select distinct(newfield) from carbon_table"),Row("char"))
+    sql("DROP TABLE IF EXISTS carbon_table")
+  }
+
+  test("test to check if exception is thrown with wrong varchar syntax") {
+    intercept[Exception] {
+      sql("DROP TABLE IF EXISTS carbon_table")
+      sql(
+        "CREATE TABLE carbon_table(intField int,stringField string,charField string,timestampField " +
+
+        "timestamp)STORED BY 'carbondata' TBLPROPERTIES" +
+        "('DICTIONARY_EXCLUDE'='charField')")
+      sql(
+        "Alter table carbon_table add columns(newfield varchar) TBLPROPERTIES ('DEFAULT.VALUE.newfield'='c')")
+      sql("DROP TABLE IF EXISTS carbon_table")
+    }
+  }
+
   override def afterAll {
     sql("DROP TABLE IF EXISTS addcolumntest")
     sql("drop table if exists hivetable")


[2/2] incubator-carbondata git commit: [CARBONDATA-966] Supported char and varchar in alter add columns. This closes #826

Posted by gv...@apache.org.
[CARBONDATA-966] Supported char and varchar in alter add columns. This closes #826


Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/c0bf7945
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/c0bf7945
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/c0bf7945

Branch: refs/heads/master
Commit: c0bf7945b7db1192bc57711e5466aca7aa114dc8
Parents: 3de1e63 860fa7d
Author: Venkata Ramana G <ra...@huawei.com>
Authored: Fri Apr 21 17:06:04 2017 +0530
Committer: Venkata Ramana G <ra...@huawei.com>
Committed: Fri Apr 21 17:06:04 2017 +0530

----------------------------------------------------------------------
 .../spark/util/DataTypeConverterUtil.scala      |  6 ++-
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala |  5 +-
 .../rowreader/AddColumnTestCases.scala          | 54 ++++++++++++++++++++
 3 files changed, 62 insertions(+), 3 deletions(-)
----------------------------------------------------------------------