You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@carbondata.apache.org by ja...@apache.org on 2017/11/18 15:29:55 UTC

[21/28] carbondata git commit: [CARBONDATA-1651] [Supported Boolean Type When Saving DataFrame] Provided Support For Boolean Data Type In CarbonDataFrameWriter

[CARBONDATA-1651] [Supported Boolean Type When Saving DataFrame] Provided Support For Boolean Data Type In CarbonDataFrameWriter

1. Provided support for the Boolean data type in CarbonDataFrameWriter
2. Test cases are added for the same

This closes #1491


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/91355ef7
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/91355ef7
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/91355ef7

Branch: refs/heads/fgdatamap
Commit: 91355ef7cb3147537eacd11c95518495417eab82
Parents: d74251f
Author: anubhav100 <an...@knoldus.in>
Authored: Mon Nov 13 13:33:15 2017 +0530
Committer: Jacky Li <ja...@qq.com>
Committed: Sat Nov 18 01:37:02 2017 +0800

----------------------------------------------------------------------
 .../testsuite/dataload/TestLoadDataFrame.scala  | 27 ++++++++++++++++++--
 .../spark/sql/CarbonDataFrameWriter.scala       |  1 +
 2 files changed, 26 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/91355ef7/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
index f2ea45e..3399740 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestLoadDataFrame.scala
@@ -20,7 +20,7 @@ package org.apache.carbondata.spark.testsuite.dataload
 import java.math.BigDecimal
 
 import org.apache.spark.sql.test.util.QueryTest
-import org.apache.spark.sql.types.{DecimalType, DoubleType, StringType, StructField, StructType}
+import org.apache.spark.sql.types._
 import org.apache.spark.sql.{DataFrame, Row, SaveMode}
 import org.scalatest.BeforeAndAfterAll
 
@@ -28,6 +28,7 @@ class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
   var df: DataFrame = _
   var dataFrame: DataFrame = _
   var df2: DataFrame = _
+  var booldf:DataFrame = _
 
 
   def buildTestData() = {
@@ -49,6 +50,15 @@ class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
     df2 = sqlContext.sparkContext.parallelize(1 to 1000)
       .map(x => ("key_" + x, "str_" + x, x, x * 2, x * 3))
       .toDF("c1", "c2", "c3", "c4", "c5")
+
+    val boolrdd = sqlContext.sparkContext.parallelize(
+      Row("anubhav",true) ::
+        Row("prince",false) :: Nil)
+
+    val boolSchema = StructType(
+      StructField("name", StringType, nullable = false) ::
+        StructField("isCarbonEmployee",BooleanType,nullable = false)::Nil)
+    booldf = sqlContext.createDataFrame(boolrdd,boolSchema)
   }
 
   def dropTable() = {
@@ -61,6 +71,8 @@ class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
     sql("DROP TABLE IF EXISTS carbon7")
     sql("DROP TABLE IF EXISTS carbon8")
     sql("DROP TABLE IF EXISTS carbon9")
+    sql("DROP TABLE IF EXISTS carbon10")
+
   }
 
 
@@ -70,7 +82,18 @@ class TestLoadDataFrame extends QueryTest with BeforeAndAfterAll {
     buildTestData
   }
 
-
+test("test the boolean data type"){
+  booldf.write
+    .format("carbondata")
+    .option("tableName", "carbon10")
+    .option("tempCSV", "true")
+    .option("compress", "true")
+    .mode(SaveMode.Overwrite)
+    .save()
+  checkAnswer(
+    sql("SELECT * FROM CARBON10"),
+    Seq(Row("anubhav", true), Row("prince", false)))
+}
 
   test("test load dataframe with saving compressed csv files") {
     // save dataframe to carbon file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/91355ef7/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
index b74576d..89b618f 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
@@ -153,6 +153,7 @@ class CarbonDataFrameWriter(sqlContext: SQLContext, val dataFrame: DataFrame) {
       case TimestampType => CarbonType.TIMESTAMP.getName
       case DateType => CarbonType.DATE.getName
       case decimal: DecimalType => s"decimal(${decimal.precision}, ${decimal.scale})"
+      case BooleanType => CarbonType.BOOLEAN.getName
       case other => sys.error(s"unsupported type: $other")
     }
   }