Posted to commits@carbondata.apache.org by zh...@apache.org on 2019/06/24 12:08:45 UTC

[carbondata] branch master updated: [CARBONDATA-3380] Fix missing appName and AnalysisException bug in DirectSQLExample

This is an automated email from the ASF dual-hosted git repository.

zhangzc pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new d1d9531  [CARBONDATA-3380] Fix missing appName and AnalysisException bug in DirectSQLExample
d1d9531 is described below

commit d1d9531825f311b497b74c3b6bb1bd51ffbd033c
Author: lamber-ken <22...@qq.com>
AuthorDate: Sun May 12 23:53:19 2019 +0800

    [CARBONDATA-3380] Fix missing appName and AnalysisException bug in DirectSQLExample
    
    Fix missing appName and AnalysisException bug in DirectSQLExample
    
    This closes #3213
---
 .../carbondata/examples/DirectSQLExample.scala     | 73 +++++++++++++---------
 .../apache/carbondata/examplesCI/RunExamples.scala |  5 ++
 2 files changed, 49 insertions(+), 29 deletions(-)
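
Two fixes are visible in the diff below: the SDK writer builder gains .writtenBy("DirectSQLExample"), supplying the application name the writer requires (the missing appName in the title), and the direct query switches from the carbonfile datasource to carbon, the apparent source of the AnalysisException. The example is also split into a main() entry point and a reusable exampleBody() so the CI suite can drive it with its own shared session. A minimal sketch of the resulting shape (ExampleUtils.createCarbonSession and the carbon datasource name come from the diff; the object name and path here are illustrative):

    import org.apache.spark.sql.SparkSession

    object DirectSQLExampleSketch {

      // Standalone run: create a named session, run the body, close it.
      def main(args: Array[String]): Unit = {
        val session = ExampleUtils.createCarbonSession("DirectSQLExample")
        exampleBody(session)
        session.close()
      }

      // CI run: the test suite passes in its shared SparkSession instead.
      def exampleBody(session: SparkSession): Unit = {
        val path = "/tmp/carbonFile" // illustrative location of SDK-written files
        // "carbon" resolves as a datasource; the old "carbonfile" name raised AnalysisException
        session.sql(s"select * from carbon.`$path` limit 10").show()
      }
    }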

diff --git a/examples/spark2/src/main/scala/org/apache/carbondata/examples/DirectSQLExample.scala b/examples/spark2/src/main/scala/org/apache/carbondata/examples/DirectSQLExample.scala
index 5ddcba0..120ab85 100644
--- a/examples/spark2/src/main/scala/org/apache/carbondata/examples/DirectSQLExample.scala
+++ b/examples/spark2/src/main/scala/org/apache/carbondata/examples/DirectSQLExample.scala
@@ -31,18 +31,56 @@ import org.apache.carbondata.sdk.file.{CarbonWriter, Field, Schema}
  * No need to create table first
  * TODO: support more than one carbon file
  */
+// scalastyle:off println
 object DirectSQLExample {
 
+  def main(args: Array[String]) {
+    val carbonSession = ExampleUtils.createCarbonSession("DirectSQLExample")
+    exampleBody(carbonSession)
+    carbonSession.close()
+  }
+
+  def exampleBody(carbonSession : SparkSession): Unit = {
+
+    val rootPath = new File(this.getClass.getResource("/").getPath
+      + "../../../..").getCanonicalPath
+    val path = s"$rootPath/examples/spark2/target/carbonFile/"
+
+    import carbonSession._
+    // 1. generate data file
+    cleanTestData(path)
+
+    val rows = 20
+    buildTestData(path, rows)
+    val readPath = path
+
+    println("Running SQL on carbon files directly")
+    try {
+      // 2. run queries directly, no need to create table first
+      sql(s"""select * FROM carbon.`$readPath` limit 10""".stripMargin).show()
+
+      // 3. check rows count
+      val counts = sql(s"""select * FROM carbon.`$readPath`""".stripMargin).count()
+      assert(rows == counts)
+
+    } catch {
+      case e: Exception => throw e
+    } finally {
+      // 4. delete data files
+      cleanTestData(path)
+    }
+
+  }
+
   // prepare SDK writer output
   def buildTestData(
       path: String,
-      num: Int = 3,
-      sparkSession: SparkSession): Any = {
+      num: Int = 3): Unit = {
 
     // getCanonicalPath gives path with \, but the code expects /.
-    val writerPath = path.replace("\\", "/");
+    val writerPath = path.replace("\\", "/")
 
-    val fields: Array[Field] = new Array[Field](3)
+    val fields = new Array[Field](3)
     fields(0) = new Field("name", DataTypes.STRING)
     fields(1) = new Field("age", DataTypes.INT)
     fields(2) = new Field("height", DataTypes.DOUBLE)
@@ -54,6 +92,7 @@ object DirectSQLExample {
         .uniqueIdentifier(System.currentTimeMillis)
         .withBlockSize(2)
         .withCsvInput(new Schema(fields))
+        .writtenBy("DirectSQLExample")
       val writer = builder.build()
       var i = 0
       while (i < num) {
@@ -70,29 +109,5 @@ object DirectSQLExample {
     FileUtils.deleteDirectory(new File(path))
   }
 
-  // scalastyle:off
-  def main(args: Array[String]) {
-    val carbonSession = ExampleUtils.createCarbonSession("DirectSQLExample")
-    val rootPath = new File(this.getClass.getResource("/").getPath
-      + "../../../..").getCanonicalPath
-    val path = s"$rootPath/examples/spark2/target/carbonFile/"
-
-    import carbonSession._
-    // 1. generate data file
-    cleanTestData(path)
-    buildTestData(path, 20, sparkSession = carbonSession)
-    val readPath = path
-
-    println("Running SQL on carbon files directly")
-    try {
-      // 2. run queries directly, no need to create table first
-      sql(s"""select * FROM  carbonfile.`$readPath` limit 10""".stripMargin).show()
-    } catch {
-      case e: Exception => throw e
-    } finally {
-      // 3.delete data files
-      cleanTestData(path)
-    }
-  }
-  // scalastyle:on
 }
+// scalastyle:on println
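
The write loop in buildTestData is truncated in the hunk above. A hedged sketch of the full SDK write path, assuming the CSV-input CarbonWriter API: the builder calls other than outputPath are taken from the hunk, while outputPath and the exact row values are assumptions.

    import org.apache.carbondata.core.metadata.datatype.DataTypes
    import org.apache.carbondata.sdk.file.{CarbonWriter, Field, Schema}

    def buildTestDataSketch(writerPath: String, num: Int): Unit = {
      val fields = new Array[Field](3)
      fields(0) = new Field("name", DataTypes.STRING)
      fields(1) = new Field("age", DataTypes.INT)
      fields(2) = new Field("height", DataTypes.DOUBLE)

      val writer = CarbonWriter.builder()
        .outputPath(writerPath)         // assumed builder call, not shown in the hunk
        .uniqueIdentifier(System.currentTimeMillis)
        .withBlockSize(2)
        .withCsvInput(new Schema(fields))
        .writtenBy("DirectSQLExample")  // the added call that supplies the missing appName
        .build()

      var i = 0
      while (i < num) {
        // CSV-input writers take one row as an Array[String]
        writer.write(Array("robot" + i, i.toString, (i.toDouble / 2).toString))
        i += 1
      }
      writer.close()
    }
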
diff --git a/examples/spark2/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala b/examples/spark2/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala
index 268bf5f..853f778 100644
--- a/examples/spark2/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala
+++ b/examples/spark2/src/test/scala/org/apache/carbondata/examplesCI/RunExamples.scala
@@ -126,8 +126,13 @@ class RunExamples extends QueryTest with BeforeAndAfterAll {
     CarbonReaderExample.main(null)
   }
 
+  test("DirectSQLExample") {
+    DirectSQLExample.exampleBody(spark)
+  }
+
   test("HiveExample") {
     HiveExample.createCarbonTable(TestQueryExecutor.warehouse)
     HiveExample.readFromHive
   }
+
 }
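
For context on the session the standalone path creates: a hedged sketch of what a factory like ExampleUtils.createCarbonSession typically looks like in the spark2 examples, assuming the CarbonSession builder extension; the master setting and any extra configuration are assumptions.

    import org.apache.spark.sql.SparkSession
    import org.apache.spark.sql.CarbonSession._  // adds getOrCreateCarbonSession to Builder

    def createCarbonSession(appName: String): SparkSession = {
      SparkSession
        .builder()
        .master("local[2]")           // assumed; examples usually run locally
        .appName(appName)             // the appName given by the caller
        .getOrCreateCarbonSession()   // CarbonSession builder extension
    }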