Posted to commits@carbondata.apache.org by ra...@apache.org on 2018/06/11 12:36:38 UTC

carbondata git commit: [CARBONDATA-2569] Change the Search mode strategy: throw exception on failure and run SparkSQL for unsupported queries

Repository: carbondata
Updated Branches:
  refs/heads/master 041603dcc -> 83ee2c45f


[CARBONDATA-2569] Change the Search mode strategy: throw exception on failure and run SparkSQL for unsupported queries

Search mode throws an exception, yet the test case still passes because the failure is silently swallowed; see the JIRA for details.
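
For context, a minimal, self-contained Scala sketch of the behaviour change. The helper names (runSearchQuery, runWithSparkSQL) are hypothetical stand-ins for illustration only, not the CarbonSession API; the actual change is in the diff below.

    object SearchModeStrategySketch {

      // Stand-in for a search-mode execution that fails at runtime.
      def runSearchQuery(sql: String): Seq[String] =
        throw new RuntimeException("simulated search mode failure")

      // Stand-in for the normal SparkSQL execution path.
      def runWithSparkSQL(sql: String): Seq[String] = Seq("row-from-sparksql")

      // Old strategy: swallow the exception and fall back to SparkSQL,
      // so callers (and test cases) never see that search mode failed.
      def executeOld(sql: String): Seq[String] =
        try runSearchQuery(sql)
        catch {
          case e: Exception =>
            println(s"Exception when executing search mode: ${e.getMessage}, fallback to SparkSQL")
            runWithSparkSQL(sql)
        }

      // New strategy: log and re-throw, so the failure surfaces to the caller.
      def executeNew(sql: String): Seq[String] =
        try runSearchQuery(sql)
        catch {
          case e: Exception =>
            println(s"Exception when executing search mode: ${e.getMessage}")
            throw e
        }

      def main(args: Array[String]): Unit = {
        println(executeOld("SELECT name FROM t WHERE c > 1"))  // prints the SparkSQL result
        // executeNew(...) would propagate the exception instead of hiding it
      }
    }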

This closes #2357


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/83ee2c45
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/83ee2c45
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/83ee2c45

Branch: refs/heads/master
Commit: 83ee2c45fc10b220605916abe133b7a250007fdc
Parents: 041603d
Author: xubo245 <xu...@huawei.com>
Authored: Fri Jun 1 11:50:52 2018 +0800
Committer: ravipesala <ra...@gmail.com>
Committed: Mon Jun 11 18:06:27 2018 +0530

----------------------------------------------------------------------
 .../main/scala/org/apache/spark/sql/CarbonSession.scala | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/83ee2c45/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
index 497f95a..93c0b4a 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSession.scala
@@ -37,9 +37,8 @@ import org.apache.spark.sql.profiler.{Profiler, SQLStart}
 import org.apache.spark.util.{CarbonReflectionUtils, Utils}
 
 import org.apache.carbondata.common.annotations.InterfaceAudience
-import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.common.logging.{LogService, LogServiceFactory}
 import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.scan.expression.LiteralExpression
 import org.apache.carbondata.core.util.{CarbonProperties, CarbonSessionInfo, ThreadLocalSessionInfo}
 import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil
 import org.apache.carbondata.store.SparkCarbonStore
@@ -101,8 +100,8 @@ class CarbonSession(@transient val sc: SparkContext,
           } catch {
             case e: Exception =>
               logError(String.format(
-                "Exception when executing search mode: %s, fallback to SparkSQL", e.getMessage))
-              new Dataset[Row](self, qe, RowEncoder(qe.analyzed.schema))
+                "Exception when executing search mode: %s", e.getMessage))
+              throw e;
           }
         } else {
           new Dataset[Row](self, qe, RowEncoder(qe.analyzed.schema))
@@ -171,19 +170,24 @@ class CarbonSession(@transient val sc: SparkContext,
    */
   private def trySearchMode(qe: QueryExecution, sse: SQLStart): DataFrame = {
     val analyzed = qe.analyzed
+    val LOG: LogService = LogServiceFactory.getLogService(this.getClass.getName)
     analyzed match {
       case _@Project(columns, _@Filter(expr, s: SubqueryAlias))
         if s.child.isInstanceOf[LogicalRelation] &&
            s.child.asInstanceOf[LogicalRelation].relation
              .isInstanceOf[CarbonDatasourceHadoopRelation] =>
+        LOG.info(s"Search service started and supports filter: ${sse.sqlText}")
         runSearch(analyzed, columns, expr, s.child.asInstanceOf[LogicalRelation])
       case gl@GlobalLimit(_, ll@LocalLimit(_, p@Project(columns, _@Filter(expr, s: SubqueryAlias))))
         if s.child.isInstanceOf[LogicalRelation] &&
            s.child.asInstanceOf[LogicalRelation].relation
              .isInstanceOf[CarbonDatasourceHadoopRelation] =>
         val logicalRelation = s.child.asInstanceOf[LogicalRelation]
+        LOG.info(s"Search service started and supports limit: ${sse.sqlText}")
         runSearch(analyzed, columns, expr, logicalRelation, gl.maxRows, ll.maxRows)
       case _ =>
+        LOG.info(s"Search service started, but don't support: ${sse.sqlText}," +
+          s" and will run it with SparkSQL")
         new Dataset[Row](self, qe, RowEncoder(qe.analyzed.schema))
     }
   }
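
For readers trying this out, a hedged usage sketch of what the new dispatch in trySearchMode means from the caller's side. The store path, the "sales" table, and the startSearchMode/stopSearchMode toggle are assumptions for illustration and may differ across CarbonData versions; the table is assumed to already exist as a carbon table.

    import org.apache.spark.sql.{CarbonSession, SparkSession}
    import org.apache.spark.sql.CarbonSession._  // brings getOrCreateCarbonSession into scope

    object SearchModeDispatchSketch {
      def main(args: Array[String]): Unit = {
        // Assumed local store path for illustration.
        val spark = SparkSession.builder()
          .master("local")
          .appName("SearchModeDispatchSketch")
          .getOrCreateCarbonSession("/tmp/carbon.store")

        spark.asInstanceOf[CarbonSession].startSearchMode()  // assumed search mode toggle

        // Project + Filter on a carbon table: matched by the first case,
        // logged as "supports filter" and routed to the search service.
        spark.sql("SELECT name FROM sales WHERE amount > 100").show()

        // The same shape wrapped in a limit: matched by the GlobalLimit/LocalLimit
        // case, logged as "supports limit" and routed to the search service.
        spark.sql("SELECT name FROM sales WHERE amount > 100 LIMIT 10").show()

        // Any other plan shape (e.g. an aggregation) hits the default case:
        // logged as unsupported and executed with SparkSQL instead.
        spark.sql("SELECT country, count(*) FROM sales GROUP BY country").show()

        spark.asInstanceOf[CarbonSession].stopSearchMode()
      }
    }

Note that after this commit, if the search service fails while executing one of the supported shapes, the exception propagates to the caller instead of the query being silently re-run through SparkSQL; only queries whose plans do not match the supported shapes fall back to SparkSQL.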