Posted to commits@carbondata.apache.org by ja...@apache.org on 2017/11/20 10:44:41 UTC

carbondata git commit: [CARBONDATA-1751] Make the exception type and message correct

Repository: carbondata
Updated Branches:
  refs/heads/master 214d9eb9c -> 82277ddfb


[CARBONDATA-1751] Make the exception type and message correct

Carbon prints improper error messages. For example, it reports a generic system error when users run CREATE TABLE with duplicate column names, but it should report the related exception information.
So we replace the sys.error calls with AnalysisException when users run the related operations, except IUD, compaction and alter.

This closes #1525
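
For context before the diff: the change is mostly mechanical. Below is a minimal sketch of the new pattern, assuming CarbonException.analysisException simply wraps Spark's AnalysisException; the helper is imported from org.apache.spark.sql.util throughout the diff, plausibly because AnalysisException's constructor is protected[sql] in Spark 2.x. The real implementation may differ.

    package org.apache.spark.sql.util

    import org.apache.spark.sql.AnalysisException

    // Hypothetical sketch of the helper used throughout this diff.
    // It lives inside the org.apache.spark.sql package tree so it can
    // call AnalysisException's protected[sql] constructor.
    object CarbonException {
      // Declared as Nothing because it always throws, so calls
      // type-check in any expression position (e.g. a match arm).
      def analysisException(message: String): Nothing =
        throw new AnalysisException(message)
    }

    // Before: sys.error raises a bare RuntimeException
    //   sys.error(s"unsupported type: $other")
    // After: callers and tests can catch the specific type
    //   CarbonException.analysisException(s"unsupported type: $other")

This is also why the test changes below switch from intercept[RuntimeException] to intercept[AnalysisException], and why missing-table and duplicate-table checks now throw NoSuchTableException and TableAlreadyExistsException directly instead of calling sys.error.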


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/82277ddf
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/82277ddf
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/82277ddf

Branch: refs/heads/master
Commit: 82277ddfb3465109260c11d7274d85a1ce19100b
Parents: 214d9eb
Author: xubo245 <60...@qq.com>
Authored: Fri Nov 17 22:36:46 2017 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Mon Nov 20 18:43:16 2017 +0800

----------------------------------------------------------------------
 .../testsuite/iud/UpdateCarbonTableTestCase.scala  |  2 +-
 .../carbondata/spark/rdd/CarbonMergerRDD.scala     |  7 +++++--
 .../spark/rdd/CarbonScanPartitionRDD.scala         |  7 +++++--
 .../carbondata/spark/util/DataLoadingUtil.scala    |  5 +++--
 .../spark/rdd/CarbonDataRDDFactory.scala           | 12 +++++++-----
 .../apache/spark/sql/CarbonDataFrameWriter.scala   |  3 ++-
 .../spark/sql/CarbonDatasourceHadoopRelation.scala |  8 +++++---
 .../scala/org/apache/spark/sql/CarbonSource.scala  |  8 +++++---
 .../command/CarbonCreateTableCommand.scala         | 17 ++++++++++++-----
 .../execution/command/CarbonDropTableCommand.scala |  4 +++-
 .../management/AlterTableCompactionCommand.scala   | 15 ++++++++++-----
 .../command/management/LoadTableCommand.scala      |  6 +++---
 .../spark/sql/execution/command/package.scala      |  4 +++-
 .../spark/sql/hive/CarbonAnalysisRules.scala       |  4 +++-
 .../spark/sql/hive/CarbonPreAggregateRules.scala   | 10 +++++-----
 .../org/apache/spark/sql/hive/CarbonRelation.scala |  4 +++-
 .../spark/sql/parser/CarbonSpark2SqlParser.scala   |  5 +++--
 .../spark/sql/parser/CarbonSparkSqlParser.scala    | 12 ++++++++----
 .../scala/org/apache/spark/util/Compaction.scala   |  4 ++--
 .../booleantype/BooleanDataTypesInsertTest.scala   |  2 +-
 .../spark/carbondata/CarbonDataSourceSuite.scala   | 11 +++++------
 21 files changed, 94 insertions(+), 56 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
index fd5f144..005dc01 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/UpdateCarbonTableTestCase.scala
@@ -294,7 +294,7 @@ class UpdateCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     val exception = intercept[Exception] {
       sql("""update iud.dest d set (c2, c5 ) = (c2 + 1, concat(c5 , "z"), "abc")""").show()
     }
-    assertResult("Number of source and destination columns are not matching")(exception.getMessage)
+    assertResult("The number of columns in source table and destination table columns mismatch;")(exception.getMessage)
   }
 
   test("update carbon table-error[no set columns") {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
index aaeedb4..654dc44 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
@@ -31,6 +31,7 @@ import org.apache.hadoop.mapreduce.Job
 import org.apache.spark._
 import org.apache.spark.sql.execution.command.{CarbonMergerMapping, NodeInfo}
 import org.apache.spark.sql.hive.DistributionUtil
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
@@ -186,9 +187,11 @@ class CarbonMergerRDD[K, V](
           case e: Throwable =>
             LOGGER.error(e)
             if (null != e.getMessage) {
-              sys.error(s"Exception occurred in query execution :: ${ e.getMessage }")
+              CarbonException.analysisException(
+                s"Exception occurred in query execution :: ${ e.getMessage }")
             } else {
-              sys.error("Exception occurred in query execution.Please check logs.")
+              CarbonException.analysisException(
+                "Exception occurred in query execution.Please check logs.")
             }
         }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanPartitionRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanPartitionRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanPartitionRDD.scala
index 13a872f..8d477d5 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanPartitionRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanPartitionRDD.scala
@@ -29,6 +29,7 @@ import org.apache.spark.{Partition, TaskContext}
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.execution.command.AlterPartitionModel
 import org.apache.spark.sql.hive.DistributionUtil
+import org.apache.spark.sql.util.CarbonException
 import org.apache.spark.unsafe.types.UTF8String
 import org.apache.spark.util.PartitionUtils
 
@@ -144,9 +145,11 @@ class CarbonScanPartitionRDD(alterPartitionModel: AlterPartitionModel,
           case e: Throwable =>
             LOGGER.error(e)
             if (null != e.getMessage) {
-              sys.error(s"Exception occurred in query execution :: ${ e.getMessage }")
+              CarbonException.analysisException(
+                s"Exception occurred in query execution :: ${e.getMessage}")
             } else {
-              sys.error("Exception occurred in query execution. Please check logs.")
+              CarbonException.analysisException(
+                "Exception occurred in query execution. Please check logs.")
             }
         }
         val segmentProperties = PartitionUtils.getSegmentProperties(absoluteTableIdentifier,

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataLoadingUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataLoadingUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataLoadingUtil.scala
index dbe4734..f359377 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataLoadingUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataLoadingUtil.scala
@@ -22,6 +22,7 @@ import scala.collection.JavaConverters._
 import scala.collection.mutable
 
 import org.apache.commons.lang3.StringUtils
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.common.constants.LoggerAction
 import org.apache.carbondata.common.logging.LogServiceFactory
@@ -222,7 +223,7 @@ object DataLoadingUtil {
     if (bad_records_logger_enable.toBoolean ||
         LoggerAction.REDIRECT.name().equalsIgnoreCase(bad_records_action)) {
       if (!CarbonUtil.isValidBadStorePath(bad_record_path)) {
-        sys.error("Invalid bad records location.")
+        CarbonException.analysisException("Invalid bad records location.")
       }
     }
     carbonLoadModel.setBadRecordsLocation(bad_record_path)
@@ -293,7 +294,7 @@ object DataLoadingUtil {
     if (delimeter.equalsIgnoreCase(complex_delimeter_level1) ||
         complex_delimeter_level1.equalsIgnoreCase(complex_delimeter_level2) ||
         delimeter.equalsIgnoreCase(complex_delimeter_level2)) {
-      sys.error(s"Field Delimiter & Complex types delimiter are same")
+      CarbonException.analysisException(s"Field Delimiter and Complex types delimiter are same")
     } else {
       carbonLoadModel.setComplexDelimiterLevel1(
         CarbonUtil.delimiterConverter(complex_delimeter_level1))

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 38bb936..47e016b 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -36,6 +36,7 @@ import org.apache.spark.rdd.{DataLoadCoalescedRDD, DataLoadPartitionCoalescer, N
 import org.apache.spark.sql.{CarbonEnv, DataFrame, Row, SQLContext}
 import org.apache.spark.sql.execution.command.{CompactionModel, ExecutionErrors, UpdateTableModel}
 import org.apache.spark.sql.hive.DistributionUtil
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.common.constants.LoggerAction
 import org.apache.carbondata.common.logging.LogServiceFactory
@@ -114,15 +115,16 @@ object CarbonDataRDDFactory {
           s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
       CarbonCompactionUtil
           .createCompactionRequiredFile(carbonTable.getMetaDataFilepath, compactionType)
-      // do sys error only in case of DDL trigger.
+      // throw exception only in case of DDL trigger.
       if (compactionModel.isDDLTrigger) {
-        sys.error("Compaction is in progress, compaction request for table " +
-            s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }" +
+        CarbonException.analysisException(
+          s"Compaction is in progress, compaction request for table " +
+            s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}" +
             " is in queue.")
       } else {
         LOGGER.error("Compaction is in progress, compaction request for table " +
-            s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }" +
-            " is in queue.")
+          s"${carbonLoadModel.getDatabaseName}.${carbonLoadModel.getTableName}" +
+          " is in queue.")
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
index 89b618f..bae93bc 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
@@ -21,6 +21,7 @@ import org.apache.hadoop.fs.Path
 import org.apache.hadoop.io.compress.GzipCodec
 import org.apache.spark.sql.execution.command.management.LoadTableCommand
 import org.apache.spark.sql.types._
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
@@ -154,7 +155,7 @@ class CarbonDataFrameWriter(sqlContext: SQLContext, val dataFrame: DataFrame) {
       case DateType => CarbonType.DATE.getName
       case decimal: DecimalType => s"decimal(${decimal.precision}, ${decimal.scale})"
       case BooleanType => CarbonType.BOOLEAN.getName
-      case other => sys.error(s"unsupported type: $other")
+      case other => CarbonException.analysisException(s"unsupported type: $other")
     }
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
index 1f4e739..c800055 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
@@ -27,6 +27,7 @@ import org.apache.spark.sql.hive.CarbonRelation
 import org.apache.spark.sql.optimizer.CarbonFilters
 import org.apache.spark.sql.sources.{BaseRelation, Filter, InsertableRelation}
 import org.apache.spark.sql.types.StructType
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
@@ -89,13 +90,14 @@ case class CarbonDatasourceHadoopRelation(
 
   override def insert(data: DataFrame, overwrite: Boolean): Unit = {
     if (carbonRelation.output.size > CarbonCommonConstants.DEFAULT_MAX_NUMBER_OF_COLUMNS) {
-      sys.error("Maximum supported column by carbon is: " +
-          CarbonCommonConstants.DEFAULT_MAX_NUMBER_OF_COLUMNS)
+      CarbonException.analysisException("Maximum supported column by carbon is: " +
+        CarbonCommonConstants.DEFAULT_MAX_NUMBER_OF_COLUMNS)
     }
     if (data.logicalPlan.output.size >= carbonRelation.output.size) {
       LoadTableByInsertCommand(this, data.logicalPlan, overwrite).run(sparkSession)
     } else {
-      sys.error("Cannot insert into target table because column number are different")
+      CarbonException.analysisException(
+        "Cannot insert into target table because number of columns mismatch")
     }
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala
index c9e2c20..18343e5 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala
@@ -33,6 +33,7 @@ import org.apache.spark.sql.parser.CarbonSpark2SqlParser
 import org.apache.spark.sql.sources._
 import org.apache.spark.sql.streaming.OutputMode
 import org.apache.spark.sql.types.StructType
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
@@ -93,7 +94,7 @@ class CarbonSource extends CreatableRelationProvider with RelationProvider
       .exists(tablePath)
     val (doSave, doAppend) = (mode, isExists) match {
       case (SaveMode.ErrorIfExists, true) =>
-        sys.error(s"ErrorIfExists mode, path $storePath already exists.")
+        CarbonException.analysisException(s"path $storePath already exists.")
       case (SaveMode.Overwrite, true) =>
         sqlContext.sparkSession
           .sql(s"DROP TABLE IF EXISTS ${ options.dbName }.${ options.tableName }")
@@ -127,11 +128,12 @@ class CarbonSource extends CreatableRelationProvider with RelationProvider
       CarbonCommonConstants.DATABASE_DEFAULT_NAME).toLowerCase
     val tableOption: Option[String] = parameters.get("tableName")
     if (tableOption.isEmpty) {
-      sys.error("Table creation failed. Table name is not specified")
+      CarbonException.analysisException("Table creation failed. Table name is not specified")
     }
     val tableName = tableOption.get.toLowerCase()
     if (tableName.contains(" ")) {
-      sys.error("Table creation failed. Table name cannot contain blank space")
+      CarbonException.analysisException(
+        "Table creation failed. Table name cannot contain blank space")
     }
     val (path, updatedParams) = if (sqlContext.sparkSession.sessionState.catalog.listTables(dbName)
       .exists(_.table.equalsIgnoreCase(tableName))) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonCreateTableCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonCreateTableCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonCreateTableCommand.scala
index 11f0bc5..06f3645 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonCreateTableCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonCreateTableCommand.scala
@@ -19,7 +19,11 @@ package org.apache.spark.sql.execution.command
 
 import scala.collection.JavaConverters._
 
+import org.apache.spark.sql._
 import org.apache.spark.sql.{CarbonEnv, GetDB, Row, SparkSession}
+import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
+import org.apache.spark.sql.execution.SQLExecution.EXECUTION_ID_KEY
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
@@ -61,7 +65,7 @@ case class CarbonCreateTableCommand(
     }
 
     if (tableInfo.getFactTable.getListOfColumns.size <= 0) {
-      sys.error("No Dimensions found. Table should have at least one dimesnion !")
+      CarbonException.analysisException("Table should have at least one column.")
     }
 
     if (sparkSession.sessionState.catalog.listTables(dbName)
@@ -70,7 +74,7 @@ case class CarbonCreateTableCommand(
         LOGGER.audit(
           s"Table creation with Database name [$dbName] and Table name [$tbName] failed. " +
           s"Table [$tbName] already exists under database [$dbName]")
-        sys.error(s"Table [$tbName] already exists under database [$dbName]")
+        throw new TableAlreadyExistsException(dbName, tbName)
       }
     } else {
       val tableIdentifier = AbsoluteTableIdentifier.from(tablePath, dbName, tbName)
@@ -83,6 +87,7 @@ case class CarbonCreateTableCommand(
           cm.dimCols.foreach(f => fields(f.schemaOrdinal) = f)
           cm.msrCols.foreach(f => fields(f.schemaOrdinal) = f)
 
+          sparkSession.sparkContext.setLocalProperty(EXECUTION_ID_KEY, null)
           sparkSession.sql(
             s"""CREATE TABLE $dbName.$tbName
                |(${ fields.map(f => f.rawSchema).mkString(",") })
@@ -90,14 +95,16 @@ case class CarbonCreateTableCommand(
             s""" OPTIONS (tableName "$tbName", dbName "$dbName", tablePath """.stripMargin +
             s""""$tablePath"$carbonSchemaString) """)
         } catch {
+          case e: AnalysisException => throw e
           case e: Exception =>
             // call the drop table to delete the created table.
             CarbonEnv.getInstance(sparkSession).carbonMetastore
               .dropTable(tableIdentifier)(sparkSession)
 
-            LOGGER.audit(s"Table creation with Database name [$dbName] " +
-                         s"and Table name [$tbName] failed")
-            throw e
+            val msg = s"Create table'$tbName' in database '$dbName' failed."
+            LOGGER.audit(msg)
+            LOGGER.error(e, msg)
+            CarbonException.analysisException(msg)
         }
       }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonDropTableCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonDropTableCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonDropTableCommand.scala
index f0a916a..ddd5a76 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonDropTableCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/CarbonDropTableCommand.scala
@@ -23,6 +23,7 @@ import org.apache.spark.sql.{CarbonEnv, GetDB, Row, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
 import org.apache.spark.sql.hive.CarbonRelation
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.common.logging.{LogService, LogServiceFactory}
 import org.apache.carbondata.core.constants.CarbonCommonConstants
@@ -113,7 +114,8 @@ case class CarbonDropTableCommand(
     } catch {
       case ex: Exception =>
         LOGGER.error(ex, s"Dropping table $dbName.$tableName failed")
-        sys.error(s"Dropping table $dbName.$tableName failed: ${ ex.getMessage }")
+        CarbonException.analysisException(
+          s"Dropping table $dbName.$tableName failed: ${ ex.getMessage }")
     }
     finally {
       if (carbonLocks.nonEmpty) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/AlterTableCompactionCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/AlterTableCompactionCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/AlterTableCompactionCommand.scala
index 9ff575b..826d35a 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/AlterTableCompactionCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/AlterTableCompactionCommand.scala
@@ -20,8 +20,10 @@ package org.apache.spark.sql.execution.command.management
 import scala.collection.JavaConverters._
 
 import org.apache.spark.sql.{CarbonEnv, Row, SparkSession, SQLContext}
+import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
 import org.apache.spark.sql.execution.command.{AlterTableModel, CompactionModel, DataProcessCommand, RunnableCommand}
 import org.apache.spark.sql.hive.CarbonRelation
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.common.logging.{LogService, LogServiceFactory}
 import org.apache.carbondata.core.constants.CarbonCommonConstants
@@ -62,11 +64,11 @@ case class AlterTableCompactionCommand(
           .lookupRelation(Option(databaseName), tableName)(sparkSession)
           .asInstanceOf[CarbonRelation]
       if (relation == null) {
-        sys.error(s"Table $databaseName.$tableName does not exist")
+        throw new NoSuchTableException(databaseName, tableName)
       }
       if (null == relation.carbonTable) {
         LOGGER.error(s"alter table failed. table not found: $databaseName.$tableName")
-        sys.error(s"alter table failed. table not found: $databaseName.$tableName")
+        throw new NoSuchTableException(databaseName, tableName)
       }
       relation.carbonTable
     }
@@ -94,9 +96,11 @@ case class AlterTableCompactionCommand(
     } catch {
       case e: Exception =>
         if (null != e.getMessage) {
-          sys.error(s"Compaction failed. Please check logs for more info. ${ e.getMessage }")
+          CarbonException.analysisException(
+            s"Compaction failed. Please check logs for more info. ${ e.getMessage }")
         } else {
-          sys.error("Exception in compaction. Please check logs for more info.")
+          CarbonException.analysisException(
+            "Exception in compaction. Please check logs for more info.")
         }
     }
     Seq.empty
@@ -205,7 +209,8 @@ case class AlterTableCompactionCommand(
                      s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
         LOGGER.error(s"Not able to acquire the compaction lock for table" +
                      s" ${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
-        sys.error("Table is already locked for compaction. Please try after some time.")
+        CarbonException.analysisException(
+          "Table is already locked for compaction. Please try after some time.")
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/LoadTableCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/LoadTableCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/LoadTableCommand.scala
index a24c408..dfcd3cd 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/LoadTableCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/LoadTableCommand.scala
@@ -22,7 +22,7 @@ import scala.collection.JavaConverters._
 import org.apache.commons.lang3.StringUtils
 import org.apache.spark.scheduler.{SparkListener, SparkListenerApplicationEnd}
 import org.apache.spark.sql._
-import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
+import org.apache.spark.sql.catalyst.analysis.{NoSuchTableException, UnresolvedAttribute}
 import org.apache.spark.sql.execution.command.{DataLoadTableFileMapping, DataProcessCommand, RunnableCommand, UpdateTableModel}
 import org.apache.spark.sql.hive.CarbonRelation
 import org.apache.spark.util.{CausedBy, FileUtils}
@@ -104,12 +104,12 @@ case class LoadTableCommand(
         val relation = CarbonEnv.getInstance(sparkSession).carbonMetastore
           .lookupRelation(Option(dbName), tableName)(sparkSession).asInstanceOf[CarbonRelation]
         if (relation == null) {
-          sys.error(s"Table $dbName.$tableName does not exist")
+          throw new NoSuchTableException(dbName, tableName)
         }
         if (null == relation.carbonTable) {
           LOGGER.error(s"Data loading failed. table not found: $dbName.$tableName")
           LOGGER.audit(s"Data loading failed. table not found: $dbName.$tableName")
-          sys.error(s"Data loading failed. table not found: $dbName.$tableName")
+          throw new NoSuchTableException(dbName, tableName)
         }
         relation.carbonTable
       }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/package.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/package.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/package.scala
index 07ef555..e2bfbdc 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/package.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/package.scala
@@ -21,6 +21,7 @@ import scala.language.implicitConversions
 
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
 
 import org.apache.carbondata.common.logging.LogServiceFactory
 
@@ -29,11 +30,12 @@ object Checker {
       dbName: Option[String],
       tableName: String,
       session: SparkSession): Unit = {
+    val database = dbName.getOrElse(session.catalog.currentDatabase)
     val identifier = TableIdentifier(tableName, dbName)
     if (!CarbonEnv.getInstance(session).carbonMetastore.tableExists(identifier)(session)) {
       val err = s"table $dbName.$tableName not found"
       LogServiceFactory.getLogService(this.getClass.getName).error(err)
-      throw new IllegalArgumentException(err)
+      throw new NoSuchTableException(database, tableName)
     }
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
index 7bd0fad..a516c11 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonAnalysisRules.scala
@@ -26,6 +26,7 @@ import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.rules._
 import org.apache.spark.sql.execution.SparkSqlParser
 import org.apache.spark.sql.execution.command.mutation.ProjectForDeleteCommand
+import org.apache.spark.sql.util.CarbonException
 
 case class CarbonIUDAnalysisRule(sparkSession: SparkSession) extends Rule[LogicalPlan] {
 
@@ -55,7 +56,8 @@ case class CarbonIUDAnalysisRule(sparkSession: SparkSession) extends Rule[Logica
       case Project(projectList, child) if !includedDestColumns =>
         includedDestColumns = true
         if (projectList.size != columns.size) {
-          sys.error("Number of source and destination columns are not matching")
+          CarbonException.analysisException(
+            "The number of columns in source table and destination table columns mismatch")
         }
         val renamedProjectList = projectList.zip(columns).map{ case(attr, col) =>
           attr match {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
index c48e6e8..20bdb41 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonPreAggregateRules.scala
@@ -28,6 +28,7 @@ import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.execution.datasources.LogicalRelation
 import org.apache.spark.sql.types._
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.metadata.schema.table.{AggregationDataMapSchema, CarbonTable, DataMapSchema}
@@ -756,10 +757,8 @@ object CarbonPreInsertionCasts extends Rule[LogicalPlan] {
   : LogicalPlan = {
     if (relation.carbonRelation.output.size > CarbonCommonConstants
       .DEFAULT_MAX_NUMBER_OF_COLUMNS) {
-      sys
-        .error("Maximum supported column by carbon is:" + CarbonCommonConstants
-          .DEFAULT_MAX_NUMBER_OF_COLUMNS
-        )
+      CarbonException.analysisException("Maximum number of columns supported:" +
+        s"${CarbonCommonConstants.DEFAULT_MAX_NUMBER_OF_COLUMNS}")
     }
     val isAggregateTable = !relation.carbonRelation.carbonTable.getTableInfo
       .getParentRelationIdentifiers.isEmpty
@@ -786,7 +785,8 @@ object CarbonPreInsertionCasts extends Rule[LogicalPlan] {
       }
       InsertIntoCarbonTable(relation, p.partition, newChild, p.overwrite, p.ifNotExists)
     } else {
-      sys.error("Cannot insert into target table because column number are different")
+      CarbonException.analysisException(
+        "Cannot insert into target table because number of columns mismatch")
     }
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala
index 9187fe2..69b8d50 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala
@@ -26,6 +26,7 @@ import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
 import org.apache.spark.sql.catalyst.expressions.AttributeReference
 import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics}
 import org.apache.spark.sql.types._
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.metadata.datatype.DataTypes
@@ -263,7 +264,8 @@ object CarbonMetastoreTypes extends RegexParsers {
   def toDataType(metastoreType: String): DataType = {
     parseAll(dataType, metastoreType) match {
       case Success(result, _) => result
-      case failure: NoSuccess => sys.error(s"Unsupported dataType: $metastoreType")
+      case _: NoSuccess =>
+        CarbonException.analysisException(s"Unsupported dataType: $metastoreType")
     }
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
index c8ea4ac..c18f4e3 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
@@ -20,7 +20,7 @@ package org.apache.spark.sql.parser
 import scala.collection.mutable
 import scala.language.implicitConversions
 
-import org.apache.spark.sql.{DeleteRecords, ShowLoadsCommand, UpdateTable}
+import org.apache.spark.sql.{AnalysisException, DeleteRecords, ShowLoadsCommand, UpdateTable}
 import org.apache.spark.sql.catalyst.{CarbonDDLSqlParser, TableIdentifier}
 import org.apache.spark.sql.catalyst.CarbonTableIdentifierImplicit._
 import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
@@ -31,6 +31,7 @@ import org.apache.spark.sql.execution.command.management.{AlterTableCompactionCo
 import org.apache.spark.sql.execution.command.partition.{AlterTableDropCarbonPartitionCommand, AlterTableSplitCarbonPartitionCommand}
 import org.apache.spark.sql.execution.command.schema.{CarbonAlterTableAddColumnCommand, CarbonAlterTableDataTypeChangeCommand, CarbonAlterTableDropColumnCommand}
 import org.apache.spark.sql.types.StructField
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.spark.CarbonOption
@@ -58,7 +59,7 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
           case logicalPlan => logicalPlan
         }
         case failureOrError =>
-          sys.error(failureOrError.toString)
+          CarbonException.analysisException(failureOrError.toString)
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
index 6a3bab4..8e53927 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParser.scala
@@ -18,7 +18,7 @@ package org.apache.spark.sql.parser
 
 import scala.collection.mutable
 
-import org.apache.spark.sql.{CarbonSession, SparkSession}
+import org.apache.spark.sql.{CarbonEnv, CarbonSession, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.parser.{AbstractSqlParser, ParseException, SqlBaseParser}
 import org.apache.spark.sql.catalyst.parser.ParserUtils._
@@ -28,6 +28,7 @@ import org.apache.spark.sql.execution.SparkSqlAstBuilder
 import org.apache.spark.sql.execution.command.{BucketFields, CarbonCreateTableCommand, PartitionerField, TableModel}
 import org.apache.spark.sql.internal.{SQLConf, VariableSubstitution}
 import org.apache.spark.sql.types.StructField
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.spark.CarbonOption
 import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
@@ -57,9 +58,12 @@ class CarbonSparkSqlParser(conf: SQLConf, sparkSession: SparkSession) extends Ab
           case mce: MalformedCarbonCommandException =>
             throw mce
           case e =>
-            sys
-              .error("\n" + "BaseSqlParser>>>> " + ex.getMessage + "\n" + "CarbonSqlParser>>>> " +
-                     e.getMessage)
+            CarbonException.analysisException(
+              s"""== Parse1 ==
+                 |${ex.getMessage}
+                 |== Parse2 ==
+                 |${e.getMessage}
+               """.stripMargin.trim)
         }
     }
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/main/scala/org/apache/spark/util/Compaction.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/util/Compaction.scala b/integration/spark2/src/main/scala/org/apache/spark/util/Compaction.scala
index 709f474..a3f6797 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/util/Compaction.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/util/Compaction.scala
@@ -19,9 +19,9 @@ package org.apache.spark.util
 import org.apache.spark.sql.{CarbonEnv, SparkSession}
 import org.apache.spark.sql.execution.command.AlterTableModel
 import org.apache.spark.sql.execution.command.management.AlterTableCompactionCommand
+import org.apache.spark.sql.util.CarbonException
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.processing.merger.CompactionType
 
 /**
  * table compaction api
@@ -42,7 +42,7 @@ object Compaction {
         "")).run(spark)
     }
     else {
-      sys.error("Compaction type is wrong. Please select minor or major.")
+      CarbonException.analysisException("Compaction type is wrong. Please select minor or major.")
     }
 
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesInsertTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesInsertTest.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesInsertTest.scala
index c6a6708..f8cfa6b 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesInsertTest.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/booleantype/BooleanDataTypesInsertTest.scala
@@ -431,7 +431,7 @@ class BooleanDataTypesInsertTest extends QueryTest with BeforeAndAfterEach with
            """.stripMargin)
       sql("insert into boolean_table2 select * from boolean_table")
     }
-    assert(exception_insert.getMessage.contains("Cannot insert into target table because column number are different"))
+    assert(exception_insert.getMessage.contains("Cannot insert into target table because number of columns mismatch"))
   }
 
   test("Inserting into Hive table from carbon table: support boolean data type and other format") {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/82277ddf/integration/spark2/src/test/scala/org/apache/spark/carbondata/CarbonDataSourceSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/CarbonDataSourceSuite.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/CarbonDataSourceSuite.scala
index d2571b7..cf465fb 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/CarbonDataSourceSuite.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/CarbonDataSourceSuite.scala
@@ -19,9 +19,9 @@ package org.apache.spark.carbondata
 
 import scala.collection.mutable
 
+import org.apache.spark.sql.{AnalysisException, Row, SaveMode}
 import org.apache.spark.sql.common.util.Spark2QueryTest
 import org.apache.spark.sql.types._
-import org.apache.spark.sql.{Row, SaveMode}
 import org.scalatest.BeforeAndAfterAll
 
 import org.apache.carbondata.core.util.CarbonProperties
@@ -227,7 +227,7 @@ class CarbonDataSourceSuite extends Spark2QueryTest with BeforeAndAfterAll {
 
   test("test create table without tableName in options") {
     sql("drop table if exists carbon_test")
-    val exception = intercept[RuntimeException] {
+    val exception = intercept[AnalysisException] {
       sql(
         s"""
            | CREATE TABLE carbon_test(
@@ -240,12 +240,12 @@ class CarbonDataSourceSuite extends Spark2QueryTest with BeforeAndAfterAll {
       )
     }.getMessage
     sql("drop table if exists carbon_test")
-    assert(exception.eq("Table creation failed. Table name is not specified"))
+    assert(exception.contains("Table creation failed. Table name is not specified"))
   }
 
   test("test create table with space in tableName") {
     sql("drop table if exists carbon_test")
-    val exception = intercept[RuntimeException] {
+    val exception = intercept[AnalysisException] {
       sql(
         s"""
            | CREATE TABLE carbon_test(
@@ -258,7 +258,6 @@ class CarbonDataSourceSuite extends Spark2QueryTest with BeforeAndAfterAll {
       )
     }.getMessage
     sql("drop table if exists carbon_test")
-    assert(exception.eq("Table creation failed. Table name cannot contain blank space"))
+    assert(exception.contains("Table creation failed. Table name cannot contain blank space"))
   }
-
 }