Posted to commits@carbondata.apache.org by xu...@apache.org on 2020/08/25 02:08:48 UTC

[carbondata] branch master updated: [CARBONDATA-3889] Enable scala check style

This is an automated email from the ASF dual-hosted git repository.

xubo245 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 9b92645  [CARBONDATA-3889] Enable scala check style
9b92645 is described below

commit 9b9264523c58a958eabde3b41f778374bd473fc2
Author: QiangCai <qi...@qq.com>
AuthorDate: Fri Aug 14 16:50:33 2020 +0800

    [CARBONDATA-3889] Enable scala check style
    
    Why is this PR needed?
    
    Scala style checking does not run during compile, and IntelliJ IDEA does not
    use the Scala style config in the dev folder, so IntelliJ IDEA sorts imports
    in a different order than Scalastyle expects.
    
    What changes were proposed in this PR?
    
    1. Move scalastyle-config.xml to the parent project folder.
       a) IntelliJ IDEA will show style errors during coding.
       b) Fix the Scala style issues that the style check reports during the
          Maven compile.
    2. Change java-code-format-template.xml to sort imports the way Scalastyle
       does.
    
    Does this PR introduce any user interface change?
    
    No
    
    Is any new test case added?
    
    No
    
    This closes #3891
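
    For reference, a minimal sketch (not part of the commit) of the import
    order that the relocated scalastyle-config.xml enforces, as the hunks
    below suggest: four groups separated by one blank line (java, scala,
    third party including org.apache.hadoop and org.apache.spark, and
    org.apache.carbondata), sorted lexicographically within each group.
    The file and object names here are hypothetical:
    
        // Hypothetical Scala file illustrating the enforced import grouping.
        package org.apache.carbondata.examples
        
        import java.util.Locale                       // group 1: java
        
        import scala.collection.JavaConverters._      // group 2: scala
        
        import org.apache.hadoop.conf.Configuration   // group 3: third party
        
        import org.apache.carbondata.core.util.CarbonProperties  // group 4: carbondata
        
        object ImportOrderSketch {
          def main(args: Array[String]): Unit = {
            // Hadoop Configuration is Iterable; count a few entries via JavaConverters.
            val hadoopConf = new Configuration()
            val sample = hadoopConf.iterator().asScala.take(3).size
            // Touch the CarbonData singleton so the group-4 import is used.
            CarbonProperties.getInstance()
            println(s"${Locale.ENGLISH}: $sample sample config entries")
          }
        }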
---
 dev/java-code-format-template.xml                  |   5 +
 .../carbondata/examples/HadoopFileExample.scala    |   2 +-
 .../index/CarbonMergeBloomIndexFilesRDD.scala      |   3 +-
 .../spark/load/DataLoadProcessBuilderOnSpark.scala |   2 +-
 .../spark/load/DataLoadProcessorStepOnSpark.scala  |   2 +-
 .../carbondata/spark/rdd/CarbonMergerRDD.scala     |   4 +-
 .../spark/rdd/CarbonTableCompactor.scala           |   6 +-
 .../carbondata/spark/util/CarbonSparkUtil.scala    |   4 +-
 .../scala/org/apache/spark/sql/CarbonSession.scala |   5 +-
 .../scala/org/apache/spark/sql/CarbonSource.scala  |   2 +-
 .../catalyst/AbstractCarbonSparkSQLParser.scala    |   2 +-
 .../sql/execution/CastExpressionOptimization.scala |   8 +-
 .../command/cache/CarbonShowCacheCommand.scala     |   2 +-
 .../CarbonAlterTableCompactionCommand.scala        |   3 +-
 .../management/CarbonInsertIntoCommand.scala       |   6 +-
 .../command/table/CarbonCreateTableCommand.scala   |   3 +-
 .../datasources/SparkCarbonTableFormat.scala       |   2 +-
 .../sql/execution/strategy/CarbonPlanHelper.scala  |   2 +-
 .../spark/sql/execution/strategy/DDLHelper.scala   |   6 +-
 .../spark/sql/hive/CarbonSessionCatalog.scala      |  30 ++---
 .../spark/sql/hive/SqlAstBuilderHelper.scala       |   2 +-
 .../sql/listeners/DropCacheEventListeners.scala    |  30 ++---
 .../apache/spark/sql/listeners/MVListeners.scala   |  30 ++---
 .../sql/listeners/ShowCacheEventListener.scala     |  30 ++---
 .../apache/spark/sql/optimizer/CarbonFilters.scala |  11 +-
 .../parser/CarbonExtensionSpark2SqlParser.scala    |   7 --
 .../sql/secondaryindex/util/FileInternalUtil.scala |   2 +-
 .../spark/adapter/CarbonToSparkAdapter.scala       |   2 -
 .../apache/spark/sql/CarbonToSparkAdapter.scala    |   9 +-
 .../execution/strategy/CarbonDataSourceScan.scala  |   3 +-
 .../spark/sql/hive/CarbonSessionStateBuilder.scala |   8 +-
 .../sql/parser/SparkSqlAstBuilderWrapper.scala     |   4 +-
 .../carbondata/mv/plans/util/TableCluster.scala    |  15 ---
 .../mv/plans/modular/ExpressionHelper.scala        |   2 +-
 pom.xml                                            |   7 +-
 dev/scalastyle-config.xml => scalastyle-config.xml | 137 ++++++++++++++-------
 .../streaming/parser/RowStreamParserImp.scala      |   4 +-
 37 files changed, 209 insertions(+), 193 deletions(-)

diff --git a/dev/java-code-format-template.xml b/dev/java-code-format-template.xml
index b25dfb6..b07a706 100644
--- a/dev/java-code-format-template.xml
+++ b/dev/java-code-format-template.xml
@@ -63,6 +63,7 @@
         <option value="org.apache.carbondata" />
       </array>
     </option>
+    <option name="sortAsScalastyle" value="true" />
     <option name="METHOD_BRACE_FORCE" value="1" />
     <option name="NOT_CONTINUATION_INDENT_FOR_PARAMS" value="true" />
     <option name="USE_ALTERNATE_CONTINUATION_INDENT_FOR_PARAMS" value="true" />
@@ -114,6 +115,10 @@
     </indentOptions>
   </codeStyleSettings>
   <codeStyleSettings language="Scala">
+    <option name="KEEP_BLANK_LINES_IN_DECLARATIONS" value="1" />
+    <option name="KEEP_BLANK_LINES_IN_CODE" value="1" />
+    <option name="KEEP_BLANK_LINES_BEFORE_RBRACE" value="0" />
+    <option name="BLANK_LINES_BEFORE_PACKAGE" value="1" />
     <option name="ALIGN_MULTILINE_PARAMETERS" value="false" />
     <option name="ALIGN_MULTILINE_BINARY_OPERATION" value="true" />
     <option name="CALL_PARAMETERS_WRAP" value="5" />
diff --git a/examples/spark/src/main/scala/org/apache/carbondata/examples/HadoopFileExample.scala b/examples/spark/src/main/scala/org/apache/carbondata/examples/HadoopFileExample.scala
index bddc8ae..d0d1c8d 100644
--- a/examples/spark/src/main/scala/org/apache/carbondata/examples/HadoopFileExample.scala
+++ b/examples/spark/src/main/scala/org/apache/carbondata/examples/HadoopFileExample.scala
@@ -23,8 +23,8 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.spark.sql.{SaveMode, SparkSession}
 
 import org.apache.carbondata.examples.util.ExampleUtils
-import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
 import org.apache.carbondata.hadoop.CarbonProjection
+import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
 
 // scalastyle:off println
 object HadoopFileExample {
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/index/CarbonMergeBloomIndexFilesRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/index/CarbonMergeBloomIndexFilesRDD.scala
index 5ed3585..63bed65 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/index/CarbonMergeBloomIndexFilesRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/index/CarbonMergeBloomIndexFilesRDD.scala
@@ -19,10 +19,9 @@ package org.apache.carbondata.index
 
 import scala.collection.JavaConverters._
 
-import org.apache.spark.Partition
+import org.apache.spark.{Partition, TaskContext}
 import org.apache.spark.rdd.CarbonMergeFilePartition
 import org.apache.spark.sql.SparkSession
-import org.apache.spark.TaskContext
 
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.util.path.CarbonTablePath
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
index 419e0b6..eb6c82c 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
@@ -29,8 +29,8 @@ import org.apache.spark.{CarbonInputMetrics, DataSkewRangePartitioner, TaskConte
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.rdd.RDD
 import org.apache.spark.sql.{DataFrame, SparkSession}
-import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
 import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
 import org.apache.spark.sql.execution.command.ExecutionErrors
 import org.apache.spark.sql.types.{ByteType, DateType, LongType, StringType, TimestampType}
 import org.apache.spark.sql.util.{SparkSQLUtil, SparkTypeConverter}
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala
index 6851959..cfbf83c 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessorStepOnSpark.scala
@@ -21,11 +21,11 @@ import java.util
 
 import com.univocity.parsers.common.TextParsingException
 import org.apache.hadoop.conf.Configuration
+import org.apache.spark.TaskContext
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
-import org.apache.spark.TaskContext
 import org.apache.spark.util.LongAccumulator
 
 import org.apache.carbondata.common.logging.LogServiceFactory
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
index 3f17f4e..0f0ac0b 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
@@ -22,15 +22,13 @@ import java.util
 import java.util.{Collections, List}
 import java.util.concurrent.atomic.AtomicInteger
 
-import scala.collection.mutable
 import scala.collection.JavaConverters._
+import scala.collection.mutable
 import scala.reflect.classTag
 
-import org.apache.hadoop.mapred.JobConf
 import org.apache.hadoop.mapreduce.{InputSplit, Job}
 import org.apache.spark._
 import org.apache.spark.broadcast.Broadcast
-import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.carbondata.execution.datasources.CarbonSparkDataSourceUtil
 import org.apache.spark.sql.catalyst.InternalRow
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonTableCompactor.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonTableCompactor.scala
index bb1ca14..f6c1dd5 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonTableCompactor.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonTableCompactor.scala
@@ -37,21 +37,21 @@ import org.apache.carbondata.core.constants.SortScopeOptions.SortScope
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.index.{IndexStoreManager, Segment}
 import org.apache.carbondata.core.locks.{CarbonLockFactory, ICarbonLock, LockUsage}
-import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.metadata.SegmentFileStore
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.segmentmeta.SegmentMetaDataInfo
 import org.apache.carbondata.core.statusmanager.{LoadMetadataDetails, SegmentStatusManager}
 import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil}
 import org.apache.carbondata.core.util.path.CarbonTablePath
 import org.apache.carbondata.events._
-import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
 import org.apache.carbondata.hadoop.CarbonInputSplit
+import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
 import org.apache.carbondata.indexserver.{DistributedRDDUtils, IndexServer}
 import org.apache.carbondata.processing.loading.FailureCauses
 import org.apache.carbondata.processing.loading.model.CarbonLoadModel
 import org.apache.carbondata.processing.merger.{CarbonCompactionUtil, CarbonDataMergerUtil, CompactionType}
-import org.apache.carbondata.spark.load.DataLoadProcessBuilderOnSpark
 import org.apache.carbondata.spark.MergeResultImpl
+import org.apache.carbondata.spark.load.DataLoadProcessBuilderOnSpark
 import org.apache.carbondata.spark.util.CarbonSparkUtil
 import org.apache.carbondata.view.MVManagerInSpark
 
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonSparkUtil.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonSparkUtil.scala
index efb10de..d3607c1 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonSparkUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonSparkUtil.scala
@@ -24,10 +24,10 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.s3a.Constants.{ACCESS_KEY, ENDPOINT, SECRET_KEY}
 import org.apache.hadoop.mapred.JobConf
 import org.apache.hadoop.mapreduce.Job
-import org.apache.spark.sql.hive.CarbonRelation
-import org.apache.spark.sql.types.{ArrayType, DataType, DataTypes, FloatType, MapType, StructField, StructType}
 import org.apache.spark.SparkConf
 import org.apache.spark.deploy.SparkHadoopUtil
+import org.apache.spark.sql.hive.CarbonRelation
+import org.apache.spark.sql.types._
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datastore.impl.FileFactory
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSession.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSession.scala
index 4700582..475152b 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSession.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSession.scala
@@ -40,11 +40,8 @@ import org.apache.carbondata.streaming.CarbonStreamingQueryListener
  * Session implementation for {org.apache.spark.sql.SparkSession}
  * Implemented this class only to use our own SQL DDL commands.
  * User needs to use {CarbonSession.getOrCreateCarbon} to create Carbon session.
- *
- * @deprecated Since 2.0, only use for backward compatibility,
- *             please switch to use {@link CarbonExtensions}.
  */
-@Deprecated
+@deprecated("only use for backward compatibility, please switch to use CarbonExtensions", "2.0")
 class CarbonSession(@transient val sc: SparkContext,
     @transient private val existingSharedState: Option[SharedState],
     @transient private val useHiveMetaStore: Boolean = true
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSource.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSource.scala
index aab1a8c..90c7339 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSource.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSource.scala
@@ -40,8 +40,8 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.metadata.schema.SchemaEvolutionEntry
 import org.apache.carbondata.core.metadata.schema.table.TableInfo
-import org.apache.carbondata.core.util.path.CarbonTablePath
 import org.apache.carbondata.core.util.CarbonUtil
+import org.apache.carbondata.core.util.path.CarbonTablePath
 import org.apache.carbondata.spark.CarbonOption
 import org.apache.carbondata.spark.util.{CarbonScalaUtil, CarbonSparkUtil}
 import org.apache.carbondata.streaming.{CarbonStreamException, CarbonStreamingQueryListener, StreamSinkFactory}
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/AbstractCarbonSparkSQLParser.scala b/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/AbstractCarbonSparkSQLParser.scala
index 2dc2d4e..bdbb901 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/AbstractCarbonSparkSQLParser.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/catalyst/AbstractCarbonSparkSQLParser.scala
@@ -18,9 +18,9 @@
 package org.apache.spark.sql.catalyst
 
 import scala.language.implicitConversions
+import scala.util.parsing.combinator.PackratParsers
 import scala.util.parsing.combinator.lexical.StdLexical
 import scala.util.parsing.combinator.syntactical.StandardTokenParsers
-import scala.util.parsing.combinator.PackratParsers
 import scala.util.parsing.input.CharArrayReader.EofCh
 
 import org.apache.spark.sql.catalyst.plans.logical._
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/CastExpressionOptimization.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/CastExpressionOptimization.scala
index 0b22a16..6b125ee 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/CastExpressionOptimization.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/CastExpressionOptimization.scala
@@ -23,14 +23,14 @@ import java.util.{Locale, TimeZone}
 
 import scala.collection.JavaConverters._
 
-import org.apache.spark.sql.catalyst.expressions.{Attribute, EmptyRow, EqualTo, Expression, GreaterThan, GreaterThanOrEqual, In, LessThan, LessThanOrEqual, Literal, Not}
+import org.apache.spark.sql.CarbonExpressions.{MatchCast => Cast}
 import org.apache.spark.sql.CastExpr
 import org.apache.spark.sql.FalseExpr
-import org.apache.spark.sql.sources
-import org.apache.spark.sql.types._
-import org.apache.spark.sql.CarbonExpressions.{MatchCast => Cast}
+import org.apache.spark.sql.catalyst.expressions.{Attribute, EmptyRow, EqualTo, Expression, GreaterThan, GreaterThanOrEqual, In, LessThan, LessThanOrEqual, Literal, Not}
 import org.apache.spark.sql.catalyst.util.DateTimeUtils
+import org.apache.spark.sql.sources
 import org.apache.spark.sql.sources.Filter
+import org.apache.spark.sql.types._
 import org.apache.spark.unsafe.types.UTF8String
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
index a08b245..0fd0742 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
@@ -17,8 +17,8 @@
 
 package org.apache.spark.sql.execution.command.cache
 
-import scala.collection.mutable
 import scala.collection.JavaConverters._
+import scala.collection.mutable
 
 import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
 import org.apache.spark.sql.AnalysisException
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
index f81e8d0..c24c922 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
@@ -23,8 +23,8 @@ import java.util
 import scala.collection.JavaConverters._
 
 import org.apache.spark.sql.{AnalysisException, CarbonEnv, Row, SparkSession, SQLContext}
-import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
 import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
 import org.apache.spark.sql.execution.command.{AlterTableModel, AtomicRunnableCommand, CompactionModel}
 import org.apache.spark.sql.hive.CarbonRelation
 import org.apache.spark.sql.optimizer.CarbonFilters
@@ -44,7 +44,6 @@ import org.apache.carbondata.core.mutate.CarbonUpdateUtil
 import org.apache.carbondata.core.statusmanager.{SegmentStatusManager, SegmentUpdateStatusManager}
 import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil, DataLoadMetrics}
 import org.apache.carbondata.core.util.path.CarbonTablePath
-import org.apache.carbondata.core.view.{MVSchema, MVStatus}
 import org.apache.carbondata.events._
 import org.apache.carbondata.processing.loading.model.{CarbonDataLoadSchema, CarbonLoadModel}
 import org.apache.carbondata.processing.merger.{CarbonDataMergerUtil, CompactionType}
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
index a87042a..6a655e3 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonInsertIntoCommand.scala
@@ -39,12 +39,12 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.indexstore.PartitionSpec
 import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, TableInfo, TableSchema}
-import org.apache.carbondata.core.metadata.schema.table.column.{CarbonColumn, ColumnSchema}
-import org.apache.carbondata.core.statusmanager.{LoadMetadataDetails, SegmentStatus, SegmentStatusManager}
+import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema
+import org.apache.carbondata.core.statusmanager.{LoadMetadataDetails, SegmentStatus}
 import org.apache.carbondata.core.util.{CarbonProperties, DataTypeUtil, ThreadLocalSessionInfo}
 import org.apache.carbondata.core.util.path.CarbonTablePath
-import org.apache.carbondata.events.exception.PreEventException
 import org.apache.carbondata.events.OperationContext
+import org.apache.carbondata.events.exception.PreEventException
 import org.apache.carbondata.processing.loading.TableProcessingOperations
 import org.apache.carbondata.processing.loading.exception.NoRetryException
 import org.apache.carbondata.processing.loading.model.CarbonLoadModel
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
index bffca20..f14c371 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
@@ -20,8 +20,8 @@ package org.apache.spark.sql.execution.command.table
 import scala.collection.JavaConverters._
 
 import org.apache.spark.sql.{CarbonEnv, Row, SparkSession, _}
-import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
 import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
 import org.apache.spark.sql.execution.command.MetadataCommand
 import org.apache.spark.sql.parser.CarbonSparkSqlParserUtil
 import org.apache.spark.util.SparkUtil
@@ -29,7 +29,6 @@ import org.apache.spark.util.SparkUtil
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
-import org.apache.carbondata.core.metadata.encoder.Encoding
 import org.apache.carbondata.core.metadata.schema.partition.PartitionType
 import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, TableInfo}
 import org.apache.carbondata.core.util.ThreadLocalSessionInfo
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
index 02ff8e9..4a06bce 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/datasources/SparkCarbonTableFormat.scala
@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.{FileStatus, Path}
 import org.apache.hadoop.io.NullWritable
 import org.apache.hadoop.mapreduce.{Job, JobContext, TaskAttemptContext}
+import org.apache.spark.TaskContext
 import org.apache.spark.internal.Logging
 import org.apache.spark.internal.io.FileCommitProtocol
 import org.apache.spark.internal.io.FileCommitProtocol.TaskCommitMessage
@@ -37,7 +38,6 @@ import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.sources.DataSourceRegister
 import org.apache.spark.sql.types._
-import org.apache.spark.TaskContext
 
 import org.apache.carbondata.common.Maps
 import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonLoadOptionConstants}
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonPlanHelper.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonPlanHelper.scala
index 3712050..dd335fc 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonPlanHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/CarbonPlanHelper.scala
@@ -22,10 +22,10 @@ import org.apache.spark.sql.{CarbonEnv, InsertIntoCarbonTable, SparkSession}
 import org.apache.spark.sql.carbondata.execution.datasources.CarbonSparkDataSourceUtil
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
+import org.apache.spark.sql.execution.SparkPlan
 import org.apache.spark.sql.execution.command.{ExecutedCommandExec, RunnableCommand}
 import org.apache.spark.sql.execution.command.management.{CarbonAlterTableCompactionCommand, CarbonInsertIntoCommand}
 import org.apache.spark.sql.execution.command.schema.{CarbonAlterTableAddColumnCommand, CarbonAlterTableColRenameDataTypeChangeCommand, CarbonAlterTableDropColumnCommand}
-import org.apache.spark.sql.execution.SparkPlan
 import org.apache.spark.sql.types.StructField
 import org.apache.spark.util.{CarbonReflectionUtils, SparkUtil}
 
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLHelper.scala b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLHelper.scala
index bd9b225..35f799c 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/execution/strategy/DDLHelper.scala
@@ -21,13 +21,13 @@ import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.{CarbonParserUtil, TableIdentifier}
 import org.apache.spark.sql.catalyst.analysis.{NoSuchDatabaseException, UnresolvedRelation}
 import org.apache.spark.sql.catalyst.catalog.{CatalogTable, CatalogUtils}
-import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan, Union}
-import org.apache.spark.sql.execution.command.{AlterTableAddPartitionCommand, AlterTableChangeColumnCommand, AlterTableDropPartitionCommand, AlterTableUnsetPropertiesCommand, DescribeTableCommand, ShowPartitionsCommand, _}
-import org.apache.spark.sql.execution.command.table._
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.execution.SparkPlan
+import org.apache.spark.sql.execution.command.{AlterTableAddPartitionCommand, AlterTableChangeColumnCommand, AlterTableDropPartitionCommand, AlterTableUnsetPropertiesCommand, DescribeTableCommand, ShowPartitionsCommand, _}
 import org.apache.spark.sql.execution.command.management.RefreshCarbonTableCommand
 import org.apache.spark.sql.execution.command.partition.{CarbonAlterTableAddHivePartitionCommand, CarbonAlterTableDropHivePartitionCommand}
 import org.apache.spark.sql.execution.command.schema.{CarbonAlterTableAddColumnCommand, CarbonAlterTableColRenameDataTypeChangeCommand, CarbonAlterTableRenameCommand, CarbonAlterTableSetCommand, CarbonAlterTableUnsetCommand}
+import org.apache.spark.sql.execution.command.table._
 import org.apache.spark.sql.execution.datasources.{LogicalRelation, RefreshResource, RefreshTable}
 import org.apache.spark.sql.hive.execution.CreateHiveTableAsSelectCommand
 import org.apache.spark.sql.parser.{CarbonSpark2SqlParser, CarbonSparkSqlParserUtil}
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalog.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalog.scala
index 99ceebf..7351b5f 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalog.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonSessionCatalog.scala
@@ -1,19 +1,19 @@
 /*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*    http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 package org.apache.spark.sql.hive
 
 import org.apache.hadoop.fs.Path
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/SqlAstBuilderHelper.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/SqlAstBuilderHelper.scala
index 42a8534..dae6907 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/SqlAstBuilderHelper.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/SqlAstBuilderHelper.scala
@@ -17,11 +17,11 @@
 
 package org.apache.spark.sql.hive
 
+import org.apache.spark.sql.catalyst.CarbonParserUtil
 import org.apache.spark.sql.catalyst.parser.ParserUtils.{string, withOrigin}
 import org.apache.spark.sql.catalyst.parser.SqlBaseParser
 import org.apache.spark.sql.catalyst.parser.SqlBaseParser.{AddTableColumnsContext, ChangeColumnContext, CreateTableContext, ShowTablesContext}
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
-import org.apache.spark.sql.catalyst.CarbonParserUtil
 import org.apache.spark.sql.execution.SparkSqlAstBuilder
 import org.apache.spark.sql.execution.command.{AlterTableAddColumnsModel, AlterTableDataTypeChangeModel}
 import org.apache.spark.sql.execution.command.schema.{CarbonAlterTableAddColumnCommand, CarbonAlterTableColRenameDataTypeChangeCommand}
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/listeners/DropCacheEventListeners.scala b/integration/spark/src/main/scala/org/apache/spark/sql/listeners/DropCacheEventListeners.scala
index 3a3d5b2..4e9042e 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/listeners/DropCacheEventListeners.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/listeners/DropCacheEventListeners.scala
@@ -1,19 +1,19 @@
 /*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*    http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 package org.apache.spark.sql.listeners
 
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/listeners/MVListeners.scala b/integration/spark/src/main/scala/org/apache/spark/sql/listeners/MVListeners.scala
index c4d7add..26036f7 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/listeners/MVListeners.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/listeners/MVListeners.scala
@@ -1,19 +1,19 @@
 /*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*    http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 package org.apache.spark.sql.listeners
 
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/listeners/ShowCacheEventListener.scala b/integration/spark/src/main/scala/org/apache/spark/sql/listeners/ShowCacheEventListener.scala
index dabdd4e..7477277 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/listeners/ShowCacheEventListener.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/listeners/ShowCacheEventListener.scala
@@ -1,19 +1,19 @@
 /*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*    http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
 
 package org.apache.spark.sql.listeners
 
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
index 9f6f6c7..cf9ca42 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
@@ -23,24 +23,21 @@ import scala.collection.JavaConverters._
 import scala.collection.mutable
 import scala.util.Try
 
-import org.apache.spark.sql._
-import org.apache.spark.sql.catalyst.expressions._
-import org.apache.spark.sql.types._
-import org.apache.spark.sql.CarbonContainsWith
-import org.apache.spark.sql.CarbonEndsWith
+import org.apache.spark.sql.{CarbonContainsWith, CarbonEndsWith, _}
 import org.apache.spark.sql.carbondata.execution.datasources.CarbonSparkDataSourceUtil
 import org.apache.spark.sql.catalyst.TableIdentifier
+import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.hive.{CarbonHiveIndexMetadataUtil, CarbonSessionCatalogUtil}
+import org.apache.spark.sql.types._
 import org.apache.spark.util.CarbonReflectionUtils
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.indexstore.PartitionSpec
 import org.apache.carbondata.core.metadata.datatype.{DataTypes => CarbonDataTypes}
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable
-import org.apache.carbondata.core.scan.expression.{ColumnExpression => CarbonColumnExpression, Expression => CarbonExpression, LiteralExpression => CarbonLiteralExpression}
+import org.apache.carbondata.core.scan.expression.{ColumnExpression => CarbonColumnExpression, Expression => CarbonExpression, LiteralExpression => CarbonLiteralExpression, MatchExpression}
 import org.apache.carbondata.core.scan.expression.conditional._
 import org.apache.carbondata.core.scan.expression.logical.{AndExpression, FalseExpression, OrExpression}
-import org.apache.carbondata.core.scan.expression.MatchExpression
 import org.apache.carbondata.core.scan.filter.intf.ExpressionType
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.geo.{GeoUtils, InPolygon}
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonExtensionSpark2SqlParser.scala b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonExtensionSpark2SqlParser.scala
index b1a529b..6630625 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonExtensionSpark2SqlParser.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/parser/CarbonExtensionSpark2SqlParser.scala
@@ -20,13 +20,6 @@ package org.apache.spark.sql.parser
 import scala.language.implicitConversions
 
 import org.apache.spark.sql.catalyst.plans.logical._
-import org.apache.spark.sql.catalyst.CarbonParserUtil
-import org.apache.spark.sql.execution.command._
-import org.apache.spark.sql.execution.command.management.CarbonLoadDataCommand
-import org.apache.spark.sql.execution.command.schema.{CarbonAlterTableAddColumnCommand, CarbonAlterTableColRenameDataTypeChangeCommand, CarbonAlterTableDropColumnCommand}
-
-import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
-import org.apache.carbondata.core.constants.CarbonCommonConstants
 
 /**
  * Parser for All Carbon DDL, DML cases in Unified context
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/util/FileInternalUtil.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/util/FileInternalUtil.scala
index 46c318e..1a849b6 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/util/FileInternalUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/util/FileInternalUtil.scala
@@ -19,9 +19,9 @@ package org.apache.spark.sql.secondaryindex.util
 
 import scala.collection.JavaConverters._
 
-import org.apache.spark.sql.secondaryindex.load.CarbonInternalLoaderUtil
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.index.CarbonIndexUtil
+import org.apache.spark.sql.secondaryindex.load.CarbonInternalLoaderUtil
 
 import org.apache.carbondata.core.metadata.SegmentFileStore
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable
diff --git a/integration/spark/src/main/spark2.4/org/apache/carbondata/spark/adapter/CarbonToSparkAdapter.scala b/integration/spark/src/main/spark2.4/org/apache/carbondata/spark/adapter/CarbonToSparkAdapter.scala
index dc5c14b..60f55ba 100644
--- a/integration/spark/src/main/spark2.4/org/apache/carbondata/spark/adapter/CarbonToSparkAdapter.scala
+++ b/integration/spark/src/main/spark2.4/org/apache/carbondata/spark/adapter/CarbonToSparkAdapter.scala
@@ -19,9 +19,7 @@ package org.apache.carbondata.spark.adapter
 
 import scala.collection.mutable.ArrayBuffer
 
-import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference, ExprId, Expression, NamedExpression}
 import org.apache.spark.sql.execution.datasources.{FilePartition, PartitionedFile}
-import org.apache.spark.sql.types.{DataType, Metadata}
 
 object CarbonToSparkAdapter {
   def createFilePartition(index: Int, files: ArrayBuffer[PartitionedFile]): FilePartition = {
diff --git a/integration/spark/src/main/spark2.4/org/apache/spark/sql/CarbonToSparkAdapter.scala b/integration/spark/src/main/spark2.4/org/apache/spark/sql/CarbonToSparkAdapter.scala
index ec1b976..a7a4bad 100644
--- a/integration/spark/src/main/spark2.4/org/apache/spark/sql/CarbonToSparkAdapter.scala
+++ b/integration/spark/src/main/spark2.4/org/apache/spark/sql/CarbonToSparkAdapter.scala
@@ -48,7 +48,7 @@ object CarbonToSparkAdapter {
     })
   }
 
-  def addSparkListener(sparkContext: SparkContext) = {
+  def addSparkListener(sparkContext: SparkContext): Unit = {
     sparkContext.addSparkListener(new SparkListener {
       override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = {
         SparkSession.setDefaultSession(null)
@@ -94,11 +94,11 @@ object CarbonToSparkAdapter {
       qualifier = newSubsume.split("\n").map(_.trim))
   }
 
-  def createScalaUDF(s: ScalaUDF, reference: AttributeReference) = {
+  def createScalaUDF(s: ScalaUDF, reference: AttributeReference): ScalaUDF = {
     ScalaUDF(s.function, s.dataType, Seq(reference), s.inputsNullSafe, s.inputTypes)
   }
 
-  def createExprCode(code: String, isNull: String, value: String, dataType: DataType) = {
+  def createExprCode(code: String, isNull: String, value: String, dataType: DataType): ExprCode = {
     ExprCode(
       code"$code",
       JavaCode.isNullVariable(isNull),
@@ -181,11 +181,12 @@ object CarbonToSparkAdapter {
     subQueryAlias.child.output.map(_.withQualifier(newAlias))
   }
 
-  def getHiveExternalCatalog(sparkSession: SparkSession) =
+  def getHiveExternalCatalog(sparkSession: SparkSession): HiveExternalCatalog = {
     sparkSession.sessionState.catalog.externalCatalog
       .asInstanceOf[ExternalCatalogWithListener]
       .unwrapped
       .asInstanceOf[HiveExternalCatalog]
+  }
 }
 
 class CarbonOptimizer(
diff --git a/integration/spark/src/main/spark2.4/org/apache/spark/sql/execution/strategy/CarbonDataSourceScan.scala b/integration/spark/src/main/spark2.4/org/apache/spark/sql/execution/strategy/CarbonDataSourceScan.scala
index 60ee7ea..7627b0b 100644
--- a/integration/spark/src/main/spark2.4/org/apache/spark/sql/execution/strategy/CarbonDataSourceScan.scala
+++ b/integration/spark/src/main/spark2.4/org/apache/spark/sql/execution/strategy/CarbonDataSourceScan.scala
@@ -14,12 +14,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+
 package org.apache.spark.sql.execution.strategy
 
 import org.apache.spark.rdd.RDD
+import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier}
 import org.apache.spark.sql.catalyst.expressions.{Attribute, SortOrder}
 import org.apache.spark.sql.catalyst.plans.physical.Partitioning
-import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier}
 import org.apache.spark.sql.execution.FileSourceScanExec
 import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation}
 
diff --git a/integration/spark/src/main/spark2.4/org/apache/spark/sql/hive/CarbonSessionStateBuilder.scala b/integration/spark/src/main/spark2.4/org/apache/spark/sql/hive/CarbonSessionStateBuilder.scala
index b3666d1..92c65bb 100644
--- a/integration/spark/src/main/spark2.4/org/apache/spark/sql/hive/CarbonSessionStateBuilder.scala
+++ b/integration/spark/src/main/spark2.4/org/apache/spark/sql/hive/CarbonSessionStateBuilder.scala
@@ -21,13 +21,13 @@ import java.util.concurrent.Callable
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.Path
-import org.apache.spark.sql.catalyst.expressions.Expression
-import org.apache.spark.sql.catalyst.{QualifiedTableName, TableIdentifier}
-import org.apache.spark.sql.catalyst.parser.ParserInterface
-import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.{CarbonEnv, SparkSession}
+import org.apache.spark.sql.catalyst.{QualifiedTableName, TableIdentifier}
 import org.apache.spark.sql.catalyst.analysis.{Analyzer, FunctionRegistry}
 import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTablePartition, ExternalCatalogWithListener, FunctionResourceLoader, GlobalTempViewManager}
+import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.catalyst.parser.ParserInterface
+import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.execution.strategy.{CarbonLateDecodeStrategy, DDLStrategy, StreamingTableStrategy}
 import org.apache.spark.sql.hive.client.HiveClient
 import org.apache.spark.sql.internal.{SessionState, SQLConf}
diff --git a/integration/spark/src/main/spark2.4/org/apache/spark/sql/parser/SparkSqlAstBuilderWrapper.scala b/integration/spark/src/main/spark2.4/org/apache/spark/sql/parser/SparkSqlAstBuilderWrapper.scala
index 0e56e00..5c4de43 100644
--- a/integration/spark/src/main/spark2.4/org/apache/spark/sql/parser/SparkSqlAstBuilderWrapper.scala
+++ b/integration/spark/src/main/spark2.4/org/apache/spark/sql/parser/SparkSqlAstBuilderWrapper.scala
@@ -24,8 +24,8 @@ import org.apache.spark.sql.internal.SQLConf
 /**
  * use this wrapper to adapter multiple spark versions
  */
-class SparkSqlAstBuilderWrapper(conf: SQLConf)
+abstract class SparkSqlAstBuilderWrapper(conf: SQLConf)
   extends SparkSqlAstBuilder(conf) {
 
-  def visitPropertyKeyValues(ctx: TablePropertyListContext): Map[String, String] = ???
+  def visitPropertyKeyValues(ctx: TablePropertyListContext): Map[String, String]
 }
diff --git a/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/TableCluster.scala b/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/TableCluster.scala
index 021518c..1bf4619 100644
--- a/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/TableCluster.scala
+++ b/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/util/TableCluster.scala
@@ -23,33 +23,18 @@ import com.google.common.base.Objects
 class TableCluster @JsonCreator()(@JsonProperty("fact") @JsonRawValue fact: Set[String],
     @JsonProperty("dimension") @JsonRawValue dimension: Set[String]) {
 
-  //  @JsonProperty
   def getFact(): Set[String] = {
     fact
   }
 
-  //
-  //  @JsonProperty
   def getDimension(): Set[String] = {
     dimension
   }
 
-  @Override
   override def toString: String = {
     Objects.toStringHelper(this)
       .add("fact", fact)
       .add("dimension", dimension)
       .toString
   }
-
-  /*
-  @Override
-  def toString = {
-    MoreObjects.toStringHelper(this)
-    .add("fact", fact)
-    .add("dimension", dimension)
-    .toString
-  }
-  *
-  */
 }
diff --git a/mv/plan/src/main/spark2.4/org/apache/carbondata/mv/plans/modular/ExpressionHelper.scala b/mv/plan/src/main/spark2.4/org/apache/carbondata/mv/plans/modular/ExpressionHelper.scala
index 4de9042..d1c9d8a 100644
--- a/mv/plan/src/main/spark2.4/org/apache/carbondata/mv/plans/modular/ExpressionHelper.scala
+++ b/mv/plan/src/main/spark2.4/org/apache/carbondata/mv/plans/modular/ExpressionHelper.scala
@@ -17,7 +17,7 @@
 
 package org.apache.carbondata.mv.plans.modular
 
-import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference, ExprId, Expression, NamedExpression}
+import org.apache.spark.sql.catalyst.expressions.{Alias, AttributeReference, Expression, ExprId, NamedExpression}
 import org.apache.spark.sql.types.{DataType, Metadata}
 
 object ExpressionHelper {
diff --git a/pom.xml b/pom.xml
index 3c2e73a..25d426d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -416,7 +416,7 @@
       <plugin>
         <groupId>org.scalastyle</groupId>
         <artifactId>scalastyle-maven-plugin</artifactId>
-        <version>0.8.0</version>
+        <version>1.0.0</version>
         <executions>
           <execution>
             <goals>
@@ -431,9 +431,10 @@
           <failOnWarning>false</failOnWarning>
           <sourceDirectory>${basedir}/src/main/scala</sourceDirectory>
           <testSourceDirectory>${basedir}/src/test/scala</testSourceDirectory>
-          <configLocation>${dev.path}/scalastyle-config.xml</configLocation>
+          <configLocation>scalastyle-config.xml</configLocation>
           <outputFile>${basedir}/target/scalastyle-output.xml</outputFile>
-          <outputEncoding>${project.build.sourceEncoding}</outputEncoding>
+          <inputEncoding>${project.build.sourceEncoding}</inputEncoding>
+          <outputEncoding>${project.reporting.outputEncoding}</outputEncoding>
         </configuration>
       </plugin>
       <plugin>
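With scalastyle-maven-plugin 1.0.0 bound to its check goal as above, Scalastyle
violations now fail the regular Maven build against the relocated
scalastyle-config.xml; the check can also be run on its own with
"mvn scalastyle:check" (standard usage of this plugin).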
diff --git a/dev/scalastyle-config.xml b/scalastyle-config.xml
similarity index 81%
rename from dev/scalastyle-config.xml
rename to scalastyle-config.xml
index f8808f3..0467b48 100644
--- a/dev/scalastyle-config.xml
+++ b/scalastyle-config.xml
@@ -44,7 +44,7 @@ This file is divided into 3 sections:
  <!--                               rules we enforce                                   -->
  <!-- ================================================================================ -->
 
- <check level="error" class="org.scalastyle.file.FileTabChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.file.FileTabChecker" enabled="true"/>
 
  <check level="error" class="org.scalastyle.file.HeaderMatchesChecker" enabled="true">
   <parameters>
@@ -67,11 +67,11 @@ This file is divided into 3 sections:
   </parameters>
  </check>
 
- <check level="error" class="org.scalastyle.scalariform.SpacesAfterPlusChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.scalariform.SpacesAfterPlusChecker" enabled="true"/>
 
- <check level="error" class="org.scalastyle.scalariform.SpacesBeforePlusChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.scalariform.SpacesBeforePlusChecker" enabled="true"/>
 
- <check level="error" class="org.scalastyle.file.WhitespaceEndOfLineChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.file.WhitespaceEndOfLineChecker" enabled="true"/>
 
  <check level="error" class="org.scalastyle.file.FileLineLengthChecker" enabled="true">
   <parameters>
@@ -82,28 +82,36 @@ This file is divided into 3 sections:
  </check>
 
  <check level="error" class="org.scalastyle.scalariform.ClassNamesChecker" enabled="true">
-  <parameters><parameter name="regex"><![CDATA[[A-Z][A-Za-z]*]]></parameter></parameters>
+  <parameters>
+   <parameter name="regex"><![CDATA[[A-Z][A-Za-z]*]]></parameter>
+  </parameters>
  </check>
 
  <check level="error" class="org.scalastyle.scalariform.ObjectNamesChecker" enabled="true">
-  <parameters><parameter name="regex"><![CDATA[[A-Z][A-Za-z]*]]></parameter></parameters>
+  <parameters>
+   <parameter name="regex"><![CDATA[[A-Z][A-Za-z]*]]></parameter>
+  </parameters>
  </check>
 
  <check level="error" class="org.scalastyle.scalariform.PackageObjectNamesChecker" enabled="true">
-  <parameters><parameter name="regex"><![CDATA[^[a-z][A-Za-z]*$]]></parameter></parameters>
+  <parameters>
+   <parameter name="regex"><![CDATA[^[a-z][A-Za-z]*$]]></parameter>
+  </parameters>
  </check>
 
  <check level="error" class="org.scalastyle.scalariform.ParameterNumberChecker" enabled="true">
-  <parameters><parameter name="maxParameters"><![CDATA[10]]></parameter></parameters>
+  <parameters>
+   <parameter name="maxParameters"><![CDATA[10]]></parameter>
+  </parameters>
  </check>
 
- <check level="error" class="org.scalastyle.scalariform.NoFinalizeChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.scalariform.NoFinalizeChecker" enabled="true"/>
 
- <check level="error" class="org.scalastyle.scalariform.CovariantEqualsChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.scalariform.CovariantEqualsChecker" enabled="true"/>
 
- <check level="error" class="org.scalastyle.scalariform.StructuralTypeChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.scalariform.StructuralTypeChecker" enabled="true"/>
 
- <check level="error" class="org.scalastyle.scalariform.UppercaseLChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.scalariform.UppercaseLChecker" enabled="true"/>
 
  <check level="error" class="org.scalastyle.scalariform.IfBraceChecker" enabled="true">
   <parameters>
@@ -112,13 +120,13 @@ This file is divided into 3 sections:
   </parameters>
  </check>
 
- <check level="error" class="org.scalastyle.scalariform.PublicMethodsHaveTypeChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.scalariform.PublicMethodsHaveTypeChecker" enabled="true"/>
 
- <check level="error" class="org.scalastyle.file.NewLineAtEofChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.file.NewLineAtEofChecker" enabled="true"/>
 
- <check customId="nonascii" level="error" class="org.scalastyle.scalariform.NonASCIICharacterChecker" enabled="true"></check>
+ <check customId="nonascii" level="error" class="org.scalastyle.scalariform.NonASCIICharacterChecker" enabled="true"/>
 
- <check level="error" class="org.scalastyle.scalariform.SpaceAfterCommentStartChecker" enabled="true"></check>
+ <check level="error" class="org.scalastyle.scalariform.SpaceAfterCommentStartChecker" enabled="true"/>
 
  <check level="error" class="org.scalastyle.scalariform.EnsureSingleSpaceBeforeTokenChecker" enabled="true">
   <parameters>
@@ -133,11 +141,12 @@ This file is divided into 3 sections:
  </check>
 
  <!-- ??? usually shouldn't be checked into the code base. -->
- <check level="error" class="org.scalastyle.scalariform.NotImplementedErrorUsage" enabled="true"></check>
+ <check level="error" class="org.scalastyle.scalariform.NotImplementedErrorUsage" enabled="true"/>
 
- <!-- As of SPARK-7977 all printlns need to be wrapped in '// scalastyle:off/on println' -->
  <check customId="println" level="error" class="org.scalastyle.scalariform.TokenChecker" enabled="true">
-  <parameters><parameter name="regex">^println$</parameter></parameters>
+  <parameters>
+   <parameter name="regex">^println$</parameter>
+  </parameters>
   <customMessage><![CDATA[Are you sure you want to println? If yes, wrap the code block with
       // scalastyle:off println
       println(...)
@@ -145,14 +154,18 @@ This file is divided into 3 sections:
  </check>
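+ <!-- e.g. (illustrative sketch) for intentional console output:
+        // scalastyle:off println
+        println("Usage: <tool> <options>")
+        // scalastyle:on println
+ -->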
 
  <check customId="visiblefortesting" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
-  <parameters><parameter name="regex">@VisibleForTesting</parameter></parameters>
+  <parameters>
+   <parameter name="regex">@VisibleForTesting</parameter>
+  </parameters>
   <customMessage><![CDATA[
-      @VisibleForTesting causes classpath issues. Please note this in the java doc instead (SPARK-11615).
+      @VisibleForTesting causes classpath issues. Please note this in the Javadoc instead.
     ]]></customMessage>
  </check>
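+ <!-- e.g. (illustrative sketch; method name is made up): note the intent in
+      the doc comment instead of annotating:
+        /** Exposed for testing only. */
+        def splitInput(line: String): Array[String] = line.split(",")
+ -->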
 
  <check customId="runtimeaddshutdownhook" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
-  <parameters><parameter name="regex">Runtime\.getRuntime\.addShutdownHook</parameter></parameters>
+  <parameters>
+   <parameter name="regex">Runtime\.getRuntime\.addShutdownHook</parameter>
+  </parameters>
   <customMessage><![CDATA[
       Are you sure that you want to use Runtime.getRuntime.addShutdownHook? In most cases, you should use
       ShutdownHookManager.addShutdownHook instead.
@@ -164,7 +177,9 @@ This file is divided into 3 sections:
  </check>
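+ <!-- e.g. (illustrative sketch; assumes Spark's private[spark]
+      org.apache.spark.util.ShutdownHookManager is reachable from the caller,
+      and cleanUpTempFiles is a made-up cleanup hook):
+        ShutdownHookManager.addShutdownHook { () =>
+          cleanUpTempFiles()
+        }
+ -->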
 
  <check customId="mutablesynchronizedbuffer" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
-  <parameters><parameter name="regex">mutable\.SynchronizedBuffer</parameter></parameters>
+  <parameters>
+   <parameter name="regex">mutable\.SynchronizedBuffer</parameter>
+  </parameters>
   <customMessage><![CDATA[
       Are you sure that you want to use mutable.SynchronizedBuffer? In most cases, you should use
       java.util.concurrent.ConcurrentLinkedQueue instead.
@@ -176,7 +191,9 @@ This file is divided into 3 sections:
  </check>
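+ <!-- e.g. (illustrative sketch):
+        import java.util.concurrent.ConcurrentLinkedQueue
+        val pendingSegments = new ConcurrentLinkedQueue[String]()
+        pendingSegments.add("segment_0")  // thread-safe without locking
+ -->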
 
  <check customId="classforname" level="error" class="org.scalastyle.file.RegexChecker" enabled="false">
-  <parameters><parameter name="regex">Class\.forName</parameter></parameters>
+  <parameters>
+   <parameter name="regex">Class\.forName</parameter>
+  </parameters>
   <customMessage><![CDATA[
       Are you sure that you want to use Class.forName? In most cases, you should use Utils.classForName instead.
       If you must use Class.forName, wrap the code block with
@@ -187,7 +204,9 @@ This file is divided into 3 sections:
  </check>
 
  <check customId="awaitresult" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
-  <parameters><parameter name="regex">Await\.result</parameter></parameters>
+  <parameters>
+   <parameter name="regex">Await\.result</parameter>
+  </parameters>
   <customMessage><![CDATA[
       Are you sure that you want to use Await.result? In most cases, you should use ThreadUtils.awaitResult instead.
       If you must use Await.result, wrap the code block with
@@ -197,11 +216,13 @@ This file is divided into 3 sections:
     ]]></customMessage>
  </check>
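+ <!-- e.g. (illustrative sketch; assumes Spark's private[spark]
+      org.apache.spark.util.ThreadUtils is reachable, and loadFuture is any
+      Future[_] in scope):
+        import scala.concurrent.duration._
+        val result = ThreadUtils.awaitResult(loadFuture, 30.seconds)
+ -->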
 
- <!-- As of SPARK-9613 JavaConversions should be replaced with JavaConverters -->
  <check customId="javaconversions" level="error" class="org.scalastyle.scalariform.TokenChecker" enabled="true">
-  <parameters><parameter name="regex">JavaConversions</parameter></parameters>
+  <parameters>
+   <parameter name="regex">JavaConversions</parameter>
+  </parameters>
   <customMessage>Instead of importing implicits in scala.collection.JavaConversions._, import
-   scala.collection.JavaConverters._ and use .asScala / .asJava methods</customMessage>
+   scala.collection.JavaConverters._ and use .asScala / .asJava methods
+  </customMessage>
  </check>
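+ <!-- e.g. (illustrative sketch, for any javaList: java.util.List[String]):
+        import scala.collection.JavaConverters._
+        val names = javaList.asScala   // java.util.List to mutable.Buffer
+        val back  = names.asJava       // and back to java.util.List
+ -->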
 
  <check level="error" class="org.scalastyle.scalariform.ImportOrderChecker" enabled="true">
@@ -220,32 +241,41 @@ This file is divided into 3 sections:
   </parameters>
  </check>
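+ <!-- e.g. (illustrative sketch of the expected grouping, mirroring the
+      RowStreamParserImp.scala fix below): java/scala imports first, then
+      third-party packages, then carbondata, each group sorted and separated
+      by a blank line:
+        import java.util
+
+        import org.apache.spark.sql.Row
+
+        import org.apache.carbondata.core.constants.CarbonCommonConstants
+ -->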
 
- <!-- SPARK-3854: Single Space between ')' and '{' -->
  <check customId="SingleSpaceBetweenRParenAndLCurlyBrace" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
-  <parameters><parameter name="regex">\)\{</parameter></parameters>
+  <parameters>
+   <parameter name="regex">\)\{</parameter>
+  </parameters>
   <customMessage><![CDATA[
       Single Space between ')' and `{`.
     ]]></customMessage>
  </check>
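+ <!-- e.g. (illustrative sketch):
+        if (segments.isEmpty) { return }   // accepted
+        if (segments.isEmpty){ return }    // flagged: no space before '{'
+ -->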
 
  <check customId="NoScalaDoc" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
-  <parameters><parameter name="regex">(?m)^(\s*)/[*][*].*$(\r|)\n^\1  [*]</parameter></parameters>
+  <parameters>
+   <parameter name="regex">(?m)^(\s*)/[*][*].*$(\r|)\n^\1  [*]</parameter>
+  </parameters>
   <customMessage>Use Javadoc style indentation for multiline comments</customMessage>
  </check>
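+ <!-- e.g. (illustrative sketch): align each '*' one space in, Javadoc style:
+        /**
+         * Loads one segment.    (accepted)
+         */
+      rather than the two-space ScalaDoc indentation:
+        /**
+          * Loads one segment.   (flagged)
+          */
+ -->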
 
  <check customId="OmitBracesInCase" level="error" class="org.scalastyle.file.RegexChecker" enabled="true">
-  <parameters><parameter name="regex">case[^\n>]*=>\s*\{</parameter></parameters>
+  <parameters>
+   <parameter name="regex">case[^\n>]*=>\s*\{</parameter>
+  </parameters>
   <customMessage>Omit braces in case clauses.</customMessage>
  </check>
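+ <!-- e.g. (illustrative sketch; names are made up):
+        case Some(segment) =>         // accepted
+          mergeSegment(segment)
+        case Some(segment) => {       // flagged: braces are redundant
+          mergeSegment(segment)
+        }
+ -->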
 
+ <check level="error" class="org.scalastyle.scalariform.OverrideJavaChecker" enabled="true"/>
+
+ <check level="error" class="org.scalastyle.scalariform.DeprecatedJavaChecker" enabled="true"/>
+
  <!-- ================================================================================ -->
  <!--       rules we'd like to enforce, but haven't cleaned up the codebase yet        -->
  <!-- ================================================================================ -->
 
  <!-- We cannot turn the following two on, because it'd fail a lot of string interpolation use cases. -->
  <!-- Ideally the following two rules should be configurable to rule out string interpolation. -->
- <check level="error" class="org.scalastyle.scalariform.NoWhitespaceBeforeLeftBracketChecker" enabled="false"></check>
- <check level="error" class="org.scalastyle.scalariform.NoWhitespaceAfterLeftBracketChecker" enabled="false"></check>
+ <check level="error" class="org.scalastyle.scalariform.NoWhitespaceBeforeLeftBracketChecker" enabled="false"/>
+ <check level="error" class="org.scalastyle.scalariform.NoWhitespaceAfterLeftBracketChecker" enabled="false"/>
 
  <!-- This breaks symbolic method names so we don't turn it on. -->
  <!-- Maybe we should update it to allow basic symbolic names, and then we are good to go. -->
@@ -263,53 +293,66 @@ This file is divided into 3 sections:
  <!-- ================================================================================ -->
 
  <check level="error" class="org.scalastyle.scalariform.IllegalImportsChecker" enabled="false">
-  <parameters><parameter name="illegalImports"><![CDATA[sun._,java.awt._]]></parameter></parameters>
+  <parameters>
+   <parameter name="illegalImports"><![CDATA[sun._,java.awt._]]></parameter>
+  </parameters>
  </check>
 
  <!-- We want the opposite of this: NewLineAtEofChecker -->
- <check level="error" class="org.scalastyle.file.NoNewLineAtEofChecker" enabled="false"></check>
+ <check level="error" class="org.scalastyle.file.NoNewLineAtEofChecker" enabled="false"/>
 
  <!-- This one complains about all kinds of random things. Disable. -->
- <check level="error" class="org.scalastyle.scalariform.SimplifyBooleanExpressionChecker" enabled="false"></check>
+ <check level="error" class="org.scalastyle.scalariform.SimplifyBooleanExpressionChecker" enabled="false"/>
 
  <!-- We use return quite a bit for control flows and guards -->
- <check level="error" class="org.scalastyle.scalariform.ReturnChecker" enabled="false"></check>
+ <check level="error" class="org.scalastyle.scalariform.ReturnChecker" enabled="false"/>
 
  <!-- We use null a lot in low level code and to interface with 3rd party code -->
- <check level="error" class="org.scalastyle.scalariform.NullChecker" enabled="false"></check>
+ <check level="error" class="org.scalastyle.scalariform.NullChecker" enabled="false"/>
 
  <!-- Doesn't seem super big deal here ... -->
- <check level="error" class="org.scalastyle.scalariform.NoCloneChecker" enabled="false"></check>
+ <check level="error" class="org.scalastyle.scalariform.NoCloneChecker" enabled="false"/>
 
  <!-- Doesn't seem super big deal here ... -->
  <check level="error" class="org.scalastyle.file.FileLengthChecker" enabled="false">
-  <parameters><parameter name="maxFileLength">800></parameter></parameters>
+  <parameters>
+   <parameter name="maxFileLength">800></parameter>
+  </parameters>
  </check>
 
  <!-- Doesn't seem super big deal here ... -->
  <check level="error" class="org.scalastyle.scalariform.NumberOfTypesChecker" enabled="false">
-  <parameters><parameter name="maxTypes">30</parameter></parameters>
+  <parameters>
+   <parameter name="maxTypes">30</parameter>
+  </parameters>
  </check>
 
  <!-- Doesn't seem super big deal here ... -->
  <check level="error" class="org.scalastyle.scalariform.CyclomaticComplexityChecker" enabled="false">
-  <parameters><parameter name="maximum">10</parameter></parameters>
+  <parameters>
+   <parameter name="maximum">10</parameter>
+  </parameters>
  </check>
 
  <!-- Doesn't seem super big deal here ... -->
  <check level="error" class="org.scalastyle.scalariform.MethodLengthChecker" enabled="false">
-  <parameters><parameter name="maxLength">50</parameter></parameters>
+  <parameters>
+   <parameter name="maxLength">50</parameter>
+  </parameters>
  </check>
 
  <!-- Not exactly feasible to enforce this right now. -->
  <!-- It is also infrequent that somebody introduces a new class with a lot of methods. -->
  <check level="error" class="org.scalastyle.scalariform.NumberOfMethodsInTypeChecker" enabled="false">
-  <parameters><parameter name="maxMethods"><![CDATA[30]]></parameter></parameters>
+  <parameters>
+   <parameter name="maxMethods"><![CDATA[30]]></parameter>
+  </parameters>
  </check>
 
  <!-- Doesn't seem super big deal here, and we have a lot of magic numbers ... -->
  <check level="error" class="org.scalastyle.scalariform.MagicNumberChecker" enabled="false">
-  <parameters><parameter name="ignore">-1,0,1,2,3</parameter></parameters>
+  <parameters>
+   <parameter name="ignore">-1,0,1,2,3</parameter>
+  </parameters>
  </check>
-
 </scalastyle>
\ No newline at end of file
diff --git a/streaming/src/main/scala/org/apache/carbondata/streaming/parser/RowStreamParserImp.scala b/streaming/src/main/scala/org/apache/carbondata/streaming/parser/RowStreamParserImp.scala
index 21ce13f..56008c5 100644
--- a/streaming/src/main/scala/org/apache/carbondata/streaming/parser/RowStreamParserImp.scala
+++ b/streaming/src/main/scala/org/apache/carbondata/streaming/parser/RowStreamParserImp.scala
@@ -21,9 +21,9 @@ import java.text.SimpleDateFormat
 import java.util
 
 import org.apache.hadoop.conf.Configuration
-import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder}
-import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.Row
+import org.apache.spark.sql.catalyst.InternalRow
+import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder}
 import org.apache.spark.sql.types.StructType
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants