You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@carbondata.apache.org by ma...@apache.org on 2018/09/14 14:36:36 UTC

carbondata git commit: [HOTFIX] Fixed 2.3 CI

Repository: carbondata
Updated Branches:
  refs/heads/master ac79a343f -> 2fb7dc9a7


[HOTFIX] Fixed 2.3 CI

Fixed following issues:

1. Fixed lz4 jar issue by excluding it from the kafka dependency
2. Fixed constructor-not-found error for the reset command.
3. Removed WARN log level setting for SparkContext to reduce logs in CI.

This closes #2716


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/2fb7dc9a
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/2fb7dc9a
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/2fb7dc9a

Branch: refs/heads/master
Commit: 2fb7dc9a7831b0d092b98d6716b9e065bd859fe1
Parents: ac79a34
Author: ravipesala <ra...@gmail.com>
Authored: Fri Sep 14 17:38:30 2018 +0530
Committer: manishgupta88 <to...@gmail.com>
Committed: Fri Sep 14 20:10:58 2018 +0530

----------------------------------------------------------------------
 core/pom.xml                                    |  6 ++---
 datamap/mv/plan/pom.xml                         |  6 -----
 examples/spark2/pom.xml                         | 15 +++++------
 .../sdv/generated/SetParameterTestCase.scala    |  2 ++
 integration/spark-common-test/pom.xml           | 26 --------------------
 ...eneFineGrainDataMapWithSearchModeSuite.scala |  1 -
 .../preaggregate/TestPreAggregateLoad.scala     | 17 +++++++++----
 .../preaggregate/TestPreAggregateMisc.scala     |  1 +
 ...tSparkCarbonFileFormatWithSparkSession.scala |  2 --
 .../detailquery/SearchModeTestCase.scala        |  1 -
 integration/spark-datasource/pom.xml            |  9 -------
 integration/spark2/pom.xml                      | 21 +++++-----------
 .../commands/SetCommandTestCase.scala           |  1 +
 pom.xml                                         |  6 +++++
 14 files changed, 37 insertions(+), 77 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/core/pom.xml
----------------------------------------------------------------------
diff --git a/core/pom.xml b/core/pom.xml
index 51c603e..a7d6f4d 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -114,9 +114,9 @@
       <version>4.0.42.Final</version>
     </dependency>
     <dependency>
-      <groupId>net.jpountz.lz4</groupId>
-      <artifactId>lz4</artifactId>
-      <version>1.3.0</version>
+      <groupId>org.lz4</groupId>
+      <artifactId>lz4-java</artifactId>
+      <version>1.4.0</version>
     </dependency>
   </dependencies>
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/datamap/mv/plan/pom.xml
----------------------------------------------------------------------
diff --git a/datamap/mv/plan/pom.xml b/datamap/mv/plan/pom.xml
index 982724d..ff6976d 100644
--- a/datamap/mv/plan/pom.xml
+++ b/datamap/mv/plan/pom.xml
@@ -48,12 +48,6 @@
       <groupId>org.apache.spark</groupId>
       <artifactId>spark-core_${scala.binary.version}</artifactId>
       <version>${spark.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>net.jpountz.lz4</groupId>
-          <artifactId>lz4</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
   </dependencies>
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/examples/spark2/pom.xml
----------------------------------------------------------------------
diff --git a/examples/spark2/pom.xml b/examples/spark2/pom.xml
index bd497c5..aa7b7c5 100644
--- a/examples/spark2/pom.xml
+++ b/examples/spark2/pom.xml
@@ -56,6 +56,12 @@
     <dependency>
       <groupId>org.apache.spark</groupId>
       <artifactId>spark-sql-kafka-0-10_${scala.binary.version}</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>net.jpountz.lz4</groupId>
+          <artifactId>lz4</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.spark</groupId>
@@ -91,15 +97,6 @@
       <groupId>org.apache.carbondata</groupId>
       <artifactId>carbondata-core</artifactId>
       <version>${project.version}</version>
-      <exclusions>
-        <!-- need to Exclude net.jpountz jar from this project.
-         Spark has changed this jar to org.lz4:lz4-java
-         net.jpountz and org.lz4 has same class Name -->
-        <exclusion>
-          <groupId>net.jpountz.lz4</groupId>
-          <artifactId>lz4</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
   </dependencies>
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/SetParameterTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/SetParameterTestCase.scala b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/SetParameterTestCase.scala
index f3622dc..8c336d8 100644
--- a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/SetParameterTestCase.scala
+++ b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/SetParameterTestCase.scala
@@ -40,6 +40,7 @@ class SetParameterTestCase extends QueryTest with BeforeAndAfterAll {
     sql("drop table if exists carbon_table_single_pass")
     sql("drop table if exists carbon_table_disable_bad_record_logger")
     sql("drop table if exists carbon_table_load")
+    sqlContext.sparkSession.catalog.clearCache()
     sql("RESET")
   }
 
@@ -155,6 +156,7 @@ class SetParameterTestCase extends QueryTest with BeforeAndAfterAll {
 
   test("TC_007-test SET property IS__EMPTY_DATA_BAD_RECORD=FALSE") {
     sql("drop table if exists emptyColumnValues")
+    sqlContext.sparkSession.catalog.clearCache()
     sql("RESET")
     sql("SET carbon.options.bad.records.logger.enable=true")
     sql("SET carbon.options.is.empty.data.badrecord=false")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/integration/spark-common-test/pom.xml
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/pom.xml b/integration/spark-common-test/pom.xml
index 50e5830..1efb6ef 100644
--- a/integration/spark-common-test/pom.xml
+++ b/integration/spark-common-test/pom.xml
@@ -112,12 +112,6 @@
   <dependencies>
     <dependency>
       <groupId>org.apache.carbondata</groupId>
-      <artifactId>carbondata-spark-common</artifactId>
-      <version>${project.version}</version>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.carbondata</groupId>
       <artifactId>carbondata-spark2</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
@@ -127,27 +121,12 @@
       <artifactId>carbondata-lucene</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
-      <exclusions>
-        <!-- need to Exclude net.jpountz jar from this project.
-         Spark has changed this jar to org.lz4:lz4-java
-         net.jpountz and org.lz4 has same class Name -->
-        <exclusion>
-          <groupId>net.jpountz.lz4</groupId>
-          <artifactId>lz4</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.carbondata</groupId>
       <artifactId>carbondata-bloom</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
-      <exclusions>
-        <exclusion>
-          <groupId>net.jpountz.lz4</groupId>
-          <artifactId>lz4</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.carbondata</groupId>
@@ -156,11 +135,6 @@
       <scope>test</scope>
     </dependency>
     <dependency>
-      <groupId>org.apache.spark</groupId>
-      <artifactId>spark-hive-thriftserver_${scala.binary.version}</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
       <!-- spark catalyst added runtime dependency on spark-core,so
       while executing the testcases spark-core should be present else it
       will fail to execute -->

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapWithSearchModeSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapWithSearchModeSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapWithSearchModeSuite.scala
index 6cbe747..369bed1 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapWithSearchModeSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapWithSearchModeSuite.scala
@@ -56,7 +56,6 @@ class LuceneFineGrainDataMapWithSearchModeSuite extends QueryTest with BeforeAnd
 
   test("test lucene fine grain data map with search mode") {
 
-    sqlContext.sparkSession.sparkContext.setLogLevel("WARN")
     sql(
       s"""
          | CREATE DATAMAP dm ON TABLE datamap_test

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala
index 818dd7c..49a62a2 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala
@@ -227,6 +227,7 @@ class TestPreAggregateLoad extends SparkQueryTest with BeforeAndAfterAll with Be
     sql(
       s"""create datamap preagg_sum on table maintable using 'preaggregate' as select id, sum(age) from maintable group by id"""
         .stripMargin)
+    sqlContext.sparkSession.catalog.clearCache()
     sql("reset")
     checkAnswer(sql("select * from maintable_preagg_sum"), Row(1, 52))
   }
@@ -292,6 +293,7 @@ class TestPreAggregateLoad extends SparkQueryTest with BeforeAndAfterAll with Be
     sql(
       s"""create datamap preagg_sum on table maintable using 'preaggregate' as select id, sum(age) from maintable group by id,name"""
         .stripMargin)
+    sqlContext.sparkSession.catalog.clearCache()
     sql("reset")
     checkAnswer(sql("select * from maintable_preagg_sum"), Row(1, 52, "xyz"))
   }
@@ -630,7 +632,7 @@ test("check load and select for avg double datatype") {
        """.stripMargin)
     checkPreAggTable(sql("SELECT id, SUM(age) FROM segmaintable GROUP BY id"),
       false, "segmaintable_preagg_sum")
-
+    sqlContext.sparkSession.catalog.clearCache()
     sql("reset")
     checkAnswer(sql("SELECT * FROM segmaintable_preagg_sum"), Seq(Row(1, 26)))
     checkPreAggTable(sql("SELECT id, SUM(age) FROM segmaintable GROUP BY id"),
@@ -663,7 +665,7 @@ test("check load and select for avg double datatype") {
          | GROUP BY id
        """.stripMargin)
     sql(s"INSERT INTO segmaintable VALUES(1, 'xyz', 'bengaluru', 26)")
-
+    sqlContext.sparkSession.catalog.clearCache()
     sql("reset")
     checkAnswer(sql("SELECT * FROM segmaintable_preagg_sum"), Seq(Row(1, 26), Row(1, 26)))
     checkPreAggTable(sql("SELECT id, SUM(age) FROM segmaintable GROUP BY id"),
@@ -709,7 +711,7 @@ test("check load and select for avg double datatype") {
       Seq(Row(1, 26)))
     checkPreAggTable(sql("SELECT id, SUM(age) FROM segmaintable GROUP BY id"),
       false, "segmaintable_preagg_sum")
-
+    sqlContext.sparkSession.catalog.clearCache()
     // reset
     sql("reset")
     checkAnswer(sql(s"SELECT id, SUM(age) FROM segmaintable GROUP BY id"),
@@ -721,6 +723,7 @@ test("check load and select for avg double datatype") {
   test("test whether all segments are loaded into pre-aggregate table: auto merge and input segment") {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
+    sqlContext.sparkSession.catalog.clearCache()
     sql("reset")
     sql("DROP TABLE IF EXISTS segmaintable")
     sql(
@@ -762,7 +765,7 @@ test("check load and select for avg double datatype") {
        """.stripMargin)
 
     sql(s"INSERT INTO segmaintable VALUES(1, 'xyz', 'bengaluru', 26)")
-
+    sqlContext.sparkSession.catalog.clearCache()
     // reset
     sql("reset")
     checkAnswer(sql(s"SELECT id, SUM(age) FROM segmaintable GROUP BY id"),
@@ -777,6 +780,7 @@ test("check load and select for avg double datatype") {
   ignore("test whether all segments are loaded into pre-aggregate table: auto merge and no input segment") {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
+    sqlContext.sparkSession.catalog.clearCache()
     sql("reset")
     sql("DROP TABLE IF EXISTS segmaintable")
     sql(
@@ -827,6 +831,7 @@ test("check load and select for avg double datatype") {
   test("test whether all segments are loaded into pre-aggregate table: create after auto merge and no input segment") {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
+    sqlContext.sparkSession.catalog.clearCache()
     sql("reset")
     sql("DROP TABLE IF EXISTS segmaintable")
     sql(
@@ -871,6 +876,7 @@ test("check load and select for avg double datatype") {
   ignore("test whether all segments are loaded into pre-aggregate table: mixed, load, auto merge and input segment") {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
+    sqlContext.sparkSession.catalog.clearCache()
     sql("reset")
     sql("DROP TABLE IF EXISTS main_table")
     sql(
@@ -894,7 +900,7 @@ test("check load and select for avg double datatype") {
       false, "main_table_preagg_sum")
     checkAnswer(sql(s"SELECT id, SUM(age) FROM main_table GROUP BY id"),
       Seq(Row(1, 26)))
-
+    sqlContext.sparkSession.catalog.clearCache()
     sql("reset")
     checkPreAggTable(sql("SELECT id, SUM(age) FROM main_table GROUP BY id"),
       true, "main_table_preagg_sum")
@@ -916,6 +922,7 @@ test("check load and select for avg double datatype") {
   ignore("test whether all segments are loaded into pre-aggregate table: auto merge and check pre-aggregate segment") {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
+    sqlContext.sparkSession.catalog.clearCache()
     sql("reset")
     sql("DROP TABLE IF EXISTS main_table")
     sql(

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateMisc.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateMisc.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateMisc.scala
index 8241288..55c06c0 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateMisc.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateMisc.scala
@@ -37,6 +37,7 @@ class TestPreAggregateMisc extends QueryTest with BeforeAndAfterAll {
     checkAnswer(
       sql("select sum(age) from mainTable"),
       Seq(Row(183.0)))
+    sqlContext.sparkSession.catalog.clearCache()
     sql("RESET")
     sql("drop datamap agg1 on table mainTable")
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestSparkCarbonFileFormatWithSparkSession.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestSparkCarbonFileFormatWithSparkSession.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestSparkCarbonFileFormatWithSparkSession.scala
index a8bdb31..a4e58e7 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestSparkCarbonFileFormatWithSparkSession.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestSparkCarbonFileFormatWithSparkSession.scala
@@ -124,8 +124,6 @@ object TestSparkCarbonFileFormatWithSparkSession {
     CarbonProperties.getInstance()
       .addProperty("carbon.storelocation", storeLocation)
 
-    spark.sparkContext.setLogLevel("WARN")
-
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd HH:mm:ss")
       .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy/MM/dd")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/SearchModeTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/SearchModeTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/SearchModeTestCase.scala
index dbf87a3..19c0d31 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/SearchModeTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/detailquery/SearchModeTestCase.scala
@@ -33,7 +33,6 @@ class SearchModeTestCase extends QueryTest with BeforeAndAfterAll {
 
   val numRows = 500 * 1000
   override def beforeAll = {
-    sqlContext.sparkContext.setLogLevel("INFO")
     sqlContext.sparkSession.asInstanceOf[CarbonSession].startSearchMode()
     sql("DROP TABLE IF EXISTS main")
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/integration/spark-datasource/pom.xml
----------------------------------------------------------------------
diff --git a/integration/spark-datasource/pom.xml b/integration/spark-datasource/pom.xml
index 3d017c8..9f0d3ff 100644
--- a/integration/spark-datasource/pom.xml
+++ b/integration/spark-datasource/pom.xml
@@ -39,15 +39,6 @@
       <groupId>org.apache.carbondata</groupId>
       <artifactId>carbondata-hadoop</artifactId>
       <version>${project.version}</version>
-      <exclusions>
-        <!-- need to Exclude net.jpountz jar from this project.
-         Spark has changed this jar to org.lz4:lz4-java
-         net.jpountz and org.lz4 has same class Name -->
-        <exclusion>
-          <groupId>net.jpountz.lz4</groupId>
-          <artifactId>lz4</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.carbondata</groupId>

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/integration/spark2/pom.xml
----------------------------------------------------------------------
diff --git a/integration/spark2/pom.xml b/integration/spark2/pom.xml
index 8c8fd28..1eba780 100644
--- a/integration/spark2/pom.xml
+++ b/integration/spark2/pom.xml
@@ -54,26 +54,11 @@
       <groupId>org.apache.carbondata</groupId>
       <artifactId>carbondata-lucene</artifactId>
       <version>${project.version}</version>
-      <exclusions>
-        <!-- need to Exclude net.jpountz jar from this project.
-         Spark has changed this jar to org.lz4:lz4-java
-         net.jpountz and org.lz4 has same class Name -->
-        <exclusion>
-          <groupId>net.jpountz.lz4</groupId>
-          <artifactId>lz4</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.carbondata</groupId>
       <artifactId>carbondata-bloom</artifactId>
       <version>${project.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>net.jpountz.lz4</groupId>
-          <artifactId>lz4</artifactId>
-        </exclusion>
-      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.spark</groupId>
@@ -113,6 +98,12 @@
       <artifactId>spark-sql-kafka-0-10_${scala.binary.version}</artifactId>
       <version>${spark.version}</version>
       <scope>${spark.deps.scope}</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>net.jpountz.lz4</groupId>
+          <artifactId>lz4</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>junit</groupId>

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/integration/spark2/src/test/scala/org/apache/spark/carbondata/commands/SetCommandTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/commands/SetCommandTestCase.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/commands/SetCommandTestCase.scala
index 303a478..1610d8d 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/commands/SetCommandTestCase.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/commands/SetCommandTestCase.scala
@@ -129,6 +129,7 @@ class SetCommandTestCase extends Spark2QueryTest with BeforeAndAfterAll{
     }
   }
   override def afterAll {
+    sqlContext.sparkSession.catalog.clearCache()
     sql("reset")
     sql("set carbon=true")
     checkAnswer(sql("set carbon"),

http://git-wip-us.apache.org/repos/asf/carbondata/blob/2fb7dc9a/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index 12a5881..eff438b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -222,6 +222,12 @@
         <artifactId>spark-sql-kafka-0-10_${scala.binary.version}</artifactId>
         <version>${spark.version}</version>
         <scope>${spark.deps.scope}</scope>
+        <exclusions>
+          <exclusion>
+            <groupId>net.jpountz.lz4</groupId>
+            <artifactId>lz4</artifactId>
+          </exclusion>
+        </exclusions>
       </dependency>
       <dependency>
         <groupId>org.scala-lang</groupId>