Posted to commits@carbondata.apache.org by ra...@apache.org on 2017/10/31 07:00:00 UTC

[02/22] carbondata git commit: [CARBONDATA-1597] Remove spark1 integration

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0bf597d9/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/CompactionSystemLockFeatureTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/CompactionSystemLockFeatureTest.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/CompactionSystemLockFeatureTest.scala
deleted file mode 100644
index ab75d6e..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/CompactionSystemLockFeatureTest.scala
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.spark.testsuite.datacompaction
-
-import scala.collection.JavaConverters._
-
-import org.apache.spark.sql.test.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.util.path.{CarbonStorePath, CarbonTablePath}
-import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTableIdentifier}
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.datastore.impl.FileFactory
-import org.apache.carbondata.core.statusmanager.SegmentStatusManager
-import org.apache.carbondata.core.util.CarbonProperties
-
-/**
-  * FT for the compaction system lock feature: a major compaction triggered on one table
-  * should also compact another table flagged via the compaction-required marker file.
-  */
-class CompactionSystemLockFeatureTest extends QueryTest with BeforeAndAfterAll {
-
-  override def beforeAll {
-    CarbonProperties.getInstance().addProperty("carbon.compaction.level.threshold", "2,2")
-    sql("drop table if exists  table1")
-    sql("drop table if exists  table2")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.ENABLE_CONCURRENT_COMPACTION, "false")
-    sql(
-      "CREATE TABLE IF NOT EXISTS table1 (country String, ID Int, date Timestamp, name " +
-        "String, " +
-        "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
-        ".format'"
-    )
-    sql(
-      "CREATE TABLE IF NOT EXISTS table2 (country String, ID Int, date Timestamp, name " +
-        "String, " +
-        "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
-        ".format'"
-    )
-
-
-    val csvFilePath1 = s"$resourcesPath/compaction/compaction1.csv"
-
-    val csvFilePath2 = s"$resourcesPath/compaction/compaction2.csv"
-    val csvFilePath3 = s"$resourcesPath/compaction/compaction3.csv"
-
-    // load table1
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE table1 OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE table1  OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-
-    // load table2
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE table2 OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE table2  OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-
-    // create the compaction-required flag file for table 2 so that it will also be compacted.
-    val absoluteTableIdentifier = new
-        AbsoluteTableIdentifier(
-          CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
-          new CarbonTableIdentifier(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "table2", "rrr")
-        )
-    val carbonTablePath: CarbonTablePath = CarbonStorePath
-      .getCarbonTablePath(absoluteTableIdentifier.getStorePath,
-        absoluteTableIdentifier.getCarbonTableIdentifier
-      )
-
-    val file = carbonTablePath.getMetadataDirectoryPath + CarbonCommonConstants
-      .FILE_SEPARATOR + CarbonCommonConstants.majorCompactionRequiredFile
-
-    FileFactory.createNewFile(file, FileFactory.getFileType(file))
-
-    // compaction will happen here.
-    sql("alter table table1 compact 'major'"
-    )
-
-  }
-
-  /**
-    * Test whether major compaction is done for both tables.
-    */
-  test("check for compaction in both tables") {
-    // delete merged segments
-    sql("clean files for table table1")
-    sql("clean files for table table2")
-
-    // check for table 1.
-    val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(new
-        AbsoluteTableIdentifier(
-          CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
-          new CarbonTableIdentifier("default", "table1", "rrr")
-        )
-    )
-    // merged segment should not be there
-    val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
-    assert(segments.contains("0.1"))
-    assert(!segments.contains("0"))
-    assert(!segments.contains("1"))
-    // check for table 2.
-    val segmentStatusManager2: SegmentStatusManager = new SegmentStatusManager(new
-        AbsoluteTableIdentifier(
-          CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
-          new CarbonTableIdentifier("default", "table2", "rrr1")
-        )
-    )
-    // merged segment should not be there
-    val segments2 = segmentStatusManager2.getValidAndInvalidSegments.getValidSegments.asScala.toList
-    assert(segments2.contains("0.1"))
-    assert(!segments2.contains("0"))
-    assert(!segments2.contains("1"))
-
-  }
-
-  override def afterAll {
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-    sql("drop table if exists  table1")
-    sql("drop table if exists  table2")
-  }
-
-}
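
The mechanism this removed test exercised is the system-level compaction trigger: a table can be
flagged for the next major compaction by creating the compaction-required marker file in its
metadata directory. A minimal sketch of that trigger, using only the APIs that appear in the
deleted file (the store path, database name, table name and table id here are placeholder values):

    import org.apache.carbondata.core.constants.CarbonCommonConstants
    import org.apache.carbondata.core.datastore.impl.FileFactory
    import org.apache.carbondata.core.metadata.CarbonTableIdentifier
    import org.apache.carbondata.core.util.path.CarbonStorePath

    // Flag a table so that a system-level major compaction picks it up:
    // create the "compaction required" marker file in its metadata directory.
    def requestMajorCompaction(storePath: String, dbName: String, tableName: String): Unit = {
      val tableId = new CarbonTableIdentifier(dbName, tableName, "placeholder-table-id")
      val tablePath = CarbonStorePath.getCarbonTablePath(storePath, tableId)
      val flagFile = tablePath.getMetadataDirectoryPath +
        CarbonCommonConstants.FILE_SEPARATOR +
        CarbonCommonConstants.majorCompactionRequiredFile
      FileFactory.createNewFile(flagFile, FileFactory.getFileType(flagFile))
    }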

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0bf597d9/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionMinorThresholdTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionMinorThresholdTest.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionMinorThresholdTest.scala
deleted file mode 100644
index 827b4f6..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionMinorThresholdTest.scala
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.spark.testsuite.datacompaction
-
-import scala.collection.JavaConverters._
-
-import org.apache.spark.sql.test.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTableIdentifier}
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.statusmanager.SegmentStatusManager
-import org.apache.carbondata.core.util.CarbonProperties
-
-/**
- * FT for data compaction Minor threshold verification.
- */
-class DataCompactionMinorThresholdTest extends QueryTest with BeforeAndAfterAll {
-  val carbonTableIdentifier: CarbonTableIdentifier =
-    new CarbonTableIdentifier("default", "minorthreshold".toLowerCase(), "1")
-
-  val identifier = new AbsoluteTableIdentifier(storeLocation, carbonTableIdentifier)
-
-  override def beforeAll {
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD, "2,2")
-    sql("drop table if exists  minorthreshold")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
-    sql(
-      "CREATE TABLE IF NOT EXISTS minorthreshold (country String, ID Int, date " +
-      "Timestamp, name " +
-      "String, " +
-      "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
-      ".format'"
-    )
-
-    val csvFilePath1 = s"$resourcesPath/compaction/compaction1.csv"
-    val csvFilePath2 = s"$resourcesPath/compaction/compaction2.csv"
-    val csvFilePath3 = s"$resourcesPath/compaction/compaction3.csv"
-
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE minorthreshold " +
-        "OPTIONS" +
-        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE minorthreshold  " +
-        "OPTIONS" +
-        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath3 + "' INTO TABLE minorthreshold  " +
-        "OPTIONS" +
-        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath3 + "' INTO TABLE minorthreshold  " +
-        "OPTIONS" +
-        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    // compaction should happen here.
-    sql("alter table minorthreshold compact 'minor'")
-  }
-
-  /**
-   * Compaction should be completed correctly for minor compaction.
-   */
-  test("check if compaction is completed correctly for minor.") {
-
-    sql("clean files for table minorthreshold")
-
-    val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(identifier)
-    val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
-
-    assert(segments.contains("0.2"))
-    assert(!segments.contains("0.1"))
-    assert(!segments.contains("0"))
-    assert(!segments.contains("1"))
-    assert(!segments.contains("2"))
-    assert(!segments.contains("3"))
-  }
-
-  override def afterAll {
-    sql("drop table if exists  minorthreshold")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD,
-        CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD)
-  }
-
-}
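
For context on the assertions above, assuming the standard CarbonData segment-naming scheme for
multi-level minor compaction: with COMPACTION_SEGMENT_LEVEL_THRESHOLD set to "2,2", every two
loads merge into a level-1 segment (0 and 1 become 0.1, 2 and 3 become 2.1), and every two
level-1 segments merge into a level-2 segment (0.1 and 2.1 become 0.2). That is why, after
"clean files", the test expects only segment 0.2 to remain valid.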

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0bf597d9/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala
deleted file mode 100644
index 053ad44..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionNoDictionaryTest.scala
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.spark.testsuite.datacompaction
-
-import scala.collection.JavaConverters._
-
-import org.apache.spark.sql.Row
-import org.apache.spark.sql.test.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTableIdentifier}
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.statusmanager.SegmentStatusManager
-import org.apache.carbondata.core.util.CarbonProperties
-
-/**
-  * FT for data compaction scenario.
-  */
-class DataCompactionNoDictionaryTest extends QueryTest with BeforeAndAfterAll {
-
-  // return segment details
-  def getSegments(databaseName : String, tableName : String, tableId : String): List[String] = {
-    val identifier = new AbsoluteTableIdentifier(
-          CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
-          new CarbonTableIdentifier(databaseName, tableName.toLowerCase , tableId))
-
-    val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(identifier)
-    segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
-  }
-
-  var csvFilePath1 = s"$resourcesPath/compaction/compaction1.csv"
-  var csvFilePath2 = s"$resourcesPath/compaction/compaction2.csv"
-  var csvFilePath3 = s"$resourcesPath/compaction/compaction3.csv"
-
-  override def beforeAll {
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
-    sql("DROP TABLE IF EXISTS nodictionaryCompaction")
-    sql(
-      "CREATE TABLE nodictionaryCompaction (country String, ID Int, date Timestamp, name " +
-        "String, " +
-        "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
-        ".format' TBLPROPERTIES('DICTIONARY_EXCLUDE'='country')"
-    )
-
-
-
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE nodictionaryCompaction " +
-        "OPTIONS('DELIMITER' = ',')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE nodictionaryCompaction " +
-        "OPTIONS('DELIMITER' = ',')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath3 + "' INTO TABLE nodictionaryCompaction " +
-        "OPTIONS('DELIMITER' = ',')"
-    )
-    // compaction will happen here.
-    sql("alter table nodictionaryCompaction compact 'major'"
-    )
-
-    // wait for compaction to finish.
-    Thread.sleep(1000)
-  }
-
-  // check for up to 20 seconds (10 retries x 2 s) whether the compacted segment has been created.
-  // if it is not created within that time, the test case will fail.
-
-  test("check if compaction is completed or not.") {
-    var status = true
-    var noOfRetries = 0
-    while (status && noOfRetries < 10) {
-
-      val segments: List[String] = getSegments(
-        CarbonCommonConstants.DATABASE_DEFAULT_NAME, "nodictionaryCompaction", "uni21")
-
-      if (!segments.contains("0.1")) {
-        // wait for 2 seconds for compaction to complete.
-        Thread.sleep(2000)
-        noOfRetries += 1
-      }
-      else {
-        status = false
-      }
-    }
-  }
-
-  test("select country from nodictionaryCompaction") {
-    // check answers after compaction.
-    checkAnswer(
-      sql("select country from nodictionaryCompaction"),
-      Seq(Row("america"),
-        Row("canada"),
-        Row("chile"),
-        Row("china"),
-        Row("england"),
-        Row("burma"),
-        Row("butan"),
-        Row("mexico"),
-        Row("newzealand"),
-        Row("westindies"),
-        Row("china"),
-        Row("india"),
-        Row("iran"),
-        Row("iraq"),
-        Row("ireland")
-      )
-    )
-  }
-
-  test("delete merged folder and execute query") {
-    // delete merged segments
-   sql("clean files for table nodictionaryCompaction")
-
-    // merged segment should not be there
-    val segments =
-      getSegments(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "nodictionaryCompaction", "uni21")
-    assert(!segments.contains("0"))
-    assert(!segments.contains("1"))
-    assert(!segments.contains("2"))
-    assert(segments.contains("0.1"))
-
-    // now check the answers it should be same.
-    checkAnswer(
-      sql("select country from nodictionaryCompaction"),
-      Seq(Row("america"),
-        Row("canada"),
-        Row("chile"),
-        Row("china"),
-        Row("england"),
-        Row("burma"),
-        Row("butan"),
-        Row("mexico"),
-        Row("newzealand"),
-        Row("westindies"),
-        Row("china"),
-        Row("india"),
-        Row("iran"),
-        Row("iraq"),
-        Row("ireland")
-      )
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE nodictionaryCompaction " +
-        "OPTIONS('DELIMITER' = ',')"
-    )
-    sql("delete from table nodictionaryCompaction where segment.id in (0.1,3)")
-    checkAnswer(
-      sql("select country from nodictionaryCompaction"),
-      Seq()
-    )
-  }
-
-  override def afterAll {
-    sql("DROP TABLE IF EXISTS nodictionaryCompaction")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
-  }
-
-}
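
The completion check in the removed test above is a bounded poll for the merged segment id. The
same pattern as a standalone sketch (names are illustrative; getSegments stands in for the helper
defined in the deleted file):

    // Poll for the merged segment "0.1", retrying up to 10 times with a
    // 2-second pause between attempts (at most ~20 seconds in total).
    def waitForCompaction(getSegments: () => List[String],
                          mergedSegmentId: String = "0.1",
                          maxRetries: Int = 10): Boolean = {
      var retries = 0
      while (retries < maxRetries) {
        if (getSegments().contains(mergedSegmentId)) {
          return true
        }
        Thread.sleep(2000)
        retries += 1
      }
      false
    }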

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0bf597d9/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
deleted file mode 100644
index 8a0279f..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.spark.testsuite.datacompaction
-
-import scala.collection.JavaConverters._
-
-import org.apache.spark.sql.Row
-import org.apache.spark.sql.test.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTableIdentifier}
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.statusmanager.SegmentStatusManager
-import org.apache.carbondata.core.util.CarbonProperties
-
-/**
-  * FT for data compaction scenario.
-  */
-class DataCompactionTest extends QueryTest with BeforeAndAfterAll {
-
-  override def beforeAll {
-    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
-    sql("drop table if exists  normalcompaction")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
-    sql(
-      "CREATE TABLE IF NOT EXISTS normalcompaction (country String, ID Int, date Timestamp, name " +
-        "String, " +
-        "phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
-        ".format'"
-    )
-
-    val csvFilePath1 = s"$resourcesPath/compaction/compaction1.csv"
-    val csvFilePath2 = s"$resourcesPath/compaction/compaction2.csv"
-    val csvFilePath3 = s"$resourcesPath/compaction/compaction3.csv"
-
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE normalcompaction OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE normalcompaction  OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    // compaction will happen here.
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath3 + "' INTO TABLE normalcompaction  OPTIONS" +
-      "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    // compaction will happen here.
-    sql("alter table normalcompaction compact 'major'"
-    )
-
-  }
-
-  test("check if compaction is completed or not.") {
-    var status = true
-    var noOfRetries = 0
-    while (status && noOfRetries < 10) {
-
-      val identifier = new AbsoluteTableIdentifier(
-            CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
-            new CarbonTableIdentifier(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "normalcompaction", "1")
-          )
-
-      val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(identifier)
-
-      val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
-
-      if (!segments.contains("0.1")) {
-        // wait for 2 seconds for compaction to complete.
-        Thread.sleep(2000)
-        noOfRetries += 1
-      }
-      else {
-        status = false
-      }
-    }
-  }
-
-
-  test("select country from normalcompaction") {
-    // check answers after compaction.
-    checkAnswer(
-      sql("select country from normalcompaction"),
-      Seq(Row("america"),
-        Row("canada"),
-        Row("chile"),
-        Row("china"),
-        Row("england"),
-        Row("burma"),
-        Row("butan"),
-        Row("mexico"),
-        Row("newzealand"),
-        Row("westindies"),
-        Row("china"),
-        Row("india"),
-        Row("iran"),
-        Row("iraq"),
-        Row("ireland")
-      )
-    )
-  }
-
-  test("delete merged folder and execute query") {
-    // delete merged segments
-    sql("clean files for table normalcompaction")
-
-    val identifier = new AbsoluteTableIdentifier(
-          CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
-          new CarbonTableIdentifier(
-            CarbonCommonConstants.DATABASE_DEFAULT_NAME, "normalcompaction", "uniqueid")
-        )
-
-    val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(identifier)
-
-    // merged segment should not be there
-    val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
-    assert(!segments.contains("0"))
-    assert(!segments.contains("1"))
-    assert(!segments.contains("2"))
-    assert(segments.contains("0.1"))
-
-    // now check the answers it should be same.
-    checkAnswer(
-      sql("select country from normalcompaction"),
-      Seq(Row("america"),
-        Row("canada"),
-        Row("chile"),
-        Row("china"),
-        Row("england"),
-        Row("burma"),
-        Row("butan"),
-        Row("mexico"),
-        Row("newzealand"),
-        Row("westindies"),
-        Row("china"),
-        Row("india"),
-        Row("iran"),
-        Row("iraq"),
-        Row("ireland")
-      )
-    )
-  }
-
-
-  test("check if compaction with Updates") {
-
-    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
-    sql("drop table if exists  cardinalityUpdatetest")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
-
-    sql(
-      "CREATE TABLE IF NOT EXISTS cardinalityUpdateTest (FirstName String, LastName String, date Timestamp," +
-      "phonetype String, serialname String, ID int, salary Int) STORED BY 'org.apache.carbondata" +
-      ".format'"
-    )
-
-    val csvFilePath1 = s"$resourcesPath/compaction/compactionIUD1.csv"
-    val csvFilePath2 = s"$resourcesPath/compaction/compactionIUD2.csv"
-    val csvFilePath3 = s"$resourcesPath/compaction/compactionIUD3.csv"
-    val csvFilePath4 = s"$resourcesPath/compaction/compactionIUD4.csv"
-
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE cardinalityUpdateTest OPTIONS" +
-        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE cardinalityUpdateTest OPTIONS" +
-        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath3 + "' INTO TABLE cardinalityUpdateTest OPTIONS" +
-        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-    sql("LOAD DATA LOCAL INPATH '" + csvFilePath4 + "' INTO TABLE cardinalityUpdateTest OPTIONS" +
-        "('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
-    )
-
-    // update the first segment
-    sql("update cardinalityUpdateTest set (FirstName) = ('FirstTwentyOne') where ID = 2").show()
-
-    // trigger a major compaction via alter table.
-    sql("alter table cardinalityUpdateTest compact 'major'")
-
-    // Verify the new updated value in compacted segment.
-    // now check the answers it should be same.
-    checkAnswer(
-      sql("select FirstName from cardinalityUpdateTest where FirstName = ('FirstTwentyOne')"),
-      Seq(Row("FirstTwentyOne")
-      )
-    )
-
-    checkAnswer(
-      sql("select count(*) from cardinalityUpdateTest where FirstName = ('FirstTwentyOne')"),
-      Seq(Row(1)
-      )
-    )
-
-    checkAnswer(
-      sql("select count(*) from cardinalityUpdateTest"),
-      Seq(Row(20)
-      )
-    )
-  }
-
-  override def afterAll {
-    sql("drop table if exists normalcompaction")
-    sql("drop table if exists cardinalityUpdatetest")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
-  }
-
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0bf597d9/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/GrtLtFilterProcessorTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/GrtLtFilterProcessorTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/GrtLtFilterProcessorTestCase.scala
deleted file mode 100644
index 250ec06..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/GrtLtFilterProcessorTestCase.scala
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.spark.testsuite.filterexpr
-
-import org.apache.spark.sql.Row
-import org.apache.spark.sql.test.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-
-/**
-  * Test Class for filter expression query on String datatypes
-  *
-  */
-class GrtLtFilterProcessorTestCase extends QueryTest with BeforeAndAfterAll {
-
-  override def beforeAll {
-    sql("drop table if exists a12_allnull")
-
-    sql(
-      "create table a12_allnull(empid String,ename String,sal double,deptno int,mgr string,gender" +
-        " string," +
-        "dob timestamp,comm decimal(4,2),desc string) stored by 'org.apache.carbondata.format'"
-    )
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy-MM-dd HH:mm:ss")
-    val testData = s"$resourcesPath/filter/emp2allnull.csv"
-
-    sql(
-      s"""LOAD DATA LOCAL INPATH '$testData' into table a12_allnull OPTIONS('DELIMITER'=',',
-         'QUOTECHAR'='"','FILEHEADER'='empid,ename,sal,deptno,mgr,gender,dob,comm,desc')"""
-        .stripMargin
-    )
-  }
-
-  test("In condition With improper format query regarding Null filter") {
-    checkAnswer(
-      sql("select empid from a12_allnull " + "where empid not in ('china',NULL)"),
-      Seq()
-    )
-  }
-
-  override def afterAll {
-    sql("drop table if exists a12_allnull")
-    CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
-  }
-}
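
The empty result expected by the removed filter test follows from standard SQL three-valued
logic: "empid NOT IN ('china', NULL)" is equivalent to "empid <> 'china' AND empid <> NULL", and
any comparison with NULL evaluates to NULL (unknown) rather than true, so no row can satisfy the
predicate regardless of the data.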

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0bf597d9/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/hadooprelation/HadoopFSRelationTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/hadooprelation/HadoopFSRelationTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/hadooprelation/HadoopFSRelationTestCase.scala
deleted file mode 100644
index 1649875..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/hadooprelation/HadoopFSRelationTestCase.scala
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.spark.testsuite.hadooprelation
-
-import org.apache.spark.sql.DataFrame
-import org.apache.spark.sql.test.util.QueryTest
-import org.scalatest.BeforeAndAfterAll
-
-/**
- * Test Class for hadoop fs relation
- *
- */
-class HadoopFSRelationTestCase extends QueryTest with BeforeAndAfterAll {
-
-  override def beforeAll {
-    sql("drop table if exists hadoopfsrelation")
-    sql("drop table if exists hadoopfsrelation_hive")
-    sql(
-      "CREATE TABLE hadoopfsrelation (empno int, empname String, designation " +
-      "String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno " +
-      "int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate " +
-      "Timestamp,attendance int,utilization int,salary int)" +
-      "STORED BY 'org.apache.carbondata.format'")
-    sql(
-      s"LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE hadoopfsrelation " +
-      "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
-    
-    sql("CREATE TABLE hadoopfsrelation_hive (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int)row format delimited fields terminated by ','")
-    
-    sql(
-      s"LOAD DATA local inpath '$resourcesPath/datawithoutheader.csv' INTO TABLE hadoopfsrelation_hive ")
-  }
-
-  test("hadoopfsrelation select all test") {
-    val rdd = sqlContext.read.format("org.apache.spark.sql.CarbonSource")
-      .option("tableName", "hadoopfsrelation").load()
-    assert(rdd.collect().length > 0)
-  }
-
-  test("hadoopfsrelation filters test") {
-    val rdd: DataFrame = sqlContext.read.format("org.apache.spark.sql.CarbonSource")
-      .option("tableName", "hadoopfsrelation").load()
-      .select("empno", "empname", "utilization").where("empname in ('arvind','ayushi')")
-    checkAnswer(
-      rdd,
-      sql("select empno,empname,utilization from hadoopfsrelation_hive where empname in ('arvind','ayushi')"))
-  }
-
-  override def afterAll {
-    sql("drop table hadoopfsrelation")
-    sql("drop table hadoopfsrelation_hive")
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0bf597d9/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
deleted file mode 100644
index b5b8261..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.spark.util
-
-import org.apache.spark.sql.test.util.QueryTest
-import org.apache.spark.sql.{CarbonEnv, CarbonRelation}
-import org.scalatest.BeforeAndAfterAll
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-import org.apache.carbondata.processing.util.TableOptionConstant
-import org.apache.carbondata.processing.loading.model.{CarbonDataLoadSchema, CarbonLoadModel}
-
-/**
-  * Test Case for org.apache.carbondata.integration.spark.util.GlobalDictionaryUtil
-  */
-class AllDictionaryTestCase extends QueryTest with BeforeAndAfterAll {
-
-  var pwd: String = _
-  var sampleRelation: CarbonRelation = _
-  var complexRelation: CarbonRelation = _
-  var sampleAllDictionaryFile: String = _
-  var complexAllDictionaryFile: String = _
-
-  def buildCarbonLoadModel(relation: CarbonRelation,
-    filePath: String,
-    header: String,
-    allDictFilePath: String): CarbonLoadModel = {
-    val carbonLoadModel = new CarbonLoadModel
-    carbonLoadModel.setTableName(relation.tableMeta.carbonTableIdentifier.getTableName)
-    carbonLoadModel.setDatabaseName(relation.tableMeta.carbonTableIdentifier.getDatabaseName)
-    val table = relation.tableMeta.carbonTable
-    val carbonSchema = new CarbonDataLoadSchema(table)
-    carbonLoadModel.setDatabaseName(table.getDatabaseName)
-    carbonLoadModel.setTableName(table.getFactTableName)
-    carbonLoadModel.setCarbonDataLoadSchema(carbonSchema)
-    carbonLoadModel.setFactFilePath(filePath)
-    carbonLoadModel.setCsvHeader(header)
-    carbonLoadModel.setCsvDelimiter(",")
-    carbonLoadModel.setComplexDelimiterLevel1("\\$")
-    carbonLoadModel.setComplexDelimiterLevel2("\\:")
-    carbonLoadModel.setAllDictPath(allDictFilePath)
-    carbonLoadModel.setSerializationNullFormat(
-          TableOptionConstant.SERIALIZATION_NULL_FORMAT.getName + ",\\N")
-    carbonLoadModel.setDefaultTimestampFormat(CarbonProperties.getInstance().getProperty(
-      CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-      CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT))
-    carbonLoadModel.setCsvHeaderColumns(CommonUtil.getCsvHeaderColumns(carbonLoadModel))
-    carbonLoadModel
-  }
-
-  override def beforeAll {
-    sql("drop table if exists sample")
-    sql("drop table if exists complextypes")
-    buildTestData
-    // second time comment this line
-    buildTable
-    buildRelation
-  }
-
-  def buildTestData() = {
-    sampleAllDictionaryFile = s"${resourcesPath}/alldictionary/sample/20160423/1400_1405/*.dictionary"
-    complexAllDictionaryFile = s"${resourcesPath}/alldictionary/complex/20160423/1400_1405/*.dictionary"
-  }
-
-  def buildTable() = {
-    try {
-      sql(
-        "CREATE TABLE IF NOT EXISTS sample (id STRING, name STRING, city STRING, " +
-          "age INT) STORED BY 'org.apache.carbondata.format' " +
-        "tblproperties('dictionary_include'='city')"
-      )
-    } catch {
-      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-    }
-    try {
-      sql(
-        "create table complextypes (deviceInformationId string, channelsId string, " +
-          "ROMSize string, purchasedate string, mobile struct<imei: string, imsi: string>, MAC " +
-          "array<string>, locationinfo array<struct<ActiveAreaId: INT, ActiveCountry: string, " +
-          "ActiveProvince: string, Activecity: string, ActiveDistrict: string, ActiveStreet: " +
-          "string>>, proddate struct<productionDate: string,activeDeactivedate: array<string>>, " +
-          "gamePointId INT,contractNumber INT) STORED BY 'org.apache.carbondata.format'" +
-          "TBLPROPERTIES('DICTIONARY_EXCLUDE'='ROMSize', 'dictionary_include'='channelsId')"
-      )
-    } catch {
-      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-    }
-  }
-
-  def buildRelation() = {
-    val catalog = CarbonEnv.get.carbonMetastore
-    sampleRelation = catalog.lookupRelation1(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
-      "sample")(sqlContext).asInstanceOf[CarbonRelation]
-    complexRelation = catalog.lookupRelation1(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
-      "complextypes")(sqlContext).asInstanceOf[CarbonRelation]
-  }
-
-  test("Support generate global dictionary from all dictionary files") {
-    val header = "id,name,city,age"
-    val carbonLoadModel = buildCarbonLoadModel(sampleRelation, null, header, sampleAllDictionaryFile)
-    GlobalDictionaryUtil
-      .generateGlobalDictionary(sqlContext,
-        carbonLoadModel,
-        sampleRelation.tableMeta.storePath)
-
-    DictionaryTestCaseUtil.
-      checkDictionary(sampleRelation, "city", "shenzhen")
-  }
-
-  test("Support generate global dictionary from all dictionary files for complex type") {
-    val header = "deviceInformationId,channelsId,ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber"
-    val carbonLoadModel = buildCarbonLoadModel(complexRelation, null, header, complexAllDictionaryFile)
-    GlobalDictionaryUtil
-      .generateGlobalDictionary(sqlContext,
-      carbonLoadModel,
-      complexRelation.tableMeta.storePath)
-
-    DictionaryTestCaseUtil.
-      checkDictionary(complexRelation, "channelsId", "1650")
-  }
-  
-  override def afterAll {
-    sql("drop table sample")
-    sql("drop table complextypes")
-  }
-}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0bf597d9/integration/spark/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala
deleted file mode 100644
index fd2ab2a..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/DictionaryTestCaseUtil.scala
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.carbondata.spark.util
-
-import org.apache.spark.sql.CarbonRelation
-import org.apache.spark.sql.test.TestQueryExecutor
-
-import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier
-import org.apache.carbondata.core.metadata.CarbonTableIdentifier
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.path.CarbonStorePath
-import org.apache.carbondata.processing.util.CarbonLoaderUtil
-
-/**
- * Utility for global dictionary test cases
- */
-object DictionaryTestCaseUtil {
-
-  /**
-   * check whether the dictionary of the specified column has been generated
-   * @param relation  carbon table relation
-   * @param columnName  name of specified column
-   * @param value  a value of column
-   */
-  def checkDictionary(relation: CarbonRelation, columnName: String, value: String) {
-    val table = relation.tableMeta.carbonTable
-    val dimension = table.getDimensionByName(table.getFactTableName, columnName)
-    val tableIdentifier = new CarbonTableIdentifier(table.getDatabaseName, table.getFactTableName, "uniqueid")
-    val columnIdentifier = new DictionaryColumnUniqueIdentifier(tableIdentifier,
-      dimension.getColumnIdentifier, dimension.getDataType,
-      CarbonStorePath.getCarbonTablePath(table.getStorePath, table.getCarbonTableIdentifier)
-    )
-    val dict = CarbonLoaderUtil.getDictionary(columnIdentifier, TestQueryExecutor.storeLocation)
-    assert(dict.getSurrogateKey(value) != CarbonCommonConstants.INVALID_SURROGATE_KEY)
-  }
-}
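
A typical call site for this utility, as used by the dictionary test cases in this commit:
DictionaryTestCaseUtil.checkDictionary(sampleRelation, "city", "shenzhen") asserts that the given
value received a valid surrogate key in the dictionary generated for that column.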

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0bf597d9/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
deleted file mode 100644
index f0de47d..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
+++ /dev/null
@@ -1,252 +0,0 @@
-/*
-  * Licensed to the Apache Software Foundation (ASF) under one
-  * or more contributor license agreements.  See the NOTICE file
-  * distributed with this work for additional information
-  * regarding copyright ownership.  The ASF licenses this file
-  * to you under the Apache License, Version 2.0 (the
-  * "License"); you may not use this file except in compliance
-  * with the License.  You may obtain a copy of the License at
-  *
-  *    http://www.apache.org/licenses/LICENSE-2.0
-  *
-  * Unless required by applicable law or agreed to in writing,
-  * software distributed under the License is distributed on an
-  * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-  * KIND, either express or implied.  See the License for the
-  * specific language governing permissions and limitations
-  * under the License.
-  */
-package org.apache.carbondata.spark.util
-
-import org.apache.spark.sql.test.util.QueryTest
-import org.apache.spark.sql.{CarbonEnv, CarbonRelation}
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-import org.apache.carbondata.processing.exception.DataLoadingException
-import org.apache.carbondata.processing.util.TableOptionConstant
-import org.apache.carbondata.processing.loading.model.{CarbonDataLoadSchema, CarbonLoadModel}
-import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
-
-/**
- * Test case for external column dictionary generation,
- * including support for complex types.
- */
-class ExternalColumnDictionaryTestCase extends QueryTest with BeforeAndAfterAll {
-
-  var extComplexRelation: CarbonRelation = _
-  var verticalDelimiteRelation: CarbonRelation = _
-  var loadSqlRelation: CarbonRelation = _
-  var filePath: String = _
-  var pwd: String = _
-  var complexFilePath1: String = _
-  var complexFilePath2: String = _
-  var extColDictFilePath1: String = _
-  var extColDictFilePath2: String = _
-  var extColDictFilePath3: String = _
-  var header: String = _
-  var header2: String = _
-
-  def buildTestData() = {
-
-    filePath = s"${resourcesPath}/sample.csv"
-    complexFilePath1 = s"${resourcesPath}/complexdata2.csv"
-    complexFilePath2 = s"${resourcesPath}/verticalDelimitedData.csv"
-    extColDictFilePath1 = s"deviceInformationId:${resourcesPath}/deviceInformationId.csv," +
-      s"mobile.imei:${resourcesPath}/mobileimei.csv," +
-      s"mac:${resourcesPath}/mac.csv," +
-      s"locationInfo.ActiveCountry:${resourcesPath}/locationInfoActiveCountry.csv"
-    extColDictFilePath2 = s"deviceInformationId:${resourcesPath}/deviceInformationId2.csv"
-    extColDictFilePath3 = s"channelsId:${resourcesPath}/channelsId.csv"
-    header = "deviceInformationId,channelsId,ROMSize,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId,contractNumber"
-    header2 = "deviceInformationId,channelsId,contractNumber"
-  }
-
-  def buildTable() = {
-    try {
-      sql("""CREATE TABLE extComplextypes (deviceInformationId int,
-     channelsId string, ROMSize string, purchasedate string,
-     mobile struct<imei:string, imsi:string>, MAC array<string>,
-     locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string,
-     ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>,
-     proddate struct<productionDate:string,activeDeactivedate:array<string>>,
-     gamePointId double,contractNumber double)
-     STORED BY 'org.apache.carbondata.format'
-     TBLPROPERTIES('DICTIONARY_INCLUDE' = 'deviceInformationId, channelsId')
-      """)
-    } catch {
-      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-    }
-
-    try {
-      sql("""CREATE TABLE verticalDelimitedTable (deviceInformationId int,
-     channelsId string,contractNumber double)
-     STORED BY 'org.apache.carbondata.format'
-     TBLPROPERTIES('DICTIONARY_INCLUDE' = 'deviceInformationId, channelsId')
-      """)
-    } catch {
-      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-    }
-
-    try {
-      sql("""CREATE TABLE loadSqlTest (deviceInformationId int,
-     channelsId string, ROMSize string, purchasedate string,
-     mobile struct<imei:string, imsi:string>, MAC array<string>,
-     locationinfo array<struct<ActiveAreaId:int, ActiveCountry:string,
-     ActiveProvince:string, Activecity:string, ActiveDistrict:string, ActiveStreet:string>>,
-     proddate struct<productionDate:string,activeDeactivedate:array<string>>,
-     gamePointId double,contractNumber double)
-     STORED BY 'org.apache.carbondata.format'
-     TBLPROPERTIES('DICTIONARY_INCLUDE' = 'deviceInformationId')
-      """)
-    } catch {
-      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-    }
-  }
-
-  def buildRelation() = {
-    val catalog = CarbonEnv.get.carbonMetastore
-    extComplexRelation = catalog.lookupRelation1(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
-      "extComplextypes")(sqlContext)
-      .asInstanceOf[CarbonRelation]
-    verticalDelimiteRelation = catalog.lookupRelation1(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
-      "verticalDelimitedTable")(sqlContext)
-      .asInstanceOf[CarbonRelation]
-    loadSqlRelation = catalog.lookupRelation1(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
-      "loadSqlTest")(sqlContext)
-      .asInstanceOf[CarbonRelation]
-  }
-
-  def buildCarbonLoadModel(relation: CarbonRelation,
-      filePath:String,
-      header: String,
-      extColFilePath: String,
-      csvDelimiter: String = ","): CarbonLoadModel = {
-    val carbonLoadModel = new CarbonLoadModel
-    carbonLoadModel.setTableName(relation.tableMeta.carbonTableIdentifier.getTableName)
-    carbonLoadModel.setDatabaseName(relation.tableMeta.carbonTableIdentifier.getDatabaseName)
-    val table = relation.tableMeta.carbonTable
-    val carbonSchema = new CarbonDataLoadSchema(table)
-    carbonLoadModel.setDatabaseName(table.getDatabaseName)
-    carbonLoadModel.setTableName(table.getFactTableName)
-    carbonLoadModel.setCarbonDataLoadSchema(carbonSchema)
-    carbonLoadModel.setFactFilePath(filePath)
-    carbonLoadModel.setCsvHeader(header)
-    carbonLoadModel.setCsvDelimiter(csvDelimiter)
-    carbonLoadModel.setComplexDelimiterLevel1("\\$")
-    carbonLoadModel.setComplexDelimiterLevel2("\\:")
-    carbonLoadModel.setColDictFilePath(extColFilePath)
-    carbonLoadModel.setQuoteChar("\"");
-    carbonLoadModel.setSerializationNullFormat(
-      TableOptionConstant.SERIALIZATION_NULL_FORMAT.getName + ",\\N")
-    carbonLoadModel.setDefaultTimestampFormat(CarbonProperties.getInstance().getProperty(
-      CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-      CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT))
-    carbonLoadModel.setDefaultDateFormat(CarbonProperties.getInstance().getProperty(
-      CarbonCommonConstants.CARBON_DATE_FORMAT,
-      CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT))  
-    carbonLoadModel.setCsvHeaderColumns(CommonUtil.getCsvHeaderColumns(carbonLoadModel))
-    carbonLoadModel.setMaxColumns("100")
-    carbonLoadModel
-  }
-
-  override def beforeAll {
-    buildTestData
-    buildTable
-    buildRelation
-  }
-
-  test("Generate global dictionary from external column file") {
-    // load the first time
-    var carbonLoadModel = buildCarbonLoadModel(extComplexRelation, complexFilePath1,
-      header, extColDictFilePath1)
-    GlobalDictionaryUtil.generateGlobalDictionary(sqlContext, carbonLoadModel,
-      extComplexRelation.tableMeta.storePath)
-    // check whether the dictionary is generated
-    DictionaryTestCaseUtil.checkDictionary(
-      extComplexRelation, "deviceInformationId", "10086")
-
-    // load the second time
-    carbonLoadModel = buildCarbonLoadModel(extComplexRelation, complexFilePath1,
-      header, extColDictFilePath2)
-    GlobalDictionaryUtil.generateGlobalDictionary(sqlContext, carbonLoadModel,
-      extComplexRelation.tableMeta.storePath)
-    // check the old dictionary and whether the new distinct value is generated
-    DictionaryTestCaseUtil.checkDictionary(
-      extComplexRelation, "deviceInformationId", "10086")
-    DictionaryTestCaseUtil.checkDictionary(
-      extComplexRelation, "deviceInformationId", "10011")
-  }
-
-  test("When csv delimiter is not comma") {
-    //  when csv delimiter is comma
-    var carbonLoadModel = buildCarbonLoadModel(extComplexRelation, complexFilePath1,
-      header, extColDictFilePath3)
-    GlobalDictionaryUtil.generateGlobalDictionary(sqlContext, carbonLoadModel,
-      extComplexRelation.tableMeta.storePath)
-    // check whether the dictionary is generated
-    DictionaryTestCaseUtil.checkDictionary(
-      extComplexRelation, "channelsId", "1421|")
-
-    //  when csv delimiter is not comma
-    carbonLoadModel = buildCarbonLoadModel(verticalDelimiteRelation, complexFilePath2,
-      header2, extColDictFilePath3, "|")
-    GlobalDictionaryUtil.generateGlobalDictionary(sqlContext, carbonLoadModel,
-      verticalDelimiteRelation.tableMeta.storePath)
-    // check whether the dictionary is generated
-    DictionaryTestCaseUtil.checkDictionary(
-      verticalDelimiteRelation, "channelsId", "1431,")
-  }
-
-  test("LOAD DML with COLUMNDICT option") {
-    try {
-      sql(s"""
-      LOAD DATA LOCAL INPATH "$complexFilePath1" INTO TABLE loadSqlTest
-      OPTIONS('single_pass'='true','FILEHEADER'='$header', 'COLUMNDICT'='$extColDictFilePath1')
-        """)
-    } catch {
-      case ex: Exception =>
-        LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-        assert(false)
-    }
-    DictionaryTestCaseUtil.checkDictionary(
-      loadSqlRelation, "deviceInformationId", "10086")
-  }
-
-  test("COLUMNDICT and ALL_DICTIONARY_PATH can not be used together") {
-    try {
-      sql(s"""
-        LOAD DATA LOCAL INPATH "$complexFilePath1" INTO TABLE loadSqlTest
-        OPTIONS('COLUMNDICT'='$extColDictFilePath1',"ALL_DICTIONARY_PATH"='$extColDictFilePath1')
-        """)
-      assert(false)
-    } catch {
-      case ex: MalformedCarbonCommandException =>
-        assertResult(ex.getMessage)("Error: COLUMNDICT and ALL_DICTIONARY_PATH can not be used together " +
-          "in options")
-      case _: Throwable => assert(false)
-    }
-  }
-
-  test("Measure can not use COLUMNDICT") {
-    try {
-      sql(s"""
-      LOAD DATA LOCAL INPATH "$complexFilePath1" INTO TABLE loadSqlTest
-      OPTIONS('single_pass'='true','FILEHEADER'='$header', 'COLUMNDICT'='gamePointId:$filePath')
-      """)
-      assert(false)
-    } catch {
-      case ex: DataLoadingException =>
-        assertResult(ex.getMessage)("Column gamePointId is not a key column. Only key column can be part " +
-          "of dictionary and used in COLUMNDICT option.")
-      case _: Throwable => assert(false)
-    }
-  }
-
-  override def afterAll: Unit = {
-    sql("DROP TABLE extComplextypes")
-    sql("DROP TABLE verticalDelimitedTable")
-    sql("DROP TABLE loadSqlTest")
-  }
-}
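
As the deleted tests above show, the COLUMNDICT load option maps dictionary columns to external
dictionary files as a comma-separated list of column:path pairs, with dot notation for nested
fields (e.g. mobile.imei). A minimal sketch of such a load, where the table name, header and
paths are placeholder values:

    sql(s"""
      LOAD DATA LOCAL INPATH '$csvPath' INTO TABLE someTable
      OPTIONS('single_pass'='true', 'FILEHEADER'='$header',
        'COLUMNDICT'='deviceInformationId:$dictDir/deviceInformationId.csv,mobile.imei:$dictDir/mobileimei.csv')
      """)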

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0bf597d9/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilConcurrentTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilConcurrentTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilConcurrentTestCase.scala
deleted file mode 100644
index d1d31c1..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilConcurrentTestCase.scala
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.spark.util
-
-import java.util.concurrent.{Callable, Executors}
-
-import scala.collection.mutable.ListBuffer
-
-import org.apache.carbondata.common.logging.LogServiceFactory
-import org.apache.spark.sql.test.util.QueryTest
-import org.apache.spark.sql.{CarbonEnv, CarbonRelation}
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.cache.dictionary.DictionaryColumnUniqueIdentifier
-import org.apache.carbondata.core.service.impl.PathFactory
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.datastore.impl.FileFactory
-import org.apache.carbondata.core.util.CarbonProperties
-import org.apache.carbondata.core.util.path.CarbonStorePath
-import org.apache.carbondata.processing.util.TableOptionConstant
-import org.apache.carbondata.processing.loading.model.{CarbonDataLoadSchema, CarbonLoadModel}
-
-class GlobalDictionaryUtilConcurrentTestCase extends QueryTest with BeforeAndAfterAll {
-
-  var sampleRelation: CarbonRelation = _
-
-  def buildCarbonLoadModel(relation: CarbonRelation,
-                           filePath: String,
-                           header: String): CarbonLoadModel = {
-    val carbonLoadModel = new CarbonLoadModel
-    carbonLoadModel.setTableName(relation.tableMeta.carbonTableIdentifier.getTableName)
-    carbonLoadModel.setDatabaseName(relation.tableMeta.carbonTableIdentifier.getDatabaseName)
-    // carbonLoadModel.setSchema(relation.cubeMeta.schema)
-    val table = relation.tableMeta.carbonTable
-    val carbonSchema = new CarbonDataLoadSchema(table)
-    carbonLoadModel.setDatabaseName(table.getDatabaseName)
-    carbonLoadModel.setTableName(table.getFactTableName)
-    carbonLoadModel.setCarbonDataLoadSchema(carbonSchema)
-    carbonLoadModel.setFactFilePath(filePath)
-    carbonLoadModel.setCsvHeader(header)
-    carbonLoadModel.setCsvDelimiter(",")
-    carbonLoadModel.setComplexDelimiterLevel1("\\$")
-    carbonLoadModel.setComplexDelimiterLevel2("\\:")
-    carbonLoadModel.setStorePath(relation.tableMeta.storePath)
-    carbonLoadModel.setQuoteChar("\"")
-    carbonLoadModel.setSerializationNullFormat(
-      TableOptionConstant.SERIALIZATION_NULL_FORMAT.getName + ",\\N")
-    carbonLoadModel.setDefaultTimestampFormat(CarbonProperties.getInstance().getProperty(
-      CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-      CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT))
-    carbonLoadModel.setDefaultDateFormat(CarbonProperties.getInstance().getProperty(
-      CarbonCommonConstants.CARBON_DATE_FORMAT,
-      CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT))
-    carbonLoadModel.setCsvHeaderColumns(CommonUtil.getCsvHeaderColumns(carbonLoadModel))
-    carbonLoadModel.setMaxColumns("2000")
-    carbonLoadModel
-  }
-
-  override def beforeAll {
-    // when re-running against an existing store, comment out the buildTable call below
-    buildTable
-    buildRelation
-  }
-
-  def buildTable() = {
-    try {
-      sql(
-        "CREATE TABLE IF NOT EXISTS employee (empid STRING) STORED BY 'org.apache.carbondata.format' " +
-          "tblproperties('dictionary_include'='empid')")
-    } catch {
-      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-    }
-  }
-
-  def buildRelation() = {
-    val catalog = CarbonEnv.get.carbonMetastore
-    sampleRelation = catalog.lookupRelation1(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
-      "employee")(sqlContext)
-      .asInstanceOf[CarbonRelation]
-  }
-
-  def writeDummyData(filePath: String, recCount: Int) = {
-    // writes the values 0..recCount inclusive, one per line (recCount + 1 records)
-    val records = StringBuilder.newBuilder
-    for (a <- 0 to recCount) {
-      records.append(a).append("\n")
-    }
-    val dis = FileFactory.getDataOutputStream(filePath, FileFactory.getFileType(filePath))
-    dis.writeBytes(records.toString())
-    dis.close()
-  }
-
-  test("concurrent dictionary generation") {
-    CarbonProperties.getInstance.addProperty(CarbonCommonConstants.MAX_QUERY_EXECUTION_TIME, "-1")
-    val noOfFiles = 5
-    val files = new ListBuffer[String]()
-    val loadModels = new ListBuffer[CarbonLoadModel]()
-    for (i <- 0 until noOfFiles) {
-      val filePath: String = s"${integrationPath}/spark/target/singlecolumn_${10 * (i + 1)}.csv"
-      files += filePath
-      loadModels += buildCarbonLoadModel(sampleRelation, filePath, "empid")
-      writeDummyData(filePath, 10 * (i + 1))
-    }
-    try {
-      val dictGenerators = new java.util.ArrayList[Callable[String]](noOfFiles)
-      for (i <- 0 until noOfFiles) {
-        dictGenerators.add(new DictGenerator(loadModels(i)))
-      }
-      val executorService = Executors.newFixedThreadPool(10)
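-      // invokeAll blocks until every generator completes; get() rethrows a task's failure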
-      val results = executorService.invokeAll(dictGenerators)
-      for (i <- 0 until noOfFiles) {
-        val res = results.get(i).get
-        assert("Pass".equals(res))
-      }
-    } catch {
-      case ex: Exception =>
-        ex.printStackTrace()
-        assert(false)
-    }
-    val carbonTableIdentifier = sampleRelation.tableMeta.carbonTable.getCarbonTableIdentifier
-    val columnIdentifier = sampleRelation.tableMeta.carbonTable.getDimensionByName("employee", "empid").getColumnIdentifier
-    val dictionaryColumnUniqueIdentifier = new DictionaryColumnUniqueIdentifier(
-      carbonTableIdentifier,
-      columnIdentifier,
-      columnIdentifier.getDataType,
-      CarbonStorePath.getCarbonTablePath(storeLocation, carbonTableIdentifier))
-    val carbonTablePath = PathFactory.getInstance()
-        .getCarbonTablePath(sampleRelation.tableMeta.storePath, carbonTableIdentifier, dictionaryColumnUniqueIdentifier)
-    val dictPath = carbonTablePath.getDictionaryFilePath(columnIdentifier.getColumnId)
-    val dictFile = FileFactory.getCarbonFile(dictPath, FileFactory.getFileType(dictPath))
-    val offSet = dictFile.getSize
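-    // the current dictionary file size is passed as the offset used to locate the matching sort index file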
-    val sortIndexPath = carbonTablePath.getSortIndexFilePath(columnIdentifier.getColumnId, offSet)
-    val sortIndexFile = FileFactory.getCarbonFile(sortIndexPath, FileFactory.getFileType(sortIndexPath))
-    assert(sortIndexFile.exists())
-    val sortIndexFiles = carbonTablePath.getSortIndexFiles(sortIndexFile.getParentFile, columnIdentifier.getColumnId)
-    assert(sortIndexFiles.length >= 1)
-    deleteFiles(files)
-  }
-
-  def deleteFiles(files: ListBuffer[String]) {
-    for (path <- files) {
-      FileFactory.getCarbonFile(path, FileFactory.getFileType(path)).delete()
-    }
-  }
-
-  override def afterAll {
-    sql("drop table if exists employee")
-    CarbonProperties.getInstance.addProperty(CarbonCommonConstants.MAX_QUERY_EXECUTION_TIME,
-        Integer.toString(CarbonCommonConstants.DEFAULT_MAX_QUERY_EXECUTION_TIME))
-  }
-
-  class DictGenerator(loadModel: CarbonLoadModel) extends Callable[String] {
-    override def call: String = {
-      var result = "Pass"
-      try {
-        GlobalDictionaryUtil
-          .generateGlobalDictionary(sqlContext,
-            loadModel,
-            sampleRelation.tableMeta.storePath)
-      } catch {
-        case ex: Exception =>
-          result = ex.getMessage
-          ex.printStackTrace()
-      }
-      result
-    }
-  }
-}

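The concurrency pattern in the deleted suite generalizes beyond dictionaries:
build one Callable per load model, hand the batch to invokeAll on a fixed pool,
and let each get() surface a failed task. A self-contained sketch of that
pattern with the CarbonData call stubbed out (all names are illustrative):

    import java.util.concurrent.{Callable, Executors, TimeUnit}
    import scala.collection.JavaConverters._

    object ConcurrentFanOutSketch {
      def main(args: Array[String]): Unit = {
        // One Callable per unit of work; the body stands in for generateGlobalDictionary.
        val tasks: Seq[Callable[String]] = (1 to 5).map { i =>
          new Callable[String] {
            override def call(): String = s"task-$i: Pass"
          }
        }
        val pool = Executors.newFixedThreadPool(10)
        try {
          // invokeAll blocks until every task finishes; get() rethrows a task's exception.
          val results = pool.invokeAll(tasks.asJava)
          results.asScala.foreach(f => assert(f.get.endsWith("Pass")))
        } finally {
          pool.shutdown()
          pool.awaitTermination(1, TimeUnit.MINUTES)
        }
      }
    }
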
http://git-wip-us.apache.org/repos/asf/carbondata/blob/0bf597d9/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala
deleted file mode 100644
index 8864d3e..0000000
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.carbondata.spark.util
-
-import java.io.File
-
-import org.apache.spark.sql.test.util.QueryTest
-import org.apache.spark.sql.{CarbonEnv, CarbonRelation}
-import org.scalatest.BeforeAndAfterAll
-
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.util.CarbonProperties
-import org.apache.carbondata.processing.util.TableOptionConstant
-import org.apache.carbondata.processing.loading.model.{CarbonDataLoadSchema, CarbonLoadModel}
-
-/**
-  * Test Case for org.apache.carbondata.spark.util.GlobalDictionaryUtil
-  */
-class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
-
-  var sampleRelation: CarbonRelation = _
-  var dimSampleRelation: CarbonRelation = _
-  var complexRelation: CarbonRelation = _
-  var incrementalLoadTableRelation: CarbonRelation = _
-  var filePath: String = _
-  var dimFilePath: String = _
-  var complexfilePath: String = _
-  var complexfilePath1: String = _
-  var complexfilePath2: String = _
-
-  def buildCarbonLoadModel(relation: CarbonRelation,
-    filePath: String,
-    header: String): CarbonLoadModel = {
-    val carbonLoadModel = new CarbonLoadModel
-    carbonLoadModel.setTableName(relation.tableMeta.carbonTableIdentifier.getTableName)
-    carbonLoadModel.setDatabaseName(relation.tableMeta.carbonTableIdentifier.getDatabaseName)
-    // carbonLoadModel.setSchema(relation.tableMeta.schema)
-    val table = relation.tableMeta.carbonTable
-    val carbonSchema = new CarbonDataLoadSchema(table)
-    carbonLoadModel.setDatabaseName(table.getDatabaseName)
-    carbonLoadModel.setTableName(table.getFactTableName)
-    carbonLoadModel.setCarbonDataLoadSchema(carbonSchema)
-    carbonLoadModel.setFactFilePath(filePath)
-    carbonLoadModel.setCsvHeader(header)
-    carbonLoadModel.setCsvDelimiter(",")
-    carbonLoadModel.setComplexDelimiterLevel1("\\$")
-    carbonLoadModel.setComplexDelimiterLevel2("\\:")
-    carbonLoadModel.setStorePath(relation.tableMeta.storePath)
-    carbonLoadModel.setQuoteChar("\"")
-    carbonLoadModel.setSerializationNullFormat(
-      TableOptionConstant.SERIALIZATION_NULL_FORMAT.getName + ",\\N")
-    carbonLoadModel.setDefaultTimestampFormat(CarbonProperties.getInstance().getProperty(
-      CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
-      CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT))
-    carbonLoadModel.setDefaultDateFormat(CarbonProperties.getInstance().getProperty(
-      CarbonCommonConstants.CARBON_DATE_FORMAT,
-      CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT))
-    carbonLoadModel.setCsvHeaderColumns(CommonUtil.getCsvHeaderColumns(carbonLoadModel))
-    carbonLoadModel.setMaxColumns("2000")
-    carbonLoadModel
-  }
-
-  override def beforeAll {
-    buildTestData
-    // when re-running against an existing store, comment out the buildTable call below
-    buildTable
-    buildRelation
-  }
-
-  def buildTestData() = {
-    filePath = s"${resourcesPath}/sample.csv"
-    dimFilePath = s"dimTableSample:${resourcesPath}/dimTableSample.csv"
-    complexfilePath1 = s"${resourcesPath}/complexdata1.csv"
-    complexfilePath2 = s"${resourcesPath}/complexdata2.csv"
-    complexfilePath = s"${resourcesPath}/complexdata.csv"
-  }
-
-  def buildTable() = {
-    try {
-      sql(
-        "CREATE TABLE IF NOT EXISTS sample (id STRING, name STRING, city STRING, " +
-          "age INT) STORED BY 'org.apache.carbondata.format'"
-      )
-    } catch {
-      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-    }
-    try {
-      sql(
-        "CREATE TABLE IF NOT EXISTS dimSample (id STRING, name STRING, city STRING, " +
-          "age INT) STORED BY 'org.apache.carbondata.format'" +
-        "TBLPROPERTIES('DICTIONARY_EXCLUDE'='id,name')"
-      )
-    } catch {
-      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-    }
-    try {
-      sql(
-        "create table complextypes (deviceInformationId INT, channelsId string, " +
-          "ROMSize string, purchasedate string, mobile struct<imei: string, imsi: string>, MAC " +
-          "array<string>, locationinfo array<struct<ActiveAreaId: INT, ActiveCountry: string, " +
-          "ActiveProvince: string, Activecity: string, ActiveDistrict: string, ActiveStreet: " +
-          "string>>, proddate struct<productionDate: string,activeDeactivedate: array<string>>, " +
-          "gamePointId INT,contractNumber INT) STORED BY 'org.apache.carbondata.format'" +
-          "TBLPROPERTIES('DICTIONARY_EXCLUDE'='ROMSize')"
-
-      )
-    } catch {
-      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-    }
-
-    try {
-      sql(
-        "create table incrementalLoadTable (deviceInformationId INT, channelsId " +
-          "string, ROMSize string, purchasedate string, mobile struct<imei: string, imsi: string>, " +
-          "MAC array<string>, locationinfo array<struct<ActiveAreaId: INT, ActiveCountry: " +
-          "string, ActiveProvince: string, Activecity: string, ActiveDistrict: string, ActiveStreet: " +
-          "string>>, proddate struct<productionDate: string,activeDeactivedate: array<string>>, " +
-          "gamePointId INT,contractNumber INT) STORED BY 'org.apache.carbondata.format'"+
-          "TBLPROPERTIES('DICTIONARY_INCLUDE'='deviceInformationId')"
-      )
-    } catch {
-      case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
-    }
-  }
-
-  def buildRelation() = {
-    val catalog = CarbonEnv.get.carbonMetastore
-    sampleRelation = catalog.lookupRelation1(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
-      "sample")(sqlContext)
-      .asInstanceOf[CarbonRelation]
-    dimSampleRelation = catalog
-      .lookupRelation1(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME), "dimSample")(sqlContext)
-      .asInstanceOf[CarbonRelation]
-    complexRelation = catalog
-      .lookupRelation1(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME), "complextypes")(sqlContext)
-      .asInstanceOf[CarbonRelation]
-    incrementalLoadTableRelation = catalog
-      .lookupRelation1(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME), "incrementalLoadTable")(sqlContext)
-      .asInstanceOf[CarbonRelation]
-  }
-
-  test("[issue-80]Global Dictionary Generation") {
-
-    val carbonLoadModel = buildCarbonLoadModel(sampleRelation, filePath, null)
-    GlobalDictionaryUtil
-      .generateGlobalDictionary(sqlContext, carbonLoadModel,
-        sampleRelation.tableMeta.storePath
-      )
-
-    // test for dimension table
-    // TODO - Need to fill and send the dimension table data as per new DimensionRelation in
-    // CarbonDataLoadModel
-    // carbonLoadModel = buildCarbonLoadModel(dimSampleRelation, filePath, dimFilePath, null)
-    // GlobalDictionaryUtil.generateGlobalDictionary(CarbonHiveContext, carbonLoadModel,
-    // dimSampleRelation.tableMeta.dataPath, false)
-  }
-
-  test("[Issue-190]load csv file without header And support complex type") {
-    val header = "deviceInformationId,channelsId,ROMSize,purchasedate,mobile,MAC,locationinfo," +
-      "proddate,gamePointId,contractNumber"
-    val carbonLoadModel = buildCarbonLoadModel(complexRelation, complexfilePath, header)
-    GlobalDictionaryUtil
-      .generateGlobalDictionary(sqlContext, carbonLoadModel,
-        complexRelation.tableMeta.storePath
-      )
-  }
-
-  test("[Issue-232]Issue in incremental data load for dictionary generation") {
-    val header = "deviceInformationId,channelsId,ROMSize,purchasedate,mobile,MAC,locationinfo," +
-      "proddate,gamePointId,contractNumber"
-    // load 1
-    var carbonLoadModel = buildCarbonLoadModel(incrementalLoadTableRelation,
-      complexfilePath1,
-      header
-    )
-    GlobalDictionaryUtil
-      .generateGlobalDictionary(sqlContext, carbonLoadModel,
-        incrementalLoadTableRelation.tableMeta.storePath
-      )
-    DictionaryTestCaseUtil
-      .checkDictionary(incrementalLoadTableRelation, "deviceInformationId", "100010")
-
-    // load 2
-    carbonLoadModel = buildCarbonLoadModel(incrementalLoadTableRelation,
-      complexfilePath2,
-      header
-    )
-    GlobalDictionaryUtil
-      .generateGlobalDictionary(sqlContext, carbonLoadModel,
-        incrementalLoadTableRelation.tableMeta.storePath
-      )
-    DictionaryTestCaseUtil
-      .checkDictionary(incrementalLoadTableRelation, "deviceInformationId", "100077")
-  }
-
-}

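The incremental-load assertions above (100010 from the first file, 100077 from
the second) rest on one invariant of global dictionary generation: a later load
appends only values not yet in the dictionary and never reassigns existing
surrogate keys. A toy model of that invariant, not the CarbonData implementation:

    // Toy model: stable surrogate keys; merge appends only unseen values.
    object IncrementalDictSketch extends App {
      final case class Dict(keyOf: Map[String, Int]) {
        def merge(values: Seq[String]): Dict = {
          var next = keyOf.size + 1
          val added = values.distinct.filterNot(keyOf.contains).map { v =>
            val pair = v -> next
            next += 1
            pair
          }
          Dict(keyOf ++ added)
        }
      }

      val load1 = Dict(Map.empty).merge(Seq("100010", "100011"))
      val load2 = load1.merge(Seq("100011", "100077"))
      assert(load2.keyOf("100010") == load1.keyOf("100010")) // keys survive later loads
      assert(load2.keyOf.size == 3)                          // only the unseen value is added
    }
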
http://git-wip-us.apache.org/repos/asf/carbondata/blob/0bf597d9/integration/spark/src/test/scala/org/apache/spark/sql/TestCarbonSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/spark/sql/TestCarbonSqlParser.scala b/integration/spark/src/test/scala/org/apache/spark/sql/TestCarbonSqlParser.scala
deleted file mode 100644
index 13d4167..0000000
--- a/integration/spark/src/test/scala/org/apache/spark/sql/TestCarbonSqlParser.scala
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.spark.sql
-
-import scala.collection.mutable.Map
-
-import org.apache.spark.sql.test.util.QueryTest
-import org.apache.spark.sql.execution.command.Field
-
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-
-/**
-  * Stub class for calling the CarbonSqlParser
-  */
-private class TestCarbonSqlParserStub extends CarbonSqlParser {
-
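-  // expose extractDimAndMsrFields so the tests below can call it directly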
-  def extractDimAndMsrFieldsTest(fields: Seq[Field],
-      tableProperties: Map[String, String]): (Seq[Field], Seq[Field], Seq[String], Seq[String]) = {
-    extractDimAndMsrFields(fields, tableProperties)
-  }
-
-}
-
-/**
-  * Test class to test Carbon Sql Parser
-  */
-class TestCarbonSqlParser extends QueryTest {
-
-  /**
-    * Builds the eight test fields: col1-col3 and col5-col8 are String, col4 is int.
-    */
-  def loadAllFields: Seq[Field] = {
-    val col1 = Field("col1", Option("String"), Option("col1"), None, null, Some("columnar"))
-    val col2 = Field("col2", Option("String"), Option("col2"), None, null, Some("columnar"))
-    val col3 = Field("col3", Option("String"), Option("col3"), None, null, Some("columnar"))
-    val col4 = Field("col4", Option("int"), Option("col4"), None, null, Some("columnar"))
-    val col5 = Field("col5", Option("String"), Option("col5"), None, null, Some("columnar"))
-    val col6 = Field("col6", Option("String"), Option("col6"), None, null, Some("columnar"))
-    val col7 = Field("col7", Option("String"), Option("col7"), None, null, Some("columnar"))
-    val col8 = Field("col8", Option("String"), Option("col8"), None, null, Some("columnar"))
-    Seq(col1, col2, col3, col4, col5, col6, col7, col8)
-  }
-
-  // Testing the extracting of Dims and no Dictionary
-  test("Test-extractDimColsAndNoDictionaryFields") {
-    val tableProperties = Map(CarbonCommonConstants.DICTIONARY_EXCLUDE -> "col2", CarbonCommonConstants.DICTIONARY_INCLUDE -> "col4")
-    val fields: Seq[Field] = loadAllFields
-
-    val stub = new TestCarbonSqlParserStub()
-    val (dimCols, _, noDictionary, _) = stub.extractDimAndMsrFieldsTest(fields, tableProperties)
-
-    // all eight fields are dimensions: col4 is dictionary-included, the rest are String
-    assert(dimCols.size == 8)
-    assert(dimCols.lift(0).get.column.equalsIgnoreCase("col1"))
-    assert(dimCols.lift(1).get.column.equalsIgnoreCase("col2"))
-    assert(dimCols.lift(2).get.column.equalsIgnoreCase("col3"))
-    assert(dimCols.lift(3).get.column.equalsIgnoreCase("col4"))
-
-    //No dictionary column names will be available in noDictionary list
-    assert(noDictionary.size == 7)
-    assert(noDictionary.lift(0).get.equalsIgnoreCase("col1"))
-    assert(noDictionary.lift(1).get.equalsIgnoreCase("col2"))
-    assert(noDictionary.lift(2).get.equalsIgnoreCase("col3"))
-    assert(noDictionary.lift(3).get.equalsIgnoreCase("col5"))
-    assert(noDictionary.lift(4).get.equalsIgnoreCase("col6"))
-    assert(noDictionary.lift(5).get.equalsIgnoreCase("col7"))
-    assert(noDictionary.lift(6).get.equalsIgnoreCase("col8"))
-
-  }
-
-  test("Test-DimAndMsrColsWithNoDictionaryFields1") {
-    val tableProperties = Map(CarbonCommonConstants.DICTIONARY_EXCLUDE -> "col1")
-    val fields: Seq[Field] = loadAllFields
-    val stub = new TestCarbonSqlParserStub()
-    val (dimCols, msrCols, noDictionary, _) = stub.extractDimAndMsrFieldsTest(fields, tableProperties)
-
-    //below fields should be available in dimensions list
-    assert(dimCols.size == 7)
-    assert(dimCols.lift(0).get.column.equalsIgnoreCase("col1"))
-    assert(dimCols.lift(1).get.column.equalsIgnoreCase("col2"))
-    assert(dimCols.lift(2).get.column.equalsIgnoreCase("col3"))
-
-    //below column names will be available in noDictionary list
-    assert(noDictionary.size == 7)
-    assert(noDictionary.lift(0).get.equalsIgnoreCase("col1"))
-    assert(noDictionary.lift(1).get.equalsIgnoreCase("col2"))
-    assert(noDictionary.lift(2).get.equalsIgnoreCase("col3"))
-    assert(noDictionary.lift(3).get.equalsIgnoreCase("col5"))
-    assert(noDictionary.lift(4).get.equalsIgnoreCase("col6"))
-    assert(noDictionary.lift(5).get.equalsIgnoreCase("col7"))
-    assert(noDictionary.lift(6).get.equalsIgnoreCase("col8"))
-
-    //check msr
-    assert(msrCols.size == 1)
-    assert(msrCols.lift(0).get.column.equalsIgnoreCase("col4"))
-  }
-
-  test("Test-DimAndMsrColsWithNoDictionaryFields2") {
-    val tableProperties = Map(CarbonCommonConstants.DICTIONARY_INCLUDE -> "col1")
-    val fields: Seq[Field] = loadAllFields
-    val stub = new TestCarbonSqlParserStub()
-    val (dimCols, msrCols, noDictionary, _) = stub.extractDimAndMsrFieldsTest(fields, tableProperties)
-
-    //below dimension fields should be available in dimensions list
-    assert(dimCols.size == 7)
-    assert(dimCols.lift(0).get.column.equalsIgnoreCase("col1"))
-    assert(dimCols.lift(1).get.column.equalsIgnoreCase("col2"))
-    assert(dimCols.lift(2).get.column.equalsIgnoreCase("col3"))
-
-    //below column names will be available in noDictionary list
-    assert(noDictionary.size == 6)
-    assert(noDictionary.lift(0).get.equalsIgnoreCase("col2"))
-    assert(noDictionary.lift(1).get.equalsIgnoreCase("col3"))
-    assert(noDictionary.lift(2).get.equalsIgnoreCase("col5"))
-    assert(noDictionary.lift(3).get.equalsIgnoreCase("col6"))
-    assert(noDictionary.lift(4).get.equalsIgnoreCase("col7"))
-    assert(noDictionary.lift(5).get.equalsIgnoreCase("col8"))
-
-    //check msr
-    assert(msrCols.size == 1)
-    assert(msrCols.lift(0).get.column.equalsIgnoreCase("col4"))
-  }
-
-  test("Test-DimAndMsrColsWithNoDictionaryFields3") {
-    val tableProperties = Map(CarbonCommonConstants.DICTIONARY_EXCLUDE -> "col1", CarbonCommonConstants.DICTIONARY_INCLUDE -> "col4")
-    val fields: Seq[Field] = loadAllFields
-    val stub = new TestCarbonSqlParserStub()
-    val (dimCols, msrCols, noDictionary, _) = stub.extractDimAndMsrFieldsTest(fields,
-      tableProperties)
-
-    //below dimension fields should be available in dimensions list
-    assert(dimCols.size == 8)
-    assert(dimCols.lift(0).get.column.equalsIgnoreCase("col1"))
-    assert(dimCols.lift(1).get.column.equalsIgnoreCase("col2"))
-    assert(dimCols.lift(2).get.column.equalsIgnoreCase("col3"))
-    assert(dimCols.lift(3).get.column.equalsIgnoreCase("col4"))
-
-    //below column names will be available in noDictionary list
-    assert(noDictionary.size == 7)
-    assert(noDictionary.lift(0).get.equalsIgnoreCase("col1"))
-    assert(noDictionary.lift(1).get.equalsIgnoreCase("col2"))
-    assert(noDictionary.lift(2).get.equalsIgnoreCase("col3"))
-    assert(noDictionary.lift(3).get.equalsIgnoreCase("col5"))
-    assert(noDictionary.lift(4).get.equalsIgnoreCase("col6"))
-    assert(noDictionary.lift(5).get.equalsIgnoreCase("col7"))
-    assert(noDictionary.lift(6).get.equalsIgnoreCase("col8"))
-
-    //check msr
-    assert(msrCols.size == 0)
-  }
-
-  test("Test-DimAndMsrColsWithNoDictionaryFields4") {
-    val tableProperties = Map(CarbonCommonConstants.DICTIONARY_EXCLUDE -> "col3", CarbonCommonConstants.DICTIONARY_INCLUDE -> "col2")
-    val fields: Seq[Field] = loadAllFields
-    val stub = new TestCarbonSqlParserStub()
-    val (dimCols, msrCols, noDictionary, _) = stub.extractDimAndMsrFieldsTest(fields, tableProperties)
-
-    //below dimension fields should be available in dimensions list
-    assert(dimCols.size == 7)
-    assert(dimCols.lift(0).get.column.equalsIgnoreCase("col1"))
-    assert(dimCols.lift(1).get.column.equalsIgnoreCase("col2"))
-    assert(dimCols.lift(2).get.column.equalsIgnoreCase("col3"))
-
-    //below column names will be available in noDictionary list
-    assert(noDictionary.size == 6)
-    assert(noDictionary.lift(0).get.equalsIgnoreCase("col1"))
-    assert(noDictionary.lift(1).get.equalsIgnoreCase("col3"))
-    assert(noDictionary.lift(2).get.equalsIgnoreCase("col5"))
-    assert(noDictionary.lift(3).get.equalsIgnoreCase("col6"))
-    assert(noDictionary.lift(4).get.equalsIgnoreCase("col7"))
-    assert(noDictionary.lift(5).get.equalsIgnoreCase("col8"))
-
-    //check msr
-    assert(msrCols.size == 1)
-    assert(msrCols.lift(0).get.column.equalsIgnoreCase("col4"))
-  }
-
-  test("Test-DimAndMsrColsWithNoDictionaryFields5") {
-    val tableProperties = Map(CarbonCommonConstants.DICTIONARY_EXCLUDE -> "col1", CarbonCommonConstants.DICTIONARY_INCLUDE -> "col2")
-    val fields: Seq[Field] = loadAllFields
-    val stub = new TestCarbonSqlParserStub()
-    val (dimCols, msrCols, noDictionary, _) = stub.extractDimAndMsrFieldsTest(fields, tableProperties)
-
-    //below dimension fields should be available in dimensions list
-    assert(dimCols.size == 7)
-    assert(dimCols.lift(0).get.column.equalsIgnoreCase("col1"))
-    assert(dimCols.lift(1).get.column.equalsIgnoreCase("col2"))
-    assert(dimCols.lift(2).get.column.equalsIgnoreCase("col3"))
-
-    //below column names will be available in noDictionary list
-    assert(noDictionary.size == 6)
-    assert(noDictionary.lift(0).get.equalsIgnoreCase("col1"))
-    assert(noDictionary.lift(1).get.equalsIgnoreCase("col3"))
-    assert(noDictionary.lift(2).get.equalsIgnoreCase("col5"))
-    assert(noDictionary.lift(3).get.equalsIgnoreCase("col6"))
-    assert(noDictionary.lift(4).get.equalsIgnoreCase("col7"))
-    assert(noDictionary.lift(5).get.equalsIgnoreCase("col8"))
-
-    //check msr
-    assert(msrCols.size == 1)
-    assert(msrCols.lift(0).get.column.equalsIgnoreCase("col4"))
-  }
-
-  test("Test-DimAndMsrColsWithNoDictionaryFields6") {
-    val tableProperties = Map(CarbonCommonConstants.DICTIONARY_EXCLUDE -> "col2", CarbonCommonConstants.DICTIONARY_INCLUDE -> "col1")
-    val fields: Seq[Field] = loadAllFields
-    val stub = new TestCarbonSqlParserStub()
-    val (dimCols, msrCols, noDictionary, _) = stub.extractDimAndMsrFieldsTest(fields, tableProperties)
-
-    //below dimension fields should be available in dimensions list
-    assert(dimCols.size == 7)
-    assert(dimCols.lift(0).get.column.equalsIgnoreCase("col1"))
-    assert(dimCols.lift(1).get.column.equalsIgnoreCase("col2"))
-    assert(dimCols.lift(2).get.column.equalsIgnoreCase("col3"))
-
-    //below column names will be available in noDictionary list
-    assert(noDictionary.size == 6)
-    assert(noDictionary.lift(0).get.equalsIgnoreCase("col2"))
-    assert(noDictionary.lift(1).get.equalsIgnoreCase("col3"))
-    assert(noDictionary.lift(2).get.equalsIgnoreCase("col5"))
-    assert(noDictionary.lift(3).get.equalsIgnoreCase("col6"))
-    assert(noDictionary.lift(4).get.equalsIgnoreCase("col7"))
-    assert(noDictionary.lift(5).get.equalsIgnoreCase("col8"))
-
-    //check msr
-    assert(msrCols.size == 1)
-    assert(msrCols.lift(0).get.column.equalsIgnoreCase("col4"))
-  }
-
-  test("Test-DimAndMsrColsWithNoDictionaryFields7") {
-    val tableProperties = Map(CarbonCommonConstants.DICTIONARY_EXCLUDE -> "col2 ,col1  ",
-      CarbonCommonConstants.DICTIONARY_INCLUDE -> "col3 ,col4 "
-    )
-    val fields: Seq[Field] = loadAllFields
-    val stub = new TestCarbonSqlParserStub()
-    val (dimCols, msrCols, noDictionary, _) = stub.extractDimAndMsrFieldsTest(fields, tableProperties)
-
-    //below dimension fields should be available in dimensions list
-    assert(dimCols.size == 8)
-    assert(dimCols.lift(0).get.column.equalsIgnoreCase("col1"))
-    assert(dimCols.lift(1).get.column.equalsIgnoreCase("col2"))
-    assert(dimCols.lift(2).get.column.equalsIgnoreCase("col3"))
-    assert(dimCols.lift(3).get.column.equalsIgnoreCase("col4"))
-
-    //below column names will be available in noDictionary list
-    assert(noDictionary.size == 6)
-    assert(noDictionary.lift(0).get.equalsIgnoreCase("col1"))
-    assert(noDictionary.lift(1).get.equalsIgnoreCase("col2"))
-    assert(noDictionary.lift(2).get.equalsIgnoreCase("col5"))
-    assert(noDictionary.lift(3).get.equalsIgnoreCase("col6"))
-    assert(noDictionary.lift(4).get.equalsIgnoreCase("col7"))
-    assert(noDictionary.lift(5).get.equalsIgnoreCase("col8"))
-
-    //check msr
-    assert(msrCols.size == 0)
-  }
-
-  test("Test-DimAndMsrColsWithNoDictionaryFields8") {
-    val tableProperties = Map(CarbonCommonConstants.DICTIONARY_EXCLUDE-> "col2", CarbonCommonConstants.DICTIONARY_INCLUDE -> "col3")
-    val fields: Seq[Field] = loadAllFields
-    val stub = new TestCarbonSqlParserStub()
-    val (dimCols, msrCols, noDictionary, _) = stub.extractDimAndMsrFieldsTest(fields, tableProperties)
-
-    //below dimension fields should be available in dimensions list
-    assert(dimCols.size == 7)
-    assert(dimCols.lift(0).get.column.equalsIgnoreCase("col1"))
-    assert(dimCols.lift(1).get.column.equalsIgnoreCase("col2"))
-
-    //below column names will be available in noDictionary list
-    assert(noDictionary.size == 6)
-    assert(noDictionary.lift(0).get.equalsIgnoreCase("col1"))
-    assert(noDictionary.lift(1).get.equalsIgnoreCase("col2"))
-    assert(noDictionary.lift(2).get.equalsIgnoreCase("col5"))
-    assert(noDictionary.lift(3).get.equalsIgnoreCase("col6"))
-    assert(noDictionary.lift(4).get.equalsIgnoreCase("col7"))
-    assert(noDictionary.lift(5).get.equalsIgnoreCase("col8"))
-
-    //check msr
-    assert(msrCols.size == 1)
-    assert(msrCols.lift(0).get.column.equalsIgnoreCase("col4"))
-  }
-
-  // Testing the extracting of measures
-  test("Test-extractMsrColsFromFields") {
-    val tableProperties = Map(CarbonCommonConstants.DICTIONARY_EXCLUDE -> "col2",
-      CarbonCommonConstants.DICTIONARY_INCLUDE -> "col1")
-    val fields: Seq[Field] = loadAllFields
-    val stub = new TestCarbonSqlParserStub()
-    val (_, msrCols, _, _) = stub.extractDimAndMsrFieldsTest(fields, tableProperties)
-
-    // col4 is an int column outside DICTIONARY_INCLUDE, so it remains the only measure
-    assert(msrCols.lift(0).get.column.equalsIgnoreCase("col4"))
-
-  }
-
-}
-
-
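Taken together, the parser assertions imply a compact classification rule: every
String column is a dimension; an int column is a dimension only when named in
DICTIONARY_INCLUDE, otherwise a measure; and a dimension lands in the
no-dictionary list unless it is dictionary-included (DICTIONARY_EXCLUDE forces
that default explicitly). A standalone restatement of the inferred rule, as an
illustration rather than the parser's actual code:

    object ParserRuleSketch extends App {
      case class Col(name: String, dataType: String)

      def classify(cols: Seq[Col], include: Set[String], exclude: Set[String])
          : (Seq[Col], Seq[Col], Seq[String]) = {
        // dimensions: all String columns plus any column in DICTIONARY_INCLUDE
        val dims = cols.filter(c => c.dataType == "String" || include.contains(c.name))
        // measures: whatever is left (numeric columns outside DICTIONARY_INCLUDE)
        val msrs = cols.filterNot(dims.contains)
        // no-dictionary: dimensions that are excluded or simply not included
        val noDict = dims.collect {
          case c if exclude.contains(c.name) || !include.contains(c.name) => c.name
        }
        (dims, msrs, noDict)
      }

      // mirrors Test-DimAndMsrColsWithNoDictionaryFields2: include col1 only
      val cols = (1 to 8).map(i => Col(s"col$i", if (i == 4) "Int" else "String"))
      val (dims, msrs, noDict) = classify(cols, Set("col1"), Set.empty)
      assert(dims.size == 7 && msrs.map(_.name) == Seq("col4"))
      assert(noDict == Seq("col2", "col3", "col5", "col6", "col7", "col8"))
    }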