Posted to commits@carbondata.apache.org by ja...@apache.org on 2016/07/19 08:38:50 UTC

[2/5] incubator-carbondata git commit: change cube to table

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/aggquery/IntegerDataTypeTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/aggquery/IntegerDataTypeTestCase.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/aggquery/IntegerDataTypeTestCase.scala
index b119317..a9c651c 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/aggquery/IntegerDataTypeTestCase.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/aggquery/IntegerDataTypeTestCase.scala
@@ -33,17 +33,17 @@ import org.scalatest.BeforeAndAfterAll
 class IntegerDataTypeTestCase extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
-    sql("CREATE TABLE integertypecubeAgg (empno int, workgroupcategory string, deptno int, projectcode int, attendance int) STORED BY 'org.apache.carbondata.format'")
-    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE integertypecubeAgg OPTIONS ('DELIMITER'= ',', 'QUOTECHAR'= '\"', 'FILEHEADER'='')")
+    sql("CREATE TABLE integertypetableAgg (empno int, workgroupcategory string, deptno int, projectcode int, attendance int) STORED BY 'org.apache.carbondata.format'")
+    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE integertypetableAgg OPTIONS ('DELIMITER'= ',', 'QUOTECHAR'= '\"', 'FILEHEADER'='')")
   }
 
-  test("select empno from integertypecubeAgg") {
+  test("select empno from integertypetableAgg") {
     checkAnswer(
-      sql("select empno from integertypecubeAgg"),
+      sql("select empno from integertypetableAgg"),
       Seq(Row(11), Row(12), Row(13), Row(14), Row(15), Row(16), Row(17), Row(18), Row(19), Row(20)))
   }
 
   override def afterAll {
-    sql("drop table integertypecubeAgg")
+    sql("drop table integertypetableAgg")
   }
 }
\ No newline at end of file

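The setup above is not re-runnable: a second run fails because integertypetableAgg already exists. Later hunks in this same commit (TestLoadDataWithHiveSyntax and TestDeleteTableNewDDL below) guard setup with a drop-if-exists. A minimal sketch of that variant, reusing the DDL from the hunk above:

  override def beforeAll {
    sql("drop table if exists integertypetableAgg")
    sql("CREATE TABLE integertypetableAgg (empno int, workgroupcategory string, deptno int, projectcode int, attendance int) STORED BY 'org.apache.carbondata.format'")
    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE integertypetableAgg OPTIONS ('DELIMITER'= ',', 'QUOTECHAR'= '\"', 'FILEHEADER'='')")
  }
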
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala
index f5fc8e9..c8a8383 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionCardinalityBoundryTest.scala
@@ -107,7 +107,7 @@ class DataCompactionCardinalityBoundryTest extends QueryTest with BeforeAndAfter
   }
 
   override def afterAll {
-    /* sql("drop cube cardinalityTest") */
+    /* sql("drop table cardinalityTest") */
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
     CarbonProperties.getInstance().addProperty("carbon.enable.load.merge", "false")

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala
index 368c83b..4a69294 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionLockTest.scala
@@ -102,7 +102,7 @@ class DataCompactionLockTest extends QueryTest with BeforeAndAfterAll {
 
 
   override def afterAll {
-    /* sql("drop cube compactionLockTestTable") */
+    /* sql("drop table compactionLockTestTable") */
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
     carbonLock.unlock()

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
index 9f87ada..00fffab 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/datacompaction/DataCompactionTest.scala
@@ -149,7 +149,7 @@ class DataCompactionTest extends QueryTest with BeforeAndAfterAll {
   }
 
   override def afterAll {
-    /* sql("drop cube normalcompaction") */
+    /* sql("drop table normalcompaction") */
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
     CarbonProperties.getInstance().addProperty("carbon.enable.load.merge", "false")

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/dataload/TestLoadDataWithHiveSyntax.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/dataload/TestLoadDataWithHiveSyntax.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/dataload/TestLoadDataWithHiveSyntax.scala
index e32d9c6..233ec4a 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/dataload/TestLoadDataWithHiveSyntax.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/dataload/TestLoadDataWithHiveSyntax.scala
@@ -40,10 +40,10 @@ class TestLoadDataWithHiveSyntax extends QueryTest with BeforeAndAfterAll {
   }
 
   test("test data loading and validate query output") {
-    //Create test cube and hive table
+    //Create test table and hive table
     sql("CREATE table testtable (empno int, empname String, designation String, doj String, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate String, projectenddate String,attendance double,utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory,workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')")
     sql("create table testhivetable(empno int, empname String, designation string, doj String, workgroupcategory int, workgroupcategoryname String,deptno int, deptname String, projectcode int, projectjoindate String,projectenddate String, attendance double,utilization double,salary double)row format delimited fields terminated by ','")
-    //load data into test cube and hive table and validate query result
+    //load data into test table and hive table and validate query result
     sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO table testtable")
     sql("LOAD DATA local inpath './src/test/resources/datawithoutheader.csv' overwrite INTO table testhivetable")
     checkAnswer(sql("select * from testtable"), sql("select * from testhivetable"))
@@ -51,24 +51,24 @@ class TestLoadDataWithHiveSyntax extends QueryTest with BeforeAndAfterAll {
     sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE testtable OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
     sql("LOAD DATA local inpath './src/test/resources/datawithoutheader.csv' INTO table testhivetable")
     checkAnswer(sql("select * from testtable"), sql("select * from testhivetable"))
-    //drop test cube and table
+    //drop test carbon table and hive table
     sql("drop table testtable")
     sql("drop table testhivetable")
   }
 
   /**
-   * TODO: temporarily changing cube names to different names,
-    * however deletion and creation of cube with same name
+   * TODO: temporarily changing table names to different names,
+   * however deletion and creation of table with same name
    */
   test("test data loading with different case file header and validate query output") {
-    //Create test cube and hive table
+    //Create test table and hive table
     sql("CREATE table testtable1 (empno int, empname String, designation String, doj String, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate String, projectenddate String,attendance double,utilization double,salary double) STORED BY 'org.apache.carbondata.format' TBLPROPERTIES('DICTIONARY_EXCLUDE'='empno,empname,designation,doj,workgroupcategory,workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate')")
     sql("create table testhivetable1(empno int, empname String, designation string, doj String, workgroupcategory int, workgroupcategoryname String,deptno int, deptname String, projectcode int, projectjoindate String,projectenddate String, attendance double,utilization double,salary double)row format delimited fields terminated by ','")
-    //load data into test cube and hive table and validate query result
+    //load data into test table and hive table and validate query result
     sql("LOAD DATA local inpath './src/test/resources/datawithoutheader.csv' INTO table testtable1 options('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='EMPno,empname,designation,doj,workgroupcategory,workgroupcategoryname,deptno,deptname,projectcode,projectjoindate,projectenddate,attendance,utilization,SALARY')")
     sql("LOAD DATA local inpath './src/test/resources/datawithoutheader.csv' overwrite INTO table testhivetable1")
     checkAnswer(sql("select * from testtable1"), sql("select * from testhivetable1"))
-    //drop test cube and table
+    //drop test carbon table and hive table
     sql("drop table testtable1")
     sql("drop table testhivetable1")
   }
@@ -185,15 +185,16 @@ class TestLoadDataWithHiveSyntax extends QueryTest with BeforeAndAfterAll {
   }
 
   test("test carbon table data loading when table name is in different case with create table, for UpperCase") {
-    sql("create table UPPERCASEcube(empno Int, empname String, designation String, " +
+    sql("create table UPPERCASEtable(empno Int, empname String, designation String, " +
       "doj String, workgroupcategory Int, workgroupcategoryname String, deptno Int, " +
       "deptname String, projectcode Int, projectjoindate String, projectenddate String, " +
       "attendance Int,utilization Double,salary Double) STORED BY 'org.apache.carbondata.format'")
-    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO table uppercasecube OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"')")
-    sql("drop table UpperCaseCube")
+    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO table uppercasetable OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"')")
+    sql("drop table UpperCaseTable")
   }
 
   test("test carbon table data loading when table name is in different case with create table ,for LowerCase") {
+    sql("drop table if exists lowercaseCUBE")
     sql("create table lowercaseCUBE(empno Int, empname String, designation String, " +
       "doj String, workgroupcategory Int, workgroupcategoryname String, deptno Int, " +
       "deptname String, projectcode Int, projectjoindate String, projectenddate String, " +

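The two case-sensitivity tests above rely on table identifiers being case-insensitive end to end: the table is created as UPPERCASEtable, loaded as uppercasetable, and dropped as UpperCaseTable. A condensed sketch of that flow (schema shortened to one column for brevity; the full DDL is in the hunk above):

  sql("create table UPPERCASEtable(empno Int) STORED BY 'org.apache.carbondata.format'")
  sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO table uppercasetable OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"')")
  sql("drop table UpperCaseTable")
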
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/deleteTable/TestDeleteTableNewDDL.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/deleteTable/TestDeleteTableNewDDL.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/deleteTable/TestDeleteTableNewDDL.scala
index 7b58308..42367b7 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/deleteTable/TestDeleteTableNewDDL.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/deleteTable/TestDeleteTableNewDDL.scala
@@ -5,7 +5,7 @@ import org.apache.spark.sql.common.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 
 /**
-  * test class for testing the create cube DDL.
+  * Test class for the create table DDL.
   */
 class TestDeleteTableNewDDL extends QueryTest with BeforeAndAfterAll {
 
@@ -54,6 +54,7 @@ class TestDeleteTableNewDDL extends QueryTest with BeforeAndAfterAll {
   }
 
   test("drop table using case insensitive table name") {
+    sql("drop table if exists CaseInsensitiveTable")
     // create table
     sql(
       "CREATE table CaseInsensitiveTable (ID int, date String, country String, name " +
@@ -71,7 +72,7 @@ class TestDeleteTableNewDDL extends QueryTest with BeforeAndAfterAll {
       "phonetype String, serialname String, salary int) stored by 'org.apache.carbondata.format'" +
       "TBLPROPERTIES('DICTIONARY_INCLUDE'='ID', 'DICTIONARY_INCLUDE'='salary')"
     )
-
+    sql("drop table if exists CaseInsensitiveTable")
   }
 
   test("drop table using dbName and table name") {

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/AllDataTypesTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/AllDataTypesTestCase.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/AllDataTypesTestCase.scala
index 478203a..9fe9ba6 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/AllDataTypesTestCase.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/AllDataTypesTestCase.scala
@@ -33,17 +33,17 @@ import org.scalatest.BeforeAndAfterAll
 class AllDataTypesTestCase extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
-    sql("CREATE TABLE alldatatypescube (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
-    sql("LOAD DATA LOCAL INPATH './src/test/resources/data.csv' INTO TABLE alldatatypescube OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')");
+    sql("CREATE TABLE alldatatypestable (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
+    sql("LOAD DATA LOCAL INPATH './src/test/resources/data.csv' INTO TABLE alldatatypestable OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')");
   }
 
-  test("select empno,empname,utilization,count(salary),sum(empno) from alldatatypescube where empname in ('arvind','ayushi') group by empno,empname,utilization") {
+  test("select empno,empname,utilization,count(salary),sum(empno) from alldatatypestable where empname in ('arvind','ayushi') group by empno,empname,utilization") {
     checkAnswer(
-      sql("select empno,empname,utilization,count(salary),sum(empno) from alldatatypescube where empname in ('arvind','ayushi') group by empno,empname,utilization"),
+      sql("select empno,empname,utilization,count(salary),sum(empno) from alldatatypestable where empname in ('arvind','ayushi') group by empno,empname,utilization"),
       Seq(Row(11, "arvind", 96.2, 1, 11), Row(15, "ayushi", 91.5, 1, 15)))
   }
 
   override def afterAll {
-    sql("drop table alldatatypescube")
+    sql("drop table alldatatypestable")
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/HighCardinalityDataTypesTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/HighCardinalityDataTypesTestCase.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/HighCardinalityDataTypesTestCase.scala
index 65e125e..de9aa2c 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/HighCardinalityDataTypesTestCase.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/HighCardinalityDataTypesTestCase.scala
@@ -36,9 +36,15 @@ import org.carbondata.core.util.CarbonProperties
 class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
+
+    sql("drop table if exists NO_DICTIONARY_HIVE_1")
+    sql("drop table if exists NO_DICTIONARY_CARBON_1")
+    sql("drop table if exists NO_DICTIONARY_CARBON_8")
+    sql("drop table if exists filtertestTable")
+
     //For the Hive table creation and data loading
     sql(
-      "create table NO_DICTIONARY_HIVE_6(empno int,empname string,designation string,doj " +
+      "create table NO_DICTIONARY_HIVE_1(empno int,empname string,designation string,doj " +
         "Timestamp,workgroupcategory int, " +
         "workgroupcategoryname string,deptno int, deptname string, projectcode int, " +
         "projectjoindate Timestamp,projectenddate Timestamp,attendance int, "
@@ -48,10 +54,10 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
     )
     sql(
       "load data local inpath './src/test/resources/datawithoutheader.csv' into table " +
-        "NO_DICTIONARY_HIVE_6"
+        "NO_DICTIONARY_HIVE_1"
     );
-    //For Carbon cube creation.
-    sql("CREATE TABLE NO_DICTIONARY_CARBON_6 (empno Int, " +
+    //For Carbon table creation.
+    sql("CREATE TABLE NO_DICTIONARY_CARBON_1 (empno Int, " +
       "doj Timestamp, workgroupcategory Int, empname String,workgroupcategoryname String, " +
       "deptno Int, deptname String, projectcode Int, projectjoindate Timestamp, " +
       "projectenddate Timestamp, designation String,attendance Int,utilization " +
@@ -59,11 +65,11 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
         "TBLPROPERTIES('DICTIONARY_EXCLUDE'='empno,empname,designation')"
     )
     sql(
-      "LOAD DATA LOCAL INPATH './src/test/resources/data.csv' INTO TABLE NO_DICTIONARY_CARBON_6 " +
+      "LOAD DATA LOCAL INPATH './src/test/resources/data.csv' INTO TABLE NO_DICTIONARY_CARBON_1 " +
         "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
     )
 
-    sql("CREATE TABLE NO_DICTIONARY_CARBON_7 (empno string, " +
+    sql("CREATE TABLE NO_DICTIONARY_CARBON_8 (empno string, " +
       "doj Timestamp, workgroupcategory Int, empname String,workgroupcategoryname String, " +
       "deptno Int, deptname String, projectcode Int, projectjoindate Timestamp, " +
       "projectenddate Timestamp, designation String,attendance Int,utilization " +
@@ -71,7 +77,7 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
       "TBLPROPERTIES('DICTIONARY_EXCLUDE'='empno,empname,designation')"
     )
     sql(
-      "LOAD DATA LOCAL INPATH './src/test/resources/data.csv' INTO TABLE NO_DICTIONARY_CARBON_7 " +
+      "LOAD DATA LOCAL INPATH './src/test/resources/data.csv' INTO TABLE NO_DICTIONARY_CARBON_8 " +
       "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
     )
     sql("CREATE TABLE filtertestTable (ID Int,date Timestamp, country String, " +
@@ -90,7 +96,7 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
 
   test("Count (*) with filter") {
     checkAnswer(
-      sql("select count(*) from NO_DICTIONARY_CARBON_6 where empno=11"),
+      sql("select count(*) from NO_DICTIONARY_CARBON_1 where empno=11"),
       Seq(Row(1))
     )
   }
@@ -99,7 +105,7 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
 
 
     checkAnswer(
-      sql("select empno from NO_DICTIONARY_CARBON_6"),
+      sql("select empno from NO_DICTIONARY_CARBON_1"),
       Seq(Row(11), Row(12), Row(13), Row(14), Row(15), Row(16), Row(17), Row(18), Row(19), Row(20))
     )
 
@@ -110,7 +116,7 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
 
 
     checkAnswer(
-      sql("select empno from NO_DICTIONARY_CARBON_7 where empno like '12%'"),
+      sql("select empno from NO_DICTIONARY_CARBON_8 where empno like '12%'"),
       Seq(Row("12"))
     )
   }
@@ -119,7 +125,7 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
 
 
     checkAnswer(
-      sql("select empno from NO_DICTIONARY_CARBON_7 where empno>'19'"),
+      sql("select empno from NO_DICTIONARY_CARBON_8 where empno>'19'"),
       Seq(Row("20"))
     )
   }
@@ -128,7 +134,7 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
 
 
     checkAnswer(
-      sql("select empno from NO_DICTIONARY_CARBON_6 where empno in(11,12,13)"),
+      sql("select empno from NO_DICTIONARY_CARBON_1 where empno in(11,12,13)"),
       Seq(Row(11), Row(12), Row(13))
     )
   }
@@ -136,7 +142,7 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
 
 
     checkAnswer(
-      sql("select empno from NO_DICTIONARY_CARBON_6 where empno not in(11,12,13,14,15,16,17)"),
+      sql("select empno from NO_DICTIONARY_CARBON_1 where empno not in(11,12,13,14,15,16,17)"),
       Seq(Row(18), Row(19), Row(20))
     )
   }
@@ -145,7 +151,7 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
 
 
     checkAnswer(
-      sql("select empno from NO_DICTIONARY_CARBON_6 where empno=17"),
+      sql("select empno from NO_DICTIONARY_CARBON_1 where empno=17"),
       Seq(Row(17))
     )
   }
@@ -164,8 +170,8 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
 
 
     checkAnswer(
-      sql("select empno,empname,workgroupcategory from NO_DICTIONARY_CARBON_6 where empno=17"),
-      sql("select empno,empname,workgroupcategory from NO_DICTIONARY_HIVE_6 where empno=17")
+      sql("select empno,empname,workgroupcategory from NO_DICTIONARY_CARBON_1 where empno=17"),
+      sql("select empno,empname,workgroupcategory from NO_DICTIONARY_HIVE_1 where empno=17")
     )
   }
 
@@ -173,8 +179,8 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
   test("ORDER Query with NO_DICTIONARY_COLUMN Compare With HIVE RESULT") {
 
     checkAnswer(
-      sql("select empno from NO_DICTIONARY_HIVE_6 order by empno"),
-      sql("select empno from NO_DICTIONARY_CARBON_6 order by empno")
+      sql("select empno from NO_DICTIONARY_HIVE_1 order by empno"),
+      sql("select empno from NO_DICTIONARY_CARBON_1 order by empno")
     )
   }
   //TODO need to add filter test cases for no dictionary columns
@@ -183,30 +189,30 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
   // RESULT") {
   //
   //     checkAnswer(
-  //      sql("select empno from NO_DICTIONARY_HIVE_6 where empno=15 and deptno=12"),
-  //      sql("select empno from NO_DICTIONARY_CARBON_6 where empno=15 and deptno=12"))
+  //      sql("select empno from NO_DICTIONARY_HIVE_1 where empno=15 and deptno=12"),
+  //      sql("select empno from NO_DICTIONARY_CARBON_1 where empno=15 and deptno=12"))
   //   }
 
   test("Distinct Query with NO_DICTIONARY_COLUMN  Compare With HIVE RESULT") {
 
     checkAnswer(
-      sql("select count(distinct empno) from NO_DICTIONARY_HIVE_6"),
-      sql("select count(distinct empno) from NO_DICTIONARY_CARBON_6")
+      sql("select count(distinct empno) from NO_DICTIONARY_HIVE_1"),
+      sql("select count(distinct empno) from NO_DICTIONARY_CARBON_1")
     )
   }
   test("Sum Query with NO_DICTIONARY_COLUMN  Compare With HIVE RESULT") {
 
     checkAnswer(
-      sql("select sum(empno) from NO_DICTIONARY_HIVE_6"),
-      sql("select sum(empno) from NO_DICTIONARY_CARBON_6")
+      sql("select sum(empno) from NO_DICTIONARY_HIVE_1"),
+      sql("select sum(empno) from NO_DICTIONARY_CARBON_1")
     )
   }
 
   test("average Query with NO_DICTIONARY_COLUMN  Compare With HIVE RESULT") {
 
     checkAnswer(
-      sql("select avg(empno) from NO_DICTIONARY_HIVE_6"),
-      sql("select avg(empno) from NO_DICTIONARY_CARBON_6")
+      sql("select avg(empno) from NO_DICTIONARY_HIVE_1"),
+      sql("select avg(empno) from NO_DICTIONARY_CARBON_1")
     )
   }
 
@@ -215,11 +221,11 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
 
     checkAnswer(
       sql(
-        "select empno,empname,workgroupcategory from NO_DICTIONARY_HIVE_6 group by empno,empname," +
+        "select empno,empname,workgroupcategory from NO_DICTIONARY_HIVE_1 group by empno,empname," +
           "workgroupcategory"
       ),
       sql(
-        "select empno,empname,workgroupcategory from NO_DICTIONARY_CARBON_6 group by empno," +
+        "select empno,empname,workgroupcategory from NO_DICTIONARY_CARBON_1 group by empno," +
           "empname,workgroupcategory"
       )
     )
@@ -228,13 +234,15 @@ class NO_DICTIONARY_COL_TestCase extends QueryTest with BeforeAndAfterAll {
   test("Multiple column  Detail Query with NO_DICTIONARY_COLUMN  Compare With HIVE RESULT") {
 
     checkAnswer(
-      sql("select empno,empname,workgroupcategory from NO_DICTIONARY_HIVE_6"),
-      sql("select empno,empname,workgroupcategory from NO_DICTIONARY_CARBON_6 ")
+      sql("select empno,empname,workgroupcategory from NO_DICTIONARY_HIVE_1"),
+      sql("select empno,empname,workgroupcategory from NO_DICTIONARY_CARBON_1 ")
     )
   }
 
   override def afterAll {
-    //sql("drop cube NO_DICTIONARY_HIVE_1")
-    //sql("drop cube NO_DICTIONARY_CARBON_1")
+    sql("drop table NO_DICTIONARY_HIVE_1")
+    sql("drop table NO_DICTIONARY_CARBON_1")
+    sql("drop table NO_DICTIONARY_CARBON_8")
+    sql("drop table filtertestTable")
   }
 }
\ No newline at end of file

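The class above validates carbon results against hive rather than against hard-coded rows: both tables are loaded from the same CSVs, and checkAnswer asserts that the two result sets match. Using hive as the reference keeps expected values out of the test and runs the same data through both engines. The recurring shape, taken from the hunks above:

  checkAnswer(
    sql("select empno from NO_DICTIONARY_HIVE_1 order by empno"),
    sql("select empno from NO_DICTIONARY_CARBON_1 order by empno"))
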
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/IntegerDataTypeTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/IntegerDataTypeTestCase.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/IntegerDataTypeTestCase.scala
index d955148..cfafd11 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/IntegerDataTypeTestCase.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/detailquery/IntegerDataTypeTestCase.scala
@@ -32,17 +32,17 @@ import org.scalatest.BeforeAndAfterAll
 class IntegerDataTypeTestCase extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
-    sql("CREATE TABLE integertypecube (empno int, workgroupcategory string, deptno int, projectcode int,attendance int) STORED BY 'org.apache.carbondata.format'")
-    sql("LOAD DATA LOCAL INPATH './src/test/resources/data.csv' INTO TABLE integertypecube OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
+    sql("CREATE TABLE integertypetable (empno int, workgroupcategory string, deptno int, projectcode int,attendance int) STORED BY 'org.apache.carbondata.format'")
+    sql("LOAD DATA LOCAL INPATH './src/test/resources/data.csv' INTO TABLE integertypetable OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
   }
 
-  test("select empno from integertypecube") {
+  test("select empno from integertypetable") {
     checkAnswer(
-      sql("select empno from integertypecube"),
+      sql("select empno from integertypetable"),
       Seq(Row(11), Row(12), Row(13), Row(14), Row(15), Row(16), Row(17), Row(18), Row(19), Row(20)))
   }
 
   override def afterAll {
-    sql("drop table integertypecube")
+    sql("drop table integertypetable")
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryTestCase.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryTestCase.scala
index b8e2988..483a766 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryTestCase.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/directdictionary/TimestampDataTypeDirectDictionaryTestCase.scala
@@ -22,16 +22,15 @@ package org.carbondata.spark.testsuite.directdictionary
 import java.io.File
 import java.sql.Timestamp
 
+import org.apache.spark.sql.Row
 import org.apache.spark.sql.common.util.CarbonHiveContext._
-import org.apache.spark.{SparkConf, SparkContext}
-import org.apache.spark.sql.{CarbonContext, Row}
 import org.apache.spark.sql.common.util.QueryTest
 import org.apache.spark.sql.hive.HiveContext
+import org.scalatest.BeforeAndAfterAll
 
 import org.carbondata.core.constants.CarbonCommonConstants
-import org.carbondata.core.util.CarbonProperties
 import org.carbondata.core.keygenerator.directdictionary.timestamp.TimeStampGranularityConstants
-import org.scalatest.BeforeAndAfterAll
+import org.carbondata.core.util.CarbonProperties
 
 
 /**
@@ -52,7 +51,7 @@ class TimestampDataTypeDirectDictionaryTest extends QueryTest with BeforeAndAfte
         )
       CarbonProperties.getInstance().addProperty("carbon.direct.dictionary", "true")
       sql(
-        "CREATE TABLE directDictionaryCube (empno int,doj Timestamp, " +
+        "CREATE TABLE directDictionaryTable (empno int,doj Timestamp, " +
           "salary int) " +
           "STORED BY 'org.apache.carbondata.format'"
       )
@@ -62,7 +61,7 @@ class TimestampDataTypeDirectDictionaryTest extends QueryTest with BeforeAndAfte
       val currentDirectory = new File(this.getClass.getResource("/").getPath + "/../../")
         .getCanonicalPath
       var csvFilePath = currentDirectory + "/src/test/resources/datasample.csv"
-      sql("LOAD DATA local inpath '" + csvFilePath + "' INTO TABLE directDictionaryCube OPTIONS" +
+      sql("LOAD DATA local inpath '" + csvFilePath + "' INTO TABLE directDictionaryTable OPTIONS" +
         "('DELIMITER'= ',', 'QUOTECHAR'= '\"')");
 
     } catch {
@@ -71,9 +70,9 @@ class TimestampDataTypeDirectDictionaryTest extends QueryTest with BeforeAndAfte
     }
   }
 
-  test("select doj from directDictionaryCube") {
+  test("select doj from directDictionaryTable") {
     checkAnswer(
-      sql("select doj from directDictionaryCube"),
+      sql("select doj from directDictionaryTable"),
       Seq(Row(Timestamp.valueOf("2016-03-14 15:00:09.0")),
         Row(Timestamp.valueOf("2016-04-14 15:00:09.0"))
       )
@@ -81,31 +80,31 @@ class TimestampDataTypeDirectDictionaryTest extends QueryTest with BeforeAndAfte
   }
 
 
-  test("select doj from directDictionaryCube with equals filter") {
+  test("select doj from directDictionaryTable with equals filter") {
     checkAnswer(
-      sql("select doj from directDictionaryCube where doj='2016-03-14 15:00:09'"),
+      sql("select doj from directDictionaryTable where doj='2016-03-14 15:00:09'"),
       Seq(Row(Timestamp.valueOf("2016-03-14 15:00:09")))
     )
 
   }
   
-    test("select doj from directDictionaryCube with greater than filter") {
+    test("select doj from directDictionaryTable with greater than filter") {
     checkAnswer(
-      sql("select doj from directDictionaryCube where doj>'2016-03-14 15:00:09'"),
+      sql("select doj from directDictionaryTable where doj>'2016-03-14 15:00:09'"),
       Seq(Row(Timestamp.valueOf("2016-04-14 15:00:09")))
     )
 
   }
 
-  test("select count(doj) from directDictionaryCube") {
+  test("select count(doj) from directDictionaryTable") {
     checkAnswer(
-      sql("select count(doj) from directDictionaryCube"),
+      sql("select count(doj) from directDictionaryTable"),
       Seq(Row(2))
     )
   }
 
   override def afterAll {
-    sql("drop table directDictionaryCube")
+    sql("drop table directDictionaryTable")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
     CarbonProperties.getInstance().addProperty("carbon.direct.dictionary", "false")

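The test above brackets its queries with CarbonProperties toggles: direct dictionary is enabled before the load and reverted in afterAll, together with the timestamp format. A minimal sketch of that bracket, using only the property keys visible in the hunks:

  CarbonProperties.getInstance().addProperty("carbon.direct.dictionary", "true")
  // ... create directDictionaryTable, load datasample.csv, run the doj queries ...
  CarbonProperties.getInstance()
    .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
  CarbonProperties.getInstance().addProperty("carbon.direct.dictionary", "false")
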
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/filterexpr/AllDataTypesTestCaseFilter.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/filterexpr/AllDataTypesTestCaseFilter.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/filterexpr/AllDataTypesTestCaseFilter.scala
index eec9a0c..670981f 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/filterexpr/AllDataTypesTestCaseFilter.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/filterexpr/AllDataTypesTestCaseFilter.scala
@@ -33,17 +33,17 @@ import org.scalatest.BeforeAndAfterAll
 class AllDataTypesTestCaseFilter extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
-    sql("CREATE TABLE alldatatypescubeFilter (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
-    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE alldatatypescubeFilter OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')");
+    sql("CREATE TABLE alldatatypestableFilter (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
+    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE alldatatypestableFilter OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')");
   }
 
-  test("select empno,empname,utilization,count(salary),sum(empno) from alldatatypescubeFilter where empname in ('arvind','ayushi') group by empno,empname,utilization") {
+  test("select empno,empname,utilization,count(salary),sum(empno) from alldatatypestableFilter where empname in ('arvind','ayushi') group by empno,empname,utilization") {
     checkAnswer(
-      sql("select empno,empname,utilization,count(salary),sum(empno) from alldatatypescubeFilter where empname in ('arvind','ayushi') group by empno,empname,utilization"),
+      sql("select empno,empname,utilization,count(salary),sum(empno) from alldatatypestableFilter where empname in ('arvind','ayushi') group by empno,empname,utilization"),
       Seq(Row(11, "arvind", 96.2, 1, 11), Row(15, "ayushi", 91.5, 1, 15)))
   }
 
   override def afterAll {
-    sql("drop table alldatatypescubeFilter")
+    sql("drop table alldatatypestableFilter")
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/filterexpr/IntegerDataTypeTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/filterexpr/IntegerDataTypeTestCase.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/filterexpr/IntegerDataTypeTestCase.scala
index 4d5f482..9e0434d 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/filterexpr/IntegerDataTypeTestCase.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/filterexpr/IntegerDataTypeTestCase.scala
@@ -32,17 +32,17 @@ import org.scalatest.BeforeAndAfterAll
 class IntegerDataTypeTestCase extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
-    sql("CREATE TABLE integertypecubeFilter (empno int, workgroupcategory string, deptno int, projectcode int,attendance int) STORED BY 'org.apache.carbondata.format'")
-    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE integertypecubeFilter OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
+    sql("CREATE TABLE integertypetableFilter (empno int, workgroupcategory string, deptno int, projectcode int,attendance int) STORED BY 'org.apache.carbondata.format'")
+    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE integertypetableFilter OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
   }
 
-  test("select empno from integertypecubeFilter") {
+  test("select empno from integertypetableFilter") {
     checkAnswer(
-      sql("select empno from integertypecubeFilter"),
+      sql("select empno from integertypetableFilter"),
       Seq(Row(11), Row(12), Row(13), Row(14), Row(15), Row(16), Row(17), Row(18), Row(19), Row(20)))
   }
 
   override def afterAll {
-    sql("drop table integertypecubeFilter")
+    sql("drop table integertypetableFilter")
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/joinquery/AllDataTypesTestCaseJoin.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/joinquery/AllDataTypesTestCaseJoin.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/joinquery/AllDataTypesTestCaseJoin.scala
index d088021..fd5b57d 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/joinquery/AllDataTypesTestCaseJoin.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/joinquery/AllDataTypesTestCaseJoin.scala
@@ -33,17 +33,17 @@ import org.scalatest.BeforeAndAfterAll
 class AllDataTypesTestCaseJoin extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
-    sql("CREATE TABLE alldatatypescubeJoin (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
-    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE alldatatypescubeJoin OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')");
+    sql("CREATE TABLE alldatatypestableJoin (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
+    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE alldatatypestableJoin OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')");
   }
 
-  test("select empno,empname,utilization,count(salary),sum(empno) from alldatatypescubeJoin where empname in ('arvind','ayushi') group by empno,empname,utilization") {
+  test("select empno,empname,utilization,count(salary),sum(empno) from alldatatypestableJoin where empname in ('arvind','ayushi') group by empno,empname,utilization") {
     checkAnswer(
-      sql("select empno,empname,utilization,count(salary),sum(empno) from alldatatypescubeJoin where empname in ('arvind','ayushi') group by empno,empname,utilization"),
+      sql("select empno,empname,utilization,count(salary),sum(empno) from alldatatypestableJoin where empname in ('arvind','ayushi') group by empno,empname,utilization"),
       Seq(Row(11, "arvind", 96.2, 1, 11), Row(15, "ayushi", 91.5, 1, 15)))
   }
 
   override def afterAll {
-    sql("drop table alldatatypescubeJoin")
+    sql("drop table alldatatypestableJoin")
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/joinquery/IntegerDataTypeTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/joinquery/IntegerDataTypeTestCase.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/joinquery/IntegerDataTypeTestCase.scala
index 6dac5c7..5a14e5f 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/joinquery/IntegerDataTypeTestCase.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/joinquery/IntegerDataTypeTestCase.scala
@@ -32,17 +32,17 @@ import org.scalatest.BeforeAndAfterAll
 class IntegerDataTypeTestCase extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
-    sql("CREATE TABLE integertypecubejoin (empno int, workgroupcategory string, deptno int, projectcode int,attendance int) STORED BY 'org.apache.carbondata.format'")
-    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE integertypecubejoin OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
+    sql("CREATE TABLE integertypetablejoin (empno int, workgroupcategory string, deptno int, projectcode int,attendance int) STORED BY 'org.apache.carbondata.format'")
+    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE integertypetablejoin OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
   }
 
-  test("select empno from integertypecubejoin") {
+  test("select empno from integertypetablejoin") {
     checkAnswer(
-      sql("select empno from integertypecubejoin"),
+      sql("select empno from integertypetablejoin"),
       Seq(Row(11), Row(12), Row(13), Row(14), Row(15), Row(16), Row(17), Row(18), Row(19), Row(20)))
   }
 
   override def afterAll {
-    sql("drop table integertypecubejoin")
+    sql("drop table integertypetablejoin")
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/sortexpr/AllDataTypesTestCaseSort.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/sortexpr/AllDataTypesTestCaseSort.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/sortexpr/AllDataTypesTestCaseSort.scala
index 9562c8f..4aab691 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/sortexpr/AllDataTypesTestCaseSort.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/sortexpr/AllDataTypesTestCaseSort.scala
@@ -33,17 +33,17 @@ import org.scalatest.BeforeAndAfterAll
 class AllDataTypesTestCaseSort extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
-    sql("CREATE TABLE alldatatypescubesort (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
-    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE alldatatypescubesort OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')");
+    sql("CREATE TABLE alldatatypestablesort (empno int, empname String, designation String, doj Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,utilization int,salary int) STORED BY 'org.apache.carbondata.format'")
+    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE alldatatypestablesort OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')");
   }
 
-  test("select empno,empname,utilization,count(salary),sum(empno) from alldatatypescubesort where empname in ('arvind','ayushi') group by empno,empname,utilization order by empno") {
+  test("select empno,empname,utilization,count(salary),sum(empno) from alldatatypestablesort where empname in ('arvind','ayushi') group by empno,empname,utilization order by empno") {
     checkAnswer(
-      sql("select empno,empname,utilization,count(salary),sum(empno) from alldatatypescubesort where empname in ('arvind','ayushi') group by empno,empname,utilization order by empno"),
+      sql("select empno,empname,utilization,count(salary),sum(empno) from alldatatypestablesort where empname in ('arvind','ayushi') group by empno,empname,utilization order by empno"),
       Seq(Row(11, "arvind", 96.2, 1, 11), Row(15, "ayushi", 91.5, 1, 15)))
   }
 
   override def afterAll {
-    sql("drop table alldatatypescubesort")
+    sql("drop table alldatatypestablesort")
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/testsuite/sortexpr/IntegerDataTypeTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/sortexpr/IntegerDataTypeTestCase.scala b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/sortexpr/IntegerDataTypeTestCase.scala
index d0c4a6a..f15ccae 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/testsuite/sortexpr/IntegerDataTypeTestCase.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/testsuite/sortexpr/IntegerDataTypeTestCase.scala
@@ -33,17 +33,17 @@ import org.scalatest.BeforeAndAfterAll
 class IntegerDataTypeTestCase extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll {
-    sql("CREATE TABLE inttypecubesort (empno int, workgroupcategory string, deptno int, projectcode int,attendance int) STORED BY 'org.apache.carbondata.format'")
-    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE inttypecubesort OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
+    sql("CREATE TABLE inttypetablesort (empno int, workgroupcategory string, deptno int, projectcode int,attendance int) STORED BY 'org.apache.carbondata.format'")
+    sql("LOAD DATA local inpath './src/test/resources/data.csv' INTO TABLE inttypetablesort OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')")
   }
 
-  test("select empno from inttypecubesort") {
+  test("select empno from inttypetablesort") {
     checkAnswer(
-      sql("select empno from inttypecubesort"),
+      sql("select empno from inttypetablesort"),
       Seq(Row(11), Row(12), Row(13), Row(14), Row(15), Row(16), Row(17), Row(18), Row(19), Row(20)))
   }
 
   override def afterAll {
-    sql("drop table inttypecubesort")
+    sql("drop table inttypetablesort")
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/util/AutoHighCardinalityIdentifyTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/util/AutoHighCardinalityIdentifyTestCase.scala b/integration/spark/src/test/scala/org/carbondata/spark/util/AutoHighCardinalityIdentifyTestCase.scala
index ef59484..0edc9e9 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/util/AutoHighCardinalityIdentifyTestCase.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/util/AutoHighCardinalityIdentifyTestCase.scala
@@ -56,10 +56,10 @@ class AutoHighCardinalityIdentifyTestCase extends QueryTest with BeforeAndAfterA
     dimensionFilePath: String,
     header: String): CarbonLoadModel = {
     val carbonLoadModel = new CarbonLoadModel
-    carbonLoadModel.setTableName(relation.cubeMeta.carbonTableIdentifier.getDatabaseName)
-    carbonLoadModel.setDatabaseName(relation.cubeMeta.carbonTableIdentifier.getTableName)
-    // carbonLoadModel.setSchema(relation.cubeMeta.schema)
-    val table = relation.cubeMeta.carbonTable
+    carbonLoadModel.setTableName(relation.tableMeta.carbonTableIdentifier.getDatabaseName)
+    carbonLoadModel.setDatabaseName(relation.tableMeta.carbonTableIdentifier.getTableName)
+    // carbonLoadModel.setSchema(relation.tableMeta.schema)
+    val table = relation.tableMeta.carbonTable
     val carbonSchema = new CarbonDataLoadSchema(table)
     carbonLoadModel.setDatabaseName(table.getDatabaseName)
     carbonLoadModel.setTableName(table.getFactTableName)
@@ -137,9 +137,9 @@ class AutoHighCardinalityIdentifyTestCase extends QueryTest with BeforeAndAfterA
   }
 
   test("auto identify high cardinality column in first load #396") {
-    val oldTable = relation.cubeMeta.carbonTable
+    val oldTable = relation.tableMeta.carbonTable
     sql(s"LOAD DATA LOCAL INPATH '$filePath' into table highcard")
-    val newTable = relation.cubeMeta.carbonTable
+    val newTable = relation.tableMeta.carbonTable
     sql(s"select count(hc1) from highcard").show
 
     // check dictionary file

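Beyond the test-table renames, this hunk is an API rename: CarbonRelation's metadata field is now tableMeta instead of cubeMeta. Note also that the first setTableName/setDatabaseName pair in buildCarbonLoadModel receives the identifiers swapped; that predates this commit and is harmless, since both values are overwritten a few lines later. A sketch of the unambiguous form, using only calls visible above:

  val table = relation.tableMeta.carbonTable
  carbonLoadModel.setDatabaseName(table.getDatabaseName)
  carbonLoadModel.setTableName(table.getFactTableName)
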
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/integration/spark/src/test/scala/org/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala b/integration/spark/src/test/scala/org/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala
index dff2fbc..5d26965 100644
--- a/integration/spark/src/test/scala/org/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala
+++ b/integration/spark/src/test/scala/org/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala
@@ -58,10 +58,10 @@ class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
     dimensionFilePath: String,
     header: String): CarbonLoadModel = {
     val carbonLoadModel = new CarbonLoadModel
-    carbonLoadModel.setTableName(relation.cubeMeta.carbonTableIdentifier.getDatabaseName)
-    carbonLoadModel.setDatabaseName(relation.cubeMeta.carbonTableIdentifier.getTableName)
-    // carbonLoadModel.setSchema(relation.cubeMeta.schema)
-    val table = relation.cubeMeta.carbonTable
+    carbonLoadModel.setTableName(relation.tableMeta.carbonTableIdentifier.getDatabaseName)
+    carbonLoadModel.setDatabaseName(relation.tableMeta.carbonTableIdentifier.getTableName)
+    // carbonLoadModel.setSchema(relation.tableMeta.schema)
+    val table = relation.tableMeta.carbonTable
     val carbonSchema = new CarbonDataLoadSchema(table)
     carbonLoadModel.setDatabaseName(table.getDatabaseName)
     carbonLoadModel.setTableName(table.getFactTableName)
@@ -72,7 +72,7 @@ class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
     carbonLoadModel.setCsvDelimiter(",")
     carbonLoadModel.setComplexDelimiterLevel1("\\$")
     carbonLoadModel.setComplexDelimiterLevel2("\\:")
-    carbonLoadModel.setStorePath(relation.cubeMeta.storePath)
+    carbonLoadModel.setStorePath(relation.tableMeta.storePath)
     carbonLoadModel
   }
 
@@ -156,7 +156,7 @@ class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
   }
 
   def checkDictionary(relation: CarbonRelation, columnName: String, value: String) {
-    val table = relation.cubeMeta.carbonTable
+    val table = relation.tableMeta.carbonTable
     val dimension = table.getDimensionByName(table.getFactTableName, columnName)
     val tableIdentifier = new CarbonTableIdentifier(table.getDatabaseName, table.getFactTableName, "uniqueid")
 
@@ -175,7 +175,7 @@ class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
     GlobalDictionaryUtil
       .generateGlobalDictionary(CarbonHiveContext,
         carbonLoadModel,
-        sampleRelation.cubeMeta.storePath
+        sampleRelation.tableMeta.storePath
       )
 
     // test for dimension table
@@ -183,7 +183,7 @@ class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
     // CarbonDataLoadModel
     // carbonLoadModel = buildCarbonLoadModel(dimSampleRelation, filePath, dimFilePath, null)
     // GlobalDictionaryUtil.generateGlobalDictionary(CarbonHiveContext, carbonLoadModel,
-    // dimSampleRelation.cubeMeta.dataPath, false)
+    // dimSampleRelation.tableMeta.dataPath, false)
   }
 
   test("[Issue-190]load csv file without header And support complex type") {
@@ -193,7 +193,7 @@ class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
     GlobalDictionaryUtil
       .generateGlobalDictionary(CarbonHiveContext,
         carbonLoadModel,
-        complexRelation.cubeMeta.storePath
+        complexRelation.tableMeta.storePath
       )
   }
 
@@ -209,7 +209,7 @@ class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
     GlobalDictionaryUtil
       .generateGlobalDictionary(CarbonHiveContext,
         carbonLoadModel,
-        sampleRelation.cubeMeta.storePath
+        sampleRelation.tableMeta.storePath
       )
     checkDictionary(incrementalLoadTableRelation, "deviceInformationId", "100010")
 
@@ -222,7 +222,7 @@ class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
     GlobalDictionaryUtil
       .generateGlobalDictionary(CarbonHiveContext,
         carbonLoadModel,
-        sampleRelation.cubeMeta.storePath
+        sampleRelation.tableMeta.storePath
       )
     checkDictionary(incrementalLoadTableRelation, "deviceInformationId", "100077")
   }

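The same rename reaches storePath: dictionary generation now reads the store location from tableMeta. The call shape used throughout the tests above:

  GlobalDictionaryUtil.generateGlobalDictionary(
    CarbonHiveContext,
    carbonLoadModel,
    sampleRelation.tableMeta.storePath)
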
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/processing/src/main/java/org/carbondata/lcm/locks/LocalFileLock.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/lcm/locks/LocalFileLock.java b/processing/src/main/java/org/carbondata/lcm/locks/LocalFileLock.java
index a1cf40a..1672117 100644
--- a/processing/src/main/java/org/carbondata/lcm/locks/LocalFileLock.java
+++ b/processing/src/main/java/org/carbondata/lcm/locks/LocalFileLock.java
@@ -62,9 +62,9 @@ public class LocalFileLock extends AbstractCarbonLock {
 
   public static final String tmpPath;
 
-  private String cubeName;
+  private String tableName;
 
-  private String schemaName;
+  private String databaseName;
 
   /**
    * LOGGER for  logging the messages.
@@ -84,11 +84,11 @@ public class LocalFileLock extends AbstractCarbonLock {
     this.lockUsage = lockUsage;
     location = location.replace("\\", "/");
     String tempStr = location.substring(0, location.lastIndexOf('/'));
-    cubeName = tempStr.substring(tempStr.lastIndexOf('/') + 1, tempStr.length());
+    tableName = tempStr.substring(tempStr.lastIndexOf('/') + 1, tempStr.length());
     tempStr = tempStr.substring(0, tempStr.lastIndexOf('/'));
-    schemaName = tempStr.substring(tempStr.lastIndexOf('/') + 1, tempStr.length());
+    databaseName = tempStr.substring(tempStr.lastIndexOf('/') + 1, tempStr.length());
     this.location =
-        tmpPath + File.separator + schemaName + File.separator + cubeName + File.separator
+        tmpPath + File.separator + databaseName + File.separator + tableName + File.separator
             + this.lockUsage;
     initRetry();
   }
@@ -100,16 +100,16 @@ public class LocalFileLock extends AbstractCarbonLock {
    */
   @Override public boolean lock() {
     try {
-      String schemaFolderPath = tmpPath + File.separator + schemaName;
-      String cubeFolderPath = schemaFolderPath + File.separator + cubeName;
-      // create dir with schema name in tmp location.
-      if (!FileFactory.isFileExist(schemaFolderPath, FileFactory.getFileType(tmpPath))) {
-        FileFactory.mkdirs(schemaFolderPath, FileFactory.getFileType(tmpPath));
+      String databaseFolderPath = tmpPath + File.separator + databaseName;
+      String tableFolderPath = databaseFolderPath + File.separator + tableName;
+      // create dir with database name in tmp location.
+      if (!FileFactory.isFileExist(databaseFolderPath, FileFactory.getFileType(tmpPath))) {
+        FileFactory.mkdirs(databaseFolderPath, FileFactory.getFileType(tmpPath));
       }
 
-      // create dir with cube name in tmp location.
-      if (!FileFactory.isFileExist(cubeFolderPath, FileFactory.getFileType(tmpPath))) {
-        FileFactory.mkdirs(cubeFolderPath, FileFactory.getFileType(tmpPath));
+      // create dir with table name in tmp location.
+      if (!FileFactory.isFileExist(tableFolderPath, FileFactory.getFileType(tmpPath))) {
+        FileFactory.mkdirs(tableFolderPath, FileFactory.getFileType(tmpPath));
       }
       if (!FileFactory.isFileExist(location, FileFactory.getFileType(location))) {
         FileFactory.createNewLockFile(location, FileFactory.getFileType(location));
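
A minimal standalone sketch of the path parsing above: the folder holding the lock file becomes the table name, and the folder above it becomes the database name. The lock location used here is hypothetical.

    public class LocalFileLockPathDemo {
      public static void main(String[] args) {
        // hypothetical lock location: <store path>/<database>/<table>/<lock file>
        String location = "/tmp/carbon.store/default/employee/meta.lock";
        location = location.replace("\\", "/");
        String tempStr = location.substring(0, location.lastIndexOf('/'));
        // last folder above the lock file -> table name
        String tableName = tempStr.substring(tempStr.lastIndexOf('/') + 1);
        tempStr = tempStr.substring(0, tempStr.lastIndexOf('/'));
        // folder above that -> database name
        String databaseName = tempStr.substring(tempStr.lastIndexOf('/') + 1);
        System.out.println(databaseName + "/" + tableName); // prints default/employee
      }
    }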

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/processing/src/main/java/org/carbondata/lcm/locks/ZooKeeperLocking.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/lcm/locks/ZooKeeperLocking.java b/processing/src/main/java/org/carbondata/lcm/locks/ZooKeeperLocking.java
index ce5a62b..2d18b54 100644
--- a/processing/src/main/java/org/carbondata/lcm/locks/ZooKeeperLocking.java
+++ b/processing/src/main/java/org/carbondata/lcm/locks/ZooKeeperLocking.java
@@ -50,7 +50,7 @@ public class ZooKeeperLocking extends AbstractCarbonLock {
   private static final String zooKeeperLocation = CarbonCommonConstants.ZOOKEEPER_LOCATION;
 
   /**
-   * Unique folder for each cube with SchemaName_CubeName
+   * Unique folder for each table with DatabaseName.TableName
    */
   private final String tableIdFolder;
 
@@ -75,17 +75,17 @@ public class ZooKeeperLocking extends AbstractCarbonLock {
     this.lockTypeFolder = zooKeeperLocation;
     location = location.replace("\\", "/");
     String tempStr = location.substring(0, location.lastIndexOf('/'));
-    String schemaName = tempStr.substring(tempStr.lastIndexOf('/') + 1, tempStr.length());
+    String databaseName = tempStr.substring(tempStr.lastIndexOf('/') + 1, tempStr.length());
 
-    String cubeName = location.substring(location.lastIndexOf('/') + 1, location.length());
+    String tableName = location.substring(location.lastIndexOf('/') + 1, location.length());
 
-    this.tableIdFolder = zooKeeperLocation + CarbonCommonConstants.FILE_SEPARATOR + schemaName
-        + '.' + cubeName;
+    this.tableIdFolder = zooKeeperLocation + CarbonCommonConstants.FILE_SEPARATOR + databaseName
+        + '.' + tableName;
 
     zk = ZookeeperInit.getInstance().getZookeeper();
 
-    this.lockTypeFolder = zooKeeperLocation + CarbonCommonConstants.FILE_SEPARATOR + schemaName
-        + '.' + cubeName + CarbonCommonConstants.FILE_SEPARATOR
+    this.lockTypeFolder = zooKeeperLocation + CarbonCommonConstants.FILE_SEPARATOR + databaseName
+        + '.' + tableName + CarbonCommonConstants.FILE_SEPARATOR
         + lockUsage.toString();
     try {
       createBaseNode();
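
The two folders built above give every table its own znode subtree, named by database and table joined with a dot. A sketch of the resulting paths, assuming a hypothetical ZOOKEEPER_LOCATION of "/CarbonLocks" and a METADATA_LOCK usage:

    public class ZkLockPathDemo {
      public static void main(String[] args) {
        String zooKeeperLocation = "/CarbonLocks"; // assumed constant value
        String databaseName = "default";
        String tableName = "t1";
        String tableIdFolder = zooKeeperLocation + "/" + databaseName + '.' + tableName;
        String lockTypeFolder = tableIdFolder + "/" + "METADATA_LOCK";
        System.out.println(tableIdFolder);  // /CarbonLocks/default.t1
        System.out.println(lockTypeFolder); // /CarbonLocks/default.t1/METADATA_LOCK
      }
    }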

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/processing/src/main/java/org/carbondata/lcm/status/SegmentStatusManager.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/lcm/status/SegmentStatusManager.java b/processing/src/main/java/org/carbondata/lcm/status/SegmentStatusManager.java
index cc652d7..3949f47 100644
--- a/processing/src/main/java/org/carbondata/lcm/status/SegmentStatusManager.java
+++ b/processing/src/main/java/org/carbondata/lcm/status/SegmentStatusManager.java
@@ -165,15 +165,15 @@ public class SegmentStatusManager {
   /**
    * This method reads the load metadata file
    *
-   * @param cubeFolderPath
+   * @param tableFolderPath
    * @return
    */
-  public LoadMetadataDetails[] readLoadMetadata(String cubeFolderPath) {
+  public LoadMetadataDetails[] readLoadMetadata(String tableFolderPath) {
     Gson gsonObjectToRead = new Gson();
     DataInputStream dataInputStream = null;
     BufferedReader buffReader = null;
     InputStreamReader inStream = null;
-    String metadataFileName = cubeFolderPath + CarbonCommonConstants.FILE_SEPARATOR
+    String metadataFileName = tableFolderPath + CarbonCommonConstants.FILE_SEPARATOR
         + CarbonCommonConstants.LOADMETADATA_FILENAME;
     LoadMetadataDetails[] listOfLoadFolderDetailsArray;
 
@@ -228,12 +228,12 @@ public class SegmentStatusManager {
   /**
    * updates deletion status
    * @param loadIds
-   * @param cubeFolderPath
+   * @param tableFolderPath
    * @return
    */
-  public List<String> updateDeletionStatus(List<String> loadIds, String cubeFolderPath) {
+  public List<String> updateDeletionStatus(List<String> loadIds, String tableFolderPath) {
     ICarbonLock carbonLock =
-        CarbonLockFactory.getCarbonLockObj(cubeFolderPath, LockUsage.METADATA_LOCK);
+        CarbonLockFactory.getCarbonLockObj(tableFolderPath, LockUsage.METADATA_LOCK);
     List<String> invalidLoadIds = new ArrayList<String>(0);
     try {
       if (carbonLock.lockWithRetries()) {
@@ -250,7 +250,7 @@ public class SegmentStatusManager {
           return loadIds;
         }
         // read existing metadata details in load metadata.
-        listOfLoadFolderDetailsArray = readLoadMetadata(cubeFolderPath);
+        listOfLoadFolderDetailsArray = readLoadMetadata(tableFolderPath);
         if (listOfLoadFolderDetailsArray != null && listOfLoadFolderDetailsArray.length != 0) {
           updateDeletionStatus(loadIds, listOfLoadFolderDetailsArray, invalidLoadIds);
           if(invalidLoadIds.isEmpty())
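
For callers, only the parameter name changes: both methods still take the folder that contains the load metadata file. A hedged usage sketch; the manager instance and the folder path are hypothetical, but the signatures match the ones above.

    import java.util.Arrays;
    import java.util.List;

    class SegmentStatusDemo {
      static void listAndDelete(SegmentStatusManager manager) {
        String tableFolderPath = "/tmp/carbon.store/default/t1"; // hypothetical
        LoadMetadataDetails[] loads = manager.readLoadMetadata(tableFolderPath);
        // load ids that could not be marked for deletion come back
        List<String> invalidIds =
            manager.updateDeletionStatus(Arrays.asList("0", "1"), tableFolderPath);
      }
    }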

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/processing/src/main/java/org/carbondata/processing/api/dataloader/SchemaInfo.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/processing/api/dataloader/SchemaInfo.java b/processing/src/main/java/org/carbondata/processing/api/dataloader/SchemaInfo.java
index 3f6b8a0..c3d71ec 100644
--- a/processing/src/main/java/org/carbondata/processing/api/dataloader/SchemaInfo.java
+++ b/processing/src/main/java/org/carbondata/processing/api/dataloader/SchemaInfo.java
@@ -22,9 +22,9 @@ package org.carbondata.processing.api.dataloader;
 public class SchemaInfo {
 
   /**
-   * schemaName
+   * databaseName
    */
-  private String schemaName;
+  private String databaseName;
 
   /**
    * srcDriverName
@@ -47,9 +47,9 @@ public class SchemaInfo {
   private String srcPwd;
 
   /**
-   * cubeName
+   * tableName
    */
-  private String cubeName;
+  private String tableName;
 
   /**
    * isAutoAggregateRequest
@@ -132,12 +132,12 @@ public class SchemaInfo {
     this.srcPwd = srcPwd;
   }
 
-  public String getCubeName() {
-    return cubeName;
+  public String getTableName() {
+    return tableName;
   }
 
-  public void setCubeName(String cubeName) {
-    this.cubeName = cubeName;
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
   }
 
   /**
@@ -155,17 +155,17 @@ public class SchemaInfo {
   }
 
   /**
-   * @return the schemaName
+   * @return the databaseName
    */
-  public String getSchemaName() {
-    return schemaName;
+  public String getDatabaseName() {
+    return databaseName;
   }
 
   /**
-   * @param schemaName the schemaName to set
+   * @param databaseName the databaseName to set
    */
-  public void setSchemaName(String schemaName) {
-    this.schemaName = schemaName;
+  public void setDatabaseName(String databaseName) {
+    this.databaseName = databaseName;
   }
 
 }
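
For callers of this bean the rename is mechanical; a minimal before/after sketch with hypothetical values:

    SchemaInfo schemaInfo = new SchemaInfo();
    schemaInfo.setDatabaseName("default"); // was setSchemaName("default")
    schemaInfo.setTableName("t1");         // was setCubeName("t1")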

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/processing/src/main/java/org/carbondata/processing/csvload/DataGraphExecuter.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/processing/csvload/DataGraphExecuter.java b/processing/src/main/java/org/carbondata/processing/csvload/DataGraphExecuter.java
index ca21ec1..93b65da 100644
--- a/processing/src/main/java/org/carbondata/processing/csvload/DataGraphExecuter.java
+++ b/processing/src/main/java/org/carbondata/processing/csvload/DataGraphExecuter.java
@@ -246,7 +246,7 @@ public class DataGraphExecuter {
     }
 
     //Don't change the logic of creating key
-    String key = model.getSchemaName() + '/' + model.getCubeName() + '_' + model.getTableName();
+    String key = model.getDatabaseName() + '/' + model.getTableName() + '_' + model.getTableName();
 
     if (trans.getErrors() > 0) {
       LOGGER.error("Graph Execution had errors");
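
The new key deliberately keeps the legacy three-part shape, repeating the table name in the slot the cube name used to occupy. A sketch with hypothetical values:

    String key = "default" + '/' + "t1" + '_' + "t1"; // yields "default/t1_t1"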

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/processing/src/main/java/org/carbondata/processing/csvload/GraphExecutionUtil.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/processing/csvload/GraphExecutionUtil.java b/processing/src/main/java/org/carbondata/processing/csvload/GraphExecutionUtil.java
index 6a66817..97a1a29 100644
--- a/processing/src/main/java/org/carbondata/processing/csvload/GraphExecutionUtil.java
+++ b/processing/src/main/java/org/carbondata/processing/csvload/GraphExecutionUtil.java
@@ -195,7 +195,7 @@ public final class GraphExecutionUtil {
   /**
   * This method updates the column name
    *
-   * @param cube
+   * @param table
    * @param tableName
    * @param schema
    */

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/processing/src/main/java/org/carbondata/processing/dataprocessor/DataProcessTaskStatus.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/processing/dataprocessor/DataProcessTaskStatus.java b/processing/src/main/java/org/carbondata/processing/dataprocessor/DataProcessTaskStatus.java
index 44ce52b..abda9a2 100644
--- a/processing/src/main/java/org/carbondata/processing/dataprocessor/DataProcessTaskStatus.java
+++ b/processing/src/main/java/org/carbondata/processing/dataprocessor/DataProcessTaskStatus.java
@@ -57,9 +57,7 @@ public class DataProcessTaskStatus implements IDataProcessStatus, Serializable {
    */
   private int taskType;
 
-  private String schemaName;
-
-  private String cubeName;
+  private String databaseName;
 
   private String tableName;
 
@@ -89,9 +87,8 @@ public class DataProcessTaskStatus implements IDataProcessStatus, Serializable {
 
   private String escapeCharacter;
 
-  public DataProcessTaskStatus(String schemaName, String cubeName, String tableName) {
-    this.schemaName = schemaName;
-    this.cubeName = cubeName;
+  public DataProcessTaskStatus(String databaseName, String tableName) {
+    this.databaseName = databaseName;
     this.tableName = tableName;
     this.desc = "";
     this.setNewSchemaFilePath("");
@@ -134,17 +131,17 @@ public class DataProcessTaskStatus implements IDataProcessStatus, Serializable {
   }
 
   /**
-   * @return the schemaName
+   * @return the databaseName
    */
-  public String getSchemaName() {
-    return schemaName;
+  public String getDatabaseName() {
+    return databaseName;
   }
 
   /**
-   * @param schemaName the schemaName to set
+   * @param databaseName the databaseName to set
    */
-  public void setSchemaName(String schemaName) {
-    this.schemaName = schemaName;
+  public void setDatabaseName(String databaseName) {
+    this.databaseName = databaseName;
   }
 
   /**
@@ -161,20 +158,6 @@ public class DataProcessTaskStatus implements IDataProcessStatus, Serializable {
     this.tableName = tableName;
   }
 
-  /**
-   * @return the cubeName
-   */
-  public String getCubeName() {
-    return cubeName;
-  }
-
-  /**
-   * @param cubeName the cubeName to set
-   */
-  public void setCubeName(String cubeName) {
-    this.cubeName = cubeName;
-  }
-
   public String getDesc() {
     return desc;
   }
@@ -268,11 +251,11 @@ public class DataProcessTaskStatus implements IDataProcessStatus, Serializable {
    */
   public IDataProcessStatus makeCopy() {
     IDataProcessStatus copy = new DataProcessTaskStatus();
-    copy.setCubeName(this.cubeName);
+    copy.setTableName(this.tableName);
     copy.setDataloadstatusid(this.dataloadstatusid);
     copy.setDesc(this.desc);
     copy.setKey(this.key);
-    copy.setSchemaName(schemaName);
+    copy.setDatabaseName(databaseName);
     copy.setStatus(status);
     return copy;
   }
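
The constructor drops the separate cube argument, so call sites shrink from three arguments to two. A before/after sketch with hypothetical names:

    // before: new DataProcessTaskStatus("default", "salesCube", "sales");
    IDataProcessStatus status = new DataProcessTaskStatus("default", "sales");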

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/processing/src/main/java/org/carbondata/processing/dataprocessor/IDataProcessStatus.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/processing/dataprocessor/IDataProcessStatus.java b/processing/src/main/java/org/carbondata/processing/dataprocessor/IDataProcessStatus.java
index 54c7463..c1bd9a7 100644
--- a/processing/src/main/java/org/carbondata/processing/dataprocessor/IDataProcessStatus.java
+++ b/processing/src/main/java/org/carbondata/processing/dataprocessor/IDataProcessStatus.java
@@ -59,7 +59,7 @@ public interface IDataProcessStatus {
   /**
    * This method is used to get the Key for saving status of data loading.
    *
-   * @return String - Key (schemaName + cubeName + tableName).
+   * @return String - Key (databaseName + tableName + tableName; the table name
+   * repeats where the cube name used to be, keeping the key format unchanged).
    */
   String getKey();
 
@@ -92,24 +92,14 @@ public interface IDataProcessStatus {
   void setTaskType(int taskType);
 
   /**
-   * @return the schemaName
+   * @return the databaseName
    */
-  String getSchemaName();
+  String getDatabaseName();
 
   /**
-   * @param schemaName the schemaName to set
+   * @param databaseName the databaseName to set
    */
-  void setSchemaName(String schemaName);
-
-  /**
-   * @return the cubeName
-   */
-  String getCubeName();
-
-  /**
-   * @param cubeName the cubeName to set
-   */
-  void setCubeName(String cubeName);
+  void setDatabaseName(String databaseName);
 
   /**
    * @return the tableName

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/processing/src/main/java/org/carbondata/processing/graphgenerator/GraphGenerator.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/processing/graphgenerator/GraphGenerator.java b/processing/src/main/java/org/carbondata/processing/graphgenerator/GraphGenerator.java
index f61e5dd..a342c86 100644
--- a/processing/src/main/java/org/carbondata/processing/graphgenerator/GraphGenerator.java
+++ b/processing/src/main/java/org/carbondata/processing/graphgenerator/GraphGenerator.java
@@ -126,13 +126,13 @@ public class GraphGenerator {
    */
   private int yAxixLocation = 100;
   /**
-   * schemaName
+   * databaseName
    */
-  private String schemaName;
+  private String databaseName;
   /**
-   * cube
+   * table
    */
-  //    private Cube cube;
+  //    private Table table;
   /**
    * instance
    */
@@ -203,7 +203,7 @@ public class GraphGenerator {
     this.isAutoAggRequest = schemaInfo.isAutoAggregateRequest();
     //this.schema = schema;
     this.carbonDataLoadSchema = carbonDataLoadSchema;
-    this.schemaName = carbonDataLoadSchema.getCarbonTable().getDatabaseName();
+    this.databaseName = carbonDataLoadSchema.getCarbonTable().getDatabaseName();
     this.partitionID = partitionID;
     this.factStoreLocation = factStoreLocation;
     this.isColumnar = Boolean.parseBoolean(CarbonCommonConstants.IS_COLUMNAR_STORAGE_DEFAULTVALUE);
@@ -269,7 +269,7 @@ public class GraphGenerator {
   private void initialise() {
     this.instance = CarbonProperties.getInstance();
     //TO-DO need to take care while supporting aggregate table using new schema.
-    //aggregateTable = CarbonSchemaParser.getAggregateTable(cube, schema);
+    //aggregateTable = CarbonSchemaParser.getAggregateTable(table, schema);
     this.factTableName = carbonDataLoadSchema.getCarbonTable().getFactTableName();
   }
 
@@ -282,7 +282,7 @@ public class GraphGenerator {
 
   private void validateAndInitialiseKettelEngine() throws GraphGeneratorException {
     File file = new File(
-        outputLocation + File.separator + schemaInfo.getSchemaName() + File.separator
+        outputLocation + File.separator + schemaInfo.getDatabaseName() + File.separator
             + this.tableName + File.separator + this.segmentId + File.separator + this.taskNo
             + File.separator);
     boolean isDirCreated = false;
@@ -388,7 +388,7 @@ public class GraphGenerator {
     trans.addTransHop(mdkeyToSliceMerger);
 
     String graphFilePath =
-        outputLocation + File.separator + schemaInfo.getSchemaName() + File.separator
+        outputLocation + File.separator + schemaInfo.getDatabaseName() + File.separator
             + this.tableName + File.separator + segmentId + File.separator + this.taskNo
             + File.separator + this.tableName + ".ktr";
     generateGraphFile(trans, graphFilePath);
@@ -457,8 +457,8 @@ public class GraphGenerator {
     sliceMerger.setMdkeySize(configurationInfo.getMdkeySize());
     sliceMerger.setMeasureCount(configurationInfo.getMeasureCount());
     sliceMerger.setTabelName(configurationInfo.getTableName());
-    sliceMerger.setCubeName(schemaInfo.getCubeName());
-    sliceMerger.setSchemaName(schemaInfo.getSchemaName());
+    sliceMerger.setTableName(schemaInfo.getTableName());
+    sliceMerger.setDatabaseName(schemaInfo.getDatabaseName());
     if (null != this.factStoreLocation) {
       sliceMerger.setCurrentRestructNumber(
           CarbonUtil.getRestructureNumber(this.factStoreLocation, this.factTableName));
@@ -541,8 +541,8 @@ public class GraphGenerator {
     seqMeta.setBatchSize(Integer.parseInt(graphConfiguration.getBatchSize()));
     seqMeta.setNoDictionaryDims(graphConfiguration.getNoDictionaryDims());
     seqMeta.setDimensionColumnsDataType(graphConfiguration.getDimensionColumnsDataType());
-    seqMeta.setCubeName(schemaInfo.getCubeName());
-    seqMeta.setSchemaName(schemaInfo.getSchemaName());
+    seqMeta.setTableName(schemaInfo.getTableName());
+    seqMeta.setDatabaseName(schemaInfo.getDatabaseName());
     seqMeta.setComplexDelimiterLevel1(schemaInfo.getComplexDelimiterLevel1());
     seqMeta.setComplexDelimiterLevel2(schemaInfo.getComplexDelimiterLevel2());
     seqMeta.setCurrentRestructNumber(graphConfiguration.getCurrentRestructNumber());
@@ -602,8 +602,8 @@ public class GraphGenerator {
     carbonMdKey.setSegmentId(segmentId);
     carbonMdKey.setNumberOfCores(graphConfiguration.getNumberOfCores());
     carbonMdKey.setTableName(graphConfiguration.getTableName());
-    carbonMdKey.setSchemaName(schemaInfo.getSchemaName());
-    carbonMdKey.setCubeName(schemaInfo.getCubeName());
+    carbonMdKey.setDatabaseName(schemaInfo.getDatabaseName());
+    carbonMdKey.setTableName(schemaInfo.getTableName());
     carbonMdKey.setComplexTypeString(graphConfiguration.getComplexTypeString());
     carbonMdKey.setCurrentRestructNumber(graphConfiguration.getCurrentRestructNumber());
     carbonMdKey.setAggregateLevels(CarbonDataProcessorUtil
@@ -747,8 +747,8 @@ public class GraphGenerator {
     sortRowsMeta.setSegmentId(segmentId);
     sortRowsMeta.setTaskNo(taskNo);
     sortRowsMeta.setTabelName(graphConfiguration.getTableName());
-    sortRowsMeta.setCubeName(schemaInfo.getCubeName());
-    sortRowsMeta.setSchemaName(schemaInfo.getSchemaName());
+    sortRowsMeta.setTableName(schemaInfo.getTableName());
+    sortRowsMeta.setDatabaseName(schemaInfo.getDatabaseName());
     sortRowsMeta.setOutputRowSize(actualMeasures.length + 1 + "");
     sortRowsMeta.setCurrentRestructNumber(graphConfiguration.getCurrentRestructNumber());
     sortRowsMeta.setDimensionCount(graphConfiguration.getDimensions().length + "");
@@ -787,9 +787,9 @@ public class GraphGenerator {
         .getDimensionByTableName(carbonDataLoadSchema.getCarbonTable().getFactTableName());
     prepareIsUseInvertedIndex(dimensions, graphConfiguration);
     graphConfiguration
-        .setDimensions(CarbonSchemaParser.getCubeDimensions(dimensions, carbonDataLoadSchema));
+        .setDimensions(CarbonSchemaParser.getTableDimensions(dimensions, carbonDataLoadSchema));
     graphConfiguration
-        .setActualDims(CarbonSchemaParser.getCubeDimensions(dimensions, carbonDataLoadSchema));
+        .setActualDims(CarbonSchemaParser.getTableDimensions(dimensions, carbonDataLoadSchema));
     graphConfiguration
         .setColumnPropertiesString(CarbonSchemaParser.getColumnPropertiesString(dimensions));
     graphConfiguration.setComplexTypeString(CarbonSchemaParser.getComplexTypeString(dimensions));
@@ -860,10 +860,10 @@ public class GraphGenerator {
         .setActualDimensionColumns(CarbonSchemaParser.getActualDimensions(dimensions));
     graphConfiguration
     .setDimensionColumnsDataType(CarbonSchemaParser.getDimensionsDataTypes(dimensions));
-    //graphConfiguration.setNormHiers(CarbonSchemaParser.getNormHiers(cube, schema));
+    //graphConfiguration.setNormHiers(CarbonSchemaParser.getNormHiers(table, schema));
     graphConfiguration.setMeasureDataTypeInfo(CarbonSchemaParser.getMeasuresDataType(measures));
     graphConfiguration.setStoreLocation(
-        this.schemaName + '/' + carbonDataLoadSchema.getCarbonTable().getFactTableName());
+        this.databaseName + '/' + carbonDataLoadSchema.getCarbonTable().getFactTableName());
     graphConfiguration.setBlockletSize(
         (instance.getProperty("com.huawei.unibi.carbon.blocklet.size", DEFAUL_BLOCKLET_SIZE)));
     graphConfiguration.setMaxBlockletInFile(
@@ -919,7 +919,7 @@ public class GraphGenerator {
     return CarbonSchemaParser.QUOTES;
   }
 
-  public CarbonTable getCube() {
+  public CarbonTable getTable() {
     return carbonDataLoadSchema.getCarbonTable();
   }
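
The generated Kettle graph now lands under the database name instead of the schema name. A sketch of the resulting .ktr path; every component below is hypothetical:

    String graphFilePath = "/tmp/etl" + File.separator + "default" // databaseName
        + File.separator + "t1"                                    // tableName
        + File.separator + "0"                                     // segmentId
        + File.separator + "0"                                     // taskNo
        + File.separator + "t1" + ".ktr";
    // e.g. /tmp/etl/default/t1/0/0/t1.ktr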
 

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/processing/src/main/java/org/carbondata/processing/graphgenerator/configuration/GraphConfigurationInfo.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/processing/graphgenerator/configuration/GraphConfigurationInfo.java b/processing/src/main/java/org/carbondata/processing/graphgenerator/configuration/GraphConfigurationInfo.java
index 1b2afa5..750f5b0 100644
--- a/processing/src/main/java/org/carbondata/processing/graphgenerator/configuration/GraphConfigurationInfo.java
+++ b/processing/src/main/java/org/carbondata/processing/graphgenerator/configuration/GraphConfigurationInfo.java
@@ -64,7 +64,7 @@ public class GraphConfigurationInfo {
 
   private String jndiName;
 
-  private Map<String, String> cubeMeasuresAndDataTypeMap;
+  private Map<String, String> tableMeasuresAndDataTypeMap;
 
   private String tableInputSqlQuery;
 
@@ -515,17 +515,17 @@ public class GraphConfigurationInfo {
   }
 
   /**
-   * @return the cubeMeasuresAndDataTypeMap
+   * @return the tableMeasuresAndDataTypeMap
    */
-  public Map<String, String> getCubeMeasuresAndDataTypeMap() {
-    return cubeMeasuresAndDataTypeMap;
+  public Map<String, String> getTableMeasuresAndDataTypeMap() {
+    return tableMeasuresAndDataTypeMap;
   }
 
   /**
-   * @param cubeMeasuresAndDataTypeMap the cubeMeasuresAndDataTypeMap to set
+   * @param tableMeasuresAndDataTypeMap the tableMeasuresAndDataTypeMap to set
    */
-  public void setCubeMeasuresAndDataTypeMap(Map<String, String> cubeMeasuresAndDataTypeMap) {
-    this.cubeMeasuresAndDataTypeMap = cubeMeasuresAndDataTypeMap;
+  public void setTableMeasuresAndDataTypeMap(Map<String, String> tableMeasuresAndDataTypeMap) {
+    this.tableMeasuresAndDataTypeMap = tableMeasuresAndDataTypeMap;
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/processing/src/main/java/org/carbondata/processing/mdkeygen/MDKeyGenStep.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/processing/mdkeygen/MDKeyGenStep.java b/processing/src/main/java/org/carbondata/processing/mdkeygen/MDKeyGenStep.java
index 1b89823..619da5e 100644
--- a/processing/src/main/java/org/carbondata/processing/mdkeygen/MDKeyGenStep.java
+++ b/processing/src/main/java/org/carbondata/processing/mdkeygen/MDKeyGenStep.java
@@ -252,7 +252,7 @@ public class MDKeyGenStep extends BaseStep {
   private boolean setStepConfiguration() {
     this.tableName = meta.getTableName();
     storeLocation = CarbonDataProcessorUtil
-        .getLocalDataFolderLocation(meta.getSchemaName(), meta.getTableName(),
+        .getLocalDataFolderLocation(meta.getDatabaseName(), meta.getTableName(),
             String.valueOf(meta.getTaskNo()), meta.getPartitionID(), meta.getSegmentId()+"");
     isNoDictionaryDimension =
         RemoveDictionaryUtil.convertStringToBooleanArr(meta.getNoDictionaryDimsMapping());
@@ -301,7 +301,7 @@ public class MDKeyGenStep extends BaseStep {
     }
 
     CarbonTable carbonTable = CarbonMetadata.getInstance()
-        .getCarbonTable(meta.getSchemaName() + CarbonCommonConstants.UNDERSCORE + tableName);
+        .getCarbonTable(meta.getDatabaseName() + CarbonCommonConstants.UNDERSCORE + tableName);
     wrapperColumnSchema = CarbonUtil
         .getColumnSchemaList(carbonTable.getDimensionByTableName(tableName),
             carbonTable.getMeasureByTableName(tableName));
@@ -376,7 +376,7 @@ public class MDKeyGenStep extends BaseStep {
    */
   private CarbonFactDataHandlerModel getCarbonFactDataHandlerModel() {
     CarbonFactDataHandlerModel carbonFactDataHandlerModel = new CarbonFactDataHandlerModel();
-    carbonFactDataHandlerModel.setDatabaseName(meta.getSchemaName());
+    carbonFactDataHandlerModel.setDatabaseName(meta.getDatabaseName());
     carbonFactDataHandlerModel.setTableName(tableName);
     carbonFactDataHandlerModel.setMeasureCount(measureCount);
     carbonFactDataHandlerModel.setMdKeyLength(data.generator.getKeySizeInBytes());
@@ -398,7 +398,7 @@ public class MDKeyGenStep extends BaseStep {
     aggType = new char[measureCount];
     Arrays.fill(aggType, 'n');
     CarbonTable carbonTable = CarbonMetadata.getInstance().getCarbonTable(
-        meta.getSchemaName() + CarbonCommonConstants.UNDERSCORE + meta.getTableName());
+        meta.getDatabaseName() + CarbonCommonConstants.UNDERSCORE + meta.getTableName());
     List<CarbonMeasure> measures = carbonTable.getMeasureByTableName(meta.getTableName());
     for (int i = 0; i < measureCount; i++) {
       aggType[i] = DataTypeUtil.getAggType(measures.get(i).getDataType());
@@ -482,7 +482,7 @@ public class MDKeyGenStep extends BaseStep {
     String carbonStorePath =
         CarbonProperties.getInstance().getProperty(CarbonCommonConstants.STORE_LOCATION_HDFS);
     CarbonTable carbonTable = CarbonMetadata.getInstance().getCarbonTable(
-        meta.getSchemaName() + CarbonCommonConstants.UNDERSCORE + meta.getTableName());
+        meta.getDatabaseName() + CarbonCommonConstants.UNDERSCORE + meta.getTableName());
     CarbonTablePath carbonTablePath =
         CarbonStorePath.getCarbonTablePath(carbonStorePath, carbonTable.getCarbonTableIdentifier());
     String carbonDataDirectoryPath =
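
Throughout this step the in-memory metadata lookup key becomes the database name and table name joined by an underscore. A sketch with hypothetical names:

    CarbonTable carbonTable = CarbonMetadata.getInstance()
        .getCarbonTable("default" + CarbonCommonConstants.UNDERSCORE + "t1");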

http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/84b476bc/processing/src/main/java/org/carbondata/processing/mdkeygen/MDKeyGenStepMeta.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/carbondata/processing/mdkeygen/MDKeyGenStepMeta.java b/processing/src/main/java/org/carbondata/processing/mdkeygen/MDKeyGenStepMeta.java
index 9b65e63..92894c3 100644
--- a/processing/src/main/java/org/carbondata/processing/mdkeygen/MDKeyGenStepMeta.java
+++ b/processing/src/main/java/org/carbondata/processing/mdkeygen/MDKeyGenStepMeta.java
@@ -66,14 +66,9 @@ public class MDKeyGenStepMeta extends BaseStepMeta implements StepMetaInterface
   private String numberOfCores;
 
   /**
-   * cubeName
+   * databaseName
    */
-  private String cubeName;
-
-  /**
-   * schemaName
-   */
-  private String schemaName;
+  private String databaseName;
 
   /**
    * aggregateLevels
@@ -156,8 +151,7 @@ public class MDKeyGenStepMeta extends BaseStepMeta implements StepMetaInterface
     tableName = "";
     numberOfCores = "";
     aggregateLevels = "";
-    cubeName = "";
-    schemaName = "";
+    databaseName = "";
     columnGroupsString = "";
     noDictionaryDims = "";
     currentRestructNumber = -1;
@@ -175,8 +170,8 @@ public class MDKeyGenStepMeta extends BaseStepMeta implements StepMetaInterface
     retval.append("    ").append(XMLHandler.addTagValue("TableName", tableName));
     retval.append("    ").append(XMLHandler.addTagValue("AggregateLevels", aggregateLevels));
     retval.append("    ").append(XMLHandler.addTagValue("NumberOfCores", numberOfCores));
-    retval.append("    ").append(XMLHandler.addTagValue("cubeName", cubeName));
-    retval.append("    ").append(XMLHandler.addTagValue("schemaName", schemaName));
+    retval.append("    ").append(XMLHandler.addTagValue("tableName", tableName));
+    retval.append("    ").append(XMLHandler.addTagValue("databaseName", databaseName));
     retval.append("    ").append(XMLHandler.addTagValue("noDictionaryDims", noDictionaryDims));
     retval.append("    ").append(XMLHandler.addTagValue("measureCount", measureCount));
     retval.append("    ").append(XMLHandler.addTagValue("dimensionsStoreType", columnGroupsString));
@@ -204,8 +199,8 @@ public class MDKeyGenStepMeta extends BaseStepMeta implements StepMetaInterface
       tableName = XMLHandler.getTagValue(stepnode, "TableName");
       aggregateLevels = XMLHandler.getTagValue(stepnode, "AggregateLevels");
       numberOfCores = XMLHandler.getTagValue(stepnode, "NumberOfCores");
-      schemaName = XMLHandler.getTagValue(stepnode, "schemaName");
-      cubeName = XMLHandler.getTagValue(stepnode, "cubeName");
+      databaseName = XMLHandler.getTagValue(stepnode, "databaseName");
+      tableName = XMLHandler.getTagValue(stepnode, "tableName");
       noDictionaryDims = XMLHandler.getTagValue(stepnode, "noDictionaryDims");
       measureCount = XMLHandler.getTagValue(stepnode, "measureCount");
       columnGroupsString = XMLHandler.getTagValue(stepnode, "dimensionsStoreType");
@@ -232,8 +227,8 @@ public class MDKeyGenStepMeta extends BaseStepMeta implements StepMetaInterface
       rep.saveStepAttribute(idTransformation, idStep, "TableName", tableName);
       rep.saveStepAttribute(idTransformation, idStep, "AggregateLevels", aggregateLevels);
       rep.saveStepAttribute(idTransformation, idStep, "NumberOfCores", numberOfCores);
-      rep.saveStepAttribute(idTransformation, idStep, "schemaName", schemaName);
-      rep.saveStepAttribute(idTransformation, idStep, "cubeName", cubeName);
+      rep.saveStepAttribute(idTransformation, idStep, "databaseName", databaseName);
+      rep.saveStepAttribute(idTransformation, idStep, "tableName", tableName);
       rep.saveStepAttribute(idTransformation, idStep, "noDictionaryDims", noDictionaryDims);
       rep.saveStepAttribute(idTransformation, idStep, "measureCount", measureCount);
       rep.saveStepAttribute(idTransformation, idStep, "dimensionsStoreType", columnGroupsString);
@@ -264,8 +259,8 @@ public class MDKeyGenStepMeta extends BaseStepMeta implements StepMetaInterface
       tableName = rep.getStepAttributeString(idStep, "TableName");
       aggregateLevels = rep.getStepAttributeString(idStep, "AggregateLevels");
       numberOfCores = rep.getStepAttributeString(idStep, "NumberOfCores");
-      schemaName = rep.getStepAttributeString(idStep, "schemaName");
-      cubeName = rep.getStepAttributeString(idStep, "cubeName");
+      databaseName = rep.getStepAttributeString(idStep, "databaseName");
+      tableName = rep.getStepAttributeString(idStep, "tableName");
       noDictionaryDims = rep.getStepAttributeString(idStep, "noDictionaryDims");
       measureCount = rep.getStepAttributeString(idStep, "measureCount");
       columnGroupsString = rep.getStepAttributeString(idStep, "dimensionsStoreType");
@@ -335,31 +330,17 @@ public class MDKeyGenStepMeta extends BaseStepMeta implements StepMetaInterface
   }
 
   /**
-   * @return the cubeName
-   */
-  public String getCubeName() {
-    return cubeName;
-  }
-
-  /**
-   * @param cubeName the cubeName to set
-   */
-  public void setCubeName(String cubeName) {
-    this.cubeName = cubeName;
-  }
-
-  /**
-   * @return the schemaName
+   * @return the databaseName
    */
-  public String getSchemaName() {
-    return schemaName;
+  public String getDatabaseName() {
+    return databaseName;
   }
 
   /**
-   * @param schemaName the schemaName to set
+   * @param databaseName the databaseName to set
    */
-  public void setSchemaName(String schemaName) {
-    this.schemaName = schemaName;
+  public void setDatabaseName(String databaseName) {
+    this.databaseName = databaseName;
   }
 
   /**