Posted to commits@carbondata.apache.org by ra...@apache.org on 2018/12/17 14:16:57 UTC

[15/21] carbondata git commit: [CARBONDATA-3002] Fix some spell error

http://git-wip-us.apache.org/repos/asf/carbondata/blob/313ba0ea/integration/spark2/src/main/scala/org/apache/spark/util/TableLoader.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/util/TableLoader.scala b/integration/spark2/src/main/scala/org/apache/spark/util/TableLoader.scala
index b6667df..efaa191 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/util/TableLoader.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/util/TableLoader.scala
@@ -82,7 +82,7 @@ object TableLoader {
 
     val spark = TableAPIUtil.spark(storePath, s"TableLoader: $dbName.$tableName")
 
-    CarbonEnv.getInstance(spark).carbonMetastore.
+    CarbonEnv.getInstance(spark).carbonMetaStore.
       checkSchemasModifiedTimeAndReloadTable(TableIdentifier(tableName, Some(dbName)))
     loadTable(spark, Option(dbName), tableName, inputPaths, map)
   }
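
For context, every file touched by this commit reaches the renamed accessor the same way, through CarbonEnv. A minimal Scala sketch assembled from the call sites in these hunks (the table and database names are placeholders, and spark is assumed to be an existing CarbonSession):

    import org.apache.spark.sql.CarbonEnv
    import org.apache.spark.sql.catalyst.TableIdentifier

    // After this commit the accessor is spelled carbonMetaStore (capital S).
    val metaStore = CarbonEnv.getInstance(spark).carbonMetaStore
    // Reload the schema if its modified time changed, as TableLoader does above.
    metaStore.checkSchemasModifiedTimeAndReloadTable(
      TableIdentifier("sample", Some("default")))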

http://git-wip-us.apache.org/repos/asf/carbondata/blob/313ba0ea/integration/spark2/src/main/spark2.1/org/apache/spark/sql/hive/CarbonSessionState.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/spark2.1/org/apache/spark/sql/hive/CarbonSessionState.scala b/integration/spark2/src/main/spark2.1/org/apache/spark/sql/hive/CarbonSessionState.scala
index 759539b..47b6b67 100644
--- a/integration/spark2/src/main/spark2.1/org/apache/spark/sql/hive/CarbonSessionState.scala
+++ b/integration/spark2/src/main/spark2.1/org/apache/spark/sql/hive/CarbonSessionState.scala
@@ -164,10 +164,10 @@ class CarbonHiveSessionCatalog(
       carbonDatasourceHadoopRelation: CarbonDatasourceHadoopRelation): Boolean = {
     var isRefreshed = false
     val storePath = CarbonProperties.getStorePath
-    carbonEnv.carbonMetastore.
+    carbonEnv.carbonMetaStore.
       checkSchemasModifiedTimeAndReloadTable(identifier)
 
-    val table = carbonEnv.carbonMetastore.getTableFromMetadataCache(
+    val table = carbonEnv.carbonMetaStore.getTableFromMetadataCache(
       carbonDatasourceHadoopRelation.carbonTable.getDatabaseName,
       carbonDatasourceHadoopRelation.carbonTable.getTableName)
     if (table.isEmpty || (table.isDefined &&

http://git-wip-us.apache.org/repos/asf/carbondata/blob/313ba0ea/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
index ed5486b..bceb0fc 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
@@ -137,7 +137,7 @@ class AllDictionaryTestCase extends Spark2QueryTest with BeforeAndAfterAll {
       .config("spark.executor.heartbeatInterval", "600s")
       .config("carbon.enable.vector.reader","false")
       .getOrCreateCarbonSession(storeLocation, metastoredb)
-    val catalog = CarbonEnv.getInstance(spark).carbonMetastore
+    val catalog = CarbonEnv.getInstance(spark).carbonMetaStore
     sampleRelation = catalog.lookupRelation(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
       "sample")(spark).asInstanceOf[CarbonRelation]
     complexRelation = catalog.lookupRelation(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),

http://git-wip-us.apache.org/repos/asf/carbondata/blob/313ba0ea/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryLRUCacheTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryLRUCacheTestCase.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryLRUCacheTestCase.scala
index 245ee7c..3c50a18 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryLRUCacheTestCase.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/DictionaryLRUCacheTestCase.scala
@@ -39,7 +39,7 @@ class DictionaryLRUCacheTestCase extends Spark2QueryTest with BeforeAndAfterAll
   var path : String = null
 
   def checkDictionaryAccessCount(databaseName: String, tableName: String): Unit = {
-    val carbonTable = CarbonEnv.getInstance(Spark2TestQueryExecutor.spark).carbonMetastore
+    val carbonTable = CarbonEnv.getInstance(Spark2TestQueryExecutor.spark).carbonMetaStore
       .lookupRelation(Option(databaseName), tableName)(Spark2TestQueryExecutor.spark)
       .asInstanceOf[CarbonRelation].carbonTable
     val absoluteTableIdentifier = carbonTable.getAbsoluteTableIdentifier

http://git-wip-us.apache.org/repos/asf/carbondata/blob/313ba0ea/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
index 69248d6..9607bbc 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
@@ -132,7 +132,7 @@ class ExternalColumnDictionaryTestCase extends Spark2QueryTest with BeforeAndAft
       .config("spark.executor.heartbeatInterval", "600s")
       .config("carbon.enable.vector.reader","false")
       .getOrCreateCarbonSession(storeLocation, metastoredb)
-    val catalog = CarbonEnv.getInstance(spark).carbonMetastore
+    val catalog = CarbonEnv.getInstance(spark).carbonMetaStore
     extComplexRelation = catalog
       .lookupRelation(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
         "extComplextypes")(spark)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/313ba0ea/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOpName.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOpName.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOpName.scala
index e3c2d88..5096089 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOpName.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOpName.scala
@@ -226,7 +226,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
     assertResult("Streaming property value is incorrect")(msg.getMessage)
 
     val identifier = new TableIdentifier("batch_table", Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     var server: ServerSocket = null
     try {
@@ -253,7 +253,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
   // input source: file
   test("streaming ingest from file source") {
     val identifier = new TableIdentifier("stream_table_file", Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     val csvDataDir = new File("target/csvdata").getCanonicalPath
     // streaming ingest 10 rows
@@ -277,7 +277,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
 
   test("test preaggregate table creation on streaming table without handoff") {
     val identifier = new TableIdentifier("agg_table", Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     val csvDataDir = new File("target/csvdatanew").getCanonicalPath
     // streaming ingest 10 rows
@@ -297,7 +297,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
   test("test if data is loaded into preaggregate after handoff is fired") {
     createTable(tableName = "agg_table2", streaming = true, withBatchLoad = false)
     val identifier = new TableIdentifier("agg_table2", Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     val csvDataDir = new File("target/csvdatanew").getCanonicalPath
     // streaming ingest 10 rows
@@ -358,7 +358,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
   test("test whether data is loaded into preaggregate after handoff is fired") {
     createTable(tableName = "agg_table2", streaming = true, withBatchLoad = false)
     val identifier = new TableIdentifier("agg_table2", Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     val csvDataDir = new File("target/csvdatanew").getCanonicalPath
     // streaming ingest 10 rows
@@ -401,7 +401,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
   test("test whether data is loaded into preaggregate before handoff is fired") {
     createTable(tableName = "agg_table2", streaming = true, withBatchLoad = false)
     val identifier = new TableIdentifier("agg_table2", Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     val csvDataDir = new File("target/csvdatanew").getCanonicalPath
     // streaming ingest 10 rows
@@ -438,7 +438,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
     sql("drop table if exists timeseries_table")
     createTable(tableName = "timeseries_table", streaming = true, withBatchLoad = false)
     val identifier = new TableIdentifier("timeseries_table", Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     val csvDataDir = new File("target/csvdatanew").getCanonicalPath
     val thread = createFileStreamingThread(spark, carbonTable, csvDataDir, intervalSecond = 1,
@@ -465,7 +465,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
     sql("drop table if exists timeseries_table")
     createTable(tableName = "timeseries_table", streaming = true, withBatchLoad = false)
     val identifier = new TableIdentifier("timeseries_table", Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     val csvDataDir = new File("target/csvdatanew").getCanonicalPath
     val thread = createFileStreamingThread(spark, carbonTable, csvDataDir, intervalSecond = 1,
@@ -549,7 +549,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
 
   def loadData() {
     val identifier = new TableIdentifier("agg_table2", Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     val csvDataDir = new File("target/csvdatanew").getCanonicalPath
     // streaming ingest 10 rows
@@ -567,7 +567,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
     sql("drop table if exists agg_table2")
     createTable(tableName = "agg_table2", streaming = true, withBatchLoad = false)
     val identifier = new TableIdentifier("agg_table2", Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     val csvDataDir = new File("target/csvdata1").getCanonicalPath
     generateCSVDataFile(spark, idStart = 10, rowNums = 5, csvDataDir)
@@ -598,7 +598,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
   test("test if data is loaded in aggregate table after handoff is done for streaming table") {
     createTable(tableName = "agg_table3", streaming = true, withBatchLoad = false)
     val identifier = new TableIdentifier("agg_table3", Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     val csvDataDir = new File("target/csvdatanew").getCanonicalPath
     generateCSVDataFile(spark, idStart = 10, rowNums = 5, csvDataDir)
@@ -1621,7 +1621,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
 
   test("block drop streaming table while streaming is in progress") {
     val identifier = new TableIdentifier("stream_table_drop", Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     var server: ServerSocket = null
     try {
@@ -2420,7 +2420,7 @@ class TestStreamingTableOpName extends QueryTest with BeforeAndAfterAll {
       badRecordsPath: String = CarbonCommonConstants.CARBON_BADRECORDS_LOC_DEFAULT_VAL
   ): Unit = {
     val identifier = new TableIdentifier(tableName, Option("streaming"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     var server: ServerSocket = null
     try {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/313ba0ea/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
index 21cad07..5d806a3 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableWithRowParser.scala
@@ -816,7 +816,7 @@ class TestStreamingTableWithRowParser extends QueryTest with BeforeAndAfterAll {
       autoHandoff: Boolean = CarbonCommonConstants.ENABLE_AUTO_HANDOFF_DEFAULT.toBoolean
   ): Unit = {
     val identifier = new TableIdentifier(tableName, Option("streaming1"))
-    val carbonTable = CarbonEnv.getInstance(spark).carbonMetastore.lookupRelation(identifier)(spark)
+    val carbonTable = CarbonEnv.getInstance(spark).carbonMetaStore.lookupRelation(identifier)(spark)
       .asInstanceOf[CarbonRelation].metaData.carbonTable
     var server: ServerSocket = null
     try {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/313ba0ea/integration/spark2/src/test/scala/org/apache/spark/carbondata/register/TestRegisterCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/register/TestRegisterCarbonTable.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/register/TestRegisterCarbonTable.scala
index a0c801a..e4e7d92 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/register/TestRegisterCarbonTable.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/register/TestRegisterCarbonTable.scala
@@ -67,7 +67,7 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterAll {
     sql("insert into carbontable select 'a',1,'aa','aaa'")
     backUpData(dblocation, "carbontable")
     sql("drop table carbontable")
-    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore.isReadFromHiveMetaStore) {
+    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
       restoreData(dblocation, "carbontable")
       sql("refresh table carbontable")
       checkAnswer(sql("select count(*) from carbontable"), Row(1))
@@ -83,7 +83,7 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterAll {
     sql("insert into carbontable select 'a',1,'aa','aaa'")
     backUpData(dblocation, "carbontable")
     sql("drop table carbontable")
-    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore.isReadFromHiveMetaStore) {
+    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
       restoreData(dblocation, "carbontable")
       sql("refresh table carbontable")
       checkAnswer(sql("select count(*) from carbontable"), Row(1))
@@ -103,7 +103,7 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterAll {
     backUpData(dblocation, "carbontable")
     backUpData(dblocation, "carbontable_preagg1")
     sql("drop table carbontable")
-    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore.isReadFromHiveMetaStore) {
+    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
       restoreData(dblocation, "carbontable")
       restoreData(dblocation, "carbontable_preagg1")
       sql("refresh table carbontable")
@@ -126,7 +126,7 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterAll {
     backUpData(dblocation, "carbontable")
     backUpData(dblocation, "carbontable_preagg1")
     sql("drop table carbontable")
-    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore.isReadFromHiveMetaStore) {
+    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
       restoreData(dblocation, "carbontable")
       restoreData(dblocation, "carbontable_preagg1")
       sql("refresh table carbontable")
@@ -149,7 +149,7 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterAll {
     backUpData(dblocation, "carbontable")
     backUpData(dblocation, "carbontable_preagg1")
     sql("drop table carbontable")
-    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore.isReadFromHiveMetaStore) {
+    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
       restoreData(dblocation, "carbontable")
       intercept[ProcessMetaDataException] {
         sql("refresh table carbontable")
@@ -169,7 +169,7 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterAll {
     sql("insert into carbontable select 'b',1,'bb','bbb'")
     backUpData(dblocation, "carbontable")
     sql("drop table carbontable")
-    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore.isReadFromHiveMetaStore) {
+    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
       restoreData(dblocation, "carbontable")
       sql("refresh table carbontable")
       // update operation
@@ -195,7 +195,7 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterAll {
     sql(s"LOAD DATA LOCAL INPATH '$testData' into table automerge")
     backUpData(dblocation, "automerge")
     sql("drop table automerge")
-    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore.isReadFromHiveMetaStore) {
+    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
       restoreData(dblocation, "automerge")
       sql("refresh table automerge")
       // update operation
@@ -216,7 +216,7 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterAll {
     sql("insert into carbontable select 'b',1,'bb','bbb'")
     backUpData(dblocation, "carbontable")
     sql("drop table carbontable")
-    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore.isReadFromHiveMetaStore) {
+    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
       restoreData(dblocation, "carbontable")
       sql("refresh table carbontable")
       // delete operation
@@ -238,7 +238,7 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterAll {
     sql("insert into carbontable select 'b',1,'bb','bbb'")
     backUpData(dblocation, "carbontable")
     sql("drop table carbontable")
-    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore.isReadFromHiveMetaStore) {
+    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
       restoreData(dblocation, "carbontable")
       sql("refresh table carbontable")
       sql("Alter table carbontable add columns(c4 string) " +
@@ -260,7 +260,7 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterAll {
     sql("insert into carbontable select 'b',1,'bb','bbb'")
     backUpData(dblocation, "carbontable")
     sql("drop table carbontable")
-    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore.isReadFromHiveMetaStore) {
+    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
       restoreData(dblocation, "carbontable")
       sql("refresh table carbontable")
       sql("Alter table carbontable change c2 c2 long")
@@ -281,7 +281,7 @@ class TestRegisterCarbonTable extends QueryTest with BeforeAndAfterAll {
     sql("insert into carbontable select 'b',1,'bb','bbb'")
     backUpData(dblocation, "carbontable")
     sql("drop table carbontable")
-    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetastore.isReadFromHiveMetaStore) {
+    if (!CarbonEnv.getInstance(sqlContext.sparkSession).carbonMetaStore.isReadFromHiveMetaStore) {
       restoreData(dblocation, "carbontable")
       sql("refresh table carbontable")
       sql("Alter table carbontable drop columns(c2)")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/313ba0ea/processing/src/main/java/org/apache/carbondata/processing/loading/CarbonDataLoadConfiguration.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/CarbonDataLoadConfiguration.java b/processing/src/main/java/org/apache/carbondata/processing/loading/CarbonDataLoadConfiguration.java
index d3501c7..54dc2d4 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/CarbonDataLoadConfiguration.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/CarbonDataLoadConfiguration.java
@@ -116,7 +116,7 @@ public class CarbonDataLoadConfiguration {
   private boolean carbonTransactionalTable;
 
   /**
-   * Flder path to where data should be written for this load.
+   * Folder path to where data should be written for this load.
    */
   private String dataWritePath;
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/313ba0ea/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModel.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModel.java b/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModel.java
index aecc52e..71d61db 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModel.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModel.java
@@ -217,7 +217,7 @@ public class CarbonLoadModel implements Serializable {
   private boolean isJsonFileLoad;
 
   /**
-   * Flder path to where data should be written for this load.
+   * Folder path to where data should be written for this load.
    */
   private String dataWritePath;
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/313ba0ea/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java
index 64fcaa2..7688415 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java
@@ -551,14 +551,14 @@ public final class CarbonLoaderUtil {
    * This method will divide the blocks among the nodes as per the data locality
    *
    * @param blockInfos blocks
-   * @param noOfNodesInput -1 if number of nodes has to be decided
+   * @param numOfNodesInput -1 if number of nodes has to be decided
    *                       based on block location information
    * @param blockAssignmentStrategy strategy used to assign blocks
    * @param expectedMinSizePerNode the property load_min_size_inmb specified by the user
    * @return a map that maps node to blocks
    */
   public static Map<String, List<Distributable>> nodeBlockMapping(
-      List<Distributable> blockInfos, int noOfNodesInput, List<String> activeNodes,
+      List<Distributable> blockInfos, int numOfNodesInput, List<String> activeNodes,
       BlockAssignmentStrategy blockAssignmentStrategy, String expectedMinSizePerNode) {
     ArrayList<NodeMultiBlockRelation> rtnNode2Blocks = new ArrayList<>();
 
@@ -569,9 +569,9 @@ public final class CarbonLoaderUtil {
       nodes.add(relation.getNode());
     }
 
-    int noofNodes = (-1 == noOfNodesInput) ? nodes.size() : noOfNodesInput;
+    int numOfNodes = (-1 == numOfNodesInput) ? nodes.size() : numOfNodesInput;
     if (null != activeNodes) {
-      noofNodes = activeNodes.size();
+      numOfNodes = activeNodes.size();
     }
 
     // calculate the average expected size for each node
@@ -579,7 +579,7 @@ public final class CarbonLoaderUtil {
     long totalFileSize = 0;
     if (BlockAssignmentStrategy.BLOCK_NUM_FIRST == blockAssignmentStrategy) {
       if (blockInfos.size() > 0) {
-        sizePerNode = blockInfos.size() / noofNodes;
+        sizePerNode = blockInfos.size() / numOfNodes;
       }
       sizePerNode = sizePerNode <= 0 ? 1 : sizePerNode;
     } else if (BlockAssignmentStrategy.BLOCK_SIZE_FIRST == blockAssignmentStrategy
@@ -587,7 +587,7 @@ public final class CarbonLoaderUtil {
       for (Distributable blockInfo : uniqueBlocks) {
         totalFileSize += ((TableBlockInfo) blockInfo).getBlockLength();
       }
-      sizePerNode = totalFileSize / noofNodes;
+      sizePerNode = totalFileSize / numOfNodes;
     }
 
     // if enable to control the minimum amount of input data for each node
@@ -1152,7 +1152,7 @@ public final class CarbonLoaderUtil {
    * @return
    * @throws IOException
    */
-  public static String mergeIndexFilesinPartitionedSegment(CarbonTable table, String segmentId,
+  public static String mergeIndexFilesInPartitionedSegment(CarbonTable table, String segmentId,
       String uuid) throws IOException {
     String tablePath = table.getTablePath();
     return new CarbonIndexFileMergeWriter(table)
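
On the CarbonLoaderUtil hunks above: the renamed numOfNodes feeds the per-node average that block assignment works from. An illustrative Scala condensation of that arithmetic only (the real nodeBlockMapping additionally honors activeNodes, data locality and the load_min_size_inmb minimum):

    // Per-node quota under the two strategies touched in this diff:
    // BLOCK_NUM_FIRST averages by block count (at least 1 block per node),
    // BLOCK_SIZE_FIRST averages by total bytes.
    def sizePerNode(blockCount: Int, totalBytes: Long, numOfNodes: Int,
        blockNumFirst: Boolean): Long =
      if (blockNumFirst) math.max(1L, blockCount.toLong / numOfNodes)
      else totalBytes / numOfNodes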