Posted to commits@carbondata.apache.org by ch...@apache.org on 2016/11/25 01:31:50 UTC
[1/2] incubator-carbondata git commit: change Logging
Repository: incubator-carbondata
Updated Branches:
refs/heads/master 068288d02 -> 75e02caf4
change Logging
Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/a28e605c
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/a28e605c
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/a28e605c
Branch: refs/heads/master
Commit: a28e605ce6136a0858e75d53f2a1325113a19821
Parents: 068288d
Author: jackylk <ja...@huawei.com>
Authored: Fri Nov 25 00:55:59 2016 +0800
Committer: jackylk <ja...@huawei.com>
Committed: Fri Nov 25 00:55:59 2016 +0800
----------------------------------------------------------------------
.../examples/util/AllDictionaryUtil.scala | 13 +-
.../spark/CarbonDataFrameWriter.scala | 8 +-
.../spark/rdd/CarbonCleanFilesRDD.scala | 4 +-
.../spark/rdd/CarbonDataLoadRDD.scala | 22 ++--
.../spark/rdd/CarbonDataRDDFactory.scala | 124 +++++++++----------
.../spark/rdd/CarbonDeleteLoadByDateRDD.scala | 4 +-
.../spark/rdd/CarbonDeleteLoadRDD.scala | 4 +-
.../spark/rdd/CarbonDropTableRDD.scala | 4 +-
.../spark/rdd/CarbonGlobalDictionaryRDD.scala | 8 +-
.../carbondata/spark/rdd/CarbonMergerRDD.scala | 2 +-
.../carbondata/spark/rdd/CarbonScanRDD.scala | 4 +-
.../spark/rdd/NewCarbonDataLoadRDD.scala | 4 +-
.../carbondata/spark/util/CarbonScalaUtil.scala | 7 +-
.../spark/util/GlobalDictionaryUtil.scala | 57 +++++----
.../org/apache/spark/sql/CarbonContext.scala | 4 +-
.../sql/CarbonDatasourceHadoopRelation.scala | 4 +-
.../spark/sql/CarbonDatasourceRelation.scala | 2 +-
.../org/apache/spark/sql/CarbonSqlParser.scala | 4 +-
.../spark/sql/hive/CarbonMetastoreCatalog.scala | 9 +-
.../spark/sql/hive/cli/CarbonSQLCLIDriver.scala | 14 +--
.../scala/org/apache/spark/util/FileUtils.scala | 11 +-
.../blockprune/BlockPruneQueryTestCase.scala | 6 +-
.../dataload/DefaultSourceTestCase.scala | 4 +-
.../TestDataWithDicExcludeAndInclude.scala | 4 +-
.../spark/util/AllDictionaryTestCase.scala | 4 +-
.../AutoHighCardinalityIdentifyTestCase.scala | 4 +-
.../util/ExternalColumnDictionaryTestCase.scala | 8 +-
...GlobalDictionaryUtilConcurrentTestCase.scala | 2 +-
.../util/GlobalDictionaryUtilTestCase.scala | 8 +-
.../spark/sql/common/util/CarbonFunSuite.scala | 10 +-
.../spark/sql/common/util/QueryTest.scala | 5 +-
31 files changed, 186 insertions(+), 182 deletions(-)
----------------------------------------------------------------------
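Every file below applies the same mechanical change: the org.apache.spark.Logging mix-in (removed from Spark's public API in the 2.x line) is dropped in favour of CarbonData's own LogServiceFactory. A minimal sketch of the before/after shape, with SomeUtil a hypothetical stand-in for the objects and classes touched in this commit:

    // Before: log methods inherited from the (now Spark-internal) trait
    import org.apache.spark.Logging

    object SomeUtil extends Logging {        // SomeUtil is hypothetical
      def run(): Unit = logInfo("running")
    }

    // After: an explicit logger obtained from CarbonData's factory,
    // as done file by file in the hunks below
    import org.apache.carbondata.common.logging.LogServiceFactory

    object SomeUtil {
      private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
      def run(): Unit = LOGGER.info("running")
    }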
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/examples/src/main/scala/org/apache/carbondata/examples/util/AllDictionaryUtil.scala
----------------------------------------------------------------------
diff --git a/examples/src/main/scala/org/apache/carbondata/examples/util/AllDictionaryUtil.scala b/examples/src/main/scala/org/apache/carbondata/examples/util/AllDictionaryUtil.scala
index bd625f3..6011bcb 100644
--- a/examples/src/main/scala/org/apache/carbondata/examples/util/AllDictionaryUtil.scala
+++ b/examples/src/main/scala/org/apache/carbondata/examples/util/AllDictionaryUtil.scala
@@ -21,12 +21,13 @@ import java.io.DataOutputStream
import scala.collection.mutable.{ArrayBuffer, HashSet}
-import org.apache.spark.Logging
import org.apache.spark.SparkContext
+import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.datastorage.store.impl.FileFactory
-object AllDictionaryUtil extends Logging{
+object AllDictionaryUtil {
+ private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
def extractDictionary(sc: SparkContext,
srcData: String,
outputPath: String,
@@ -50,7 +51,7 @@ object AllDictionaryUtil extends Logging{
result += ((i, tokens(i)))
} catch {
case ex: ArrayIndexOutOfBoundsException =>
- logError("Read a bad record: " + x)
+ LOGGER.error("Read a bad record: " + x)
}
}
}
@@ -75,7 +76,7 @@ object AllDictionaryUtil extends Logging{
}
} catch {
case ex: Exception =>
- logError("Clean dictionary catching exception:" + ex)
+ LOGGER.error("Clean dictionary catching exception:" + ex)
}
}
@@ -93,14 +94,14 @@ object AllDictionaryUtil extends Logging{
}
} catch {
case ex: Exception =>
- logError("Save dictionary to file catching exception:" + ex)
+ LOGGER.error("Save dictionary to file catching exception:" + ex)
} finally {
if (writer != null) {
try {
writer.close()
} catch {
case ex: Exception =>
- logError("Close output stream catching exception:" + ex)
+ LOGGER.error("Close output stream catching exception:" + ex)
}
}
}
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala
index 3596393..a1a2ecb 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/CarbonDataFrameWriter.scala
@@ -18,15 +18,17 @@
package org.apache.carbondata.spark
import org.apache.hadoop.fs.Path
-import org.apache.spark.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.execution.command.LoadTable
import org.apache.spark.sql.types._
+import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.carbon.metadata.datatype.{DataType => CarbonType}
import org.apache.carbondata.core.constants.CarbonCommonConstants
-class CarbonDataFrameWriter(val dataFrame: DataFrame) extends Logging {
+class CarbonDataFrameWriter(val dataFrame: DataFrame) {
+
+ private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
def saveAsCarbonFile(parameters: Map[String, String] = Map()): Unit = {
checkContext()
@@ -84,7 +86,7 @@ class CarbonDataFrameWriter(val dataFrame: DataFrame) extends Logging {
size
}
- logInfo(s"temporary CSV file size: ${countSize / 1024 / 1024} MB")
+ LOGGER.info(s"temporary CSV file size: ${countSize / 1024 / 1024} MB")
try {
cc.sql(makeLoadString(tempCSVFolder, options))
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonCleanFilesRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonCleanFilesRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonCleanFilesRDD.scala
index 3a5d952..51d65e3 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonCleanFilesRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonCleanFilesRDD.scala
@@ -20,7 +20,7 @@ package org.apache.carbondata.spark.rdd
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
-import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}
+import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.execution.command.Partitioner
@@ -34,7 +34,7 @@ class CarbonCleanFilesRDD[V: ClassTag](
databaseName: String,
tableName: String,
partitioner: Partitioner)
- extends RDD[V](sc, Nil) with Logging {
+ extends RDD[V](sc, Nil) {
sc.setLocalProperty("spark.scheduler.pool", "DDL")
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataLoadRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataLoadRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataLoadRDD.scala
index 2a36f30..0b23657 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataLoadRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataLoadRDD.scala
@@ -26,8 +26,7 @@ import java.util.UUID
import scala.collection.JavaConverters._
import scala.util.Random
-import org.apache.spark.{Logging, Partition, SerializableWritable, SparkContext, SparkEnv,
-TaskContext}
+import org.apache.spark.{Partition, SerializableWritable, SparkContext, SparkEnv, TaskContext}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.execution.command.Partitioner
@@ -83,7 +82,8 @@ class SparkPartitionLoader(model: CarbonLoadModel,
storePath: String,
kettleHomePath: String,
loadCount: Int,
- loadMetadataDetails: LoadMetadataDetails) extends Logging {
+ loadMetadataDetails: LoadMetadataDetails) {
+ private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
var storeLocation: String = ""
@@ -129,7 +129,7 @@ class SparkPartitionLoader(model: CarbonLoadModel,
case e: DataLoadingException => if (e.getErrorCode ==
DataProcessorConstants.BAD_REC_FOUND) {
loadMetadataDetails.setLoadStatus(CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS)
- logInfo("Bad Record Found")
+ LOGGER.info("Bad Record Found")
} else {
throw e
}
@@ -142,17 +142,17 @@ class SparkPartitionLoader(model: CarbonLoadModel,
CarbonLoaderUtil.deleteLocalDataLoadFolderLocation(model, isCompaction)
} catch {
case e: Exception =>
- logError("Failed to delete local data", e)
+ LOGGER.error(e, "Failed to delete local data")
}
if (!CarbonCommonConstants.STORE_LOADSTATUS_FAILURE.equals(
loadMetadataDetails.getLoadStatus)) {
if (CarbonCommonConstants.STORE_LOADSTATUS_PARTIAL_SUCCESS
.equals(loadMetadataDetails.getLoadStatus)) {
- logInfo("DataLoad complete")
- logInfo("Data Load partially successful with LoadCount:" + loadCount)
+ LOGGER.info("DataLoad complete")
+ LOGGER.info("Data Load partially successful with LoadCount:" + loadCount)
} else {
- logInfo("DataLoad complete")
- logInfo("Data Loaded successfully with LoadCount:" + loadCount)
+ LOGGER.info("DataLoad complete")
+ LOGGER.info("Data Loaded successfully with LoadCount:" + loadCount)
CarbonTimeStatisticsFactory.getLoadStatisticsInstance.printStatisticsInfo(
model.getPartitionId)
}
@@ -191,7 +191,7 @@ class DataFileLoaderRDD[K, V](
tableCreationTime: Long,
schemaLastUpdatedTime: Long,
blocksGroupBy: Array[(String, Array[BlockDetails])],
- isTableSplitPartition: Boolean) extends RDD[(K, V)](sc, Nil) with Logging {
+ isTableSplitPartition: Boolean) extends RDD[(K, V)](sc, Nil) {
sc.setLocalProperty("spark.scheduler.pool", "DDL")
@@ -496,7 +496,7 @@ class DataFrameLoaderRDD[K, V](
loadCount: Integer,
tableCreationTime: Long,
schemaLastUpdatedTime: Long,
- prev: RDD[Row]) extends RDD[(K, V)](prev) with Logging {
+ prev: RDD[Row]) extends RDD[(K, V)](prev) {
sc.setLocalProperty("spark.scheduler.pool", "DDL")
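One non-mechanical detail in this file: the exception-logging calls flip their argument order. Spark's trait takes the message first (logError(msg, e)), whereas the LogService calls written here take the throwable first. A sketch of the resulting call shape, matching the hunk above:

    try {
      CarbonLoaderUtil.deleteLocalDataLoadFolderLocation(model, isCompaction)
    } catch {
      case e: Exception =>
        // throwable first, then message, per the LogService usage in this commit
        LOGGER.error(e, "Failed to delete local data")
    }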
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
index 1382efa..4edfa6b 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDataRDDFactory.scala
@@ -63,9 +63,9 @@ import org.apache.carbondata.spark.util.{CarbonQueryUtil, LoadMetadataUtil}
* This is the factory class which can create different RDD depends on user needs.
*
*/
-object CarbonDataRDDFactory extends Logging {
+object CarbonDataRDDFactory {
- val logger = LogServiceFactory.getLogService(CarbonDataRDDFactory.getClass.getName)
+ private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
def mergeCarbonData(
sqlContext: SQLContext,
@@ -114,7 +114,7 @@ object CarbonDataRDDFactory extends Logging {
if (resultMap.nonEmpty) {
if (resultMap.size == 1) {
if (resultMap.contains("")) {
- logError("Delete by Date request is failed")
+ LOGGER.error("Delete by Date request is failed")
sys.error("Delete by Date request is failed, potential causes " +
"Empty store or Invalid column type, For more details please refer logs.")
}
@@ -152,7 +152,7 @@ object CarbonDataRDDFactory extends Logging {
)
try {
if (carbonLock.lockWithRetries()) {
- logInfo("Successfully got the table metadata file lock")
+ LOGGER.info("Successfully got the table metadata file lock")
if (updatedLoadMetadataDetailsList.nonEmpty) {
// TODO: Load Aggregate tables after retention.
}
@@ -167,14 +167,14 @@ object CarbonDataRDDFactory extends Logging {
}
} finally {
if (carbonLock.unlock()) {
- logInfo("unlock the table metadata file successfully")
+ LOGGER.info("unlock the table metadata file successfully")
} else {
- logError("Unable to unlock the metadata lock")
+ LOGGER.error("Unable to unlock the metadata lock")
}
}
} else {
- logError("Delete by Date request is failed")
- logger.audit(s"The delete load by date is failed for $databaseName.$tableName")
+ LOGGER.error("Delete by Date request is failed")
+ LOGGER.audit(s"The delete load by date is failed for $databaseName.$tableName")
sys.error("Delete by Date request is failed, potential causes " +
"Empty store or Invalid column type, For more details please refer logs.")
}
@@ -190,7 +190,7 @@ object CarbonDataRDDFactory extends Logging {
val spaceConsumed = FileUtils.getSpaceOccupied(filePaths)
val blockSize =
hadoopConfiguration.getLongBytes("dfs.blocksize", CarbonCommonConstants.CARBON_256MB)
- logInfo("[Block Distribution]")
+ LOGGER.info("[Block Distribution]")
// calculate new block size to allow use all the parallelism
if (spaceConsumed < defaultParallelism * blockSize) {
var newSplitSize: Long = spaceConsumed / defaultParallelism
@@ -198,8 +198,9 @@ object CarbonDataRDDFactory extends Logging {
newSplitSize = CarbonCommonConstants.CARBON_16MB
}
hadoopConfiguration.set(FileInputFormat.SPLIT_MAXSIZE, newSplitSize.toString)
- logInfo(s"totalInputSpaceConsumed: $spaceConsumed , defaultParallelism: $defaultParallelism")
- logInfo(s"mapreduce.input.fileinputformat.split.maxsize: ${ newSplitSize.toString }")
+ LOGGER.info(s"totalInputSpaceConsumed: $spaceConsumed , " +
+ s"defaultParallelism: $defaultParallelism")
+ LOGGER.info(s"mapreduce.input.fileinputformat.split.maxsize: ${ newSplitSize.toString }")
}
}
@@ -216,7 +217,7 @@ object CarbonDataRDDFactory extends Logging {
compactionType = CompactionType.MINOR_COMPACTION
}
- logger.audit(s"Compaction request received for table " +
+ LOGGER.audit(s"Compaction request received for table " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
val tableCreationTime = CarbonEnv.getInstance(sqlContext).carbonCatalog
@@ -247,7 +248,7 @@ object CarbonDataRDDFactory extends Logging {
// if any other request comes at this time then it will create a compaction request file.
// so that this will be taken up by the compaction process which is executing.
if (!isConcurrentCompactionAllowed) {
- logger.info("System level compaction lock is enabled.")
+ LOGGER.info("System level compaction lock is enabled.")
handleCompactionForSystemLocking(sqlContext,
carbonLoadModel,
partitioner,
@@ -266,7 +267,7 @@ object CarbonDataRDDFactory extends Logging {
)
if (lock.lockWithRetries()) {
- logger.info("Acquired the compaction lock for table" +
+ LOGGER.info("Acquired the compaction lock for table" +
s" ${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
try {
startCompactionThreads(sqlContext,
@@ -280,13 +281,13 @@ object CarbonDataRDDFactory extends Logging {
)
} catch {
case e: Exception =>
- logger.error(s"Exception in start compaction thread. ${ e.getMessage }")
+ LOGGER.error(s"Exception in start compaction thread. ${ e.getMessage }")
lock.unlock()
}
} else {
- logger.audit("Not able to acquire the compaction lock for table " +
+ LOGGER.audit("Not able to acquire the compaction lock for table " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
- logger.error(s"Not able to acquire the compaction lock for table" +
+ LOGGER.error(s"Not able to acquire the compaction lock for table" +
s" ${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
sys.error("Table is already locked for compaction. Please try after some time.")
}
@@ -307,7 +308,7 @@ object CarbonDataRDDFactory extends Logging {
LockUsage.SYSTEMLEVEL_COMPACTION_LOCK
)
if (lock.lockWithRetries()) {
- logger.info(s"Acquired the compaction lock for table ${ carbonLoadModel.getDatabaseName }" +
+ LOGGER.info(s"Acquired the compaction lock for table ${ carbonLoadModel.getDatabaseName }" +
s".${ carbonLoadModel.getTableName }")
try {
startCompactionThreads(sqlContext,
@@ -321,7 +322,7 @@ object CarbonDataRDDFactory extends Logging {
)
} catch {
case e: Exception =>
- logger.error(s"Exception in start compaction thread. ${ e.getMessage }")
+ LOGGER.error(s"Exception in start compaction thread. ${ e.getMessage }")
lock.unlock()
// if the compaction is a blocking call then only need to throw the exception.
if (compactionModel.isDDLTrigger) {
@@ -329,9 +330,9 @@ object CarbonDataRDDFactory extends Logging {
}
}
} else {
- logger.audit("Not able to acquire the system level compaction lock for table " +
+ LOGGER.audit("Not able to acquire the system level compaction lock for table " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
- logger.error("Not able to acquire the compaction lock for table " +
+ LOGGER.error("Not able to acquire the compaction lock for table " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
CarbonCompactionUtil
.createCompactionRequiredFile(carbonTable.getMetaDataFilepath, compactionType)
@@ -341,7 +342,7 @@ object CarbonDataRDDFactory extends Logging {
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }" +
" is in queue.")
} else {
- logger.error("Compaction is in progress, compaction request for table " +
+ LOGGER.error("Compaction is in progress, compaction request for table " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }" +
" is in queue.")
}
@@ -398,7 +399,7 @@ object CarbonDataRDDFactory extends Logging {
)
} catch {
case e: Exception =>
- logger.error(s"Exception in compaction thread ${ e.getMessage }")
+ LOGGER.error(s"Exception in compaction thread ${ e.getMessage }")
throw e
}
@@ -443,7 +444,7 @@ object CarbonDataRDDFactory extends Logging {
storeLocation: String): Unit = {
loadsToMerge.asScala.foreach(seg => {
- logger.info("loads identified for merge is " + seg.getLoadName)
+ LOGGER.info("loads identified for merge is " + seg.getLoadName)
}
)
@@ -484,7 +485,7 @@ object CarbonDataRDDFactory extends Logging {
CarbonLoaderUtil.deletePartialLoadDataIfExist(carbonLoadModel, true)
} catch {
case e: Exception =>
- logger.error(s"Exception in compaction thread while clean up of stale segments" +
+ LOGGER.error(s"Exception in compaction thread while clean up of stale segments" +
s" ${ e.getMessage }")
}
@@ -505,7 +506,7 @@ object CarbonDataRDDFactory extends Logging {
triggeredCompactionStatus = true
} catch {
case e: Exception =>
- logger.error(s"Exception in compaction thread ${ e.getMessage }")
+ LOGGER.error(s"Exception in compaction thread ${ e.getMessage }")
exception = e
}
// continue in case of exception also, check for all the tables.
@@ -515,14 +516,14 @@ object CarbonDataRDDFactory extends Logging {
).equalsIgnoreCase("true")
if (!isConcurrentCompactionAllowed) {
- logger.info("System level compaction lock is enabled.")
+ LOGGER.info("System level compaction lock is enabled.")
val skipCompactionTables = ListBuffer[CarbonTableIdentifier]()
var tableForCompaction = CarbonCompactionUtil
.getNextTableToCompact(CarbonEnv.getInstance(sqlContext).carbonCatalog.metadata
.tablesMeta.toArray, skipCompactionTables.toList.asJava
)
while (null != tableForCompaction) {
- logger.info("Compaction request has been identified for table " +
+ LOGGER.info("Compaction request has been identified for table " +
s"${ tableForCompaction.carbonTable.getDatabaseName }." +
s"${ tableForCompaction.carbonTableIdentifier.getTableName }")
val table: CarbonTable = tableForCompaction.carbonTable
@@ -555,7 +556,7 @@ object CarbonDataRDDFactory extends Logging {
)
} catch {
case e: Exception =>
- logger.error("Exception in compaction thread for table " +
+ LOGGER.error("Exception in compaction thread for table " +
s"${ tableForCompaction.carbonTable.getDatabaseName }." +
s"${ tableForCompaction.carbonTableIdentifier.getTableName }")
// not handling the exception. only logging as this is not the table triggered
@@ -567,7 +568,7 @@ object CarbonDataRDDFactory extends Logging {
// if the compaction request file is not been able to delete then
// add those tables details to the skip list so that it wont be considered next.
skipCompactionTables.+=:(tableForCompaction.carbonTableIdentifier)
- logger.error("Compaction request file can not be deleted for table " +
+ LOGGER.error("Compaction request file can not be deleted for table " +
s"${ tableForCompaction.carbonTable.getDatabaseName }." +
s"${ tableForCompaction.carbonTableIdentifier.getTableName }")
}
@@ -621,7 +622,7 @@ object CarbonDataRDDFactory extends Logging {
CarbonLoaderUtil.deletePartialLoadDataIfExist(carbonLoadModel, true)
} catch {
case e: Exception =>
- logger.error(s"Exception in compaction thread while clean up of stale segments" +
+ LOGGER.error(s"Exception in compaction thread while clean up of stale segments" +
s" ${ e.getMessage }")
}
}
@@ -639,10 +640,10 @@ object CarbonDataRDDFactory extends Logging {
val isAgg = false
// for handling of the segment Merging.
def handleSegmentMerging(tableCreationTime: Long): Unit = {
- logger.info(s"compaction need status is" +
+ LOGGER.info(s"compaction need status is" +
s" ${ CarbonDataMergerUtil.checkIfAutoLoadMergingRequired() }")
if (CarbonDataMergerUtil.checkIfAutoLoadMergingRequired()) {
- logger.audit(s"Compaction request received for table " +
+ LOGGER.audit(s"Compaction request received for table " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
val compactionSize = 0
val isCompactionTriggerByDDl = false
@@ -687,7 +688,7 @@ object CarbonDataRDDFactory extends Logging {
)
if (lock.lockWithRetries()) {
- logger.info("Acquired the compaction lock.")
+ LOGGER.info("Acquired the compaction lock.")
try {
startCompactionThreads(sqlContext,
carbonLoadModel,
@@ -700,17 +701,17 @@ object CarbonDataRDDFactory extends Logging {
)
} catch {
case e: Exception =>
- logger.error(s"Exception in start compaction thread. ${ e.getMessage }")
+ LOGGER.error(s"Exception in start compaction thread. ${ e.getMessage }")
lock.unlock()
throw e
}
} else {
- logger.audit("Not able to acquire the compaction lock for table " +
+ LOGGER.audit("Not able to acquire the compaction lock for table " +
s"${ carbonLoadModel.getDatabaseName }.${
carbonLoadModel
.getTableName
}")
- logger.error("Not able to acquire the compaction lock for table " +
+ LOGGER.error("Not able to acquire the compaction lock for table " +
s"${ carbonLoadModel.getDatabaseName }.${
carbonLoadModel
.getTableName
@@ -721,10 +722,10 @@ object CarbonDataRDDFactory extends Logging {
}
try {
- logger.audit(s"Data load request has been received for table" +
+ LOGGER.audit(s"Data load request has been received for table" +
s" ${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
if (!useKettle) {
- logger.audit("Data is loading with New Data Flow for table " +
+ LOGGER.audit("Data is loading with New Data Flow for table " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
}
// Check if any load need to be deleted before loading new data
@@ -762,7 +763,7 @@ object CarbonDataRDDFactory extends Logging {
CarbonLoaderUtil.deletePartialLoadDataIfExist(carbonLoadModel, false)
} catch {
case e: Exception =>
- logger
+ LOGGER
.error(s"Exception in data load while clean up of stale segments ${ e.getMessage }")
}
@@ -876,12 +877,9 @@ object CarbonDataRDDFactory extends Logging {
.nodeBlockMapping(blockList.toSeq.asJava, -1, activeNodes.toList.asJava).asScala
.toSeq
val timeElapsed: Long = System.currentTimeMillis - startTime
- logInfo("Total Time taken in block allocation: " + timeElapsed)
- logInfo(s"Total no of blocks: ${ blockList.length }, No.of Nodes: ${
- nodeBlockMapping
- .size
- }"
- )
+ LOGGER.info("Total Time taken in block allocation: " + timeElapsed)
+ LOGGER.info(s"Total no of blocks: ${ blockList.length }, " +
+ s"No.of Nodes: ${nodeBlockMapping.size}")
var str = ""
nodeBlockMapping.foreach(entry => {
val tableBlock = entry._2
@@ -897,7 +895,7 @@ object CarbonDataRDDFactory extends Logging {
str = str + "\n"
}
)
- logInfo(str)
+ LOGGER.info(str)
blocksGroupBy = nodeBlockMapping.map(entry => {
val blockDetailsList =
entry._2.asScala.map(distributable => {
@@ -1009,17 +1007,17 @@ object CarbonDataRDDFactory extends Logging {
executorMessage = ex.getCause.getMessage
errorMessage = errorMessage + ": " + executorMessage
}
- logInfo(errorMessage)
- logger.error(ex)
+ LOGGER.info(errorMessage)
+ LOGGER.error(ex)
}
if (loadStatus == CarbonCommonConstants.STORE_LOADSTATUS_FAILURE) {
- logInfo("********starting clean up**********")
+ LOGGER.info("********starting clean up**********")
CarbonLoaderUtil.deleteSegment(carbonLoadModel, currentLoadCount)
- logInfo("********clean up done**********")
- logger.audit(s"Data load is failed for " +
+ LOGGER.info("********clean up done**********")
+ LOGGER.audit(s"Data load is failed for " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
- logWarning("Cannot write load metadata file as data load failed")
+ LOGGER.warn("Cannot write load metadata file as data load failed")
throw new Exception(errorMessage)
} else {
val metadataDetails = status(0)._2
@@ -1028,19 +1026,19 @@ object CarbonDataRDDFactory extends Logging {
carbonLoadModel, loadStatus, loadStartTime)
if (!status) {
val errorMessage = "Dataload failed due to failure in table status updation."
- logger.audit("Data load is failed for " +
+ LOGGER.audit("Data load is failed for " +
s"${ carbonLoadModel.getDatabaseName }.${
carbonLoadModel
.getTableName
}")
- logger.error("Dataload failed due to failure in table status updation.")
+ LOGGER.error("Dataload failed due to failure in table status updation.")
throw new Exception(errorMessage)
}
} else if (!carbonLoadModel.isRetentionRequest) {
// TODO : Handle it
- logInfo("********Database updated**********")
+ LOGGER.info("********Database updated**********")
}
- logger.audit("Data load is successful for " +
+ LOGGER.audit("Data load is successful for " +
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
try {
// compaction handling
@@ -1088,7 +1086,7 @@ object CarbonDataRDDFactory extends Logging {
try {
// Update load metadate file after cleaning deleted nodes
if (carbonTableStatusLock.lockWithRetries()) {
- logger.info("Table status lock has been successfully acquired.")
+ LOGGER.info("Table status lock has been successfully acquired.")
// read latest table status again.
val latestMetadata = segmentStatusManager.readLoadMetadata(loadMetadataFilePath)
@@ -1108,8 +1106,8 @@ object CarbonDataRDDFactory extends Logging {
s"${ carbonLoadModel.getTableName }" +
". Not able to acquire the table status lock due to other operation " +
"running in the background."
- logger.audit(errorMsg)
- logger.error(errorMsg)
+ LOGGER.audit(errorMsg)
+ LOGGER.error(errorMsg)
throw new Exception(errorMsg + " Please try after some time.")
}
@@ -1143,7 +1141,7 @@ object CarbonDataRDDFactory extends Logging {
)
try {
if (carbonCleanFilesLock.lockWithRetries()) {
- logger.info("Clean files lock has been successfully acquired.")
+ LOGGER.info("Clean files lock has been successfully acquired.")
deleteLoadsAndUpdateMetadata(carbonLoadModel,
table,
partitioner,
@@ -1154,8 +1152,8 @@ object CarbonDataRDDFactory extends Logging {
s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }" +
". Not able to acquire the clean files lock due to another clean files " +
"operation is running in the background."
- logger.audit(errorMsg)
- logger.error(errorMsg)
+ LOGGER.audit(errorMsg)
+ LOGGER.error(errorMsg)
throw new Exception(errorMsg + " Please try after some time.")
}
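CarbonDataRDDFactory is the one file that already used LogServiceFactory before this commit, apparently because Spark's trait has no audit level; it therefore mixed logger.audit/logger.error with the trait's logInfo/logError. The change collapses both onto a single private LOGGER, keeping the paired audit-plus-error pattern seen throughout the hunks above:

    // audit record for the operation outcome, plus an operational error entry
    LOGGER.audit(s"Data load is failed for " +
      s"${ carbonLoadModel.getDatabaseName }.${ carbonLoadModel.getTableName }")
    LOGGER.error("Dataload failed due to failure in table status updation.")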
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDeleteLoadByDateRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDeleteLoadByDateRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDeleteLoadByDateRDD.scala
index 17b487c..15f9b44 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDeleteLoadByDateRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDeleteLoadByDateRDD.scala
@@ -19,7 +19,7 @@ package org.apache.carbondata.spark.rdd
import scala.collection.JavaConverters._
-import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}
+import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.execution.command.Partitioner
@@ -42,7 +42,7 @@ class CarbonDeleteLoadByDateRDD[K, V](
dimTableName: String,
storePath: String,
loadMetadataDetails: List[LoadMetadataDetails])
- extends RDD[(K, V)](sc, Nil) with Logging {
+ extends RDD[(K, V)](sc, Nil) {
sc.setLocalProperty("spark.scheduler.pool", "DDL")
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDeleteLoadRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDeleteLoadRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDeleteLoadRDD.scala
index 57bf124..a78c67b 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDeleteLoadRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDeleteLoadRDD.scala
@@ -20,7 +20,7 @@ package org.apache.carbondata.spark.rdd
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
-import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}
+import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.execution.command.Partitioner
@@ -34,7 +34,7 @@ class CarbonDeleteLoadRDD[V: ClassTag](
databaseName: String,
tableName: String,
partitioner: Partitioner)
- extends RDD[V](sc, Nil) with Logging {
+ extends RDD[V](sc, Nil) {
sc.setLocalProperty("spark.scheduler.pool", "DDL")
override def getPartitions: Array[Partition] = {
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDropTableRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDropTableRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDropTableRDD.scala
index 295ff61..abdeaf5 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDropTableRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonDropTableRDD.scala
@@ -19,7 +19,7 @@ package org.apache.carbondata.spark.rdd
import scala.reflect.ClassTag
-import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}
+import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.execution.command.Partitioner
@@ -32,7 +32,7 @@ class CarbonDropTableRDD[V: ClassTag](
databaseName: String,
tableName: String,
partitioner: Partitioner)
- extends RDD[V](sc, Nil) with Logging {
+ extends RDD[V](sc, Nil) {
sc.setLocalProperty("spark.scheduler.pool", "DDL")
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonGlobalDictionaryRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonGlobalDictionaryRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonGlobalDictionaryRDD.scala
index bce4eb2..ced45b7 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonGlobalDictionaryRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonGlobalDictionaryRDD.scala
@@ -171,7 +171,7 @@ case class ColumnDistinctValues(values: Array[String], rowCount: Long) extends S
class CarbonAllDictionaryCombineRDD(
prev: RDD[(String, Iterable[String])],
model: DictionaryLoadModel)
- extends RDD[(Int, ColumnDistinctValues)](prev) with Logging {
+ extends RDD[(Int, ColumnDistinctValues)](prev) {
override def getPartitions: Array[Partition] = {
firstParent[(String, Iterable[String])].partitions
@@ -235,7 +235,7 @@ class CarbonAllDictionaryCombineRDD(
class CarbonBlockDistinctValuesCombineRDD(
prev: RDD[Row],
model: DictionaryLoadModel)
- extends RDD[(Int, ColumnDistinctValues)](prev) with Logging {
+ extends RDD[(Int, ColumnDistinctValues)](prev) {
override def getPartitions: Array[Partition] = firstParent[Row].partitions
@@ -285,7 +285,7 @@ class CarbonBlockDistinctValuesCombineRDD(
class CarbonGlobalDictionaryGenerateRDD(
prev: RDD[(Int, ColumnDistinctValues)],
model: DictionaryLoadModel)
- extends RDD[(Int, String, Boolean)](prev) with Logging {
+ extends RDD[(Int, String, Boolean)](prev) {
override def getPartitions: Array[Partition] = firstParent[(Int, ColumnDistinctValues)].partitions
@@ -479,7 +479,7 @@ class CarbonColumnDictGenerateRDD(carbonLoadModel: CarbonLoadModel,
dimensions: Array[CarbonDimension],
hdfsLocation: String,
dictFolderPath: String)
- extends RDD[(Int, ColumnDistinctValues)](sparkContext, Nil) with Logging {
+ extends RDD[(Int, ColumnDistinctValues)](sparkContext, Nil) {
override def getPartitions: Array[Partition] = {
val primDimensions = dictionaryLoadModel.primDimensions
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
index 1a7aaf6..6c2e993 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
@@ -56,7 +56,7 @@ class CarbonMergerRDD[K, V](
carbonLoadModel: CarbonLoadModel,
carbonMergerMapping: CarbonMergerMapping,
confExecutorsTemp: String)
- extends RDD[(K, V)](sc, Nil) with Logging {
+ extends RDD[(K, V)](sc, Nil) {
sc.setLocalProperty("spark.scheduler.pool", "DDL")
sc.setLocalProperty("spark.job.interruptOnCancel", "true")
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
index d56b00f..7798e5c 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
@@ -25,7 +25,7 @@ import scala.reflect.ClassTag
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.InputSplit
import org.apache.hadoop.mapreduce.Job
-import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}
+import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.hive.DistributionUtil
@@ -76,7 +76,7 @@ class CarbonScanRDD[V: ClassTag](
tableCreationTime: Long,
schemaLastUpdatedTime: Long,
baseStoreLocation: String)
- extends RDD[V](sc, Nil) with Logging {
+ extends RDD[V](sc, Nil) {
override def getPartitions: Array[Partition] = {
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
index 6affe66..86c12f8 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
@@ -24,7 +24,7 @@ import java.util.{Date, UUID}
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
-import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}
+import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.mapred.{CarbonHadoopMapReduceUtil, CarbonSerializableConfiguration}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.execution.command.Partitioner
@@ -55,7 +55,7 @@ class NewCarbonDataLoadRDD[K, V](
loadCount: Integer,
blocksGroupBy: Array[(String, Array[BlockDetails])],
isTableSplitPartition: Boolean)
- extends RDD[(K, V)](sc, Nil) with CarbonHadoopMapReduceUtil with Logging {
+ extends RDD[(K, V)](sc, Nil) with CarbonHadoopMapReduceUtil {
sc.setLocalProperty("spark.scheduler.pool", "DDL")
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
index 028c52f..afc6cc5 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
@@ -21,11 +21,11 @@ import java.io.File
import scala.collection.JavaConverters._
-import org.apache.spark.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.hive.{CarbonMetaData, DictionaryMap}
import org.apache.spark.sql.types._
+import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.carbon.metadata.datatype.{DataType => CarbonDataType}
import org.apache.carbondata.core.carbon.metadata.encoder.Encoding
import org.apache.carbondata.core.carbon.metadata.schema.table.CarbonTable
@@ -33,7 +33,7 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastorage.store.impl.FileFactory
import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil}
-object CarbonScalaUtil extends Logging {
+object CarbonScalaUtil {
def convertSparkToCarbonDataType(
dataType: org.apache.spark.sql.types.DataType): CarbonDataType = {
dataType match {
@@ -149,7 +149,8 @@ object CarbonScalaUtil extends Logging {
// find the kettle home under the previous folder
// e.g:file:/srv/bigdata/install/spark/sparkJdbc/carbonlib/cabonplugins
kettleHomePath = carbonLibPath + File.separator + CarbonCommonConstants.KETTLE_HOME_NAME
- logInfo(s"carbon.kettle.home path is not exists, reset it as $kettleHomePath")
+ val logger = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+ logger.error(s"carbon.kettle.home path is not exists, reset it as $kettleHomePath")
val newKettleHomeFileType = FileFactory.getFileType(kettleHomePath)
val newKettleHomeFile = FileFactory.getCarbonFile(kettleHomePath, newKettleHomeFileType)
// check if the found kettle home exists
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
index db01367..f17c62b 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
@@ -27,13 +27,14 @@ import scala.language.implicitConversions
import scala.util.control.Breaks.{break, breakable}
import org.apache.commons.lang3.{ArrayUtils, StringUtils}
-import org.apache.spark.{Accumulator, Logging}
+import org.apache.spark.Accumulator
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.hive.CarbonMetastoreCatalog
import org.apache.spark.util.FileUtils
import org.apache.carbondata.common.factory.CarbonCommonFactory
+import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.cache.dictionary.Dictionary
import org.apache.carbondata.core.carbon.{CarbonDataLoadSchema, CarbonTableIdentifier}
import org.apache.carbondata.core.carbon.metadata.datatype.DataType
@@ -54,7 +55,8 @@ import org.apache.carbondata.spark.rdd._
/**
* A object which provide a method to generate global dictionary from CSV files.
*/
-object GlobalDictionaryUtil extends Logging {
+object GlobalDictionaryUtil {
+ private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
/**
* The default separator to use if none is supplied to the constructor.
@@ -436,7 +438,7 @@ object GlobalDictionaryUtil extends Logging {
val columnName = model.primDimensions(x._1).getColName
if (CarbonCommonConstants.STORE_LOADSTATUS_FAILURE.equals(x._2)) {
result = true
- logError(s"table:$tableName column:$columnName generate global dictionary file failed")
+ LOGGER.error(s"table:$tableName column:$columnName generate global dictionary file failed")
}
if (x._3) {
noDictionaryColumns += model.primDimensions(x._1)
@@ -446,10 +448,10 @@ object GlobalDictionaryUtil extends Logging {
updateTableMetadata(carbonLoadModel, sqlContext, model, noDictionaryColumns.toArray)
}
if (result) {
- logError("generate global dictionary files failed")
+ LOGGER.error("generate global dictionary files failed")
throw new Exception("Failed to generate global dictionary files")
} else {
- logInfo("generate global dictionary successfully")
+ LOGGER.info("generate global dictionary successfully")
}
}
@@ -469,8 +471,8 @@ object GlobalDictionaryUtil extends Logging {
val colPathMapTrim = colPathMap.trim
val colNameWithPath = colPathMapTrim.split(":")
if (colNameWithPath.length == 1) {
- logError("the format of external column dictionary should be " +
- "columnName:columnPath, please check")
+ LOGGER.error("the format of external column dictionary should be " +
+ "columnName:columnPath, please check")
throw new DataLoadingException("the format of predefined column dictionary" +
" should be columnName:columnPath, please check")
}
@@ -506,8 +508,8 @@ object GlobalDictionaryUtil extends Logging {
val preDictDimensionOption = dimensions.filter(
_.getColName.equalsIgnoreCase(dimParent))
if (preDictDimensionOption.length == 0) {
- logError(s"Column $dimParent is not a key column " +
- s"in ${ table.getDatabaseName }.${ table.getTableName }")
+ LOGGER.error(s"Column $dimParent is not a key column " +
+ s"in ${ table.getDatabaseName }.${ table.getTableName }")
throw new DataLoadingException(s"Column $dimParent is not a key column. " +
s"Only key column can be part of dictionary " +
s"and used in COLUMNDICT option.")
@@ -607,7 +609,7 @@ object GlobalDictionaryUtil extends Logging {
var value: String = ""
// such as "," , "", throw ex
if (tokens.isEmpty) {
- logError("Read a bad dictionary record: " + x)
+ LOGGER.error("Read a bad dictionary record: " + x)
accum += 1
} else if (tokens.size == 1) {
// such as "1", "jone", throw ex
@@ -618,7 +620,7 @@ object GlobalDictionaryUtil extends Logging {
columnName = csvFileColumns(tokens(0).toInt)
} catch {
case ex: Exception =>
- logError("Read a bad dictionary record: " + x)
+ LOGGER.error("Read a bad dictionary record: " + x)
accum += 1
}
}
@@ -628,7 +630,7 @@ object GlobalDictionaryUtil extends Logging {
value = tokens(1)
} catch {
case ex: Exception =>
- logError("Read a bad dictionary record: " + x)
+ LOGGER.error("Read a bad dictionary record: " + x)
accum += 1
}
}
@@ -662,7 +664,7 @@ object GlobalDictionaryUtil extends Logging {
.filter(x => requireColumnsList.contains(x._1))
} catch {
case ex: Exception =>
- logError("Read dictionary files failed. Caused by: " + ex.getMessage)
+ LOGGER.error("Read dictionary files failed. Caused by: " + ex.getMessage)
throw ex
}
allDictionaryRdd
@@ -686,8 +688,8 @@ object GlobalDictionaryUtil extends Logging {
file.getName.endsWith(dictExt) && file.getSize > 0)) {
true
} else {
- logWarning("No dictionary files found or empty dictionary files! " +
- "Won't generate new dictionary.")
+ LOGGER.warn("No dictionary files found or empty dictionary files! " +
+ "Won't generate new dictionary.")
false
}
} else {
@@ -699,8 +701,8 @@ object GlobalDictionaryUtil extends Logging {
if (filePath.getSize > 0) {
true
} else {
- logWarning("No dictionary files found or empty dictionary files! " +
- "Won't generate new dictionary.")
+ LOGGER.warn("No dictionary files found or empty dictionary files! " +
+ "Won't generate new dictionary.")
false
}
} else {
@@ -729,7 +731,7 @@ object GlobalDictionaryUtil extends Logging {
}
headers = readLine.toLowerCase().split(delimiter)
} else {
- logError("Not found file header! Please set fileheader")
+ LOGGER.error("Not found file header! Please set fileheader")
throw new IOException("Failed to get file header")
}
headers
@@ -746,8 +748,8 @@ object GlobalDictionaryUtil extends Logging {
storePath: String,
dataFrame: Option[DataFrame] = None): Unit = {
try {
- var carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
- var carbonTableIdentifier = carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier
+ val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
+ val carbonTableIdentifier = carbonTable.getAbsoluteTableIdentifier.getCarbonTableIdentifier
// create dictionary folder if not exists
val carbonTablePath = CarbonStorePath.getCarbonTablePath(storePath, carbonTableIdentifier)
val dictfolderPath = carbonTablePath.getMetadataDirectoryPath
@@ -759,7 +761,7 @@ object GlobalDictionaryUtil extends Logging {
val allDictionaryPath = carbonLoadModel.getAllDictPath
if (StringUtils.isEmpty(allDictionaryPath)) {
- logInfo("Generate global dictionary from source data files!")
+ LOGGER.info("Generate global dictionary from source data files!")
// load data by using dataSource com.databricks.spark.csv
var df = if (dataFrame.isDefined) {
dataFrame.get
@@ -782,7 +784,7 @@ object GlobalDictionaryUtil extends Logging {
val msg = "The number of columns in the file header do not match the " +
"number of columns in the data file; Either delimiter " +
"or fileheader provided is not correct"
- logError(msg)
+ LOGGER.error(msg)
throw new DataLoadingException(msg)
}
// use fact file to generate global dict
@@ -801,7 +803,7 @@ object GlobalDictionaryUtil extends Logging {
// check result status
checkStatus(carbonLoadModel, sqlContext, model, statusList)
} else {
- logInfo("No column found for generating global dictionary in source data files")
+ LOGGER.info("No column found for generating global dictionary in source data files")
}
// generate global dict from dimension file
if (carbonLoadModel.getDimFolderPath != null) {
@@ -823,12 +825,13 @@ object GlobalDictionaryUtil extends Logging {
inputRDDforDim, modelforDim).collect()
checkStatus(carbonLoadModel, sqlContext, modelforDim, statusListforDim)
} else {
- logInfo(s"No columns in dimension table $dimTableName to generate global dictionary")
+ LOGGER.info(s"No columns in dimension table $dimTableName " +
+ "to generate global dictionary")
}
}
}
} else {
- logInfo("Generate global dictionary from dictionary files!")
+ LOGGER.info("Generate global dictionary from dictionary files!")
val isNonempty = validateAllDictionaryPath(allDictionaryPath)
if (isNonempty) {
var headers = if (StringUtils.isEmpty(carbonLoadModel.getCsvHeader)) {
@@ -860,13 +863,13 @@ object GlobalDictionaryUtil extends Logging {
"not in correct format!")
}
} else {
- logInfo("have no column need to generate global dictionary")
+ LOGGER.info("have no column need to generate global dictionary")
}
}
}
} catch {
case ex: Exception =>
- logError("generate global dictionary failed", ex)
+ LOGGER.error(ex, "generate global dictionary failed")
throw ex
}
}
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/spark/sql/CarbonContext.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonContext.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonContext.scala
index a792eca..5d2221d 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonContext.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonContext.scala
@@ -21,7 +21,7 @@ import java.io.File
import scala.language.implicitConversions
-import org.apache.spark.{Logging, SparkContext}
+import org.apache.spark.SparkContext
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
import org.apache.spark.sql.catalyst.ParserDialect
import org.apache.spark.sql.catalyst.analysis.{Analyzer, OverrideCatalog}
@@ -41,7 +41,7 @@ import org.apache.carbondata.spark.rdd.CarbonDataFrameRDD
class CarbonContext(
val sc: SparkContext,
val storePath: String,
- metaStorePath: String) extends HiveContext(sc) with Logging {
+ metaStorePath: String) extends HiveContext(sc) {
self =>
def this(sc: SparkContext) = {
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
index 2888cb3..c9d2a0f 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
@@ -132,9 +132,7 @@ class CarbonHadoopFSRDD[V: ClassTag](
identifier: AbsoluteTableIdentifier,
inputFormatClass: Class[_ <: CarbonInputFormat[V]],
valueClass: Class[V])
- extends RDD[V](sc, Nil)
- with SparkHadoopMapReduceUtil
- with Logging {
+ extends RDD[V](sc, Nil) with SparkHadoopMapReduceUtil {
private val jobTrackerId: String = {
val formatter = new SimpleDateFormat("yyyyMMddHHmm")
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
index 3441777..069e106 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonDatasourceRelation.scala
@@ -132,7 +132,7 @@ private[sql] case class CarbonDatasourceRelation(
tableIdentifier: TableIdentifier,
alias: Option[String])
(@transient context: SQLContext)
- extends BaseRelation with Serializable with Logging {
+ extends BaseRelation with Serializable {
lazy val carbonRelation: CarbonRelation = {
CarbonEnv.getInstance(context)
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
index f9a0a9d..724ec6e 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/CarbonSqlParser.scala
@@ -27,7 +27,6 @@ import scala.util.matching.Regex
import org.apache.hadoop.hive.ql.lib.Node
import org.apache.hadoop.hive.ql.parse._
-import org.apache.spark.Logging
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.CarbonTableIdentifierImplicit._
import org.apache.spark.sql.catalyst.analysis._
@@ -48,8 +47,7 @@ import org.apache.carbondata.spark.util.CommonUtil
/**
* Parser for All Carbon DDL, DML cases in Unified context
*/
-class CarbonSqlParser()
- extends AbstractSparkSQLParser with Logging {
+class CarbonSqlParser() extends AbstractSparkSQLParser {
val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
protected val AGGREGATE = carbonKeyWord("AGGREGATE")
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
index 3fe62cc..3ddbb41 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonMetastoreCatalog.scala
@@ -100,12 +100,10 @@ case class DictionaryMap(dictionaryMap: Map[String, Boolean]) {
}
class CarbonMetastoreCatalog(hiveContext: HiveContext, val storePath: String,
- client: ClientInterface, queryId: String)
- extends HiveMetastoreCatalog(client, hiveContext)
- with spark.Logging {
+ client: ClientInterface, queryId: String) extends HiveMetastoreCatalog(client, hiveContext) {
- @transient val LOGGER = LogServiceFactory
- .getLogService("org.apache.spark.sql.CarbonMetastoreCatalog")
+ @transient
+ val LOGGER = LogServiceFactory.getLogService("org.apache.spark.sql.CarbonMetastoreCatalog")
val tableModifiedTimeStore = new java.util.HashMap[String, Long]()
tableModifiedTimeStore
@@ -113,7 +111,6 @@ class CarbonMetastoreCatalog(hiveContext: HiveContext, val storePath: String,
val metadata = loadMetadata(storePath)
-
def getTableCreationTime(databaseName: String, tableName: String): Long = {
val tableMeta = metadata.tablesMeta.filter(
c => c.carbonTableIdentifier.getDatabaseName.equalsIgnoreCase(databaseName) &&
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/spark/sql/hive/cli/CarbonSQLCLIDriver.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/hive/cli/CarbonSQLCLIDriver.scala b/integration/spark/src/main/scala/org/apache/spark/sql/hive/cli/CarbonSQLCLIDriver.scala
index 8fbf0cf..ad70027 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/hive/cli/CarbonSQLCLIDriver.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/hive/cli/CarbonSQLCLIDriver.scala
@@ -20,15 +20,18 @@ import java.io.File
import scala.collection.JavaConverters._
-import org.apache.spark.{Logging, SparkConf, SparkContext}
+import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.scheduler.StatsReportListener
import org.apache.spark.sql.CarbonContext
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.hive.thriftserver.{SparkSQLCLIDriver, SparkSQLEnv}
import org.apache.spark.util.Utils
-object CarbonSQLCLIDriver extends Logging {
+import org.apache.carbondata.common.logging.LogServiceFactory
+object CarbonSQLCLIDriver {
+
+ private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
var hiveContext: HiveContext = _
var sparkContext: SparkContext = _
@@ -71,11 +74,8 @@ object CarbonSQLCLIDriver extends Logging {
store.getCanonicalPath)
hiveContext.setConf("spark.sql.hive.version", HiveContext.hiveExecutionVersion)
-
- if (log.isDebugEnabled) {
- hiveContext.hiveconf.getAllProperties.asScala.toSeq.sorted.foreach { case (k, v) =>
- logDebug(s"HiveConf var: $k=$v")
- }
+ hiveContext.hiveconf.getAllProperties.asScala.toSeq.sorted.foreach { case (k, v) =>
+ LOGGER.debug(s"HiveConf var: $k=$v")
}
}
}
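The hunk above also drops the log.isDebugEnabled guard, leaving the level check to the log service itself. One trade-off worth noting: the interpolated message string is now built on every iteration even when debug logging is off. A minimal sketch of the resulting pattern, with illustrative object and method names:

    import org.apache.carbondata.common.logging.LogServiceFactory

    object ExampleCliDriver {
      private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)

      def dumpProperties(props: Map[String, String]): Unit = {
        // no isDebugEnabled guard: the service decides whether to emit,
        // but the s"..." string is still interpolated eagerly here
        props.toSeq.sorted.foreach { case (k, v) =>
          LOGGER.debug(s"conf var: $k=$v")
        }
      }
    }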
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/main/scala/org/apache/spark/util/FileUtils.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/main/scala/org/apache/spark/util/FileUtils.scala b/integration/spark/src/main/scala/org/apache/spark/util/FileUtils.scala
index e755b2e..387d1ef 100644
--- a/integration/spark/src/main/scala/org/apache/spark/util/FileUtils.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/util/FileUtils.scala
@@ -17,14 +17,13 @@
package org.apache.spark.util
-import org.apache.spark.Logging
-
+import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastorage.store.filesystem.CarbonFile
import org.apache.carbondata.core.datastorage.store.impl.FileFactory
import org.apache.carbondata.processing.etl.DataLoadingException
-object FileUtils extends Logging {
+object FileUtils {
/**
* append all csv file path to a String, file path separated by comma
*/
@@ -38,10 +37,12 @@ object FileUtils extends Logging {
val path = carbonFile.getAbsolutePath
val fileName = carbonFile.getName
if (carbonFile.getSize == 0) {
- logWarning(s"skip empty input file: $path")
+ LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+ .warn(s"skip empty input file: $path")
} else if (fileName.startsWith(CarbonCommonConstants.UNDERSCORE) ||
fileName.startsWith(CarbonCommonConstants.POINT)) {
- logWarning(s"skip invisible input file: $path")
+ LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+ .warn(s"skip invisible input file: $path")
} else {
stringBuild.append(path.replace('\\', '/')).append(CarbonCommonConstants.COMMA)
}
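In FileUtils the logger is looked up inline at each warn() call site. A hedged alternative, assuming repeated getLogService lookups are worth avoiding, is to hoist the lookup into a private val on the object — a sketch with illustrative names:

    import org.apache.carbondata.common.logging.LogServiceFactory

    object ExampleFileUtils {
      // looked up once instead of once per warning
      private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)

      def shouldSkip(path: String, sizeInBytes: Long): Boolean =
        if (sizeInBytes == 0) {
          LOGGER.warn(s"skip empty input file: $path")
          true
        } else {
          false
        }
    }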
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/blockprune/BlockPruneQueryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/blockprune/BlockPruneQueryTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/blockprune/BlockPruneQueryTestCase.scala
index 77d79e7..0c7b7ce 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/blockprune/BlockPruneQueryTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/blockprune/BlockPruneQueryTestCase.scala
@@ -54,14 +54,14 @@ class BlockPruneQueryTestCase extends QueryTest with BeforeAndAfterAll {
}
} catch {
case ex: Exception =>
- logError("Build test file for block prune failed" + ex)
+ LOGGER.error(ex, "Build test file for block prune failed")
} finally {
if (writer != null) {
try {
writer.close()
} catch {
case ex: Exception =>
- logError("Close output stream catching exception:" + ex)
+ LOGGER.error(ex, "Close output stream catching exception")
}
}
}
@@ -107,7 +107,7 @@ class BlockPruneQueryTestCase extends QueryTest with BeforeAndAfterAll {
}
} catch {
case ex: Exception =>
- logError("Delete temp test data file for block prune catching exception:" + ex)
+ LOGGER.error(ex, "Delete temp test data file for block prune catching exception")
}
sql("DROP TABLE IF EXISTS blockprune")
}
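The test changes above switch from concatenating the exception into the message to the two-argument error(Throwable, String) overload, which lets the log service render the full stack trace rather than ex.toString. A minimal sketch of that call shape — the helper name is hypothetical:

    import java.io.Closeable

    import org.apache.carbondata.common.logging.LogServiceFactory

    object ExampleCleanup {
      private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)

      def closeQuietly(resource: Closeable): Unit =
        try {
          resource.close()
        } catch {
          case ex: Exception =>
            // exception first, message second: the service keeps the stack trace
            LOGGER.error(ex, "Close output stream catching exception")
        }
    }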
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/DefaultSourceTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/DefaultSourceTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/DefaultSourceTestCase.scala
index 756f117..20229bb 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/DefaultSourceTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/DefaultSourceTestCase.scala
@@ -23,11 +23,11 @@ import java.io.File
import java.io.FileWriter
import java.util.Random
+import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.spark.sql.common.util.CarbonHiveContext
import org.apache.spark.sql.common.util.CarbonHiveContext.sql
import org.apache.spark.sql.common.util.QueryTest
-
import org.scalatest.BeforeAndAfterAll
/**
@@ -69,7 +69,7 @@ class DefaultSourceTestCase extends QueryTest with BeforeAndAfterAll {
(c1 string, c2 string, c3 int, c4 int)
STORED BY 'org.apache.carbondata.format'""")
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
}
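This test-suite pattern recurs through the rest of the diff: the message and a rendered stack trace are joined into one string passed to error(String). A sketch of a small wrapper for it, assuming the Scala RichException enrichment that supplies getStackTraceString — the helper name is hypothetical:

    import org.apache.carbondata.common.logging.LogServiceFactory

    object ExampleTestHelper {
      private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)

      // runs the body and logs message plus stack trace as one string on failure
      def tryOrLog(body: => Unit): Unit =
        try {
          body
        } catch {
          case ex: Throwable =>
            LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
        }
    }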
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala
index 50ebf16..4224856 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestDataWithDicExcludeAndInclude.scala
@@ -63,7 +63,7 @@ class TestLoadDataWithDictionaryExcludeAndInclude extends QueryTest with BeforeA
'DICTIONARY_INCLUDE'='ID')
""")
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
}
@@ -80,7 +80,7 @@ class TestLoadDataWithDictionaryExcludeAndInclude extends QueryTest with BeforeA
LOAD DATA LOCAL INPATH './src/test/resources/emptyDimensionDataHive.csv' into table exclude_include_hive_t3
""")
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
}
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
index c215a46..ef2adbc 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala
@@ -89,7 +89,7 @@ class AllDictionaryTestCase extends QueryTest with BeforeAndAfterAll {
"age INT) STORED BY 'org.apache.carbondata.format'"
)
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
try {
sql(
@@ -102,7 +102,7 @@ class AllDictionaryTestCase extends QueryTest with BeforeAndAfterAll {
"TBLPROPERTIES('DICTIONARY_EXCLUDE'='ROMSize')"
)
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
}
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AutoHighCardinalityIdentifyTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AutoHighCardinalityIdentifyTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AutoHighCardinalityIdentifyTestCase.scala
index 3b06311..fca7504 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AutoHighCardinalityIdentifyTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/AutoHighCardinalityIdentifyTestCase.scala
@@ -96,7 +96,7 @@ class AutoHighCardinalityIdentifyTestCase extends QueryTest with BeforeAndAfterA
(hc1 string, c2 string, c3 int)
STORED BY 'org.apache.carbondata.format'""")
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
}
@@ -107,7 +107,7 @@ class AutoHighCardinalityIdentifyTestCase extends QueryTest with BeforeAndAfterA
(hc1 string, c2 string, c3 int)
STORED BY 'org.apache.carbondata.format' tblproperties('COLUMN_GROUPS'='(hc1,c2)')""")
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
}
def relation(tableName: String): CarbonRelation = {
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
index 07d1a16..1531ade 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala
@@ -83,7 +83,7 @@ class ExternalColumnDictionaryTestCase extends QueryTest with BeforeAndAfterAll
TBLPROPERTIES('DICTIONARY_INCLUDE' = 'deviceInformationId')
""")
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
try {
@@ -93,7 +93,7 @@ class ExternalColumnDictionaryTestCase extends QueryTest with BeforeAndAfterAll
TBLPROPERTIES('DICTIONARY_INCLUDE' = 'deviceInformationId')
""")
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
try {
@@ -108,7 +108,7 @@ class ExternalColumnDictionaryTestCase extends QueryTest with BeforeAndAfterAll
TBLPROPERTIES('DICTIONARY_INCLUDE' = 'deviceInformationId')
""")
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
}
@@ -204,7 +204,7 @@ class ExternalColumnDictionaryTestCase extends QueryTest with BeforeAndAfterAll
""")
} catch {
case ex: Exception =>
- logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
assert(false)
}
DictionaryTestCaseUtil.checkDictionary(
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilConcurrentTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilConcurrentTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilConcurrentTestCase.scala
index 9cbdb7e..7d6e994 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilConcurrentTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilConcurrentTestCase.scala
@@ -88,7 +88,7 @@ class GlobalDictionaryUtilConcurrentTestCase extends QueryTest with BeforeAndAft
sql(
"CREATE TABLE IF NOT EXISTS employee (empid STRING) STORED BY 'org.apache.carbondata.format'")
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
}
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala
index 74633e2..b78ffdb 100644
--- a/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala
+++ b/integration/spark/src/test/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtilTestCase.scala
@@ -97,7 +97,7 @@ class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
"age INT) STORED BY 'org.apache.carbondata.format'"
)
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
try {
sql(
@@ -106,7 +106,7 @@ class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
"TBLPROPERTIES('DICTIONARY_EXCLUDE'='id,name')"
)
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
try {
sql(
@@ -120,7 +120,7 @@ class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
)
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
try {
@@ -134,7 +134,7 @@ class GlobalDictionaryUtilTestCase extends QueryTest with BeforeAndAfterAll {
"TBLPROPERTIES('DICTIONARY_INCLUDE'='deviceInformationId')"
)
} catch {
- case ex: Throwable => logError(ex.getMessage + "\r\n" + ex.getStackTraceString)
+ case ex: Throwable => LOGGER.error(ex.getMessage + "\r\n" + ex.getStackTraceString)
}
}
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/test/scala/org/apache/spark/sql/common/util/CarbonFunSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/spark/sql/common/util/CarbonFunSuite.scala b/integration/spark/src/test/scala/org/apache/spark/sql/common/util/CarbonFunSuite.scala
index 8109d7a..4647e78 100644
--- a/integration/spark/src/test/scala/org/apache/spark/sql/common/util/CarbonFunSuite.scala
+++ b/integration/spark/src/test/scala/org/apache/spark/sql/common/util/CarbonFunSuite.scala
@@ -19,11 +19,13 @@
package org.apache.spark.sql.common.util
-import org.apache.spark.Logging
+import org.apache.carbondata.common.logging.LogServiceFactory
import org.scalatest.{FunSuite, Outcome}
-private[spark] abstract class CarbonFunSuite extends FunSuite with Logging {
+private[spark] abstract class CarbonFunSuite extends FunSuite {
+
+ private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
/**
* Log the suite name and the test name before and after each test.
@@ -37,10 +39,10 @@ private[spark] abstract class CarbonFunSuite extends FunSuite with Logging {
val suiteName = this.getClass.getName
val shortSuiteName = suiteName.replaceAll("org.apache.spark", "o.a.s")
try {
- logInfo(s"\n\n===== TEST OUTPUT FOR $shortSuiteName: '$testName' =====\n")
+ LOGGER.info(s"\n\n===== TEST OUTPUT FOR $shortSuiteName: '$testName' =====\n")
test()
} finally {
- logInfo(s"\n\n===== FINISHED $shortSuiteName: '$testName' =====\n")
+ LOGGER.info(s"\n\n===== FINISHED $shortSuiteName: '$testName' =====\n")
}
}
http://git-wip-us.apache.org/repos/asf/incubator-carbondata/blob/a28e605c/integration/spark/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala b/integration/spark/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala
index f9af61d..44d3bfa 100644
--- a/integration/spark/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala
+++ b/integration/spark/src/test/scala/org/apache/spark/sql/common/util/QueryTest.scala
@@ -19,14 +19,17 @@ package org.apache.spark.sql.common.util
import java.util.{Locale, TimeZone}
-import scala.collection.JavaConversions._
+import org.apache.carbondata.common.logging.LogServiceFactory
+import scala.collection.JavaConversions._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
class QueryTest extends PlanTest {
+ val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+
// Timezone is fixed to America/Los_Angeles for those timezone sensitive tests (timestamp_*)
TimeZone.setDefault(TimeZone.getTimeZone("America/Los_Angeles"))
// Add Locale setting
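Declaring LOGGER once on the QueryTest base class is what lets the test suites earlier in this diff call LOGGER.error(...) without a declaration of their own; since getLogService is keyed on this.getClass, the inherited logger picks up the runtime subclass name. A sketch of that arrangement, with illustrative suite names:

    import org.apache.carbondata.common.logging.LogServiceFactory
    import org.scalatest.FunSuite

    // base suite: one shared logger declaration
    abstract class ExampleBaseSuite extends FunSuite {
      // resolves to the concrete subclass name at runtime
      val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
    }

    class ExampleSuite extends ExampleBaseSuite {
      test("logs through the inherited logger") {
        LOGGER.info("running example test")
      }
    }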
[2/2] incubator-carbondata git commit: [CARBONDATA-447] Use Carbon log service instead of spark Logging. This closes #351
Posted by ch...@apache.org.
[CARBONDATA-447] Use Carbon log service instead of spark Logging. This closes #351
Project: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/commit/75e02caf
Tree: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/tree/75e02caf
Diff: http://git-wip-us.apache.org/repos/asf/incubator-carbondata/diff/75e02caf
Branch: refs/heads/master
Commit: 75e02caf47711d5526125913996712ed0f99b8f1
Parents: 068288d a28e605
Author: chenliang613 <ch...@apache.org>
Authored: Fri Nov 25 09:31:17 2016 +0800
Committer: chenliang613 <ch...@apache.org>
Committed: Fri Nov 25 09:31:17 2016 +0800