You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@carbondata.apache.org by ak...@apache.org on 2020/04/24 05:21:09 UTC
[carbondata] branch master updated: [CARBONDATA-3782] changes to
reflect default behaviour in case of invalid configuration
This is an automated email from the ASF dual-hosted git repository.
akashrn5 pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git
The following commit(s) were added to refs/heads/master by this push:
new 4540313 [CARBONDATA-3782] changes to reflect default behaviour in case of invalid configuration
4540313 is described below
commit 4540313e4cfed73331d73e736ea80e3d263aa1ac
Author: akkio-97 <ak...@gmail.com>
AuthorDate: Tue Apr 21 13:33:46 2020 +0530
[CARBONDATA-3782] changes to reflect default behaviour in case of invalid configuration
Why is this PR needed?
Invalid configuration values in carbon.properties cause the operation to fail instead of falling
back to the default configuration value's behaviour.
What changes were proposed in this PR?
When invalid configurations are provided, instead of failing, fall back to the default behavior.
This closes #3722
---
.../carbondata/core/util/CarbonProperties.java | 29 ++++++++++++++--------
.../spark/rdd/NewCarbonDataLoadRDD.scala | 4 ++-
2 files changed, 21 insertions(+), 12 deletions(-)
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
index 6af7014..c813569 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
@@ -1850,17 +1850,24 @@ public final class CarbonProperties {
* This method validates the numOfThreadsForPruning
*/
public static int getNumOfThreadsForPruning() {
- int numOfThreadsForPruning = Integer.parseInt(CarbonProperties.getInstance()
- .getProperty(CarbonCommonConstants.CARBON_MAX_DRIVER_THREADS_FOR_BLOCK_PRUNING,
- CarbonCommonConstants.CARBON_MAX_DRIVER_THREADS_FOR_BLOCK_PRUNING_DEFAULT));
- if (numOfThreadsForPruning > Integer
- .parseInt(CarbonCommonConstants.CARBON_MAX_DRIVER_THREADS_FOR_BLOCK_PRUNING_DEFAULT)
- || numOfThreadsForPruning < 1) {
- LOGGER.info("Invalid value for carbon.max.driver.threads.for.block.pruning, value :"
- + numOfThreadsForPruning + " .using the default threads : "
- + CarbonCommonConstants.CARBON_MAX_DRIVER_THREADS_FOR_BLOCK_PRUNING_DEFAULT);
- numOfThreadsForPruning = Integer
- .parseInt(CarbonCommonConstants.CARBON_MAX_DRIVER_THREADS_FOR_BLOCK_PRUNING_DEFAULT);
+ int numOfThreadsForPruning;
+ String maxDriverThreadsForBockPruning =
+ CarbonCommonConstants.CARBON_MAX_DRIVER_THREADS_FOR_BLOCK_PRUNING;
+ int defaultNumberOfThreads =
+ Integer.parseInt(CarbonCommonConstants.CARBON_MAX_DRIVER_THREADS_FOR_BLOCK_PRUNING_DEFAULT);
+ String logMessage = " is not a valid input for " + maxDriverThreadsForBockPruning
+ + ". Using the default number of threads : " + defaultNumberOfThreads;
+ try {
+ numOfThreadsForPruning = Integer.parseInt(CarbonProperties.getInstance()
+ .getProperty(maxDriverThreadsForBockPruning, String.valueOf(defaultNumberOfThreads)));
+ if (numOfThreadsForPruning > defaultNumberOfThreads || numOfThreadsForPruning < 1) {
+ LOGGER.info(numOfThreadsForPruning + logMessage);
+ numOfThreadsForPruning = defaultNumberOfThreads;
+ }
+ } catch (NumberFormatException e) {
+ LOGGER.info(
+ CarbonProperties.getInstance().getProperty(maxDriverThreadsForBockPruning + logMessage));
+ numOfThreadsForPruning = defaultNumberOfThreads;
}
return numOfThreadsForPruning;
}
diff --git a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
index 7caf644..68f994c 100644
--- a/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
+++ b/integration/spark/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
@@ -22,6 +22,7 @@ import java.text.SimpleDateFormat
import java.util.{Date, UUID}
import scala.collection.mutable
+import scala.util.Try
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapreduce.{TaskAttemptID, TaskType}
@@ -135,7 +136,8 @@ class NewCarbonDataLoadRDD[K, V](
val preFetch = CarbonProperties.getInstance().getProperty(CarbonCommonConstants
.USE_PREFETCH_WHILE_LOADING, CarbonCommonConstants.USE_PREFETCH_WHILE_LOADING_DEFAULT)
- carbonLoadModel.setPreFetch(preFetch.toBoolean)
+ carbonLoadModel.setPreFetch(Try(preFetch.toBoolean).getOrElse(CarbonCommonConstants
+ .USE_PREFETCH_WHILE_LOADING_DEFAULT.toBoolean))
val recordReaders = getInputIterators
val loader = new SparkPartitionLoader(model,
theSplit.index,