Posted to commits@carbondata.apache.org by ja...@apache.org on 2020/03/20 14:34:17 UTC

[carbondata] branch master updated: [HOTFIX] Fix ClassName for load datamaps parallel job

This is an automated email from the ASF dual-hosted git repository.

jackylk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/carbondata.git


The following commit(s) were added to refs/heads/master by this push:
     new 4c05dc4  [HOTFIX] Fix ClassName for load datamaps parallel job
4c05dc4 is described below

commit 4c05dc44c38ed8c730a372792432c9f7f5e869a4
Author: Indhumathi27 <in...@gmail.com>
AuthorDate: Wed Mar 18 15:52:18 2020 +0530

    [HOTFIX] Fix ClassName for load datamaps parallel job
    
    Why is this PR needed?
    The load datamaps parallel flow was not launching its job because the class name was incorrect
    
    What changes were proposed in this PR?
    Changed the class names used for the load datamaps parallel job
    
    Does this PR introduce any user interface change?
    No
    
    Is any new testcase added?
    No
    
    This closes #3674
---
 .../main/java/org/apache/carbondata/core/datamap/DataMapUtil.java    | 5 +++--
 .../sql/secondaryindex/Jobs/SparkBlockletDataMapLoaderJob.scala      | 5 +----
 2 files changed, 4 insertions(+), 6 deletions(-)
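
The fix matters because the job class is resolved reflectively from a string
at runtime, so a stale fully-qualified name simply fails to resolve and the
parallel load job is never launched. Below is a minimal Scala sketch of that
failure mode, assuming createDataMapJob resolves the name via Class.forName;
the actual implementation is not part of this diff, and the class name used
in main is taken from the hunk below.

    object ClassNameLookup {
      // Resolve a job class from its fully-qualified name; a wrong name
      // yields None rather than a job instance.
      def createDataMapJob(className: String): Option[AnyRef] = {
        try {
          Some(Class.forName(className)
            .getDeclaredConstructor()
            .newInstance()
            .asInstanceOf[AnyRef])
        } catch {
          case _: ReflectiveOperationException => None
        }
      }

      def main(args: Array[String]): Unit = {
        // The old name points at a location where the class no longer
        // exists, so the lookup fails and no job is created. The corrected
        // name resolves only with the Spark integration module on the
        // classpath, which is why the string must match exactly.
        val stale = createDataMapJob(
          "org.apache.carbondata.spark.rdd.SparkBlockletDataMapLoaderJob")
        println(s"stale name resolved: ${stale.isDefined}") // false
      }
    }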

diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapUtil.java b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapUtil.java
index 8e7449e..41da1c2 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapUtil.java
@@ -195,9 +195,10 @@ public class DataMapUtil {
     if (!CarbonProperties.getInstance()
         .isDistributedPruningEnabled(carbonTable.getDatabaseName(), carbonTable.getTableName())
         && BlockletDataMapUtil.loadDataMapsParallel(carbonTable)) {
-      String clsName = "org.apache.carbondata.spark.rdd.SparkBlockletDataMapLoaderJob";
+      String clsName = "org.apache.spark.sql.secondaryindex.Jobs.SparkBlockletDataMapLoaderJob";
       DataMapJob dataMapJob = (DataMapJob) createDataMapJob(clsName);
-      String className = "org.apache.carbondata.hadoop.DistributableBlockletDataMapLoader";
+      String className =
+          "org.apache.spark.sql.secondaryindex.Jobs.DistributableBlockletDataMapLoader";
       SegmentStatusManager.ValidAndInvalidSegmentsInfo validAndInvalidSegmentsInfo =
           getValidAndInvalidSegments(carbonTable, FileFactory.getConfiguration());
       List<Segment> invalidSegments = validAndInvalidSegmentsInfo.getInvalidSegments();
diff --git a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/SparkBlockletDataMapLoaderJob.scala b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/SparkBlockletDataMapLoaderJob.scala
index 48e7312..4d6c6b9 100644
--- a/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/SparkBlockletDataMapLoaderJob.scala
+++ b/integration/spark/src/main/scala/org/apache/spark/sql/secondaryindex/Jobs/SparkBlockletDataMapLoaderJob.scala
@@ -80,15 +80,12 @@ class SparkBlockletDataMapLoaderJob extends AbstractDataMapJob {
   private def addSegmentProperties(carbonTable: CarbonTable,
       dataMapIndexWrappers: Array[(TableBlockIndexUniqueIdentifier,
         BlockletDataMapDetailsWithSchema)]): Unit = {
-    val dataMapWrapperList = scala.collection.mutable.ArrayBuffer
-      .empty[(TableBlockIndexUniqueIdentifier,
-      BlockletDataMapDetailsWithSchema)]
     // use the carbon table schema only as this flow is called when schema is not modified
     val tableColumnSchema = CarbonUtil
       .getColumnSchemaList(carbonTable.getVisibleDimensions,
         carbonTable.getVisibleMeasures)
     // add segmentProperties in the segmentPropertyCache
-    dataMapWrapperList.foreach { entry =>
+    dataMapIndexWrappers.foreach { entry =>
       val segmentId = entry._1.getSegmentId
       val wrapper = SegmentPropertiesAndSchemaHolder.getInstance()
         .addSegmentProperties(carbonTable, tableColumnSchema, segmentId)
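
Beyond the rename, the Scala hunk also removes a silent no-op:
addSegmentProperties iterated a freshly created, empty dataMapWrapperList
instead of the dataMapIndexWrappers argument, so the foreach body never ran
and segment properties were never cached. A self-contained sketch of that
bug pattern follows; the names here are illustrative, not CarbonData APIs.

    object EmptyBufferNoOp {
      def main(args: Array[String]): Unit = {
        val dataMapIndexWrappers = Array("segment-0", "segment-1")
        // Bug pattern: a new, empty buffer is iterated instead of the
        // input, so the loop body never executes and nothing is cached.
        val dataMapWrapperList =
          scala.collection.mutable.ArrayBuffer.empty[String]
        dataMapWrapperList.foreach(id => println(s"cached $id")) // no output
        // Fixed pattern: iterate the method's actual input.
        dataMapIndexWrappers.foreach(id => println(s"cached $id"))
      }
    }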