Posted to commits@carbondata.apache.org by ja...@apache.org on 2018/08/07 13:09:25 UTC

[01/50] [abbrv] carbondata git commit: [CARBONDATA-2782] delete dead code in class 'CarbonCleanFilesCommand' [Forced Update!]

Repository: carbondata
Updated Branches:
  refs/heads/external-format ccf64ce5a -> 12ab57992 (forced update)


[CARBONDATA-2782] delete dead code in class 'CarbonCleanFilesCommand'

The variables (dms, indexDms) in the processMetadata function are never used.

This closes #2557


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/c79fc90d
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/c79fc90d
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/c79fc90d

Branch: refs/heads/external-format
Commit: c79fc90d5d9804d65aea8d363f2249b12696aef0
Parents: d62fe9e
Author: Sssan520 <li...@aliyun.com>
Authored: Wed Jul 25 19:36:00 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Fri Jul 27 10:47:24 2018 +0800

----------------------------------------------------------------------
 .../execution/command/management/CarbonCleanFilesCommand.scala   | 4 ----
 1 file changed, 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/c79fc90d/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonCleanFilesCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonCleanFilesCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonCleanFilesCommand.scala
index 9d0f149..e561a5a 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonCleanFilesCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonCleanFilesCommand.scala
@@ -58,10 +58,6 @@ case class CarbonCleanFilesCommand(
   override def processMetadata(sparkSession: SparkSession): Seq[Row] = {
     carbonTable = CarbonEnv.getCarbonTable(databaseNameOp, tableName.get)(sparkSession)
 
-    val dms = carbonTable.getTableInfo.getDataMapSchemaList.asScala.map(_.getDataMapName)
-    val indexDms = DataMapStoreManager.getInstance.getAllDataMap(carbonTable).asScala
-      .filter(_.getDataMapSchema.isIndexDataMap)
-
     if (carbonTable.hasAggregationDataMap) {
       cleanFileCommands = carbonTable.getTableInfo.getDataMapSchemaList.asScala.map {
         dataMapSchema =>


[20/50] [abbrv] carbondata git commit: [CARBONDATA-2625] While BlockletDataMap loading, avoid multiple times listing of files

Posted by ja...@apache.org.
[CARBONDATA-2625] While BlockletDataMap loading, avoid multiple times listing of files

CarbonReader is very slow when there are many files because BlockletDataMap lists the files
of the folder while loading each segment. This optimization lists the files only once across all segment loads.

This closes #2441
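
The fix threads a single listing cache through all the segment loads instead of re-listing the segment folder for each load. Below is a minimal, self-contained sketch of that caching pattern; the class and method names are illustrative only (the actual change keys a Map<String, Map<String, BlockMetaInfo>> segInfoCache by the segment's index file path inside BlockletDataMapIndexStore):

  import java.util.Collections;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  // Illustrative sketch: the folder listing is cached by path, so loading many
  // segments that share a folder triggers only one file-system listing.
  public class FolderListingCache {
    private final Map<String, List<String>> cache = new HashMap<>();

    public List<String> listOnce(String folderPath) {
      List<String> files = cache.get(folderPath);
      if (files == null) {
        files = expensiveListing(folderPath);   // one listing per distinct folder
        cache.put(folderPath, files);
      }
      return files;
    }

    private List<String> expensiveListing(String folderPath) {
      // stand-in for BlockletDataMapUtil.createCarbonDataFileBlockMetaInfoMapping(...)
      return Collections.emptyList();
    }
  }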


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/e580d64e
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/e580d64e
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/e580d64e

Branch: refs/heads/external-format
Commit: e580d64ef5353ed033343d854da7e02539cdbeb4
Parents: 6351c3a
Author: rahul <ra...@knoldus.in>
Authored: Wed Jul 4 19:31:51 2018 +0530
Committer: Venkata Ramana G <ra...@huawei.com>
Committed: Wed Aug 1 16:40:29 2018 +0530

----------------------------------------------------------------------
 .../carbondata/core/datamap/TableDataMap.java   |  5 +--
 .../core/datamap/dev/DataMapFactory.java        | 16 +++++++++
 .../indexstore/BlockletDataMapIndexStore.java   | 29 ++++++++++++++---
 .../indexstore/BlockletDataMapIndexWrapper.java |  9 +++++-
 .../blockletindex/BlockletDataMapFactory.java   | 34 ++++++++++++++++++++
 .../core/util/BlockletDataMapUtil.java          |  6 ++--
 .../TestBlockletDataMapFactory.java             |  2 +-
 .../partition/TestAlterPartitionTable.scala     |  5 +++
 8 files changed, 95 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/e580d64e/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java b/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java
index f6da73e..aed8c60 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java
@@ -19,6 +19,7 @@ package org.apache.carbondata.core.datamap;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.carbondata.common.annotations.InterfaceAudience;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
@@ -89,15 +90,15 @@ public final class TableDataMap extends OperationEventListener {
       List<PartitionSpec> partitions) throws IOException {
     List<ExtendedBlocklet> blocklets = new ArrayList<>();
     SegmentProperties segmentProperties;
+    Map<Segment, List<DataMap>> dataMaps = dataMapFactory.getDataMaps(segments);
     for (Segment segment : segments) {
       List<Blocklet> pruneBlocklets = new ArrayList<>();
       // if filter is not passed then return all the blocklets
       if (filterExp == null) {
         pruneBlocklets = blockletDetailsFetcher.getAllBlocklets(segment, partitions);
       } else {
-        List<DataMap> dataMaps = dataMapFactory.getDataMaps(segment);
         segmentProperties = segmentPropertiesFetcher.getSegmentProperties(segment);
-        for (DataMap dataMap : dataMaps) {
+        for (DataMap dataMap : dataMaps.get(segment)) {
           pruneBlocklets.addAll(dataMap.prune(filterExp, segmentProperties, partitions));
         }
       }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e580d64e/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
index ab0f8ea..67f82b2 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
@@ -17,8 +17,10 @@
 package org.apache.carbondata.core.datamap.dev;
 
 import java.io.IOException;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.carbondata.common.exceptions.sql.MalformedDataMapCommandException;
@@ -26,6 +28,7 @@ import org.apache.carbondata.core.datamap.DataMapDistributable;
 import org.apache.carbondata.core.datamap.DataMapLevel;
 import org.apache.carbondata.core.datamap.DataMapMeta;
 import org.apache.carbondata.core.datamap.Segment;
+import org.apache.carbondata.core.datamap.dev.cgdatamap.CoarseGrainDataMap;
 import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.features.TableOperation;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
@@ -67,6 +70,19 @@ public abstract class DataMapFactory<T extends DataMap> {
    */
   public abstract DataMapBuilder createBuilder(Segment segment, String shardName,
       SegmentProperties segmentProperties) throws IOException;
+
+  /**
+   * Get the datamap for all segments
+   */
+  public Map<Segment, List<CoarseGrainDataMap>> getDataMaps(List<Segment> segments)
+      throws IOException {
+    Map<Segment, List<CoarseGrainDataMap>> dataMaps = new HashMap<>();
+    for (Segment segment : segments) {
+      dataMaps.put(segment, (List<CoarseGrainDataMap>) this.getDataMaps(segment));
+    }
+    return dataMaps;
+  }
+
   /**
    * Get the datamap for segmentid
    */

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e580d64e/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexStore.java b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexStore.java
index 3a8aa52..fa84f30 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexStore.java
@@ -18,6 +18,7 @@ package org.apache.carbondata.core.indexstore;
 
 import java.io.IOException;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -73,6 +74,11 @@ public class BlockletDataMapIndexStore
   @Override
   public BlockletDataMapIndexWrapper get(TableBlockIndexUniqueIdentifierWrapper identifierWrapper)
       throws IOException {
+    return get(identifierWrapper, null);
+  }
+
+  private BlockletDataMapIndexWrapper get(TableBlockIndexUniqueIdentifierWrapper identifierWrapper,
+      Map<String, Map<String, BlockMetaInfo>> segInfoCache) throws IOException {
     TableBlockIndexUniqueIdentifier identifier =
         identifierWrapper.getTableBlockIndexUniqueIdentifier();
     String lruCacheKey = identifier.getUniqueTableSegmentIdentifier();
@@ -84,8 +90,16 @@ public class BlockletDataMapIndexStore
         SegmentIndexFileStore indexFileStore = new SegmentIndexFileStore();
         Set<String> filesRead = new HashSet<>();
         String segmentFilePath = identifier.getIndexFilePath();
-        Map<String, BlockMetaInfo> carbonDataFileBlockMetaInfoMapping = BlockletDataMapUtil
-            .createCarbonDataFileBlockMetaInfoMapping(segmentFilePath);
+        if (segInfoCache == null) {
+          segInfoCache = new HashMap<String, Map<String, BlockMetaInfo>>();
+        }
+        Map<String, BlockMetaInfo> carbonDataFileBlockMetaInfoMapping =
+            segInfoCache.get(segmentFilePath);
+        if (carbonDataFileBlockMetaInfoMapping == null) {
+          carbonDataFileBlockMetaInfoMapping =
+              BlockletDataMapUtil.createCarbonDataFileBlockMetaInfoMapping(segmentFilePath);
+          segInfoCache.put(segmentFilePath, carbonDataFileBlockMetaInfoMapping);
+        }
         // if the identifier is not a merge file we can directly load the datamaps
         if (identifier.getMergeIndexFileName() == null) {
           Map<String, BlockMetaInfo> blockMetaInfoMap = BlockletDataMapUtil
@@ -95,7 +109,8 @@ public class BlockletDataMapIndexStore
               loadAndGetDataMap(identifier, indexFileStore, blockMetaInfoMap,
                   identifierWrapper.getCarbonTable(), identifierWrapper.isAddTableBlockToUnsafe());
           dataMaps.add(blockletDataMap);
-          blockletDataMapIndexWrapper = new BlockletDataMapIndexWrapper(dataMaps);
+          blockletDataMapIndexWrapper =
+              new BlockletDataMapIndexWrapper(identifier.getSegmentId(), dataMaps);
         } else {
           // if the identifier is a merge file then collect the index files and load the datamaps
           List<TableBlockIndexUniqueIdentifier> tableBlockIndexUniqueIdentifiers =
@@ -114,7 +129,8 @@ public class BlockletDataMapIndexStore
               dataMaps.add(blockletDataMap);
             }
           }
-          blockletDataMapIndexWrapper = new BlockletDataMapIndexWrapper(dataMaps);
+          blockletDataMapIndexWrapper =
+              new BlockletDataMapIndexWrapper(identifier.getSegmentId(), dataMaps);
         }
         lruCache.put(identifier.getUniqueTableSegmentIdentifier(), blockletDataMapIndexWrapper,
             blockletDataMapIndexWrapper.getMemorySize());
@@ -133,6 +149,9 @@ public class BlockletDataMapIndexStore
   @Override public List<BlockletDataMapIndexWrapper> getAll(
       List<TableBlockIndexUniqueIdentifierWrapper> tableSegmentUniqueIdentifiers)
       throws IOException {
+    Map<String, Map<String, BlockMetaInfo>> segInfoCache
+        = new HashMap<String, Map<String, BlockMetaInfo>>();
+
     List<BlockletDataMapIndexWrapper> blockletDataMapIndexWrappers =
         new ArrayList<>(tableSegmentUniqueIdentifiers.size());
     List<TableBlockIndexUniqueIdentifierWrapper> missedIdentifiersWrapper = new ArrayList<>();
@@ -151,7 +170,7 @@ public class BlockletDataMapIndexStore
       }
       if (missedIdentifiersWrapper.size() > 0) {
         for (TableBlockIndexUniqueIdentifierWrapper identifierWrapper : missedIdentifiersWrapper) {
-          blockletDataMapIndexWrapper = get(identifierWrapper);
+          blockletDataMapIndexWrapper = get(identifierWrapper, segInfoCache);
           blockletDataMapIndexWrappers.add(blockletDataMapIndexWrapper);
         }
       }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e580d64e/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexWrapper.java b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexWrapper.java
index 52f2432..b0fb13e 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexWrapper.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDataMapIndexWrapper.java
@@ -30,12 +30,15 @@ public class BlockletDataMapIndexWrapper implements Cacheable, Serializable {
 
   private List<BlockDataMap> dataMaps;
 
+  private String segmentId;
+
   // size of the wrapper. basically the total size of the datamaps this wrapper is holding
   private long wrapperSize;
 
-  public BlockletDataMapIndexWrapper(List<BlockDataMap> dataMaps) {
+  public BlockletDataMapIndexWrapper(String segmentId,List<BlockDataMap> dataMaps) {
     this.dataMaps = dataMaps;
     this.wrapperSize = 0L;
+    this.segmentId = segmentId;
     // add the size of each and every datamap in this wrapper
     for (BlockDataMap dataMap : dataMaps) {
       this.wrapperSize += dataMap.getMemorySize();
@@ -57,4 +60,8 @@ public class BlockletDataMapIndexWrapper implements Cacheable, Serializable {
   public List<BlockDataMap> getDataMaps() {
     return dataMaps;
   }
+
+  public String getSegmentId() {
+    return segmentId;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e580d64e/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
index 4dd78ee..61d93f7 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
@@ -120,6 +120,40 @@ public class BlockletDataMapFactory extends CoarseGrainDataMapFactory
     throw new UnsupportedOperationException("not implemented");
   }
 
+  /**
+   * Get the datamap for all segments
+   */
+  public Map<Segment, List<CoarseGrainDataMap>> getDataMaps(List<Segment> segments)
+      throws IOException {
+    List<TableBlockIndexUniqueIdentifierWrapper> tableBlockIndexUniqueIdentifierWrappers =
+        new ArrayList<>();
+    Map<Segment, List<CoarseGrainDataMap>> dataMaps = new HashMap<>();
+    Map<String, Segment> segmentMap = new HashMap<>();
+    for (Segment segment : segments) {
+      segmentMap.put(segment.getSegmentNo(), segment);
+      Set<TableBlockIndexUniqueIdentifier> identifiers =
+          getTableBlockIndexUniqueIdentifiers(segment);
+
+      for (TableBlockIndexUniqueIdentifier tableBlockIndexUniqueIdentifier : identifiers) {
+        tableBlockIndexUniqueIdentifierWrappers.add(
+            new TableBlockIndexUniqueIdentifierWrapper(tableBlockIndexUniqueIdentifier,
+                this.getCarbonTable()));
+      }
+    }
+    List<BlockletDataMapIndexWrapper> blockletDataMapIndexWrappers =
+        cache.getAll(tableBlockIndexUniqueIdentifierWrappers);
+    for (BlockletDataMapIndexWrapper wrapper : blockletDataMapIndexWrappers) {
+      Segment segment = segmentMap.get(wrapper.getSegmentId());
+      List<CoarseGrainDataMap> datamapList = dataMaps.get(segment);
+      if (null == datamapList) {
+        datamapList = new ArrayList<CoarseGrainDataMap>();
+      }
+      datamapList.addAll(wrapper.getDataMaps());
+      dataMaps.put(segment, datamapList);
+    }
+    return dataMaps;
+  }
+
   @Override public List<CoarseGrainDataMap> getDataMaps(Segment segment) throws IOException {
     List<CoarseGrainDataMap> dataMaps = new ArrayList<>();
     Set<TableBlockIndexUniqueIdentifier> identifiers =

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e580d64e/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java b/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
index db41e73..68ce1fb 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
@@ -115,7 +115,7 @@ public class BlockletDataMapUtil {
         CarbonTable.updateTableByTableInfo(carbonTable, carbonTable.getTableInfo());
       }
       String blockPath = footer.getBlockInfo().getTableBlockInfo().getFilePath();
-      if (null == blockMetaInfoMap.get(blockPath)) {
+      if (null != fileNameToMetaInfoMapping && null == blockMetaInfoMap.get(blockPath)) {
         BlockMetaInfo blockMetaInfo = createBlockMetaInfo(fileNameToMetaInfoMapping, blockPath);
         // if blockMetaInfo is null that means the file has been deleted from the file system.
         // This can happen in case IUD scenarios where after deleting or updating the data the
@@ -123,6 +123,8 @@ public class BlockletDataMapUtil {
         if (null != blockMetaInfo) {
           blockMetaInfoMap.put(blockPath, blockMetaInfo);
         }
+      } else {
+        blockMetaInfoMap.put(blockPath, new BlockMetaInfo(new String[] {},0));
       }
     }
     return blockMetaInfoMap;
@@ -151,7 +153,7 @@ public class BlockletDataMapUtil {
         String[] location = file.getLocations();
         long len = file.getSize();
         BlockMetaInfo blockMetaInfo = new BlockMetaInfo(location, len);
-        fileNameToMetaInfoMapping.put(file.getPath().toString(), blockMetaInfo);
+        fileNameToMetaInfoMapping.put(file.getPath(), blockMetaInfo);
       }
     }
     return fileNameToMetaInfoMapping;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e580d64e/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletDataMapFactory.java
----------------------------------------------------------------------
diff --git a/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletDataMapFactory.java b/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletDataMapFactory.java
index d2a6f18..a3acfab 100644
--- a/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletDataMapFactory.java
+++ b/core/src/test/java/org/apache/carbondata/core/indexstore/blockletindex/TestBlockletDataMapFactory.java
@@ -103,7 +103,7 @@ public class TestBlockletDataMapFactory {
             BlockletDataMapIndexWrapper.class);
     method.setAccessible(true);
     method.invoke(blockletDataMapFactory, tableBlockIndexUniqueIdentifierWrapper,
-        new BlockletDataMapIndexWrapper(dataMaps));
+        new BlockletDataMapIndexWrapper(tableBlockIndexUniqueIdentifier.getSegmentId(), dataMaps));
     BlockletDataMapIndexWrapper result = cache.getIfPresent(tableBlockIndexUniqueIdentifierWrapper);
     assert null != result;
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e580d64e/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestAlterPartitionTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestAlterPartitionTable.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestAlterPartitionTable.scala
index 882630a..af17252 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestAlterPartitionTable.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestAlterPartitionTable.scala
@@ -43,6 +43,9 @@ class TestAlterPartitionTable extends QueryTest with BeforeAndAfterAll {
       .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
+      // deactivating the merge-index for old partition implimentation because we are not supporting
+      // merge-index for the same currently.
+        .addProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT,"false")
     /**
      * list_table_area_origin
      * list_table_area
@@ -891,6 +894,8 @@ class TestAlterPartitionTable extends QueryTest with BeforeAndAfterAll {
     .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
+      .addProperty(CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT,
+        CarbonCommonConstants.CARBON_MERGE_INDEX_IN_SEGMENT_DEFAULT)
   }
 
   def dropTable {


[17/50] [abbrv] carbondata git commit: [CARBONDATA-2798] Fix Dictionary_Include for ComplexDataType

Posted by ja...@apache.org.
[CARBONDATA-2798] Fix Dictionary_Include for ComplexDataType

Problem 1:
A select with filter throws a BufferUnderflowException because cardinality is filled for non-dictionary complex columns.
Solution:
Check whether a complex column has the Dictionary encoding and fill cardinality only for such columns.

Problem 2:
A transactional table throws a NullPointerException if the CSV file header is not proper.
Solution:
Throw a CarbonDataLoadingException if the CSV file header is not proper.

This closes #2578
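
A minimal, self-contained model of the cardinality fix (the class below is illustrative, not the actual ArrayDataType/StructDataType code): a complex column contributes a cardinality slot only when it carries the Dictionary encoding, instead of being treated as dictionary-encoded unconditionally.

  import java.util.ArrayList;
  import java.util.List;

  // Sketch only: models how the dictionary flag now gates cardinality filling.
  class ComplexColumnModel {
    private final boolean dictionaryEncoded;               // from hasEncoding(Encoding.DICTIONARY)
    private final List<ComplexColumnModel> children = new ArrayList<>();

    ComplexColumnModel(boolean dictionaryEncoded) {
      this.dictionaryEncoded = dictionaryEncoded;
    }

    void addChild(ComplexColumnModel child) {
      children.add(child);
    }

    void fillCardinality(List<Integer> dimCardWithComplex) {
      if (dictionaryEncoded) {                              // previously assumed true for every complex column
        dimCardWithComplex.add(0);
        for (ComplexColumnModel child : children) {
          child.fillCardinality(dimCardWithComplex);
        }
      }
    }
  }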


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/cfbf7b6e
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/cfbf7b6e
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/cfbf7b6e

Branch: refs/heads/external-format
Commit: cfbf7b6ec38525b478d6d689aae36a4f3d7b22c4
Parents: 1cea4d3
Author: Indhumathi27 <in...@gmail.com>
Authored: Mon Jul 30 14:18:44 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Wed Aug 1 11:14:13 2018 +0530

----------------------------------------------------------------------
 .../src/test/resources/nontransactional1.csv    |  2 ++
 .../complexType/TestComplexDataType.scala       |  7 +++++
 .../TestNonTransactionalCarbonTable.scala       | 30 +++++++++++++++++++
 .../processing/datatypes/ArrayDataType.java     | 24 +++++++++++++--
 .../processing/datatypes/StructDataType.java    | 31 +++++++++++++++-----
 .../converter/impl/FieldEncoderFactory.java     |  6 ++--
 .../processing/loading/model/LoadOption.java    |  4 ++-
 7 files changed, 91 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/cfbf7b6e/integration/spark-common-test/src/test/resources/nontransactional1.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/nontransactional1.csv b/integration/spark-common-test/src/test/resources/nontransactional1.csv
new file mode 100644
index 0000000..ac9ec54
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/nontransactional1.csv
@@ -0,0 +1,2 @@
+arvind, 33, 6.2
+bill, 35, 7.3
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/cfbf7b6e/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
index 1451f7b..1ad7889 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
@@ -971,6 +971,13 @@ class TestComplexDataType extends QueryTest with BeforeAndAfterAll {
       "('dictionary_include'='b')")
     sql("insert into test values(1,2) ")
     checkAnswer(sql("select b[0] from test"),Seq(Row(2)))
+    sql("DROP TABLE IF EXISTS test")
+    sql(
+      "create table test(intval array<array<int>>,str array<array<string>>, bool " +
+      "array<array<boolean>>, sint array<array<short>>, big array<array<bigint>>)  stored by " +
+      "'carbondata' tblproperties('dictionary_include'='bool,sint,big')")
+    sql("insert into test values(1,'ab',true,22,33)")
+    checkExistence(sql("select * from test"), true, "33")
   }
 
   test("date with struct and array") {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/cfbf7b6e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index 8a1d465..b92d41d 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -52,6 +52,7 @@ import org.apache.carbondata.core.datastore.page.encoding.DefaultEncodingFactory
 import org.apache.carbondata.core.metadata.ColumnarFormatVersion
 import org.apache.carbondata.core.metadata.datatype.DataTypes
 import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil, DataFileFooterConverterV3}
+import org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException
 import org.apache.carbondata.sdk.file._
 
 
@@ -350,6 +351,35 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     cleanTestData()
   }
 
+  test(" test csv fileheader for transactional table") {
+    FileUtils.deleteDirectory(new File(writerPath))
+    buildTestDataWithSameUUID(3, false, null, List("name"))
+    assert(new File(writerPath).exists())
+
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+
+    checkAnswer(sql("SELECT name,name FROM sdkOutputTable"), Seq(
+      Row("robot0", "robot0"),
+      Row("robot1", "robot1"),
+      Row("robot2", "robot2")))
+    //load csvfile without fileheader
+    var exception = intercept[CarbonDataLoadingException] {
+      sql(s"""load data inpath '$resourcesPath/nontransactional1.csv' into table sdkOutputTable""").show(200,false)
+    }
+    assert(exception.getMessage()
+      .contains("CSV header in input file is not proper. Column names in schema and csv header are not the same."))
+
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    assert(new File(writerPath).exists())
+    cleanTestData()
+  }
+
+
   test("test count star with multiple loads files with same schema and UUID") {
     FileUtils.deleteDirectory(new File(writerPath))
     buildTestDataWithSameUUID(3, false, null, List("name"))

http://git-wip-us.apache.org/repos/asf/carbondata/blob/cfbf7b6e/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
index 60972e8..0a1eba8 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
@@ -63,6 +63,11 @@ public class ArrayDataType implements GenericDataType<ArrayObject> {
   private int outputArrayIndex;
 
   /**
+   * Dictionary column
+   */
+  private boolean isDictionaryColumn;
+
+  /**
    * current data counter
    */
   private int dataCounter;
@@ -88,6 +93,21 @@ public class ArrayDataType implements GenericDataType<ArrayObject> {
     this.columnId = columnId;
   }
 
+  /**
+   * constructor
+   * @param name
+   * @param parentname
+   * @param columnId
+   * @param isDictionaryColumn
+   */
+  public ArrayDataType(String name, String parentname, String columnId,
+      Boolean isDictionaryColumn) {
+    this.name = name;
+    this.parentname = parentname;
+    this.columnId = columnId;
+    this.isDictionaryColumn = isDictionaryColumn;
+  }
+
   /*
    * to add child dimensions
    */
@@ -153,7 +173,7 @@ public class ArrayDataType implements GenericDataType<ArrayObject> {
   }
 
   @Override public boolean getIsColumnDictionary() {
-    return true;
+    return isDictionaryColumn;
   }
 
   @Override public void writeByteArray(ArrayObject input, DataOutputStream dataOutputStream,
@@ -172,7 +192,7 @@ public class ArrayDataType implements GenericDataType<ArrayObject> {
 
   @Override
   public void fillCardinality(List<Integer> dimCardWithComplex) {
-    if (children.getIsColumnDictionary()) {
+    if (this.getIsColumnDictionary()) {
       dimCardWithComplex.add(0);
       children.fillCardinality(dimCardWithComplex);
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/cfbf7b6e/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
index af95de6..31f2234 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
@@ -57,6 +57,12 @@ public class StructDataType implements GenericDataType<StructObject> {
    * output array index
    */
   private int outputArrayIndex;
+
+  /**
+   * Dictionary column
+   */
+  private boolean isDictionaryColumn;
+
   /**
    * data counter
    */
@@ -82,6 +88,21 @@ public class StructDataType implements GenericDataType<StructObject> {
     this.columnId = columnId;
   }
 
+  /**
+   * constructor
+   * @param name
+   * @param parentname
+   * @param columnId
+   * @param isDictionaryColumn
+   */
+  public StructDataType(String name, String parentname, String columnId,
+      Boolean isDictionaryColumn) {
+    this.name = name;
+    this.parentname = parentname;
+    this.columnId = columnId;
+    this.isDictionaryColumn = isDictionaryColumn;
+  }
+
   /*
    * add child dimensions
    */
@@ -153,7 +174,7 @@ public class StructDataType implements GenericDataType<StructObject> {
   }
 
   @Override public boolean getIsColumnDictionary() {
-    return true;
+    return isDictionaryColumn;
   }
 
   @Override public void writeByteArray(StructObject input, DataOutputStream dataOutputStream,
@@ -178,13 +199,7 @@ public class StructDataType implements GenericDataType<StructObject> {
 
   @Override
   public void fillCardinality(List<Integer> dimCardWithComplex) {
-    boolean isDictionaryColumn = false;
-    for (GenericDataType child : children) {
-      if (child.getIsColumnDictionary()) {
-        isDictionaryColumn = true;
-      }
-    }
-    if (isDictionaryColumn) {
+    if (this.getIsColumnDictionary()) {
       dimCardWithComplex.add(0);
       for (int i = 0; i < children.size(); i++) {
         children.get(i).fillCardinality(dimCardWithComplex);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/cfbf7b6e/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java
index 39c12a9..e9d2b02 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java
@@ -144,7 +144,8 @@ public class FieldEncoderFactory {
           ((CarbonDimension) carbonColumn).getListOfChildDimensions();
       // Create array parser with complex delimiter
       ArrayDataType arrayDataType =
-          new ArrayDataType(carbonColumn.getColName(), parentName, carbonColumn.getColumnId());
+          new ArrayDataType(carbonColumn.getColName(), parentName, carbonColumn.getColumnId(),
+              carbonColumn.hasEncoding(Encoding.DICTIONARY));
       for (CarbonDimension dimension : listOfChildDimensions) {
         arrayDataType.addChildren(
             createComplexType(dimension, carbonColumn.getColName(), absoluteTableIdentifier,
@@ -156,7 +157,8 @@ public class FieldEncoderFactory {
           ((CarbonDimension) carbonColumn).getListOfChildDimensions();
       // Create struct parser with complex delimiter
       StructDataType structDataType =
-          new StructDataType(carbonColumn.getColName(), parentName, carbonColumn.getColumnId());
+          new StructDataType(carbonColumn.getColName(), parentName, carbonColumn.getColumnId(),
+              carbonColumn.hasEncoding(Encoding.DICTIONARY));
       for (CarbonDimension dimension : dimensions) {
         structDataType.addChildren(
             createComplexType(dimension, carbonColumn.getColName(), absoluteTableIdentifier,

http://git-wip-us.apache.org/repos/asf/carbondata/blob/cfbf7b6e/processing/src/main/java/org/apache/carbondata/processing/loading/model/LoadOption.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/model/LoadOption.java b/processing/src/main/java/org/apache/carbondata/processing/loading/model/LoadOption.java
index 9733816..98cd90d 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/model/LoadOption.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/model/LoadOption.java
@@ -236,7 +236,9 @@ public class LoadOption {
       }
     }
 
-    if (carbonLoadModel.isCarbonTransactionalTable() && !CarbonDataProcessorUtil
+    // In SDK flow, hadoopConf will always be null,
+    // hence FileHeader check is not required for nontransactional table
+    if (hadoopConf != null && !CarbonDataProcessorUtil
         .isHeaderValid(carbonLoadModel.getTableName(), csvColumns,
             carbonLoadModel.getCarbonDataLoadSchema(), staticPartitionCols)) {
       if (csvFile == null) {


[38/50] [abbrv] carbondata git commit: [CARBONDATA-2804] fix the bug when bloom filter or preaggregate datamap tried to be created on older V1-V2 version stores

Posted by ja...@apache.org.
[CARBONDATA-2804] fix the bug when bloom filter or preaggregate datamap tried to be created on older V1-V2 version stores

This PR changes reading the carbon file version from the carbondata file header to
the carbonindex file header, because the version field of the carbondata file
header is not compatible with older v1/v2 versions.

This closes #2601
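
The lookup order after this change is: probe the index headers of the valid segments and fall back to the configured default version when none is readable. A small, self-contained model of that order (names are illustrative, not the actual CarbonUtil code):

  import java.util.List;

  // Sketch only: the first readable index header wins; otherwise use the configured default.
  class FormatVersionResolver {
    interface SegmentProbe {
      Short readVersionFromIndexHeader(String segmentPath);  // null when no index file is found
    }

    static short resolve(List<String> validSegmentPaths, SegmentProbe probe, short defaultVersion) {
      for (String segmentPath : validSegmentPaths) {
        Short version = probe.readVersionFromIndexHeader(segmentPath);
        if (version != null) {
          return version;
        }
      }
      return defaultVersion;                                 // no valid segment had a readable index header
    }
  }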


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/9336924d
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/9336924d
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/9336924d

Branch: refs/heads/external-format
Commit: 9336924de3f3103507335b5ffbf9c0145be4812f
Parents: 26d9f3d
Author: ndwangsen <lu...@huawei.com>
Authored: Thu Aug 2 16:21:22 2018 +0800
Committer: xuchuanyin <xu...@hust.edu.cn>
Committed: Fri Aug 3 15:25:46 2018 +0800

----------------------------------------------------------------------
 .../apache/carbondata/core/util/CarbonUtil.java | 85 ++++++++++++++------
 1 file changed, 59 insertions(+), 26 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/9336924d/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
index 3eb6aae..205d160 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -43,6 +43,7 @@ import org.apache.carbondata.core.datastore.filesystem.CarbonFileFilter;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.exception.InvalidConfigurationException;
 import org.apache.carbondata.core.indexstore.BlockletDetailInfo;
+import org.apache.carbondata.core.indexstore.blockletindex.SegmentIndexFileStore;
 import org.apache.carbondata.core.keygenerator.mdkey.NumberCompressor;
 import org.apache.carbondata.core.localdictionary.generator.ColumnLocalDictionaryGenerator;
 import org.apache.carbondata.core.localdictionary.generator.LocalDictionaryGenerator;
@@ -88,7 +89,7 @@ import org.apache.carbondata.core.util.path.CarbonTablePath;
 import org.apache.carbondata.format.BlockletHeader;
 import org.apache.carbondata.format.DataChunk2;
 import org.apache.carbondata.format.DataChunk3;
-import org.apache.carbondata.format.FileHeader;
+import org.apache.carbondata.format.IndexHeader;
 
 import com.google.gson.Gson;
 import com.google.gson.GsonBuilder;
@@ -3194,11 +3195,33 @@ public final class CarbonUtil {
    */
   public static ColumnarFormatVersion getFormatVersion(CarbonTable carbonTable)
       throws IOException {
-    String storePath = null;
-    // if the carbontable is support flat folder
+    String segmentPath = null;
     boolean supportFlatFolder = carbonTable.isSupportFlatFolder();
+    CarbonIndexFileReader indexReader = new CarbonIndexFileReader();
+    ColumnarFormatVersion version = null;
+    SegmentIndexFileStore fileStore = new SegmentIndexFileStore();
+    CarbonProperties carbonProperties = CarbonProperties.getInstance();
+    // if the carbontable is support flat folder
     if (supportFlatFolder) {
-      storePath = carbonTable.getTablePath();
+      segmentPath = carbonTable.getTablePath();
+      FileFactory.FileType fileType = FileFactory.getFileType(segmentPath);
+      if (FileFactory.isFileExist(segmentPath, fileType)) {
+        fileStore.readAllIIndexOfSegment(segmentPath);
+        Map<String, byte[]> carbonIndexMap = fileStore.getCarbonIndexMap();
+        if (carbonIndexMap.size() == 0) {
+          version = carbonProperties.getFormatVersion();
+        }
+        for (byte[] fileData : carbonIndexMap.values()) {
+          try {
+            indexReader.openThriftReader(fileData);
+            IndexHeader indexHeader = indexReader.readIndexHeader();
+            version = ColumnarFormatVersion.valueOf((short)indexHeader.getVersion());
+            break;
+          } finally {
+            indexReader.closeThriftReader();
+          }
+        }
+      }
     } else {
       // get the valid segments
       SegmentStatusManager segmentStatusManager =
@@ -3206,34 +3229,44 @@ public final class CarbonUtil {
       SegmentStatusManager.ValidAndInvalidSegmentsInfo validAndInvalidSegmentsInfo =
           segmentStatusManager.getValidAndInvalidSegments();
       List<Segment> validSegments = validAndInvalidSegmentsInfo.getValidSegments();
-      CarbonProperties carbonProperties = CarbonProperties.getInstance();
       if (validSegments.isEmpty()) {
         return carbonProperties.getFormatVersion();
       }
-      storePath = carbonTable.getSegmentPath(validSegments.get(0).getSegmentNo());
-    }
-
-    CarbonFile[] carbonFiles = FileFactory
-        .getCarbonFile(storePath)
-        .listFiles(new CarbonFileFilter() {
-          @Override
-          public boolean accept(CarbonFile file) {
-            if (file == null) {
-              return false;
+      // get the carbon index file header from a valid segment
+      for (Segment segment : validSegments) {
+        segmentPath = carbonTable.getSegmentPath(segment.getSegmentNo());
+        FileFactory.FileType fileType = FileFactory.getFileType(segmentPath);
+        if (FileFactory.isFileExist(segmentPath, fileType)) {
+          fileStore.readAllIIndexOfSegment(segmentPath);
+          Map<String, byte[]> carbonIndexMap = fileStore.getCarbonIndexMap();
+          if (carbonIndexMap.size() == 0) {
+            LOGGER.warn("the valid segment path: " + segmentPath +
+                " does not exist in the system of table: " + carbonTable.getTableUniqueName());
+            continue;
+          }
+          for (byte[] fileData : carbonIndexMap.values()) {
+            try {
+              indexReader.openThriftReader(fileData);
+              IndexHeader indexHeader = indexReader.readIndexHeader();
+              version = ColumnarFormatVersion.valueOf((short)indexHeader.getVersion());
+              break;
+            } finally {
+              indexReader.closeThriftReader();
             }
-            return file.getName().endsWith("carbondata");
           }
-        });
-    if (carbonFiles == null || carbonFiles.length < 1) {
-      return CarbonProperties.getInstance().getFormatVersion();
+          // if get the carbon file version from a valid segment, then end
+          if (version != null) {
+            break;
+          }
+        }
+      }
+      // if all valid segments path does not in the system,
+      // then the carbon file verion as default
+      if (version == null) {
+        version = CarbonProperties.getInstance().getFormatVersion();
+      }
     }
-
-    CarbonFile carbonFile = carbonFiles[0];
-    // get the carbon file header
-    CarbonHeaderReader headerReader = new CarbonHeaderReader(carbonFile.getCanonicalPath());
-    FileHeader fileHeader = headerReader.readHeader();
-    int version = fileHeader.getVersion();
-    return ColumnarFormatVersion.valueOf((short)version);
+    return version;
   }
 
   /**


[36/50] [abbrv] carbondata git commit: [CARBONDATA-2813] Fixed code to get data size from LoadDetails if size is written there

Posted by ja...@apache.org.
[CARBONDATA-2813] Fixed code to get data size from LoadDetails if size is written there

Problem:
In 1.3.x, when index files are merged to form a mergeindex file, a mapping of which index files were merged into which mergeindex file is kept in the
segments file. In 1.4.x, both the index and merge index files are scanned to calculate the size of segments for major compaction. Because the index
files were deleted in the 1.3.x store, 1.4.x was throwing an "Unable to get file status" exception.

Solution:
Try to get the size of the segments from LoadMetadataDetails. If it is not present there, read the size from the index files.

This closes #2600
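
A compact model of the lookup order described above (illustrative names, not the actual CarbonDataMergerUtil code): prefer the size already recorded in the load metadata and fall back to scanning index files only when that field is empty.

  import java.util.function.LongSupplier;

  // Sketch only: the recorded size wins; index files are scanned only for legacy segments.
  class SegmentSizeResolver {
    static long sizeOf(String recordedDataSize, LongSupplier indexFileScan) {
      if (recordedDataSize != null && !recordedDataSize.isEmpty()) {
        return Long.parseLong(recordedDataSize);   // size written to tablestatus at load time
      }
      return indexFileScan.getAsLong();            // e.g. computed from the segment's index files
    }
  }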


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/38384cb9
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/38384cb9
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/38384cb9

Branch: refs/heads/external-format
Commit: 38384cb9f309cc7eb83e61e85c48dd8583921004
Parents: f2e898a
Author: kunal642 <ku...@gmail.com>
Authored: Thu Aug 2 11:44:20 2018 +0530
Committer: manishgupta88 <to...@gmail.com>
Committed: Thu Aug 2 18:14:56 2018 +0530

----------------------------------------------------------------------
 .../processing/merger/CarbonDataMergerUtil.java         | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/38384cb9/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
index 1162fc2..e3da86d 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
@@ -49,6 +49,8 @@ import org.apache.carbondata.core.writer.CarbonDeleteDeltaWriterImpl;
 import org.apache.carbondata.processing.loading.model.CarbonLoadModel;
 import org.apache.carbondata.processing.util.CarbonLoaderUtil;
 
+import org.apache.commons.lang.StringUtils;
+
 /**
  * utility class for load merging.
  */
@@ -649,8 +651,14 @@ public final class CarbonDataMergerUtil {
       // variable to store one  segment size across partition.
       long sizeOfOneSegmentAcrossPartition;
       if (segment.getSegmentFile() != null) {
-        sizeOfOneSegmentAcrossPartition = CarbonUtil.getSizeOfSegment(
-            carbonTable.getTablePath(), new Segment(segId, segment.getSegmentFile()));
+        // If LoadMetaDataDetail already has data size no need to calculate the data size from
+        // index files. If not there then read the index file and calculate size.
+        if (!StringUtils.isEmpty(segment.getDataSize())) {
+          sizeOfOneSegmentAcrossPartition = Long.parseLong(segment.getDataSize());
+        } else {
+          sizeOfOneSegmentAcrossPartition = CarbonUtil.getSizeOfSegment(carbonTable.getTablePath(),
+              new Segment(segId, segment.getSegmentFile()));
+        }
       } else {
         sizeOfOneSegmentAcrossPartition = getSizeOfSegment(carbonTable.getTablePath(), segId);
       }


[21/50] [abbrv] carbondata git commit: [CARBONDATA-2781] Added fix for NullPointerException when create datamap is killed from UI

Posted by ja...@apache.org.
[CARBONDATA-2781] Added fix for NullPointerException when create datamap is killed from UI

What was the issue?
During undo meta, the datamap was not being dropped.
For a pre-aggregate or timeseries table, the datamap was not being dropped from the schema because the undo meta method did not handle it.

What is the solution?
The datamap is now dropped during undo meta when the create command is killed from the UI.

This closes #2552
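
One part of the fix is that PreAggregateDataMapProvider now derives the child table name from the parent table and the datamap name at construction time, so cleanup no longer dereferences a relation identifier that may not exist yet. A small illustrative sketch of that idea (not the actual provider class):

  // Sketch only: capture the names up front; the pre-aggregate child table is
  // named <parentTable>_<dataMapName>, so nothing from the (possibly incomplete)
  // datamap schema is needed during cleanup.
  class PreAggregateCleanupInfo {
    private final String dbName;
    private final String childTableName;

    PreAggregateCleanupInfo(String parentDbName, String parentTableName, String dataMapName) {
      this.dbName = parentDbName;
      this.childTableName = parentTableName + '_' + dataMapName;
    }

    String qualifiedChildTable() {
      return dbName + "." + childTableName;
    }
  }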


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/3816e90e
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/3816e90e
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/3816e90e

Branch: refs/heads/external-format
Commit: 3816e90e758dde86c778b88af3d47d5b63f0f4db
Parents: e580d64
Author: praveenmeenakshi56 <pr...@gmail.com>
Authored: Wed Jul 25 21:52:54 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Wed Aug 1 19:01:36 2018 +0530

----------------------------------------------------------------------
 .../carbondata/datamap/PreAggregateDataMapProvider.java     | 9 ++++++---
 .../command/datamap/CarbonCreateDataMapCommand.scala        | 6 +++++-
 2 files changed, 11 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/3816e90e/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java b/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java
index a1f80b1..099d65d 100644
--- a/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java
+++ b/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java
@@ -39,11 +39,15 @@ public class PreAggregateDataMapProvider extends DataMapProvider {
   protected PreAggregateTableHelper helper;
   protected CarbonDropTableCommand dropTableCommand;
   protected SparkSession sparkSession;
+  private String dbName;
+  private String tableName;
 
   PreAggregateDataMapProvider(CarbonTable table, DataMapSchema schema,
       SparkSession sparkSession) {
     super(table, schema);
     this.sparkSession = sparkSession;
+    this.dbName = table.getDatabaseName();
+    this.tableName = table.getTableName() + '_' + schema.getDataMapName();
   }
 
   @Override
@@ -74,11 +78,10 @@ public class PreAggregateDataMapProvider extends DataMapProvider {
 
   @Override
   public void cleanMeta() {
-    DataMapSchema dataMapSchema = getDataMapSchema();
     dropTableCommand = new CarbonDropTableCommand(
         true,
-        new Some<>(dataMapSchema.getRelationIdentifier().getDatabaseName()),
-        dataMapSchema.getRelationIdentifier().getTableName(),
+        new Some<>(dbName),
+        tableName,
         true);
     dropTableCommand.processMetadata(sparkSession);
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/3816e90e/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
index c40dcb0..1e4c2c3 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
@@ -181,7 +181,11 @@ case class CarbonCreateDataMapCommand(
 
   override def undoMetadata(sparkSession: SparkSession, exception: Exception): Seq[Row] = {
     if (dataMapProvider != null) {
-      dataMapProvider.cleanMeta()
+        CarbonDropDataMapCommand(
+          dataMapName,
+          true,
+          Some(TableIdentifier(mainTable.getTableName, Some(mainTable.getDatabaseName))),
+          forceDrop = false).run(sparkSession)
     }
     Seq.empty
   }


[25/50] [abbrv] carbondata git commit: [CARBONDATA-2806] Delete delete delta files upon clean files for flat folder

Posted by ja...@apache.org.
[CARBONDATA-2806] Delete delete delta files upon clean files for flat folder

Problem:
Delete delta files are not removed by the clean files operation.
Solution:
Get the delete delta files using the segment update status manager and remove them during the clean operation.

This closes #2587
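
A minimal, self-contained model of the clean-up order introduced here (illustrative names, not the actual SegmentFileStore code): for every data file removed from a segment, its delete delta files are looked up and removed first.

  import java.util.List;

  // Sketch only: delete-delta files of each data file are removed along with the data file.
  class SegmentCleaner {
    interface DeltaResolver {
      String[] deleteDeltaFilesFor(String dataFilePath, String segmentNo);
    }
    interface FileRemover {
      void delete(String path);
    }

    static void clean(String segmentNo, List<String> dataFiles,
        DeltaResolver deltas, FileRemover remover) {
      for (String dataFile : dataFiles) {
        for (String deltaFile : deltas.deleteDeltaFilesFor(dataFile, segmentNo)) {
          remover.delete(deltaFile);   // previously left behind after clean files
        }
        remover.delete(dataFile);
      }
    }
  }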


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/af984101
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/af984101
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/af984101

Branch: refs/heads/external-format
Commit: af984101ebcd55f71e370e52e837157b45c529dd
Parents: a302cd1
Author: ravipesala <ra...@gmail.com>
Authored: Mon Jul 30 20:30:58 2018 +0530
Committer: Jacky Li <ja...@qq.com>
Committed: Wed Aug 1 22:15:48 2018 +0800

----------------------------------------------------------------------
 .../core/metadata/SegmentFileStore.java         | 18 ++++++----
 .../carbondata/core/util/DeleteLoadFolders.java | 35 +++++++++++++-------
 .../core/util/path/CarbonTablePath.java         |  8 +++++
 .../FlatFolderTableLoadingTestCase.scala        | 25 ++++++++++++++
 4 files changed, 68 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/af984101/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
index 67e58d1..111e444 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
@@ -56,6 +56,7 @@ import org.apache.carbondata.core.mutate.CarbonUpdateUtil;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
 import org.apache.carbondata.core.statusmanager.SegmentStatus;
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
+import org.apache.carbondata.core.statusmanager.SegmentUpdateStatusManager;
 import org.apache.carbondata.core.util.CarbonUtil;
 import org.apache.carbondata.core.util.DataFileFooterConverter;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
@@ -793,25 +794,30 @@ public class SegmentFileStore {
   /**
    * Deletes the segment file and its physical files like partition folders from disk
    * @param tablePath
-   * @param segmentFile
+   * @param segment
    * @param partitionSpecs
    * @throws IOException
    */
-  public static void deleteSegment(String tablePath, String segmentFile,
-      List<PartitionSpec> partitionSpecs) throws IOException {
-    SegmentFileStore fileStore = new SegmentFileStore(tablePath, segmentFile);
+  public static void deleteSegment(String tablePath, Segment segment,
+      List<PartitionSpec> partitionSpecs,
+      SegmentUpdateStatusManager updateStatusManager) throws Exception {
+    SegmentFileStore fileStore = new SegmentFileStore(tablePath, segment.getSegmentFileName());
     List<String> indexOrMergeFiles = fileStore.readIndexFiles(SegmentStatus.SUCCESS, true);
     Map<String, List<String>> indexFilesMap = fileStore.getIndexFilesMap();
     for (Map.Entry<String, List<String>> entry : indexFilesMap.entrySet()) {
       FileFactory.deleteFile(entry.getKey(), FileFactory.getFileType(entry.getKey()));
       for (String file : entry.getValue()) {
+        String[] deltaFilePaths =
+            updateStatusManager.getDeleteDeltaFilePath(file, segment.getSegmentNo());
+        for (String deltaFilePath : deltaFilePaths) {
+          FileFactory.deleteFile(deltaFilePath, FileFactory.getFileType(deltaFilePath));
+        }
         FileFactory.deleteFile(file, FileFactory.getFileType(file));
       }
     }
     deletePhysicalPartition(partitionSpecs, indexFilesMap, indexOrMergeFiles, tablePath);
     String segmentFilePath =
-        CarbonTablePath.getSegmentFilesLocation(tablePath) + CarbonCommonConstants.FILE_SEPARATOR
-            + segmentFile;
+        CarbonTablePath.getSegmentFilePath(tablePath, segment.getSegmentFileName());
     // Deletes the physical segment file
     FileFactory.deleteFile(segmentFilePath, FileFactory.getFileType(segmentFilePath));
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/af984101/core/src/main/java/org/apache/carbondata/core/util/DeleteLoadFolders.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/DeleteLoadFolders.java b/core/src/main/java/org/apache/carbondata/core/util/DeleteLoadFolders.java
index 0433ba4..a65294e 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/DeleteLoadFolders.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/DeleteLoadFolders.java
@@ -40,6 +40,7 @@ import org.apache.carbondata.core.mutate.CarbonUpdateUtil;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
 import org.apache.carbondata.core.statusmanager.SegmentStatus;
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
+import org.apache.carbondata.core.statusmanager.SegmentUpdateStatusManager;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 
 public final class DeleteLoadFolders {
@@ -75,21 +76,29 @@ public final class DeleteLoadFolders {
         absoluteTableIdentifier,
         currentDetails,
         isForceDelete,
-        specs);
+        specs,
+        currentDetails);
     if (newAddedLoadHistoryList != null && newAddedLoadHistoryList.length > 0) {
       physicalFactAndMeasureMetadataDeletion(
           absoluteTableIdentifier,
           newAddedLoadHistoryList,
           isForceDelete,
-          specs);
+          specs,
+          currentDetails);
     }
   }
 
-  public static void physicalFactAndMeasureMetadataDeletion(
-      AbsoluteTableIdentifier absoluteTableIdentifier,
-      LoadMetadataDetails[] loadDetails,
-      boolean isForceDelete,
-      List<PartitionSpec> specs) {
+  /**
+   * Delete the invalid data physically from table.
+   * @param absoluteTableIdentifier table identifier
+   * @param loadDetails Load details which need clean up
+   * @param isForceDelete is Force delete requested by user
+   * @param specs Partition specs
+   * @param currLoadDetails Current table status load details which are required for update manager.
+   */
+  private static void physicalFactAndMeasureMetadataDeletion(
+      AbsoluteTableIdentifier absoluteTableIdentifier, LoadMetadataDetails[] loadDetails,
+      boolean isForceDelete, List<PartitionSpec> specs, LoadMetadataDetails[] currLoadDetails) {
     CarbonTable carbonTable = DataMapStoreManager.getInstance().getCarbonTable(
         absoluteTableIdentifier);
     List<TableDataMap> indexDataMaps = new ArrayList<>();
@@ -104,14 +113,16 @@ public final class DeleteLoadFolders {
           "Failed to get datamaps for %s.%s, therefore the datamap files could not be cleaned.",
           absoluteTableIdentifier.getDatabaseName(), absoluteTableIdentifier.getTableName()));
     }
-
+    SegmentUpdateStatusManager updateStatusManager =
+        new SegmentUpdateStatusManager(carbonTable, currLoadDetails);
     for (final LoadMetadataDetails oneLoad : loadDetails) {
       if (checkIfLoadCanBeDeletedPhysically(oneLoad, isForceDelete)) {
         try {
           if (oneLoad.getSegmentFile() != null) {
-            SegmentFileStore
-                .deleteSegment(absoluteTableIdentifier.getTablePath(), oneLoad.getSegmentFile(),
-                    specs);
+            SegmentFileStore.deleteSegment(
+                absoluteTableIdentifier.getTablePath(),
+                new Segment(oneLoad.getLoadName(), oneLoad.getSegmentFile()),
+                specs, updateStatusManager);
           } else {
             String path = getSegmentPath(absoluteTableIdentifier, oneLoad);
             boolean status = false;
@@ -161,7 +172,7 @@ public final class DeleteLoadFolders {
             segments.add(new Segment(oneLoad.getLoadName()));
             dataMap.deleteDatamapData(segments);
           }
-        } catch (IOException e) {
+        } catch (Exception e) {
           LOGGER.warn("Unable to delete the file as per delete command " + oneLoad.getLoadName());
         }
       }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/af984101/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java b/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
index 275d3d6..6493e34 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
@@ -694,6 +694,14 @@ public class CarbonTablePath {
   }
 
   /**
+   * Get the segment file path of table
+   */
+  public static String getSegmentFilePath(String tablePath, String segmentFileName) {
+    return getMetadataPath(tablePath) + CarbonCommonConstants.FILE_SEPARATOR + "segments"
+        + CarbonCommonConstants.FILE_SEPARATOR + segmentFileName;
+  }
+
+  /**
    * Get the lock files directory
    */
   public static String getLockFilesDirPath(String tablePath) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/af984101/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala
index 9a60978..68f8ca7 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala
@@ -127,6 +127,31 @@ class FlatFolderTableLoadingTestCase extends QueryTest with BeforeAndAfterAll {
     sql("drop table if exists t1")
   }
 
+  test("merge index flat folder and delete delta issue") {
+    sql("drop table if exists flatfolder_delete")
+    sql(
+      """
+        | CREATE TABLE flatfolder_delete (empname String, designation String, doj Timestamp,
+        |  workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
+        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
+        |  utilization int,salary int,empno int)
+        | STORED BY 'org.apache.carbondata.format' tblproperties('flat_folder'='true')
+      """.stripMargin)
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE flatfolder_delete OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE flatfolder_delete OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE flatfolder_delete OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE flatfolder_delete OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
+    val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default", "flatfolder_delete")
+    sql(s"""delete from flatfolder_delete where empname='anandh'""")
+    assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles().filter(_.getName.endsWith(CarbonCommonConstants.DELETE_DELTA_FILE_EXT)).length == 4)
+    sql("Alter table flatfolder_delete compact 'minor'")
+    assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles().filter(_.getName.endsWith(CarbonCommonConstants.DELETE_DELTA_FILE_EXT)).length == 4)
+    sql("clean files for table flatfolder_delete")
+    assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles().filter(_.getName.endsWith(CarbonTablePath.MERGE_INDEX_FILE_EXT)).length == 1)
+    assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles().filter(_.getName.endsWith(CarbonCommonConstants.DELETE_DELTA_FILE_EXT)).length == 0)
+
+  }
+
   override def afterAll = {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,


[11/50] [abbrv] carbondata git commit: Fixed Spelling

Posted by ja...@apache.org.
Fixed Spelling

Fixed Spelling

This closes #2584


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1cf3f398
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1cf3f398
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1cf3f398

Branch: refs/heads/external-format
Commit: 1cf3f398dd037fea0a3c39daa015843921b81959
Parents: 7628571
Author: Jimmy Casey <ca...@gmail.com>
Authored: Sun Jul 29 21:35:27 2018 +0000
Committer: chenliang613 <ch...@huawei.com>
Committed: Tue Jul 31 16:08:49 2018 +0800

----------------------------------------------------------------------
 .../carbondata/processing/loading/csvinput/BoundedInputStream.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1cf3f398/processing/src/main/java/org/apache/carbondata/processing/loading/csvinput/BoundedInputStream.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/csvinput/BoundedInputStream.java b/processing/src/main/java/org/apache/carbondata/processing/loading/csvinput/BoundedInputStream.java
index 6fe9107..121dbd4 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/csvinput/BoundedInputStream.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/csvinput/BoundedInputStream.java
@@ -22,7 +22,7 @@ import java.io.InputStream;
 
 /**
  * Customarized reader class to read data from file
- * untill the upper threshold reached.
+ * until the upper threshold reached.
  */
 public class BoundedInputStream extends InputStream {
 


[07/50] [abbrv] carbondata git commit: [CARBONDATA-2791]Fix Encoding for Double if exceeds LONG.Max_value

Posted by ja...@apache.org.
[CARBONDATA-2791]Fix Encoding for Double if exceeds LONG.Max_value

If the decimal scale factor (Math.pow(10, decimalCount)) multiplied by absMaxValue exceeds Long.MAX_VALUE, fall back to direct compression.

This closes #2569
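
For context, a minimal standalone sketch (not part of the patch; the class and variable
names are illustrative only, the values mirror the new test case) of why the guard is
needed: a double scaled beyond Long.MAX_VALUE silently clamps when narrowed to long, so
without the explicit comparison the adaptive codec could pick a wrong target type.

  // OverflowGuardSketch.java -- hypothetical, for illustration only
  public class OverflowGuardSketch {
    public static void main(String[] args) {
      int decimalCount = 27;                     // 4.945464565654656546546546324 has ~27 decimals
      double absMaxValue = 4.945464565654656;
      double scaled = Math.pow(10, decimalCount) * absMaxValue;   // ~4.9e27
      System.out.println(scaled > Long.MAX_VALUE);                // true -> use DirectCompressCodec
      System.out.println((long) scaled == Long.MAX_VALUE);        // true: the narrowing cast clamps silently
    }
  }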


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/91837a6f
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/91837a6f
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/91837a6f

Branch: refs/heads/external-format
Commit: 91837a6fb0353a80c7be56640045ae821c7d6477
Parents: fc8510a
Author: Indhumathi27 <in...@gmail.com>
Authored: Fri Jul 27 12:22:25 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Sun Jul 29 21:26:54 2018 +0530

----------------------------------------------------------------------
 .../page/encoding/DefaultEncodingFactory.java   | 27 ++++++++++++--------
 .../complexType/TestAdaptiveComplexType.scala   | 17 ++++++++++++
 2 files changed, 33 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/91837a6f/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/DefaultEncodingFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/DefaultEncodingFactory.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/DefaultEncodingFactory.java
index fa8bfad..1cc2ba8 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/DefaultEncodingFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/DefaultEncodingFactory.java
@@ -300,18 +300,23 @@ public class DefaultEncodingFactory extends EncodingFactory {
       return new DirectCompressCodec(DataTypes.DOUBLE);
     } else {
       // double
-      long max = (long) (Math.pow(10, decimalCount) * absMaxValue);
-      DataType adaptiveDataType = fitLongMinMax(max, 0);
-      DataType deltaDataType = compareMinMaxAndSelectDataType(
-          (long) (Math.pow(10, decimalCount) * (maxValue - minValue)));
-      if (adaptiveDataType.getSizeInBytes() > deltaDataType.getSizeInBytes()) {
-        return new AdaptiveDeltaFloatingCodec(srcDataType, deltaDataType, stats);
-      } else if (adaptiveDataType.getSizeInBytes() < DataTypes.DOUBLE.getSizeInBytes() || (
-          (isComplexPrimitive) && (adaptiveDataType.getSizeInBytes() == DataTypes.DOUBLE
-              .getSizeInBytes()))) {
-        return new AdaptiveFloatingCodec(srcDataType, adaptiveDataType, stats);
-      } else {
+      // If absMaxValue exceeds LONG.MAX_VALUE, then go for direct compression
+      if ((Math.pow(10, decimalCount) * absMaxValue) > Long.MAX_VALUE) {
         return new DirectCompressCodec(DataTypes.DOUBLE);
+      } else {
+        long max = (long) (Math.pow(10, decimalCount) * absMaxValue);
+        DataType adaptiveDataType = fitLongMinMax(max, 0);
+        DataType deltaDataType = compareMinMaxAndSelectDataType(
+            (long) (Math.pow(10, decimalCount) * (maxValue - minValue)));
+        if (adaptiveDataType.getSizeInBytes() > deltaDataType.getSizeInBytes()) {
+          return new AdaptiveDeltaFloatingCodec(srcDataType, deltaDataType, stats);
+        } else if (adaptiveDataType.getSizeInBytes() < DataTypes.DOUBLE.getSizeInBytes() || (
+            (isComplexPrimitive) && (adaptiveDataType.getSizeInBytes() == DataTypes.DOUBLE
+                .getSizeInBytes()))) {
+          return new AdaptiveFloatingCodec(srcDataType, adaptiveDataType, stats);
+        } else {
+          return new DirectCompressCodec(DataTypes.DOUBLE);
+        }
       }
     }
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/91837a6f/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveComplexType.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveComplexType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveComplexType.scala
index 6b0a13f..7fff15d 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveComplexType.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveComplexType.scala
@@ -551,4 +551,21 @@ trait TestAdaptiveComplexType extends QueryTest {
       Seq(Row(1, Row(true, "abc", mutable.WrappedArray.make(Array(false, true, false))))))
   }
 
+  test("test Double with large decimalcount") {
+    sql("Drop table if exists adaptive")
+    sql(
+      "create table adaptive(array1 array<struct<double1:double,double2:double,double3:double>>) " +
+      "stored by 'carbondata'")
+    sql(
+      "insert into adaptive values('10.35:40000.35:1.7976931348623157$67890985.888:65.5656:200')," +
+      "('20.25:50000.25:4.945464565654656546546546324$10000000:300000:3000')")
+    checkExistence(sql("select * from adaptive"), true, "1.0E7,300000.0,3000.0")
+    sql("Drop table if exists adaptive")
+    sql("create table adaptive(struct_arr struct<array_db1:array<double>>) stored by 'carbondata'")
+    sql("insert into adaptive values('5555555.9559:12345678991234567:3444.999')")
+    checkExistence(sql("select * from adaptive"),
+      true,
+      "5555555.9559, 1.2345678991234568E16, 3444.999")
+  }
+
 }


[45/50] [abbrv] carbondata git commit: [CARBONDATA-2823] Support streaming property with datamap

Posted by ja...@apache.org.
[CARBONDATA-2823] Support streaming property with datamap

Since during query CarbonData gets splits from the streaming segment and the
columnar segments separately, we can support streaming with index
datamaps.

The preaggregate datamap already supports streaming tables, so here
we remove the outdated comments.

This closes #2609
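
For context, a hedged sketch of the capability switch being changed. The enclosing method
name is not visible in the hunks below; it is assumed here to be
DataMapFactory#willBecomeStale(TableOperation), and the default branch is hypothetical, so
treat this as an illustration rather than the factory's actual implementation.

  // illustrative fragment; TableOperation is org.apache.carbondata.core.features.TableOperation
  public boolean willBecomeStale(TableOperation operation) {   // assumed method name
    switch (operation) {
      case STREAMING:
        return false;  // after this patch: 'streaming'='true' no longer invalidates the index datamap
      case ALTER_CHANGE_DATATYPE:
      case DELETE:
      case UPDATE:
        return true;   // answers visible in the hunks are unchanged
      default:
        return true;   // hypothetical default; the real method enumerates every operation
    }
  }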


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/b9e51064
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/b9e51064
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/b9e51064

Branch: refs/heads/external-format
Commit: b9e51064017e428962d86ff8fccb3c1493e9864b
Parents: abcd4f6
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Mon Aug 6 15:34:51 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Tue Aug 7 18:21:30 2018 +0800

----------------------------------------------------------------------
 .../datamap/bloom/BloomCoarseGrainDataMapFactory.java        | 2 +-
 .../datamap/lucene/LuceneFineGrainDataMapFactory.java        | 2 +-
 .../apache/spark/sql/execution/strategy/DDLStrategy.scala    | 8 --------
 3 files changed, 2 insertions(+), 10 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/b9e51064/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
index 80a86cc..0d240c4 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
@@ -391,7 +391,7 @@ public class BloomCoarseGrainDataMapFactory extends DataMapFactory<CoarseGrainDa
       case ALTER_CHANGE_DATATYPE:
         return true;
       case STREAMING:
-        return true;
+        return false;
       case DELETE:
         return true;
       case UPDATE:

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b9e51064/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java
----------------------------------------------------------------------
diff --git a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java
index 8c7539f..ff2ffdd 100644
--- a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java
+++ b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java
@@ -98,7 +98,7 @@ public class LuceneFineGrainDataMapFactory extends LuceneDataMapFactoryBase<Fine
       case ALTER_CHANGE_DATATYPE:
         return true;
       case STREAMING:
-        return true;
+        return false;
       case DELETE:
         return true;
       case UPDATE:

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b9e51064/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
index 416dbec..94b988c 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/strategy/DDLStrategy.scala
@@ -35,7 +35,6 @@ import org.apache.spark.util.{CarbonReflectionUtils, FileUtils}
 
 import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
 import org.apache.carbondata.common.logging.{LogService, LogServiceFactory}
-import org.apache.carbondata.core.features.TableOperation
 import org.apache.carbondata.core.util.CarbonProperties
 
 /**
@@ -257,8 +256,6 @@ class DDLStrategy(sparkSession: SparkSession) extends SparkStrategy {
         if CarbonEnv.getInstance(sparkSession).carbonMetastore
           .tableExists(tableName)(sparkSession) => {
 
-        // TODO remove this limiation after streaming table support 'preaggregate' DataMap
-        // if the table has 'preaggregate' DataMap, it doesn't support streaming now
         val carbonTable = CarbonEnv.getInstance(sparkSession).carbonMetastore
           .lookupRelation(tableName)(sparkSession).asInstanceOf[CarbonRelation].carbonTable
         if (carbonTable != null && !carbonTable.getTableInfo.isTransactionalTable) {
@@ -266,11 +263,6 @@ class DDLStrategy(sparkSession: SparkSession) extends SparkStrategy {
             "Unsupported operation on non transactional table")
         }
 
-        if (carbonTable != null && !carbonTable.canAllow(carbonTable, TableOperation.STREAMING)) {
-          throw new MalformedCarbonCommandException(
-            "streaming is not supported for index datamap")
-        }
-
         // TODO remove this limitation later
         val property = properties.find(_._1.equalsIgnoreCase("streaming"))
         if (property.isDefined) {


[49/50] [abbrv] carbondata git commit: [CARBONDATA-2613] Support csv based carbon table

Posted by ja...@apache.org.
[CARBONDATA-2613] Support csv based carbon table

1. Create a CSV-based carbon table using
CREATE TABLE fact_table (col1 bigint, col2 string, ..., col100 string)
STORED BY 'CarbonData'
TBLPROPERTIES(
  'format'='csv',
  'csv.delimiter'=',',
  'csv.header'='col1,col2,col100')

2. Load data to this table using
ALTER TABLE fact_table ADD SEGMENT LOCATION 'path/to/data1'

This closes #2374


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1a26ac16
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1a26ac16
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1a26ac16

Branch: refs/heads/external-format
Commit: 1a26ac16e9f178b1a0914e0f9e8ad0ebf707a0c2
Parents: 7843845
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Wed Jun 13 09:03:28 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Tue Aug 7 21:06:13 2018 +0800

----------------------------------------------------------------------
 .../carbondata/common/annotations/Since.java    |  38 ++
 .../core/constants/CarbonCommonConstants.java   |   4 +
 .../core/metadata/schema/table/TableInfo.java   |  52 ++
 .../core/statusmanager/FileFormat.java          |  10 +-
 .../statusmanager/FileFormatProperties.java     |  32 ++
 .../core/statusmanager/LoadMetadataDetails.java |  16 +-
 .../hadoop/CarbonMultiBlockSplit.java           |   6 +
 .../carbondata/hadoop/CsvRecordReader.java      | 506 +++++++++++++++++++
 .../hadoop/api/CarbonFileInputFormat.java       |  11 +-
 .../hadoop/api/CarbonInputFormat.java           | 120 ++++-
 .../hadoop/api/CarbonTableInputFormat.java      |  12 +-
 .../datawithoutheader_delimiter_separator.csv   |  10 +
 .../CsvBasedCarbonTableSuite.scala              | 244 +++++++++
 .../carbondata/spark/format/CsvReadSupport.java | 107 ++++
 .../spark/format/VectorCsvReadSupport.java      | 130 +++++
 .../carbondata/spark/rdd/CarbonScanRDD.scala    |  77 ++-
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala |   1 +
 .../command/carbonTableSchemaCommon.scala       |  10 +
 .../management/CarbonAddSegmentCommand.scala    | 135 +++++
 .../sql/parser/CarbonSpark2SqlParser.scala      |  13 +-
 20 files changed, 1499 insertions(+), 35 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/common/src/main/java/org/apache/carbondata/common/annotations/Since.java
----------------------------------------------------------------------
diff --git a/common/src/main/java/org/apache/carbondata/common/annotations/Since.java b/common/src/main/java/org/apache/carbondata/common/annotations/Since.java
new file mode 100644
index 0000000..b7e4391
--- /dev/null
+++ b/common/src/main/java/org/apache/carbondata/common/annotations/Since.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.common.annotations;
+
+import java.lang.annotation.Documented;
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+/**
+ * The annotation indicates that the version number since a member or a type has been present.
+ */
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.FIELD, ElementType.TYPE, ElementType.METHOD})
+public @interface Since {
+  /**
+   * the value indicating a version number since this member
+   * or type has been present.
+   */
+  String value();
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index f2b9308..e10d1d5 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -971,6 +971,8 @@ public final class CarbonCommonConstants {
    */
   public static final String DICTIONARY_PATH = "dictionary_path";
   public static final String SORT_COLUMNS = "sort_columns";
+  // file format for the data files
+  public static final String FORMAT = "format";
   public static final String PARTITION_TYPE = "partition_type";
   public static final String NUM_PARTITIONS = "num_partitions";
   public static final String RANGE_INFO = "range_info";
@@ -993,6 +995,8 @@ public final class CarbonCommonConstants {
   // Flat folder support on table. when it is true all carbondata files store directly under table
   // path instead of sub folders.
   public static final String FLAT_FOLDER = "flat_folder";
+  // this will be used in hadoop conf to pass the format type to executor
+  public static final String CARBON_EXTERNAL_FORMAT_CONF_KEY = "carbon_external_format_type";
 
   /**
    * 16 mb size

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java
index 38145e5..46328f7 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java
@@ -25,11 +25,13 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.carbondata.common.annotations.Since;
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
@@ -89,6 +91,17 @@ public class TableInfo implements Serializable, Writable {
    *
    */
   private boolean isTransactionalTable = true;
+  /**
+   * The format of the fact table.
+   * By default it is carbondata, and we also support other format like CSV
+   */
+  @Since("1.4.1")
+  private String format = "carbondata";
+  /**
+   * properties for the format, such as delimiter/header for csv format
+   */
+  @Since("1.4.1")
+  private Map<String, String> formatProperties;
 
   // this identifier is a lazy field which will be created when it is used first time
   private AbsoluteTableIdentifier identifier;
@@ -104,6 +117,7 @@ public class TableInfo implements Serializable, Writable {
 
   public TableInfo() {
     dataMapSchemaList = new ArrayList<>();
+    formatProperties = new HashMap<>();
     isTransactionalTable = true;
   }
 
@@ -196,6 +210,22 @@ public class TableInfo implements Serializable, Writable {
     this.tablePath = tablePath;
   }
 
+  public String getFormat() {
+    return format;
+  }
+
+  public void setFormat(String format) {
+    this.format = format;
+  }
+
+  public Map<String, String> getFormatProperties() {
+    return formatProperties;
+  }
+
+  public void setFormatProperties(Map<String, String> formatProperties) {
+    this.formatProperties = formatProperties;
+  }
+
   public List<DataMapSchema> getDataMapSchemaList() {
     return dataMapSchemaList;
   }
@@ -291,6 +321,17 @@ public class TableInfo implements Serializable, Writable {
       }
     }
     out.writeBoolean(isSchemaModified);
+
+    out.writeUTF(format);
+    boolean isFormatPropertiesExists = null != formatProperties && formatProperties.size() > 0;
+    out.writeBoolean(isFormatPropertiesExists);
+    if (isFormatPropertiesExists) {
+      out.writeShort(formatProperties.size());
+      for (Map.Entry<String, String> entry : formatProperties.entrySet()) {
+        out.writeUTF(entry.getKey());
+        out.writeUTF(entry.getValue());
+      }
+    }
   }
 
   @Override public void readFields(DataInput in) throws IOException {
@@ -327,6 +368,17 @@ public class TableInfo implements Serializable, Writable {
       }
     }
     this.isSchemaModified = in.readBoolean();
+
+    this.format = in.readUTF();
+    boolean isFormatPropertiesExists = in.readBoolean();
+    if (isFormatPropertiesExists) {
+      short size = in.readShort();
+      for (int i = 0; i < size; i++) {
+        String key = in.readUTF();
+        String value = in.readUTF();
+        this.formatProperties.put(key, value);
+      }
+    }
   }
 
   public AbsoluteTableIdentifier getOrCreateAbsoluteTableIdentifier() {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormat.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormat.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormat.java
index c154c5f..2b61f0d 100644
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormat.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormat.java
@@ -18,7 +18,8 @@
 package org.apache.carbondata.core.statusmanager;
 
 /**
- * The data file format supported in carbondata project
+ * The data file format supported in carbondata project.
+ * The fileformat along with its property will be stored in tableinfo
  */
 public enum FileFormat {
 
@@ -26,7 +27,10 @@ public enum FileFormat {
   COLUMNAR_V3,
 
   // carbondata row file format, optimized for write
-  ROW_V1;
+  ROW_V1,
+
+  // external file format, such as parquet/csv
+  EXTERNAL;
 
   public static FileFormat getByOrdinal(int ordinal) {
     if (ordinal < 0 || ordinal >= FileFormat.values().length) {
@@ -38,6 +42,8 @@ public enum FileFormat {
         return COLUMNAR_V3;
       case 1:
         return ROW_V1;
+      case 2:
+        return EXTERNAL;
     }
 
     return COLUMNAR_V3;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormatProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormatProperties.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormatProperties.java
new file mode 100644
index 0000000..862c36c
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormatProperties.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.statusmanager;
+
+/**
+ * Provides the constant name for the file format properties
+ */
+public class FileFormatProperties {
+  public static class CSV {
+    public static final String HEADER = "csv.header";
+    public static final String DELIMITER = "csv.delimiter";
+    public static final String COMMENT = "csv.comment";
+    public static final String SKIP_EMPTY_LINE = "csv.skipemptyline";
+    public static final String QUOTE = "csv.quote";
+    public static final String ESCAPE = "csv.escape";
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java
index 9dc8fe6..4339e34 100644
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java
@@ -122,6 +122,11 @@ public class LoadMetadataDetails implements Serializable {
    * the file format of this segment
    */
   private FileFormat fileFormat = FileFormat.COLUMNAR_V3;
+  /**
+   * path of the fact files.
+   * Since format and formatProperties are stored in tableInfo, we do not store it in each segment
+   */
+  private String factFilePath;
 
   /**
    * Segment file name where it has the information of partition information.
@@ -429,8 +434,17 @@ public class LoadMetadataDetails implements Serializable {
     this.segmentFile = segmentFile;
   }
 
+  public String getFactFilePath() {
+    return factFilePath;
+  }
+
+  public void setFactFilePath(String factFilePath) {
+    this.factFilePath = factFilePath;
+  }
+
   @Override public String toString() {
     return "LoadMetadataDetails{" + "loadStatus=" + loadStatus + ", loadName='" + loadName + '\''
-        + ", loadStartTime='" + loadStartTime + '\'' + ", segmentFile='" + segmentFile + '\'' + '}';
+        + ", loadStartTime='" + loadStartTime + '\'' + ", factFilePath='" + factFilePath + '\''
+        + ", segmentFile='" + segmentFile + '\'' + '}';
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonMultiBlockSplit.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonMultiBlockSplit.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonMultiBlockSplit.java
index 0b991cb..ae11cf2 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonMultiBlockSplit.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/CarbonMultiBlockSplit.java
@@ -64,12 +64,18 @@ public class CarbonMultiBlockSplit extends InputSplit implements Serializable, W
       this.splitList.add((CarbonInputSplit)block);
     }
     this.locations = new String[]{hostname};
+    if (splitList.size() > 0) {
+      this.fileFormat = splitList.get(0).getFileFormat();
+    }
   }
 
   public CarbonMultiBlockSplit(List<CarbonInputSplit> splitList,
       String[] locations) {
     this.splitList = splitList;
     this.locations = locations;
+    if (splitList.size() > 0) {
+      this.fileFormat = splitList.get(0).getFileFormat();
+    }
     calculateLength();
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/hadoop/src/main/java/org/apache/carbondata/hadoop/CsvRecordReader.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/CsvRecordReader.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/CsvRecordReader.java
new file mode 100644
index 0000000..70f58c3
--- /dev/null
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/CsvRecordReader.java
@@ -0,0 +1,506 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.hadoop;
+
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.Reader;
+import java.io.UnsupportedEncodingException;
+import java.math.BigDecimal;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+import org.apache.carbondata.common.annotations.InterfaceStability;
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.constants.CarbonV3DataFormatConstants;
+import org.apache.carbondata.core.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
+import org.apache.carbondata.core.scan.expression.exception.FilterUnsupportedException;
+import org.apache.carbondata.core.scan.filter.FilterUtil;
+import org.apache.carbondata.core.scan.filter.GenericQueryType;
+import org.apache.carbondata.core.scan.filter.executer.FilterExecuter;
+import org.apache.carbondata.core.scan.filter.intf.RowImpl;
+import org.apache.carbondata.core.scan.filter.intf.RowIntf;
+import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
+import org.apache.carbondata.core.scan.model.QueryModel;
+import org.apache.carbondata.core.statusmanager.FileFormatProperties;
+import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.DataTypeUtil;
+import org.apache.carbondata.hadoop.api.CarbonTableInputFormat;
+import org.apache.carbondata.hadoop.readsupport.CarbonReadSupport;
+import org.apache.carbondata.processing.loading.csvinput.CSVInputFormat;
+
+import com.univocity.parsers.csv.CsvParser;
+import com.univocity.parsers.csv.CsvParserSettings;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.InputSplit;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.lib.input.FileSplit;
+
+/**
+ * scan csv file and filter on it
+ */
+@InterfaceStability.Evolving
+@InterfaceAudience.Internal
+public class CsvRecordReader<T> extends AbstractRecordReader<T> {
+  private static final LogService LOGGER = LogServiceFactory.getLogService(
+      CsvRecordReader.class.getName());
+  private static final int MAX_BATCH_SIZE =
+      CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT;
+  // vector reader
+  private boolean isVectorReader;
+  private T columnarBatch;
+
+  // metadata
+  private CarbonTable carbonTable;
+  private CarbonColumn[] carbonColumns;
+  // input
+  private QueryModel queryModel;
+  private CarbonReadSupport<T> readSupport;
+  private FileSplit fileSplit;
+  private Configuration hadoopConf;
+  // the index is schema ordinal, the value is the csv ordinal
+  private int[] schema2csvIdx;
+
+  // filter
+  private FilterExecuter filter;
+  // the index is the dimension ordinal, the value is the schema ordinal
+  private int[] filterColumn2SchemaIdx;
+  private Object[] internalValues;
+  private RowIntf internalRow;
+
+  // output
+  private CarbonColumn[] projection;
+  // the index is the projection column ordinal, the value is the schema ordinal
+  private int[] projectionColumn2SchemaIdx;
+  private Object[] outputValues;
+  private Object[][] batchOutputValues;
+  private T outputRow;
+
+  // inputMetricsStats
+  private InputMetricsStats inputMetricsStats;
+
+  // scan
+  private Reader reader;
+  private CsvParser csvParser;
+
+  public CsvRecordReader(QueryModel queryModel, CarbonReadSupport<T> readSupport) {
+    this.queryModel = queryModel;
+    this.readSupport = readSupport;
+  }
+
+  public CsvRecordReader(QueryModel queryModel, CarbonReadSupport readSupport,
+      InputMetricsStats inputMetricsStats) {
+    this(queryModel, readSupport);
+    this.inputMetricsStats = inputMetricsStats;
+  }
+
+  public boolean isVectorReader() {
+    return isVectorReader;
+  }
+
+  public void setVectorReader(boolean vectorReader) {
+    isVectorReader = vectorReader;
+  }
+
+  public void setQueryModel(QueryModel queryModel) {
+    this.queryModel = queryModel;
+  }
+
+  public void setInputMetricsStats(InputMetricsStats inputMetricsStats) {
+    this.inputMetricsStats = inputMetricsStats;
+  }
+
+  public void setReadSupport(CarbonReadSupport<T> readSupport) {
+    this.readSupport = readSupport;
+  }
+
+  @Override
+  public void initialize(InputSplit split, TaskAttemptContext context)
+      throws IOException, InterruptedException {
+    if (split instanceof CarbonInputSplit) {
+      fileSplit = (CarbonInputSplit) split;
+    } else if (split instanceof CarbonMultiBlockSplit) {
+      fileSplit = ((CarbonMultiBlockSplit) split).getAllSplits().get(0);
+    } else {
+      fileSplit = (FileSplit) split;
+    }
+
+    hadoopConf = context.getConfiguration();
+    if (queryModel == null) {
+      CarbonTableInputFormat inputFormat = new CarbonTableInputFormat<Object>();
+      queryModel = inputFormat.createQueryModel(split, context);
+    }
+
+    carbonTable = queryModel.getTable();
+
+    // since the sequence of csv header, schema, carbon internal row, projection are different,
+    // we need to init the column mappings
+    initializedIdxMapping();
+
+    // init filter
+    if (null != queryModel.getFilterExpressionResolverTree()) {
+      initializeFilter();
+    }
+
+    // init reading
+    initializeCsvReader();
+
+    this.readSupport.initialize(projection, carbonTable);
+  }
+
+  private void initializedIdxMapping() {
+    carbonColumns =
+        carbonTable.getCreateOrderColumn(carbonTable.getTableName()).toArray(new CarbonColumn[0]);
+    // for schema to csv mapping
+    schema2csvIdx = new int[carbonColumns.length];
+    if (!carbonTable.getTableInfo().getFormatProperties().containsKey(
+        FileFormatProperties.CSV.HEADER)) {
+      // if no header specified, it means that they are the same
+      LOGGER.info("no header specified, will take the schema from table as header");
+      for (int i = 0; i < carbonColumns.length; i++) {
+        schema2csvIdx[i] = i;
+      }
+    } else {
+      String[] csvHeader = carbonTable.getTableInfo().getFormatProperties().get(
+          FileFormatProperties.CSV.HEADER).split(",");
+      for (int i = 0; i < csvHeader.length; i++) {
+        boolean found = false;
+        for (int j = 0; j < carbonColumns.length; j++) {
+          if (StringUtils.strip(csvHeader[i]).equalsIgnoreCase(carbonColumns[j].getColName())) {
+            schema2csvIdx[carbonColumns[j].getSchemaOrdinal()] = i;
+            found = true;
+            break;
+          }
+        }
+        if (!found) {
+          throw new RuntimeException(
+              String.format("Can not find csv header '%s' in table fields", csvHeader[i]));
+        }
+      }
+    }
+
+    // for carbon internal row to schema mapping
+    filterColumn2SchemaIdx = new int[carbonColumns.length];
+    int filterIdx = 0;
+    for (CarbonDimension dimension : carbonTable.getDimensions()) {
+      filterColumn2SchemaIdx[filterIdx++] = dimension.getSchemaOrdinal();
+    }
+    for (CarbonMeasure measure : carbonTable.getMeasures()) {
+      filterColumn2SchemaIdx[filterIdx++] = measure.getSchemaOrdinal();
+    }
+
+    // for projects to schema mapping
+    projection = queryModel.getProjectionColumns();
+    projectionColumn2SchemaIdx = new int[projection.length];
+
+    for (int i = 0; i < projection.length; i++) {
+      for (int j = 0; j < carbonColumns.length; j++) {
+        if (projection[i].getColName().equals(carbonColumns[j].getColName())) {
+          projectionColumn2SchemaIdx[i] = projection[i].getSchemaOrdinal();
+          break;
+        }
+      }
+    }
+  }
+
+  private void initializeFilter() {
+    List<ColumnSchema> wrapperColumnSchemaList = CarbonUtil
+        .getColumnSchemaList(carbonTable.getDimensionByTableName(carbonTable.getTableName()),
+            carbonTable.getMeasureByTableName(carbonTable.getTableName()));
+    int[] dimLensWithComplex = new int[wrapperColumnSchemaList.size()];
+    for (int i = 0; i < dimLensWithComplex.length; i++) {
+      dimLensWithComplex[i] = Integer.MAX_VALUE;
+    }
+
+    int[] dictionaryColumnCardinality =
+        CarbonUtil.getFormattedCardinality(dimLensWithComplex, wrapperColumnSchemaList);
+    SegmentProperties segmentProperties =
+        new SegmentProperties(wrapperColumnSchemaList, dictionaryColumnCardinality);
+    Map<Integer, GenericQueryType> complexDimensionInfoMap = new HashMap<>();
+
+    FilterResolverIntf resolverIntf = queryModel.getFilterExpressionResolverTree();
+    filter = FilterUtil.getFilterExecuterTree(resolverIntf, segmentProperties,
+        complexDimensionInfoMap);
+    // for row filter, we need update column index
+    FilterUtil.updateIndexOfColumnExpression(resolverIntf.getFilterExpression(),
+        carbonTable.getDimensionOrdinalMax());
+  }
+
+  private void initializeCsvReader() throws IOException {
+    internalValues = new Object[carbonColumns.length];
+    internalRow = new RowImpl();
+    internalRow.setValues(internalValues);
+
+    outputValues = new Object[projection.length];
+    batchOutputValues = new Object[MAX_BATCH_SIZE][projection.length];
+
+    Path file = fileSplit.getPath();
+    FileSystem fs = file.getFileSystem(hadoopConf);
+    int bufferSize = Integer.parseInt(
+        hadoopConf.get(CSVInputFormat.READ_BUFFER_SIZE, CSVInputFormat.READ_BUFFER_SIZE_DEFAULT));
+    // note that here we read the whole file, not a split of the file
+    FSDataInputStream fsStream = fs.open(file, bufferSize);
+    reader = new InputStreamReader(fsStream, CarbonCommonConstants.DEFAULT_CHARSET);
+    // use default csv settings first, then update it using user specified properties later
+    CsvParserSettings settings = CSVInputFormat.extractCsvParserSettings(hadoopConf);
+    initCsvSettings(settings);
+    csvParser = new CsvParser(settings);
+    csvParser.beginParsing(reader);
+  }
+
+  /**
+   * update the settings using properties from user
+   */
+  private void initCsvSettings(CsvParserSettings settings) {
+    Map<String, String> csvProperties = carbonTable.getTableInfo().getFormatProperties();
+
+    if (csvProperties.containsKey(FileFormatProperties.CSV.DELIMITER)) {
+      settings.getFormat().setDelimiter(
+          csvProperties.get(FileFormatProperties.CSV.DELIMITER).charAt(0));
+    }
+
+    if (csvProperties.containsKey(FileFormatProperties.CSV.COMMENT)) {
+      settings.getFormat().setComment(
+          csvProperties.get(FileFormatProperties.CSV.COMMENT).charAt(0));
+    }
+
+    if (csvProperties.containsKey(FileFormatProperties.CSV.QUOTE)) {
+      settings.getFormat().setQuote(
+          csvProperties.get(FileFormatProperties.CSV.QUOTE).charAt(0));
+    }
+
+    if (csvProperties.containsKey(FileFormatProperties.CSV.ESCAPE)) {
+      settings.getFormat().setQuoteEscape(
+          csvProperties.get(FileFormatProperties.CSV.ESCAPE).charAt(0));
+    }
+
+    if (csvProperties.containsKey(FileFormatProperties.CSV.SKIP_EMPTY_LINE)) {
+      settings.setSkipEmptyLines(
+          Boolean.parseBoolean(csvProperties.get(FileFormatProperties.CSV.SKIP_EMPTY_LINE)));
+    }
+  }
+
+  @Override
+  public boolean nextKeyValue() throws IOException, InterruptedException {
+    if (isVectorReader) {
+      return nextColumnarBatch();
+    }
+
+    return nextRow();
+  }
+
+  private boolean nextColumnarBatch() throws IOException {
+    return scanAndFillBatch();
+  }
+
+  private boolean scanAndFillBatch() throws IOException {
+    int rowNum = 0;
+    if (null == filter) {
+      while (readRowFromFile() && rowNum < MAX_BATCH_SIZE) {
+        System.arraycopy(outputValues, 0, batchOutputValues[rowNum++], 0, outputValues.length);
+      }
+    } else {
+      try {
+        while (readRowFromFile() && rowNum < MAX_BATCH_SIZE) {
+          if (filter.applyFilter(internalRow, carbonTable.getDimensionOrdinalMax())) {
+            System.arraycopy(outputValues, 0, batchOutputValues[rowNum++], 0, outputValues.length);
+          }
+        }
+      } catch (FilterUnsupportedException e) {
+        throw new IOException("Failed to filter row in CarbonCsvRecordReader", e);
+      }
+    }
+    if (rowNum < MAX_BATCH_SIZE) {
+      Object[][] tmpBatchOutputValues = new Object[rowNum][];
+      for (int i = 0; i < rowNum; i++) {
+        tmpBatchOutputValues[i] = batchOutputValues[i];
+      }
+      System.arraycopy(batchOutputValues, 0, tmpBatchOutputValues, 0, rowNum);
+      for (int i = 0; i < tmpBatchOutputValues.length; i++) {
+      }
+      columnarBatch = readSupport.readRow(tmpBatchOutputValues);
+    } else {
+      columnarBatch = readSupport.readRow(batchOutputValues);
+    }
+    return rowNum > 0;
+  }
+
+  private boolean nextRow() throws IOException {
+    if (csvParser == null) {
+      return false;
+    }
+
+    if (!readRowFromFile()) {
+      return false;
+    }
+
+    if (null == filter) {
+      outputRow = readSupport.readRow(outputValues);
+      return true;
+    } else {
+      try {
+        boolean scanMore;
+        do {
+          scanMore = !filter.applyFilter(internalRow, carbonTable.getDimensionOrdinalMax());
+          if (!scanMore) {
+            outputRow = readSupport.readRow(outputValues);
+            return true;
+          }
+        } while (readRowFromFile());
+        // if we read the end of file and still need scanMore, it means that there is no row
+        return false;
+      } catch (FilterUnsupportedException e) {
+        throw new IOException("Failed to filter row in CarbonCsvRecordReader", e);
+      }
+    }
+  }
+
+  /**
+   * read from csv file and convert to internal row
+   * todo: prune with project/filter
+   * @return false, if it comes to an end
+   */
+  private boolean readRowFromFile() {
+    String[] parsedOut = csvParser.parseNext();
+    if (parsedOut == null) {
+      return false;
+    } else {
+      convertToInternalRow(parsedOut);
+      convertToOutputRow(parsedOut);
+      return true;
+    }
+  }
+
+  /**
+   * convert origin csv string row to carbondata internal row.
+   * The row will be used to do filter on it. Note that the dimensions are at the head
+   * while measures are at the end, so we need to adjust the values.
+   */
+  private void convertToInternalRow(String[] csvLine) {
+    try {
+      for (int i = 0; i < carbonColumns.length; i++) {
+        internalValues[i] = convertOriginValue2Carbon(
+            csvLine[schema2csvIdx[filterColumn2SchemaIdx[i]]],
+            carbonColumns[filterColumn2SchemaIdx[i]].getDataType());
+      }
+    } catch (UnsupportedEncodingException e) {
+      LOGGER.error(e, "Error occurs while convert input to internal row");
+      throw new RuntimeException(e);
+    }
+    internalRow.setValues(internalValues);
+  }
+
+  /**
+   * Since output the sequence of columns is not the same as input, we need to adjust them
+   */
+  private void convertToOutputRow(String[] csvLine) {
+    for (int i = 0; i < projection.length; i++) {
+      outputValues[i] = csvLine[schema2csvIdx[projectionColumn2SchemaIdx[i]]];
+    }
+  }
+
+  private Object convertOriginValue2Carbon(String value,
+      org.apache.carbondata.core.metadata.datatype.DataType t) throws UnsupportedEncodingException {
+    if (null == value) {
+      return null;
+    } else {
+      if (t == org.apache.carbondata.core.metadata.datatype.DataTypes.BOOLEAN) {
+        return Boolean.parseBoolean(value);
+      } else if (t == org.apache.carbondata.core.metadata.datatype.DataTypes.BYTE) {
+        return Byte.parseByte(value);
+      } else if (t == org.apache.carbondata.core.metadata.datatype.DataTypes.SHORT) {
+        return Short.parseShort(value);
+      } else if (t == org.apache.carbondata.core.metadata.datatype.DataTypes.INT) {
+        return Integer.parseInt(value);
+      } else if (t == org.apache.carbondata.core.metadata.datatype.DataTypes.LONG) {
+        return Long.parseLong(value);
+      } else if (t == org.apache.carbondata.core.metadata.datatype.DataTypes.FLOAT) {
+        return Float.parseFloat(value);
+      } else if (t == org.apache.carbondata.core.metadata.datatype.DataTypes.DOUBLE) {
+        return Double.parseDouble(value);
+      } else if (t == org.apache.carbondata.core.metadata.datatype.DataTypes.STRING) {
+        return value.getBytes(CarbonCommonConstants.DEFAULT_CHARSET);
+      } else if (org.apache.carbondata.core.metadata.datatype.DataTypes.isDecimal(t)) {
+        BigDecimal javaDecimal = new BigDecimal(value);
+        return DataTypeUtil.bigDecimalToByte(javaDecimal);
+      } else if (t == org.apache.carbondata.core.metadata.datatype.DataTypes.DATE) {
+        return Integer.parseInt(value);
+      } else if (t == org.apache.carbondata.core.metadata.datatype.DataTypes.TIMESTAMP) {
+        return Long.parseLong(value);
+      } else {
+        throw new RuntimeException("Unsupport datatype in CarbonCsvRecordReader");
+      }
+    }
+  }
+
+  @Override
+  public Void getCurrentKey() throws IOException, InterruptedException {
+    return null;
+  }
+
+  @Override
+  public T getCurrentValue() throws IOException, InterruptedException {
+    if (isVectorReader) {
+      if (inputMetricsStats != null) {
+        inputMetricsStats.incrementRecordRead(1L);
+      }
+      return (T) columnarBatch;
+    } else {
+      if (inputMetricsStats != null) {
+        inputMetricsStats.incrementRecordRead(1L);
+      }
+      return (T) outputRow;
+    }
+  }
+
+  @Override
+  public float getProgress() throws IOException, InterruptedException {
+    return 0;
+  }
+
+  @Override
+  public void close() throws IOException {
+    try {
+      if (reader != null) {
+        reader.close();
+      }
+      if (csvParser != null) {
+        csvParser.stopParsing();
+      }
+      if (readSupport != null) {
+        readSupport.close();
+      }
+    } finally {
+      reader = null;
+      csvParser = null;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java
index 0f02e12..605b681 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java
@@ -172,9 +172,14 @@ public class CarbonFileInputFormat<T> extends CarbonInputFormat<T> implements Se
     List<InputSplit> result = new LinkedList<InputSplit>();
 
     // for each segment fetch blocks matching filter in Driver BTree
-    List<CarbonInputSplit> dataBlocksOfSegment =
-        getDataBlocksOfSegment(job, carbonTable, filterResolver, matchedPartitions,
-            validSegments, partitionInfo, oldPartitionIdList);
+    List<CarbonInputSplit> dataBlocksOfSegment;
+    if (carbonTable.getTableInfo().getFormat().equals("carbondata")) {
+      dataBlocksOfSegment = getDataBlocksOfSegment(job, carbonTable, filterResolver,
+          matchedPartitions, validSegments, partitionInfo, oldPartitionIdList);
+    } else {
+      dataBlocksOfSegment = getDataBlocksOfSegment4ExternalFormat(job, carbonTable, filterResolver,
+          validSegments);
+    }
     numBlocks = dataBlocksOfSegment.size();
     result.addAll(dataBlocksOfSegment);
     return result;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
index eeb3ae8..5fdc522 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
@@ -34,6 +34,7 @@ import org.apache.carbondata.core.datamap.DataMapUtil;
 import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.datamap.dev.expr.DataMapExprWrapper;
 import org.apache.carbondata.core.datamap.dev.expr.DataMapWrapperSimpleInfo;
+import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.exception.InvalidConfigurationException;
 import org.apache.carbondata.core.indexstore.ExtendedBlocklet;
 import org.apache.carbondata.core.indexstore.PartitionSpec;
@@ -55,6 +56,9 @@ import org.apache.carbondata.core.stats.QueryStatistic;
 import org.apache.carbondata.core.stats.QueryStatisticsConstants;
 import org.apache.carbondata.core.stats.QueryStatisticsRecorder;
 import org.apache.carbondata.core.util.BlockletDataMapUtil;
+import org.apache.carbondata.core.statusmanager.FileFormat;
+import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
+import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.CarbonTimeStatisticsFactory;
 import org.apache.carbondata.core.util.CarbonUtil;
@@ -66,6 +70,7 @@ import org.apache.carbondata.hadoop.CarbonInputSplit;
 import org.apache.carbondata.hadoop.CarbonMultiBlockSplit;
 import org.apache.carbondata.hadoop.CarbonProjection;
 import org.apache.carbondata.hadoop.CarbonRecordReader;
+import org.apache.carbondata.hadoop.CsvRecordReader;
 import org.apache.carbondata.hadoop.readsupport.CarbonReadSupport;
 import org.apache.carbondata.hadoop.readsupport.impl.DictionaryDecodeReadSupport;
 
@@ -73,6 +78,8 @@ import org.apache.commons.collections.CollectionUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
@@ -361,6 +368,31 @@ m filterExpression
     }
   }
 
+  protected List<CarbonInputSplit> getDataBlocksOfSegment4ExternalFormat(JobContext job,
+      CarbonTable carbonTable, FilterResolverIntf resolver, List<Segment> segmentIds)
+      throws IOException {
+
+    QueryStatisticsRecorder recorder = CarbonTimeStatisticsFactory.createDriverRecorder();
+    QueryStatistic statistic = new QueryStatistic();
+
+    // get tokens for all the required FileSystem for table path
+    TokenCache.obtainTokensForNamenodes(job.getCredentials(),
+        new Path[] { new Path(carbonTable.getTablePath()) }, job.getConfiguration());
+    List<ExtendedBlocklet> prunedFiles = getPrunedFiles4ExternalFormat(job, carbonTable,
+        resolver, segmentIds);
+    List<CarbonInputSplit> resultFilteredFiles = new ArrayList<>();
+
+    for (ExtendedBlocklet blocklet : prunedFiles) {
+      List<CarbonInputSplit> inputSplits = convertToInputSplit4ExternalFormat(job, blocklet);
+      resultFilteredFiles.addAll(inputSplits);
+    }
+
+    statistic
+        .addStatistics(QueryStatisticsConstants.LOAD_BLOCKS_DRIVER, System.currentTimeMillis());
+    recorder.recordStatisticsForDriver(statistic, job.getConfiguration().get("query.id"));
+    return resultFilteredFiles;
+  }
+
   /**
    * get data blocks of given segment
    */
@@ -512,6 +544,32 @@ m filterExpression
     return prunedBlocklets;
   }
 
+  private List<ExtendedBlocklet> getPrunedFiles4ExternalFormat(JobContext job,
+      CarbonTable carbonTable,
+      FilterResolverIntf resolver, List<Segment> segmentIds) throws IOException {
+    ExplainCollector.addPruningInfo(carbonTable.getTableName());
+    if (resolver != null) {
+      ExplainCollector.setFilterStatement(resolver.getFilterExpression().getStatement());
+    } else {
+      ExplainCollector.setFilterStatement("none");
+    }
+
+    // there is no default datamap for external format, so return all files
+    List<ExtendedBlocklet> prunedFiles = new ArrayList<>();
+    LoadMetadataDetails[] loadMetadatas = SegmentStatusManager.readTableStatusFile(
+        CarbonTablePath.getTableStatusFilePath(carbonTable.getTablePath()));
+    for (LoadMetadataDetails loadMetadata : loadMetadatas) {
+      for (String file : loadMetadata.getFactFilePath().split(",")) {
+        ExtendedBlocklet extendedBlocklet = new ExtendedBlocklet(file, "0");
+        extendedBlocklet.setSegmentId(loadMetadata.getLoadName());
+        prunedFiles.add(extendedBlocklet);
+      }
+    }
+
+    // todo: skip datamap pruning for now, will add it back later
+    return prunedFiles;
+  }
+
   /**
    * Prune the segments from the already pruned blocklets.
    * @param segments
@@ -552,12 +610,72 @@ m filterExpression
     return split;
   }
 
+  private List<CarbonInputSplit> convertToInputSplit4ExternalFormat(JobContext jobContext,
+      ExtendedBlocklet extendedBlocklet) throws IOException {
+    List<CarbonInputSplit> splits = new ArrayList<CarbonInputSplit>();
+    String factFilePath = extendedBlocklet.getFilePath();
+    Path path = new Path(factFilePath);
+    FileSystem fs = FileFactory.getFileSystem(path);
+    FileStatus fileStatus = fs.getFileStatus(path);
+    long length = fileStatus.getLen();
+    if (length != 0) {
+      BlockLocation[] blkLocations = fs.getFileBlockLocations(path, 0, length);
+      long blkSize = fileStatus.getBlockSize();
+      long minSplitSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(jobContext));
+      long maxSplitSize = getMaxSplitSize(jobContext);
+      long splitSize = computeSplitSize(blkSize, minSplitSize, maxSplitSize);
+      long bytesRemaining = fileStatus.getLen();
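+      // keep creating full-size splits while the remainder exceeds splitSize
+      // by more than 10% (the usual 1.1 split slack)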
+      while (((double) bytesRemaining) / splitSize > 1.1) {
+        int blkIndex = getBlockIndex(blkLocations, length - bytesRemaining);
+        splits.add(new CarbonInputSplit(extendedBlocklet.getSegmentId(), path,
+            length - bytesRemaining,
+            splitSize, blkLocations[blkIndex].getHosts(),
+            blkLocations[blkIndex].getCachedHosts(), FileFormat.EXTERNAL));
+        bytesRemaining -= splitSize;
+      }
+      if (bytesRemaining != 0) {
+        int blkIndex = getBlockIndex(blkLocations, length - bytesRemaining);
+        splits.add(new CarbonInputSplit(extendedBlocklet.getSegmentId(), path,
+            length - bytesRemaining,
+            bytesRemaining, blkLocations[blkIndex].getHosts(),
+            blkLocations[blkIndex].getCachedHosts(), FileFormat.EXTERNAL));
+      }
+    } else {
+      splits.add(new CarbonInputSplit(extendedBlocklet.getSegmentId(), path, 0, length,
+          new String[0], FileFormat.EXTERNAL));
+    }
+    return splits;
+  }
+
   @Override public RecordReader<Void, T> createRecordReader(InputSplit inputSplit,
       TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
     Configuration configuration = taskAttemptContext.getConfiguration();
     QueryModel queryModel = createQueryModel(inputSplit, taskAttemptContext);
     CarbonReadSupport<T> readSupport = getReadSupportClass(configuration);
-    return new CarbonRecordReader<T>(queryModel, readSupport);
+    if (inputSplit instanceof CarbonMultiBlockSplit
+        && ((CarbonMultiBlockSplit) inputSplit).getFileFormat() == FileFormat.EXTERNAL) {
+      return createRecordReaderForExternalFormat(queryModel, readSupport,
+          configuration.get(CarbonCommonConstants.CARBON_EXTERNAL_FORMAT_CONF_KEY));
+    } else if (inputSplit instanceof CarbonInputSplit
+        && ((CarbonInputSplit) inputSplit).getFileFormat() == FileFormat.EXTERNAL) {
+      return createRecordReaderForExternalFormat(queryModel, readSupport,
+          configuration.get(CarbonCommonConstants.CARBON_EXTERNAL_FORMAT_CONF_KEY));
+    } else {
+      return new CarbonRecordReader<T>(queryModel, readSupport);
+    }
+  }
+
+  private RecordReader<Void, T> createRecordReaderForExternalFormat(QueryModel queryModel,
+      CarbonReadSupport readSupport, String format) {
+    try {
+      if ("csv".equals(format)) {
+        return new CsvRecordReader<T>(queryModel, readSupport);
+      } else {
+        throw new RuntimeException("Unsupported external file format " + format);
+      }
+    } catch (Throwable e) {
+      throw new RuntimeException("Failed to create recordReader for format " + format, e);
+    }
   }
 
   public QueryModel createQueryModel(InputSplit inputSplit, TaskAttemptContext taskAttemptContext)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
index be72983..2c9c1af 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
@@ -535,9 +535,15 @@ public class CarbonTableInputFormat<T> extends CarbonInputFormat<T> {
     isIUDTable = (updateStatusManager.getUpdateStatusDetails().length != 0);
 
     // for each segment fetch blocks matching filter in Driver BTree
-    List<org.apache.carbondata.hadoop.CarbonInputSplit> dataBlocksOfSegment =
-        getDataBlocksOfSegment(job, carbonTable, filterResolver, matchedPartitions,
-            validSegments, partitionInfo, oldPartitionIdList);
+    List<org.apache.carbondata.hadoop.CarbonInputSplit> dataBlocksOfSegment;
+    if (carbonTable.getTableInfo().getFormat().equals("")
+        || carbonTable.getTableInfo().getFormat().equals("carbondata")) {
+      dataBlocksOfSegment = getDataBlocksOfSegment(job, carbonTable, filterResolver,
+          matchedPartitions, validSegments, partitionInfo, oldPartitionIdList);
+    } else {
+      dataBlocksOfSegment = getDataBlocksOfSegment4ExternalFormat(job, carbonTable, filterResolver,
+          validSegments);
+    }
     numBlocks = dataBlocksOfSegment.size();
     for (org.apache.carbondata.hadoop.CarbonInputSplit inputSplit : dataBlocksOfSegment) {
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/integration/spark-common-test/src/test/resources/datawithoutheader_delimiter_separator.csv
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/resources/datawithoutheader_delimiter_separator.csv b/integration/spark-common-test/src/test/resources/datawithoutheader_delimiter_separator.csv
new file mode 100644
index 0000000..62efa68
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/datawithoutheader_delimiter_separator.csv
@@ -0,0 +1,10 @@
+11|arvind|SE|17-01-2007|1|developer|10|network|928478|17-02-2007|29-11-2016|96|96|5040
+12|krithin|SSE|29-05-2008|1|developer|11|protocol|928378|29-06-2008|30-12-2016|85|95|7124
+13|madhan|TPL|07-07-2009|2|tester|10|network|928478|07-08-2009|30-12-2016|88|99|9054
+14|anandh|SA|29-12-2010|3|manager|11|protocol|928278|29-01-2011|29-06-2016|77|92|11248
+15|ayushi|SSA|09-11-2011|1|developer|12|security|928375|09-12-2011|29-05-2016|99|91|13245
+16|pramod|SE|14-10-2012|1|developer|13|configManagement|928478|14-11-2012|29-12-2016|86|93|5040
+17|gawrav|PL|22-09-2013|2|tester|12|security|928778|22-10-2013|15-11-2016|78|97|9574
+18|sibi|TL|15-08-2014|2|tester|14|Learning|928176|15-09-2014|29-05-2016|84|98|7245
+19|shivani|PL|12-05-2015|1|developer|10|network|928977|12-06-2015|12-11-2016|88|91|11254
+20|bill|PM|01-12-2015|3|manager|14|Learning|928479|01-01-2016|30-11-2016|75|94|13547

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/externalformat/CsvBasedCarbonTableSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/externalformat/CsvBasedCarbonTableSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/externalformat/CsvBasedCarbonTableSuite.scala
new file mode 100644
index 0000000..7f07878
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/externalformat/CsvBasedCarbonTableSuite.scala
@@ -0,0 +1,244 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.externalformat
+
+
+import org.apache.spark.sql.CarbonEnv
+import org.apache.spark.sql.test.Spark2TestQueryExecutor
+import org.apache.spark.sql.test.util.QueryTest
+import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.statusmanager.SegmentStatusManager
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.core.util.path.CarbonTablePath
+
+class CsvBasedCarbonTableSuite extends QueryTest
+  with BeforeAndAfterEach with BeforeAndAfterAll {
+
+  val carbonTable = "fact_carbon_table"
+  val csvCarbonTable = "fact_carbon_csv_table"
+  val csvFile = s"$resourcesPath/datawithoutheader.csv"
+  val csvFile_delimiter_separator = s"$resourcesPath/datawithoutheader_delimiter_separator.csv"
+
+  // prepare normal carbon table for comparison
+  override protected def beforeAll(): Unit = {
+    sql(s"DROP TABLE IF EXISTS $carbonTable")
+    sql(
+      s"""
+         | CREATE TABLE $carbonTable(empno smallint, empname String, designation string,
+         | doj String, workgroupcategory int, workgroupcategoryname String,deptno int,
+         | deptname String, projectcode int, projectjoindate String,projectenddate String,
+         | attendance String, utilization String,salary String)
+         | STORED BY 'carbondata'
+       """.stripMargin
+    )
+    sql(
+      s"""
+         | LOAD DATA LOCAL INPATH '$csvFile' INTO TABLE $carbonTable
+         | OPTIONS('DELIMITER'=',',
+         | 'QUOTECHAR'='\"',
+         | 'FILEHEADER'='EMPno, empname,designation, doj, workgroupcategory, workgroupcategoryname, deptno, deptname, projectcode, projectjoindate, projectenddate, attendance, utilization, SALARY')
+        """.stripMargin)
+  }
+
+  override protected def afterAll(): Unit = {
+    sql(s"DROP TABLE IF EXISTS $carbonTable")
+  }
+
+  override protected def beforeEach(): Unit = {
+    sql(s"DROP TABLE IF EXISTS $csvCarbonTable")
+  }
+
+  override protected def afterEach(): Unit = {
+    sql(s"DROP TABLE IF EXISTS $csvCarbonTable")
+  }
+
+  private def checkQuery() {
+    // query all the columns
+    checkAnswer(sql(s"SELECT eMPno, empname,designation, doj, workgroupcategory, workgroupcategoryname, deptno, deptname, projectcode, projectjoindate, projectenddate, attendance, utilization, SALARY FROM $csvCarbonTable WHERE empno = 15"),
+      sql(s"SELECT eMPno, empname,designation, doj, workgroupcategory, workgroupcategoryname, deptno, deptname, projectcode, projectjoindate, projectenddate, attendance, utilization, SALARY FROM $carbonTable WHERE empno = 15"))
+    // query part of the columns
+    checkAnswer(sql(s"SELECT empno,empname, deptname, doj FROM $csvCarbonTable WHERE empno = 15"),
+      sql(s"SELECT empno,empname, deptname, doj FROM $carbonTable WHERE empno = 15"))
+    // the sequence of projection columns is not the same as that in the DDL
+    checkAnswer(sql(s"SELECT empname, empno, deptname, doj FROM $csvCarbonTable WHERE empno = 15"),
+      sql(s"SELECT empname, empno, deptname, doj FROM $carbonTable WHERE empno = 15"))
+    // query with a greater-than filter
+    checkAnswer(sql(s"SELECT empname, empno, deptname, doj FROM $csvCarbonTable WHERE empno > 15"),
+      sql(s"SELECT empname, empno, deptname, doj FROM $carbonTable WHERE empno > 15"))
+    // query with filter on dimension
+    checkAnswer(sql(s"SELECT empname, empno, deptname, doj FROM $csvCarbonTable WHERE empname = 'ayushi'"),
+      sql(s"SELECT empname, empno, deptname, doj FROM $carbonTable WHERE empname = 'ayushi'"))
+    // aggregate query
+    checkAnswer(sql(s"SELECT designation, sum(empno), avg(empno) FROM $csvCarbonTable GROUP BY designation"),
+      sql(s"SELECT designation, sum(empno), avg(empno) FROM $carbonTable GROUP BY designation"))
+  }
+
+  test("test csv based carbon table") {
+    // create csv based carbon table
+    sql(
+      s"""
+         | CREATE TABLE $csvCarbonTable(empno smallint, empname String, designation string,
+         | doj String, workgroupcategory int, workgroupcategoryname String,deptno int,
+         | deptname String, projectcode int, projectjoindate String,projectenddate String,
+         | attendance String, utilization String,salary String)
+         | STORED BY 'carbondata'
+         | TBLPROPERTIES(
+         | 'format'='csv',
+         | 'csv.header'='eMPno, empname,designation, doj, workgroupcategory, workgroupcategoryname, deptno, deptname, projectcode, projectjoindate, projectenddate, attendance, utilization, SALARY'
+         | )
+       """.stripMargin
+    )
+    // check that the external format info is stored in tableinfo
+    val tblInfo =
+      CarbonEnv.getCarbonTable(Option("default"), csvCarbonTable)(Spark2TestQueryExecutor.spark)
+    assertResult("csv")(tblInfo.getTableInfo.getFormat)
+    assertResult(1)(tblInfo.getTableInfo.getFormatProperties.size())
+    assertResult(
+      "eMPno, empname,designation, doj, workgroupcategory, workgroupcategoryname, deptno, deptname, projectcode, projectjoindate, projectenddate, attendance, utilization, SALARY".toLowerCase)(
+      tblInfo.getTableInfo.getFormatProperties.get("csv.header"))
+
+    // add segment for csv based carbontable
+    sql(s"ALTER TABLE $csvCarbonTable ADD SEGMENT LOCATION '$csvFile'")
+
+    // check that the fact files have been stored in tablestatus
+    val metadataPath = CarbonTablePath.getMetadataPath(tblInfo.getTablePath)
+    val details = SegmentStatusManager.readLoadMetadata(metadataPath)
+    assertResult(1)(details.length)
+    assertResult(csvFile)(details(0).getFactFilePath)
+
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER, "true")
+    // check query on csv based carbontable
+    // query with vector reader on
+    checkQuery()
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER, "false")
+    // query with vector reader off
+    checkQuery()
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER,
+      CarbonCommonConstants.ENABLE_VECTOR_READER_DEFAULT)
+  }
+
+  test("test csv based carbon table: only support csv now") {
+    val expectedException = intercept[Exception] {
+      sql(
+        s"""
+           | CREATE TABLE $csvCarbonTable(empname String, empno smallint, designation string,
+           | deptname String, projectcode int, projectjoindate String,projectenddate String,
+           | doj String, workgroupcategory int, workgroupcategoryname String,deptno int,
+           | attendance String, utilization String,salary String)
+           | STORED BY 'carbondata'
+           | TBLPROPERTIES(
+           | 'format'='parquet',
+           | 'csv.header'='eMPno, empname,designation, doj, workgroupcategory, workgroupcategoryname, deptno, deptname, projectcode, projectjoindate, projectenddate, attendance, utilization, SALARY'
+           | )
+       """.stripMargin
+      )
+    }
+
+    assert(expectedException.getMessage.contains("Currently we only support csv as external file format"))
+  }
+
+  test("test csv based carbon table: the sequence of header does not match schema") {
+    // create csv based carbon table, the sequence in schema is not the same in csv.header
+    sql(
+      s"""
+         | CREATE TABLE $csvCarbonTable(empname String, empno smallint, designation string,
+         | deptname String, projectcode int, projectjoindate String,projectenddate String,
+         | doj String, workgroupcategory int, workgroupcategoryname String,deptno int,
+         | attendance String, utilization String,salary String)
+         | STORED BY 'carbondata'
+         | TBLPROPERTIES(
+         | 'format'='csv',
+         | 'csv.header'='eMPno, empname,designation, doj, workgroupcategory, workgroupcategoryname, deptno, deptname, projectcode, projectjoindate, projectenddate, attendance, utilization, SALARY'
+         | )
+       """.stripMargin
+    )
+    // add segment for csv based carbontable
+    sql(s"ALTER TABLE $csvCarbonTable ADD SEGMENT LOCATION '$csvFile'")
+
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER, "true")
+    // check query on csv based carbontable
+    // query with vector reader on
+    checkQuery()
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER, "false")
+    // query with vector reader off
+    checkQuery()
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER,
+      CarbonCommonConstants.ENABLE_VECTOR_READER_DEFAULT)
+  }
+
+  test("test csv based carbon table: not specify the header") {
+    // create csv based carbon table without specifying the csv.header property
+    sql(
+      s"""
+         | CREATE TABLE $csvCarbonTable(empno smallint, empname String, designation string,
+         | doj String, workgroupcategory int, workgroupcategoryname String,deptno int,
+         | deptname String, projectcode int, projectjoindate String,projectenddate String,
+         | attendance String, utilization String,salary String)
+         | STORED BY 'carbondata'
+         | TBLPROPERTIES(
+         | 'format'='csv'
+         | )
+       """.stripMargin
+    )
+
+    // add segment for csv based carbontable
+    sql(s"ALTER TABLE $csvCarbonTable ADD SEGMENT LOCATION '$csvFile'")
+
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER, "true")
+    // check query on csv based carbontable
+    // query with vector reader on
+    checkQuery()
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER, "false")
+    // query with vector reader off
+    checkQuery()
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER,
+      CarbonCommonConstants.ENABLE_VECTOR_READER_DEFAULT)
+  }
+
+  test("test csv based carbon table: user specified delimiter") {
+    // create csv based carbon table with a user specified csv.delimiter
+    sql(
+      s"""
+         | CREATE TABLE $csvCarbonTable(empno smallint, empname String, designation string,
+         | doj String, workgroupcategory int, workgroupcategoryname String,deptno int,
+         | deptname String, projectcode int, projectjoindate String,projectenddate String,
+         | attendance String, utilization String,salary String)
+         | STORED BY 'carbondata'
+         | TBLPROPERTIES(
+         | 'format'='csv',
+         | 'csv.delimiter'='|'
+         | )
+       """.stripMargin
+    )
+
+    // add segment for csv based carbontable
+    sql(s"ALTER TABLE $csvCarbonTable ADD SEGMENT LOCATION '$csvFile_delimiter_separator'")
+
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER, "true")
+    // check query on csv based carbontable
+    // query with vector reader on
+    checkQuery()
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER, "false")
+    // query with vector reader off
+    checkQuery()
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_VECTOR_READER,
+      CarbonCommonConstants.ENABLE_VECTOR_READER_DEFAULT)
+  }
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/integration/spark-common/src/main/java/org/apache/carbondata/spark/format/CsvReadSupport.java
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/java/org/apache/carbondata/spark/format/CsvReadSupport.java b/integration/spark-common/src/main/java/org/apache/carbondata/spark/format/CsvReadSupport.java
new file mode 100644
index 0000000..53d6d7f
--- /dev/null
+++ b/integration/spark-common/src/main/java/org/apache/carbondata/spark/format/CsvReadSupport.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.format;
+
+import java.io.IOException;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+import org.apache.carbondata.common.annotations.InterfaceStability;
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
+import org.apache.carbondata.hadoop.readsupport.CarbonReadSupport;
+import org.apache.carbondata.spark.util.SparkDataTypeConverterImpl;
+
+import org.apache.spark.sql.catalyst.expressions.GenericInternalRow;
+import org.apache.spark.sql.types.CalendarIntervalType;
+import org.apache.spark.sql.types.Decimal;
+import org.apache.spark.sql.types.StructField;
+import org.apache.spark.sql.types.StructType;
+import org.apache.spark.unsafe.types.CalendarInterval;
+import org.apache.spark.unsafe.types.UTF8String;
+
+/**
+ * read support for csv
+ */
+@InterfaceStability.Evolving
+@InterfaceAudience.Internal
+public class CsvReadSupport<T> implements CarbonReadSupport<T> {
+  private CarbonColumn[] carbonColumns;
+  private StructType outputSchema;
+  private Object[] finalOutputValues;
+  @Override
+  public void initialize(CarbonColumn[] carbonColumns, CarbonTable carbonTable)
+      throws IOException {
+    this.carbonColumns = carbonColumns;
+    this.finalOutputValues = new Object[carbonColumns.length];
+    outputSchema = new StructType(convertCarbonColumnSpark(carbonColumns));
+  }
+
+  private StructField[] convertCarbonColumnSpark(CarbonColumn[] columns) {
+    return (StructField[]) new SparkDataTypeConverterImpl().convertCarbonSchemaToSparkSchema(
+        columns);
+  }
+
+  @Override
+  public T readRow(Object[] data) {
+    for (int i = 0; i < carbonColumns.length; i++) {
+      Object originValue = data[i];
+      org.apache.spark.sql.types.DataType t = outputSchema.apply(i).dataType();
+      finalOutputValues[i] = convertToSparkValue(originValue, t);
+    }
+    return (T) new GenericInternalRow(finalOutputValues);
+  }
+  private Object convertToSparkValue(Object originValue, org.apache.spark.sql.types.DataType t) {
+    if (null == originValue) {
+      return null;
+    } else {
+      String value = String.valueOf(originValue);
+      if (t == org.apache.spark.sql.types.DataTypes.BooleanType) {
+        return Boolean.parseBoolean(value);
+      } else if (t == org.apache.spark.sql.types.DataTypes.ByteType) {
+        return Byte.parseByte(value);
+      } else if (t == org.apache.spark.sql.types.DataTypes.ShortType) {
+        return Short.parseShort(value);
+      } else if (t == org.apache.spark.sql.types.DataTypes.IntegerType) {
+        return Integer.parseInt(value);
+      } else if (t == org.apache.spark.sql.types.DataTypes.LongType) {
+        return Long.parseLong(value);
+      } else if (t == org.apache.spark.sql.types.DataTypes.FloatType) {
+        return Float.parseFloat(value);
+      } else if (t == org.apache.spark.sql.types.DataTypes.DoubleType) {
+        return Double.parseDouble(value);
+      } else if (t == org.apache.spark.sql.types.DataTypes.StringType) {
+        return UTF8String.fromString(value);
+      } else if (t instanceof org.apache.spark.sql.types.DecimalType) {
+        return Decimal.fromDecimal(value);
+      } else if (t instanceof CalendarIntervalType) {
+        return CalendarInterval.fromString(value);
+      } else if (t instanceof org.apache.spark.sql.types.DateType) {
+        return Integer.parseInt(value);
+      } else if (t instanceof org.apache.spark.sql.types.TimestampType) {
+        return Long.parseLong(value);
+      } else {
+        return null;
+      }
+    }
+  }
+
+  @Override
+  public void close() {
+
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/integration/spark-common/src/main/java/org/apache/carbondata/spark/format/VectorCsvReadSupport.java
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/java/org/apache/carbondata/spark/format/VectorCsvReadSupport.java b/integration/spark-common/src/main/java/org/apache/carbondata/spark/format/VectorCsvReadSupport.java
new file mode 100644
index 0000000..81bd25d
--- /dev/null
+++ b/integration/spark-common/src/main/java/org/apache/carbondata/spark/format/VectorCsvReadSupport.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.format;
+
+import java.io.IOException;
+import java.math.BigInteger;
+
+import org.apache.carbondata.common.annotations.InterfaceAudience;
+import org.apache.carbondata.common.annotations.InterfaceStability;
+import org.apache.carbondata.core.constants.CarbonV3DataFormatConstants;
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
+import org.apache.carbondata.hadoop.readsupport.CarbonReadSupport;
+import org.apache.carbondata.spark.util.SparkDataTypeConverterImpl;
+
+import org.apache.spark.memory.MemoryMode;
+import org.apache.spark.sql.execution.vectorized.ColumnVector;
+import org.apache.spark.sql.execution.vectorized.ColumnarBatch;
+import org.apache.spark.sql.types.CalendarIntervalType;
+import org.apache.spark.sql.types.Decimal;
+import org.apache.spark.sql.types.DecimalType;
+import org.apache.spark.sql.types.StructField;
+import org.apache.spark.sql.types.StructType;
+import org.apache.spark.unsafe.types.CalendarInterval;
+import org.apache.spark.unsafe.types.UTF8String;
+
+/**
+ * read support for csv vector reader
+ */
+@InterfaceStability.Evolving
+@InterfaceAudience.Internal
+public class VectorCsvReadSupport<T> implements CarbonReadSupport<T> {
+  private static final int MAX_BATCH_SIZE =
+      CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT;
+  private CarbonColumn[] carbonColumns;
+  private ColumnarBatch columnarBatch;
+  private StructType outputSchema;
+
+  @Override
+  public void initialize(CarbonColumn[] carbonColumns, CarbonTable carbonTable)
+      throws IOException {
+    this.carbonColumns = carbonColumns;
+    outputSchema = new StructType(convertCarbonColumnSpark(carbonColumns));
+  }
+
+  private StructField[] convertCarbonColumnSpark(CarbonColumn[] columns) {
+    return (StructField[]) new SparkDataTypeConverterImpl().convertCarbonSchemaToSparkSchema(
+        columns);
+  }
+
+  @Override
+  public T readRow(Object[] data) {
+    columnarBatch = ColumnarBatch.allocate(outputSchema, MemoryMode.OFF_HEAP, MAX_BATCH_SIZE);
+    int rowId = 0;
+    for (; rowId < data.length; rowId++) {
+      for (int colIdx = 0; colIdx < carbonColumns.length; colIdx++) {
+        Object originValue = ((Object[]) data[rowId])[colIdx];
+        ColumnVector col = columnarBatch.column(colIdx);
+        org.apache.spark.sql.types.DataType t = col.dataType();
+        if (null == originValue) {
+          col.putNull(rowId);
+        } else {
+          String value = String.valueOf(originValue);
+          if (t == org.apache.spark.sql.types.DataTypes.BooleanType) {
+            col.putBoolean(rowId, Boolean.parseBoolean(value));
+          } else if (t == org.apache.spark.sql.types.DataTypes.ByteType) {
+            col.putByte(rowId, Byte.parseByte(value));
+          } else if (t == org.apache.spark.sql.types.DataTypes.ShortType) {
+            col.putShort(rowId, Short.parseShort(value));
+          } else if (t == org.apache.spark.sql.types.DataTypes.IntegerType) {
+            col.putInt(rowId, Integer.parseInt(value));
+          } else if (t == org.apache.spark.sql.types.DataTypes.LongType) {
+            col.putLong(rowId, Long.parseLong(value));
+          } else if (t == org.apache.spark.sql.types.DataTypes.FloatType) {
+            col.putFloat(rowId, Float.parseFloat(value));
+          } else if (t == org.apache.spark.sql.types.DataTypes.DoubleType) {
+            col.putDouble(rowId, Double.parseDouble(value));
+          } else if (t == org.apache.spark.sql.types.DataTypes.StringType) {
+            UTF8String v = UTF8String.fromString(value);
+            col.putByteArray(rowId, v.getBytes());
+          } else if (t instanceof org.apache.spark.sql.types.DecimalType) {
+            DecimalType dt = (DecimalType)t;
+            Decimal d = Decimal.fromDecimal(value);
+            if (dt.precision() <= Decimal.MAX_INT_DIGITS()) {
+              col.putInt(rowId, (int)d.toUnscaledLong());
+            } else if (dt.precision() <= Decimal.MAX_LONG_DIGITS()) {
+              col.putLong(rowId, d.toUnscaledLong());
+            } else {
+              final BigInteger integer = d.toJavaBigDecimal().unscaledValue();
+              byte[] bytes = integer.toByteArray();
+              col.putByteArray(rowId, bytes, 0, bytes.length);
+            }
+          } else if (t instanceof CalendarIntervalType) {
+            CalendarInterval c = CalendarInterval.fromString(value);
+            col.getChildColumn(0).putInt(rowId, c.months);
+            col.getChildColumn(1).putLong(rowId, c.microseconds);
+          } else if (t instanceof org.apache.spark.sql.types.DateType) {
+            col.putInt(rowId, Integer.parseInt(value));
+          } else if (t instanceof org.apache.spark.sql.types.TimestampType) {
+            col.putLong(rowId, Long.parseLong(value));
+          }
+        }
+      }
+    }
+    columnarBatch.setNumRows(rowId);
+    return (T) columnarBatch;
+  }
+
+  @Override
+  public void close() {
+    if (columnarBatch != null) {
+      columnarBatch.close();
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
index 6b43999..3461d3a 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
@@ -59,6 +59,7 @@ import org.apache.carbondata.hadoop.readsupport.CarbonReadSupport
 import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil
 import org.apache.carbondata.processing.util.CarbonLoaderUtil
 import org.apache.carbondata.spark.InitInputMetrics
+import org.apache.carbondata.spark.format.{CsvReadSupport, VectorCsvReadSupport}
 import org.apache.carbondata.spark.util.{SparkDataTypeConverterImpl, Util}
 import org.apache.carbondata.streaming.{CarbonStreamInputFormat, CarbonStreamRecordReader}
 
@@ -88,6 +89,7 @@ class CarbonScanRDD[T: ClassTag](
   private var vectorReader = false
 
   private val bucketedTable = tableInfo.getFactTable.getBucketingInfo
+  private val storageFormat = tableInfo.getFormat
 
   @transient val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
 
@@ -134,10 +136,13 @@ class CarbonScanRDD[T: ClassTag](
       // 2. for stream splits, create partition for each split by default
       val columnarSplits = new ArrayList[InputSplit]()
       val streamSplits = new ArrayBuffer[InputSplit]()
+      val externalSplits = new ArrayBuffer[InputSplit]()
       splits.asScala.foreach { split =>
         val carbonInputSplit = split.asInstanceOf[CarbonInputSplit]
         if (FileFormat.ROW_V1 == carbonInputSplit.getFileFormat) {
           streamSplits += split
+        } else if (FileFormat.EXTERNAL == carbonInputSplit.getFileFormat) {
+          externalSplits += split
         } else {
           columnarSplits.add(split)
         }
@@ -147,31 +152,39 @@ class CarbonScanRDD[T: ClassTag](
       distributeEndTime = System.currentTimeMillis()
       // check and remove InExpression from filterExpression
       checkAndRemoveInExpressinFromFilterExpression(batchPartitions)
-      if (streamSplits.isEmpty) {
-        partitions = batchPartitions.toArray
-      } else {
-        val index = batchPartitions.length
-        val streamPartitions: mutable.Buffer[Partition] =
-          streamSplits.zipWithIndex.map { splitWithIndex =>
-            val multiBlockSplit =
-              new CarbonMultiBlockSplit(
-                Seq(splitWithIndex._1.asInstanceOf[CarbonInputSplit]).asJava,
-                splitWithIndex._1.getLocations,
-                FileFormat.ROW_V1)
-            new CarbonSparkPartition(id, splitWithIndex._2 + index, multiBlockSplit)
-          }
-        if (batchPartitions.isEmpty) {
-          partitions = streamPartitions.toArray
-        } else {
-          logInfo(
-            s"""
-               | Identified no.of Streaming Blocks: ${ streamPartitions.size },
-          """.stripMargin)
-          // should keep the order by index of partition
-          batchPartitions.appendAll(streamPartitions)
-          partitions = batchPartitions.toArray
+
+      def generateNonBatchPartitions(index: Int, splits : ArrayBuffer[InputSplit],
+          format: FileFormat): mutable.Buffer[Partition] = {
+        splits.zipWithIndex.map { splitWithIndex =>
+          val multiBlockSplit =
+            new CarbonMultiBlockSplit(
+              Seq(splitWithIndex._1.asInstanceOf[CarbonInputSplit]).asJava,
+              splitWithIndex._1.getLocations,
+              format)
+          new CarbonSparkPartition(id, splitWithIndex._2 + index, multiBlockSplit)
         }
       }
+
+      val allPartitions: mutable.Buffer[Partition] = mutable.Buffer()
+      val index = batchPartitions.length
+      val streamPartitions: mutable.Buffer[Partition] = generateNonBatchPartitions(
+        index, streamSplits, FileFormat.ROW_V1)
+      val externalPartitions: mutable.Buffer[Partition] = generateNonBatchPartitions(
+        index + streamPartitions.length, externalSplits, FileFormat.EXTERNAL)
+
+      if (batchPartitions.nonEmpty) {
+        LOGGER.info(s"Identified no.of batch blocks: ${batchPartitions.size}")
+        allPartitions.appendAll(batchPartitions)
+      }
+      if (streamPartitions.nonEmpty) {
+        LOGGER.info(s"Identified no.of stream blocks: ${streamPartitions.size}")
+        allPartitions.appendAll(streamPartitions)
+      }
+      if (externalPartitions.nonEmpty) {
+        LOGGER.info(s"Identified no.of external blocks: ${externalPartitions.size}")
+        allPartitions.appendAll(externalPartitions)
+      }
+      partitions = allPartitions.toArray
       partitions
     } finally {
       Profiler.invokeIfEnable {
@@ -359,7 +372,7 @@ class CarbonScanRDD[T: ClassTag](
     }
     logInfo(
       s"""
-         | Identified no.of.blocks: $noOfBlocks,
+         | Identified no.of.blocks(columnar): $noOfBlocks,
          | no.of.tasks: $noOfTasks,
          | no.of.nodes: $noOfNodes,
          | parallelism: $parallelism
@@ -427,6 +440,22 @@ class CarbonScanRDD[T: ClassTag](
             CarbonTimeStatisticsFactory.createExecutorRecorder(model.getQueryId))
           streamReader.setQueryModel(model)
           streamReader
+        case FileFormat.EXTERNAL =>
+          require(storageFormat.equals("csv"),
+            "Currently we only support csv as external file format")
+          attemptContext.getConfiguration.set(
+            CarbonCommonConstants.CARBON_EXTERNAL_FORMAT_CONF_KEY, storageFormat)
+          val externalRecordReader = format.createRecordReader(inputSplit, attemptContext)
+            .asInstanceOf[CsvRecordReader[Object]]
+          externalRecordReader.setVectorReader(vectorReader)
+          externalRecordReader.setInputMetricsStats(inputMetricsStats)
+          externalRecordReader.setQueryModel(model)
+          if (vectorReader) {
+            externalRecordReader.setReadSupport(new VectorCsvReadSupport[Object]())
+          } else {
+            externalRecordReader.setReadSupport(new CsvReadSupport[Object]())
+          }
+          externalRecordReader
         case _ =>
           // create record reader for CarbonData file format
           if (vectorReader) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index 12999d0..8466bfc 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -187,6 +187,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   protected val STREAM = carbonKeyWord("STREAM")
   protected val STREAMS = carbonKeyWord("STREAMS")
   protected val STMPROPERTIES = carbonKeyWord("STMPROPERTIES")
+  protected val LOCATION = carbonKeyWord("LOCATION")
 
   protected val doubleQuotedString = "\"([^\"]+)\"".r
   protected val singleQuotedString = "'([^']+)'".r

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
index 1b48c08..56e91f9 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
@@ -890,6 +890,16 @@ class TableNewProcessor(cm: TableModel) {
       cm.tableName))
     tableInfo.setLastUpdatedTime(System.currentTimeMillis())
     tableInfo.setFactTable(tableSchema)
+    val format = cm.tableProperties.get(CarbonCommonConstants.FORMAT)
+    if (format.isDefined) {
+      if (!format.get.equalsIgnoreCase("csv")) {
+        CarbonException.analysisException(s"Currently we only support csv as external file format")
+      }
+      tableInfo.setFormat(format.get)
+      val formatProperties = cm.tableProperties.filter(pair =>
+        pair._1.startsWith(s"${format.get.toLowerCase}.")).asJava
+      tableInfo.setFormatProperties(formatProperties)
+    }
     tableInfo
   }
 


[18/50] [abbrv] carbondata git commit: [CARBONDATA-2805] Fix the ordering mismatch of segment numbers during custom compaction

Posted by ja...@apache.org.
[CARBONDATA-2805] Fix the ordering mismatch of segment numbers during custom compaction

Problem:
When segments 0 to 6 exist and 1, 2, 3 are given for custom compaction, the compacted segment should be 1.1, but sometimes
3.1 is created instead, which is wrong. This is because the custom segment IDs were passed in a HashSet and finally inserted into a
HashMap while identifying the segments to be merged. HashMap and HashSet do not guarantee insertion order, which may lead to a mismatch of segment numbers.

Solution:
Use LinkedHashSet and LinkedHashMap, which always preserve the insertion order.
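
For illustration only (a minimal standalone Java sketch, not part of the patch), the difference in iteration order looks like this; the segment ids used below are just example values:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    public class InsertionOrderDemo {
      public static void main(String[] args) {
        // segment ids in the order the user gave them for custom compaction
        List<String> customSegmentIds = Arrays.asList("1", "2", "3");
        Set<String> hashed = new HashSet<>(customSegmentIds);        // iteration order undefined
        Set<String> linked = new LinkedHashSet<>(customSegmentIds);  // iterates 1, 2, 3
        System.out.println("HashSet       : " + hashed);
        System.out.println("LinkedHashSet : " + linked);
      }
    }

With small sets the HashSet may happen to print in order, but that order is not guaranteed, which is exactly the bug described above.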

This closes #2585


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/c29aef88
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/c29aef88
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/c29aef88

Branch: refs/heads/external-format
Commit: c29aef880a57d1f1297361a5296e77af3904d661
Parents: cfbf7b6
Author: akashrn5 <ak...@gmail.com>
Authored: Mon Jul 30 19:22:29 2018 +0530
Committer: manishgupta88 <to...@gmail.com>
Committed: Wed Aug 1 13:38:08 2018 +0530

----------------------------------------------------------------------
 .../processing/merger/CarbonDataMergerUtil.java           | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/c29aef88/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
index 78af751..1162fc2 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonDataMergerUtil.java
@@ -391,7 +391,6 @@ public final class CarbonDataMergerUtil {
           CarbonLoadModel carbonLoadModel, long compactionSize,
           List<LoadMetadataDetails> segments, CompactionType compactionType,
           List<String> customSegmentIds) throws IOException, MalformedCarbonCommandException {
-    String tablePath = carbonLoadModel.getTablePath();
     Map<String, String> tableLevelProperties = carbonLoadModel.getCarbonDataLoadSchema()
             .getCarbonTable().getTableInfo().getFactTable().getTableProperties();
     List<LoadMetadataDetails> sortedSegments = new ArrayList<LoadMetadataDetails>(segments);
@@ -400,7 +399,7 @@ public final class CarbonDataMergerUtil {
 
     if (CompactionType.CUSTOM == compactionType) {
       return identitySegmentsToBeMergedBasedOnSpecifiedSegments(sortedSegments,
-              new HashSet<>(customSegmentIds));
+              new LinkedHashSet<>(customSegmentIds));
     }
 
     // Check for segments which are qualified for IUD compaction.
@@ -424,7 +423,7 @@ public final class CarbonDataMergerUtil {
     if (CompactionType.MAJOR == compactionType) {
 
       listOfSegmentsToBeMerged = identifySegmentsToBeMergedBasedOnSize(compactionSize,
-              listOfSegmentsLoadedInSameDateInterval, carbonLoadModel, tablePath);
+              listOfSegmentsLoadedInSameDateInterval, carbonLoadModel);
     } else {
 
       listOfSegmentsToBeMerged =
@@ -462,7 +461,7 @@ public final class CarbonDataMergerUtil {
           List<LoadMetadataDetails> listOfSegments,
           Set<String> segmentIds) throws MalformedCarbonCommandException {
     Map<String, LoadMetadataDetails> specifiedSegments =
-            new HashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
+            new LinkedHashMap<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
     for (LoadMetadataDetails detail : listOfSegments) {
       if (segmentIds.contains(detail.getLoadName())) {
         specifiedSegments.put(detail.getLoadName(), detail);
@@ -623,13 +622,12 @@ public final class CarbonDataMergerUtil {
    * @param listOfSegmentsAfterPreserve  the segments list after
    *        preserving the configured number of latest loads
    * @param carbonLoadModel carbon load model
-   * @param tablePath the store location of the segment
    * @return the list of segments that need to be merged
    *         based on the Size in case of Major compaction
    */
   private static List<LoadMetadataDetails> identifySegmentsToBeMergedBasedOnSize(
       long compactionSize, List<LoadMetadataDetails> listOfSegmentsAfterPreserve,
-      CarbonLoadModel carbonLoadModel, String tablePath) throws IOException {
+      CarbonLoadModel carbonLoadModel) throws IOException {
 
     List<LoadMetadataDetails> segmentsToBeMerged =
         new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);


[12/50] [abbrv] carbondata git commit: [CARBONDATA-2789] Support Hadoop 2.8.3 eco-system integration

Posted by ja...@apache.org.
[CARBONDATA-2789] Support Hadoop 2.8.3 eco-system integration

Add a hadoop 2.8.3 profile; compilation passes with it.
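
As a usage note (the command below assumes the standard Maven profile mechanism and is not stated in the patch), the new profile can be activated at build time with

    mvn clean package -DskipTests -Phadoop-2.8

combined with whichever existing spark profile is in use.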

This closes #2566


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/7b538906
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/7b538906
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/7b538906

Branch: refs/heads/external-format
Commit: 7b5389064b6e847b220ab12ce09f091425ac3322
Parents: 1cf3f39
Author: chenliang613 <ch...@huawei.com>
Authored: Fri Jul 27 10:51:45 2018 +0800
Committer: ravipesala <ra...@gmail.com>
Committed: Tue Jul 31 17:47:37 2018 +0530

----------------------------------------------------------------------
 pom.xml | 6 ++++++
 1 file changed, 6 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/7b538906/pom.xml
----------------------------------------------------------------------
diff --git a/pom.xml b/pom.xml
index ec68c1d..5029056 100644
--- a/pom.xml
+++ b/pom.xml
@@ -466,6 +466,12 @@
       </build>
     </profile>
     <profile>
+      <id>hadoop-2.8</id>
+      <properties>
+        <hadoop.version>2.8.3</hadoop.version>
+      </properties>
+    </profile>
+    <profile>
       <id>spark-2.1</id>
       <properties>
         <spark.version>2.1.0</spark.version>


[19/50] [abbrv] carbondata git commit: [CARBONDATA-2790][BloomDataMap]Optimize default parameter for bloomfilter datamap

Posted by ja...@apache.org.
[CARBONDATA-2790][BloomDataMap]Optimize default parameter for bloomfilter datamap

To provide better query performance for the bloomfilter datamap by default,
the default bloom_size is raised from 32000 to 640000 and the default bloom_fpp
is tightened from 0.01 to 0.00001.
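
A minimal sketch of how these two parameters drive the per-blocklet filter, using plain Guava purely for illustration (the datamap's internal bloom filter implementation may differ; the column value and class name below are assumptions for the example):

    import java.nio.charset.StandardCharsets;
    import com.google.common.hash.BloomFilter;
    import com.google.common.hash.Funnels;

    public class BloomDefaultsSketch {
      public static void main(String[] args) {
        int expectedInsertions = 640000; // new default bloom_size (per blocklet)
        double fpp = 0.00001d;           // new default bloom_fpp
        BloomFilter<CharSequence> filter = BloomFilter.create(
            Funnels.stringFunnel(StandardCharsets.UTF_8), expectedInsertions, fpp);
        filter.put("customer_0001");                               // index one value
        System.out.println(filter.mightContain("customer_0001"));  // true
        System.out.println(filter.mightContain("customer_9999"));  // false with probability ~1-fpp
      }
    }

A larger expected-insertion count and a smaller fpp both enlarge the bit array, trading index size for fewer false positives at query time.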

This closes #2567


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/6351c3a0
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/6351c3a0
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/6351c3a0

Branch: refs/heads/external-format
Commit: 6351c3a077c0fa47390c4b30b05de7d830b387d1
Parents: c29aef8
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Fri Jul 27 11:54:21 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Wed Aug 1 17:43:49 2018 +0800

----------------------------------------------------------------------
 .../datamap/bloom/BloomCoarseGrainDataMapFactory.java          | 6 +++---
 docs/datamap/bloomfilter-datamap-guide.md                      | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/6351c3a0/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
index 652e1fc..80a86cc 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFactory.java
@@ -69,15 +69,15 @@ public class BloomCoarseGrainDataMapFactory extends DataMapFactory<CoarseGrainDa
    * default size for bloom filter, cardinality of the column.
    */
   private static final int DEFAULT_BLOOM_FILTER_SIZE =
-      CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT;
+      CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT * 20;
   /**
    * property for fpp(false-positive-probability) of bloom filter
    */
   private static final String BLOOM_FPP = "bloom_fpp";
   /**
-   * default value for fpp of bloom filter is 1%
+   * default value for fpp of bloom filter is 0.001%
    */
-  private static final double DEFAULT_BLOOM_FILTER_FPP = 0.01d;
+  private static final double DEFAULT_BLOOM_FILTER_FPP = 0.00001d;
 
   /**
    * property for compressing bloom while saving to disk.

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6351c3a0/docs/datamap/bloomfilter-datamap-guide.md
----------------------------------------------------------------------
diff --git a/docs/datamap/bloomfilter-datamap-guide.md b/docs/datamap/bloomfilter-datamap-guide.md
index 325a508..2dba3dc 100644
--- a/docs/datamap/bloomfilter-datamap-guide.md
+++ b/docs/datamap/bloomfilter-datamap-guide.md
@@ -83,8 +83,8 @@ User can create BloomFilter datamap using the Create DataMap DDL:
 | Property | Is Required | Default Value | Description |
 |-------------|----------|--------|---------|
 | INDEX_COLUMNS | YES |  | Carbondata will generate BloomFilter index on these columns. Queries on there columns are usually like 'COL = VAL'. |
-| BLOOM_SIZE | NO | 32000 | This value is internally used by BloomFilter as the number of expected insertions, it will affects the size of BloomFilter index. Since each blocklet has a BloomFilter here, so the value is the approximate records in a blocklet. In another word, the value 32000 * #noOfPagesInBlocklet. The value should be an integer. |
-| BLOOM_FPP | NO | 0.01 | This value is internally used by BloomFilter as the False-Positive Probability, it will affects the size of bloomfilter index as well as the number of hash functions for the BloomFilter. The value should be in range (0, 1). |
+| BLOOM_SIZE | NO | 640000 | This value is internally used by BloomFilter as the number of expected insertions, it will affects the size of BloomFilter index. Since each blocklet has a BloomFilter here, so the default value is the approximate distinct index values in a blocklet assuming that each blocklet contains 20 pages and each page contains 32000 records. The value should be an integer. |
+| BLOOM_FPP | NO | 0.00001 | This value is internally used by BloomFilter as the False-Positive Probability, it will affects the size of bloomfilter index as well as the number of hash functions for the BloomFilter. The value should be in range (0, 1). In one test scenario, a 96GB TPCH customer table with bloom_size=320000 and bloom_fpp=0.00001 will result in 18 false positive samples. |
 | BLOOM_COMPRESS | NO | true | Whether to compress the BloomFilter index files. |
 
 


[23/50] [abbrv] carbondata git commit: [CARBONDATA-2793][32k][Doc] Add 32k support in document

Posted by ja...@apache.org.
[CARBONDATA-2793][32k][Doc] Add 32k support in document

This closes #2572


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/f9b02a5c
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/f9b02a5c
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/f9b02a5c

Branch: refs/heads/external-format
Commit: f9b02a5c1204359b8c6ae20e6ee68174f1c6d4ef
Parents: de92460
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Fri Jul 27 16:10:44 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Wed Aug 1 22:07:28 2018 +0800

----------------------------------------------------------------------
 docs/data-management-on-carbondata.md      | 48 +++++++++++++++++++------
 docs/supported-data-types-in-carbondata.md |  3 ++
 2 files changed, 40 insertions(+), 11 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9b02a5c/docs/data-management-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/data-management-on-carbondata.md b/docs/data-management-on-carbondata.md
index 6aaaaa3..836fff9 100644
--- a/docs/data-management-on-carbondata.md
+++ b/docs/data-management-on-carbondata.md
@@ -137,7 +137,7 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
           
      | Properties | Default value | Description |
      | ---------- | ------------- | ----------- |
-     | LOCAL_DICTIONARY_ENABLE | false | By default, local dictionary will not be enabled for the table | 
+     | LOCAL_DICTIONARY_ENABLE | false | By default, local dictionary will not be enabled for the table |
      | LOCAL_DICTIONARY_THRESHOLD | 10000 | The maximum cardinality for local dictionary generation (range- 1000 to 100000) |
      | LOCAL_DICTIONARY_INCLUDE | all no-dictionary string/varchar columns | Columns for which Local Dictionary is generated. |
      | LOCAL_DICTIONARY_EXCLUDE | none | Columns for which Local Dictionary is not generated |
@@ -240,11 +240,11 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
 	 ```
 	 
    - **Caching at Block or Blocklet Level**
-   
+
      This feature allows you to maintain the cache at Block level, resulting in optimized usage of the memory. The memory consumption is high if the Blocklet level caching is maintained as a Block can have multiple Blocklet.
 	 
 	 Following are the valid values for CACHE_LEVEL:
-	 
+
 	 *Configuration for caching in driver at Block level (default value).*
 	 
 	 ```
@@ -285,21 +285,47 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
 	 ```
 	 ALTER TABLE employee SET TBLPROPERTIES (‘CACHE_LEVEL’=’Blocklet’)
 	 ```
-	 
-	 - **Support Flat folder same as Hive/Parquet**
-	 
+
+    - **Support Flat folder same as Hive/Parquet**
+
 	  This feature allows all carbondata and index files to keep directy under tablepath. Currently all carbondata/carbonindex files written under tablepath/Fact/Part0/Segment_NUM folder and it is not same as hive/parquet folder structure. This feature makes all files written will be directly under tablepath, it does not maintain any segment folder structure.This is useful for interoperability between the execution engines and plugin with other execution engines like hive or presto becomes easier.
-	  
+
 	  Following table property enables this feature and default value is false.
 	  ```
 	   'flat_folder'='true'
-	  ``` 
+	  ```
 	  Example:
 	  ```
 	  CREATE TABLE employee (name String, city String, id int) STORED BY ‘carbondata’ TBLPROPERTIES ('flat_folder'='true')
 	  ```
-	  
-	 
+
+    - **String longer than 32000 characters**
+
+     In common scenarios, the length of string is less than 32000,
+     so carbondata stores the length of content using Short to reduce memory and space consumption.
+     To support string longer than 32000 characters, carbondata introduces a table property called `LONG_STRING_COLUMNS`.
+     For these columns, carbondata internally stores the length of content using Integer.
+
+     You can specify the columns as 'long string column' using below tblProperties:
+
+     ```
+     // specify col1, col2 as long string columns
+     TBLPROPERTIES ('LONG_STRING_COLUMNS'='col1,col2')
+     ```
+
+     Besides, you can also use this property through DataFrame by
+     ```
+     df.format("carbondata")
+       .option("tableName", "carbonTable")
+       .option("long_string_columns", "col1, col2")
+       .save()
+     ```
+
+     If you are using Carbon-SDK, you can specify the datatype of long string column as `varchar`.
+     You can refer to SDKwriterTestCase for example.
+
+     **NOTE:** The LONG_STRING_COLUMNS can only be string/char/varchar columns and cannot be dictionary_include/sort_columns/complex columns.
+
 ## CREATE TABLE AS SELECT
   This function allows user to create a Carbon table from any of the Parquet/Hive/Carbon table. This is beneficial when the user wants to create Carbon table from any other Parquet/Hive table and use the Carbon query engine to query and achieve better query results for cases where Carbon is faster than other file formats. Also this feature can be used for backing up the data.
 
@@ -745,7 +771,7 @@ Users can specify which columns to include and exclude for local dictionary gene
   * If the FORCE option is used, then it auto-converts the data by storing the bad records as NULL before Loading data.
   * If the IGNORE option is used, then bad records are neither loaded nor written to the separate CSV file.
   * In loaded data, if all records are bad records, the BAD_RECORDS_ACTION is invalid and the load operation fails.
-  * The maximum number of characters per column is 32000. If there are more than 32000 characters in a column, data loading will fail.
+  * The default maximum number of characters per column is 32000. If there are more than 32000 characters in a column, please refer to *String longer than 32000 characters* section.
 
   Example:
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f9b02a5c/docs/supported-data-types-in-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/supported-data-types-in-carbondata.md b/docs/supported-data-types-in-carbondata.md
index 7260afe..eb74a2e 100644
--- a/docs/supported-data-types-in-carbondata.md
+++ b/docs/supported-data-types-in-carbondata.md
@@ -35,6 +35,9 @@
     * CHAR
     * VARCHAR
 
+    **NOTE**: For string longer than 32000 characters, use `LONG_STRING_COLUMNS` in table property.
+    Please refer to TBLProperties in [CreateTable](https://github.com/apache/carbondata/blob/master/docs/data-management-on-carbondata.md#create-table) for more information.
+
   * Complex Types
     * arrays: ARRAY``<data_type>``
     * structs: STRUCT``<col_name : data_type COMMENT col_comment, ...>``


[33/50] [abbrv] carbondata git commit: [CARBONDATA-2803]fix wrong datasize calculation and Refactoring for better readability and handle local dictionary for older tables

Posted by ja...@apache.org.
[CARBONDATA-2803]fix wrong datasize calculation and Refactoring for better readability and handle local dictionary for older tables

Changes in this PR:
1. Data size was calculated wrongly: the index map contains duplicate paths because it stores all blocklets, so remove the duplicates and keep unique block paths for a correct data size calculation (see the sketch after this list).
2. Refactored code in CarbonTableInputFormat for better readability.
3. If the table properties contain the local dictionary enable property as null, it is an old table, so put the value 'false' into the properties map.
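
As a minimal sketch of fix 1 (illustrative paths only, not the actual SegmentFileStore code, which is Java): an insertion-ordered set keeps one path per block while preserving order, so the data size sums each block exactly once.

import scala.collection.mutable

// hypothetical blocklet-level entries read from one carbonindex file;
// the same block file shows up once per blocklet
val blockletPaths = Seq(
  "/store/t1/part-0-0_batchno0-0-1.carbondata",
  "/store/t1/part-0-0_batchno0-0-1.carbondata",
  "/store/t1/part-0-1_batchno0-0-1.carbondata")

// LinkedHashSet removes the duplicates but preserves insertion order,
// so each block contributes to the data size calculation only once
val uniqueBlocks = mutable.LinkedHashSet(blockletPaths: _*).toList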

This closes #2583


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/7e93d7b8
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/7e93d7b8
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/7e93d7b8

Branch: refs/heads/external-format
Commit: 7e93d7b8707c36bf3f8d1f153b67a8cb997fa0f4
Parents: bd6abbb
Author: akashrn5 <ak...@gmail.com>
Authored: Mon Jul 30 19:41:34 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Aug 2 17:00:05 2018 +0530

----------------------------------------------------------------------
 .../indexstore/blockletindex/BlockDataMap.java  |  2 +-
 .../core/metadata/SegmentFileStore.java         | 23 ++------
 .../core/metadata/schema/table/CarbonTable.java | 10 ++--
 .../hadoop/api/CarbonTableInputFormat.java      | 61 ++++++++++++--------
 .../FlatFolderTableLoadingTestCase.scala        | 31 ++++++++++
 5 files changed, 81 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/7e93d7b8/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
index f4bb58e..0875e75 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
@@ -588,7 +588,7 @@ public class BlockDataMap extends CoarseGrainDataMap
 
   private boolean useMinMaxForExecutorPruning(FilterResolverIntf filterResolverIntf) {
     boolean useMinMaxForPruning = false;
-    if (this instanceof BlockletDataMap) {
+    if (!isLegacyStore && this instanceof BlockletDataMap) {
       useMinMaxForPruning = BlockletDataMapUtil
           .useMinMaxForBlockletPruning(filterResolverIntf, getMinMaxCacheColumns());
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/7e93d7b8/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
index 111e444..1acf0ea 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
@@ -16,22 +16,9 @@
  */
 package org.apache.carbondata.core.metadata;
 
-import java.io.BufferedReader;
-import java.io.BufferedWriter;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStreamWriter;
-import java.io.Serializable;
+import java.io.*;
 import java.nio.charset.Charset;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
+import java.util.*;
 
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
@@ -511,11 +498,13 @@ public class SegmentFileStore {
     for (Map.Entry<String, byte[]> entry : carbonIndexMap.entrySet()) {
       List<DataFileFooter> indexInfo =
           fileFooterConverter.getIndexInfo(entry.getKey(), entry.getValue());
-      List<String> blocks = new ArrayList<>();
+      // carbonindex file stores blocklets so block filename will be duplicated, use set to remove
+      // duplicates
+      Set<String> blocks = new LinkedHashSet<>();
       for (DataFileFooter footer : indexInfo) {
         blocks.add(footer.getBlockInfo().getTableBlockInfo().getFilePath());
       }
-      indexFilesMap.put(entry.getKey(), blocks);
+      indexFilesMap.put(entry.getKey(), new ArrayList<>(blocks));
       boolean added = false;
       for (Map.Entry<String, List<String>> mergeFile : indexFileStore
           .getCarbonMergeFileToIndexFilesMap().entrySet()) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/7e93d7b8/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
index 850a791..14052f8 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
@@ -1160,10 +1160,11 @@ public class CarbonTable implements Serializable {
    * @param tableInfo
    */
   private static void setLocalDictInfo(CarbonTable table, TableInfo tableInfo) {
-    String isLocalDictionaryEnabled = tableInfo.getFactTable().getTableProperties()
-        .get(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE);
-    String localDictionaryThreshold = tableInfo.getFactTable().getTableProperties()
-        .get(CarbonCommonConstants.LOCAL_DICTIONARY_THRESHOLD);
+    Map<String, String> tableProperties = tableInfo.getFactTable().getTableProperties();
+    String isLocalDictionaryEnabled =
+        tableProperties.get(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE);
+    String localDictionaryThreshold =
+        tableProperties.get(CarbonCommonConstants.LOCAL_DICTIONARY_THRESHOLD);
     if (null != isLocalDictionaryEnabled) {
       table.setLocalDictionaryEnabled(Boolean.parseBoolean(isLocalDictionaryEnabled));
       if (null != localDictionaryThreshold) {
@@ -1176,6 +1177,7 @@ public class CarbonTable implements Serializable {
       // in case of old tables, local dictionary enable property will not be present in
       // tableProperties, so disable the local dictionary generation
       table.setLocalDictionaryEnabled(Boolean.parseBoolean("false"));
+      tableProperties.put(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE, "false");
     }
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/7e93d7b8/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
index f53a1d7..be72983 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java
@@ -198,30 +198,7 @@ public class CarbonTableInputFormat<T> extends CarbonInputFormat<T> {
         getFilteredSegment(job, new ArrayList<>(validAndInProgressSegments), false,
             readCommittedScope);
     // Clean the updated segments from memory if the update happens on segments
-    List<Segment> toBeCleanedSegments = new ArrayList<>();
-    for (Segment filteredSegment : filteredSegmentToAccess) {
-      boolean refreshNeeded =
-          DataMapStoreManager.getInstance().getTableSegmentRefresher(carbonTable)
-              .isRefreshNeeded(filteredSegment,
-                  updateStatusManager.getInvalidTimestampRange(filteredSegment.getSegmentNo()));
-      if (refreshNeeded) {
-        toBeCleanedSegments.add(filteredSegment);
-      }
-    }
-    // Clean segments if refresh is needed
-    for (Segment segment : filteredSegmentToAccess) {
-      if (DataMapStoreManager.getInstance().getTableSegmentRefresher(carbonTable)
-          .isRefreshNeeded(segment.getSegmentNo())) {
-        toBeCleanedSegments.add(segment);
-      }
-    }
-
-
-    if (toBeCleanedSegments.size() > 0) {
-      DataMapStoreManager.getInstance()
-          .clearInvalidSegments(getOrCreateCarbonTable(job.getConfiguration()),
-              toBeCleanedSegments);
-    }
+    refreshSegmentCacheIfRequired(job, carbonTable, updateStatusManager, filteredSegmentToAccess);
 
     // process and resolve the expression
     Expression filter = getFilterPredicates(job.getConfiguration());
@@ -266,6 +243,42 @@ public class CarbonTableInputFormat<T> extends CarbonInputFormat<T> {
   }
 
   /**
+   * Method to check and refresh segment cache
+   *
+   * @param job
+   * @param carbonTable
+   * @param updateStatusManager
+   * @param filteredSegmentToAccess
+   * @throws IOException
+   */
+  public void refreshSegmentCacheIfRequired(JobContext job, CarbonTable carbonTable,
+      SegmentUpdateStatusManager updateStatusManager, List<Segment> filteredSegmentToAccess)
+      throws IOException {
+    List<Segment> toBeCleanedSegments = new ArrayList<>();
+    for (Segment filteredSegment : filteredSegmentToAccess) {
+      boolean refreshNeeded =
+          DataMapStoreManager.getInstance().getTableSegmentRefresher(carbonTable)
+              .isRefreshNeeded(filteredSegment,
+                  updateStatusManager.getInvalidTimestampRange(filteredSegment.getSegmentNo()));
+      if (refreshNeeded) {
+        toBeCleanedSegments.add(filteredSegment);
+      }
+    }
+    // Clean segments if refresh is needed
+    for (Segment segment : filteredSegmentToAccess) {
+      if (DataMapStoreManager.getInstance().getTableSegmentRefresher(carbonTable)
+          .isRefreshNeeded(segment.getSegmentNo())) {
+        toBeCleanedSegments.add(segment);
+      }
+    }
+    if (toBeCleanedSegments.size() > 0) {
+      DataMapStoreManager.getInstance()
+          .clearInvalidSegments(getOrCreateCarbonTable(job.getConfiguration()),
+              toBeCleanedSegments);
+    }
+  }
+
+  /**
    * Below method will be used to get the filter segments when query is fired on pre Aggregate
    * and main table in case of streaming.
    * For Pre Aggregate rules it will set all the valid segments for both streaming and

http://git-wip-us.apache.org/repos/asf/carbondata/blob/7e93d7b8/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala
index 68f8ca7..97bcb5f 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/flatfolder/FlatFolderTableLoadingTestCase.scala
@@ -149,7 +149,38 @@ class FlatFolderTableLoadingTestCase extends QueryTest with BeforeAndAfterAll {
     sql("clean files for table flatfolder_delete")
     assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles().filter(_.getName.endsWith(CarbonTablePath.MERGE_INDEX_FILE_EXT)).length == 1)
     assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles().filter(_.getName.endsWith(CarbonCommonConstants.DELETE_DELTA_FILE_EXT)).length == 0)
+    sql("drop table if exists flatfolder_delete")
+  }
 
+  test("merge index flat folder and delete delta issue with GLOBAL SORT") {
+    sql("drop table if exists flatfolder_delete")
+    sql(
+      """
+        | CREATE TABLE flatfolder_delete (empname String, designation String, doj Timestamp,
+        |  workgroupcategory int, workgroupcategoryname String, deptno int, deptname String,
+        |  projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance int,
+        |  utilization int,salary int,empno int)
+        | STORED BY 'org.apache.carbondata.format' tblproperties('flat_folder'='true', 'SORT_SCOPE'='GLOBAL_SORT' )
+      """.stripMargin)
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE flatfolder_delete OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"', 'GLOBAL_SORT_PARTITIONS'='4')""")
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE flatfolder_delete OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"', 'GLOBAL_SORT_PARTITIONS'='4')""")
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE flatfolder_delete OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"', 'GLOBAL_SORT_PARTITIONS'='4')""")
+    sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE flatfolder_delete OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"', 'GLOBAL_SORT_PARTITIONS'='4')""")
+    val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default", "flatfolder_delete")
+    sql(s"""delete from flatfolder_delete where empname='anandh'""")
+    sql(s"""delete from flatfolder_delete where empname='arvind'""")
+    sql(s"""select * from flatfolder_delete""").show()
+    assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles()
+             .filter(_.getName.endsWith(CarbonCommonConstants.DELETE_DELTA_FILE_EXT)).length == 8)
+    sql("Alter table flatfolder_delete compact 'minor'")
+    assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles()
+             .filter(_.getName.endsWith(CarbonCommonConstants.DELETE_DELTA_FILE_EXT)).length == 8)
+    sql("clean files for table flatfolder_delete")
+    assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles()
+             .filter(_.getName.endsWith(CarbonTablePath.MERGE_INDEX_FILE_EXT)).length == 1)
+    assert(FileFactory.getCarbonFile(carbonTable.getTablePath).listFiles()
+             .filter(_.getName.endsWith(CarbonCommonConstants.DELETE_DELTA_FILE_EXT)).length == 0)
+    sql("drop table if exists flatfolder_delete")
   }
 
   override def afterAll = {


[50/50] [abbrv] carbondata git commit: [CARBONDATA-2768][CarbonStore] Fix error in tests for external csv format

Posted by ja...@apache.org.
[CARBONDATA-2768][CarbonStore] Fix error in tests for external csv format

In the previous implementation, earlier than PR2495, we only supported csv as
an external format for carbondata, and we validated that restriction while
creating the table. PR2495 added kafka support, so it removed the validation,
but it did not fix the related test case, which causes a failure in the current version.
This PR fixes the failing test case.
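
For reference, a standalone sketch of the restored validation; the supported-format set mirrors the new FileFormatProperties below, while the method name and exception type are illustrative (the real Scala command code raises an analysis exception).

// supported external formats after PR2495 added kafka
val supportedExternalFormats = Set("csv", "kafka")

def validateExternalFormat(format: String): Unit = {
  if (!supportedExternalFormats.contains(format.toLowerCase)) {
    throw new IllegalArgumentException(
      s"Unsupported external format $format, currently carbondata only support " +
        s"${supportedExternalFormats.mkString(", ")} as external file format")
  }
}

validateExternalFormat("csv")        // passes
// validateExternalFormat("parquet") // fails: Unsupported external format parquet ...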

This closes #2537


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/12ab5799
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/12ab5799
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/12ab5799

Branch: refs/heads/external-format
Commit: 12ab5799271a4e37d31dfd583f6ff83f71064ee6
Parents: 1a26ac1
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Fri Aug 3 14:34:32 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Tue Aug 7 21:08:19 2018 +0800

----------------------------------------------------------------------
 .../core/statusmanager/FileFormatProperties.java   | 17 +++++++++++++++++
 .../carbondata/hadoop/api/CarbonInputFormat.java   |  2 +-
 .../externalformat/CsvBasedCarbonTableSuite.scala  |  2 +-
 .../command/carbonTableSchemaCommon.scala          | 10 +++++++---
 4 files changed, 26 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/12ab5799/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormatProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormatProperties.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormatProperties.java
index 862c36c..4372b44 100644
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormatProperties.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/FileFormatProperties.java
@@ -17,10 +17,27 @@
 
 package org.apache.carbondata.core.statusmanager;
 
+import java.util.HashSet;
+import java.util.Set;
+
 /**
  * Provides the constant name for the file format properties
  */
 public class FileFormatProperties {
+  private static final Set<String> SUPPORTED_EXTERNAL_FORMAT = new HashSet<String>() {
+    {
+      add("csv");
+      add("kafka");
+    }
+  };
+
+  public static boolean isExternalFormatSupported(String format) {
+    return SUPPORTED_EXTERNAL_FORMAT.contains(format.toLowerCase());
+  }
+  public static Set<String> getSupportedExternalFormat() {
+    return SUPPORTED_EXTERNAL_FORMAT;
+  }
+
   public static class CSV {
     public static final String HEADER = "csv.header";
     public static final String DELIMITER = "csv.delimiter";

http://git-wip-us.apache.org/repos/asf/carbondata/blob/12ab5799/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
index 5fdc522..b6fc4b3 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
@@ -55,10 +55,10 @@ import org.apache.carbondata.core.scan.model.QueryModelBuilder;
 import org.apache.carbondata.core.stats.QueryStatistic;
 import org.apache.carbondata.core.stats.QueryStatisticsConstants;
 import org.apache.carbondata.core.stats.QueryStatisticsRecorder;
-import org.apache.carbondata.core.util.BlockletDataMapUtil;
 import org.apache.carbondata.core.statusmanager.FileFormat;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
+import org.apache.carbondata.core.util.BlockletDataMapUtil;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.CarbonTimeStatisticsFactory;
 import org.apache.carbondata.core.util.CarbonUtil;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/12ab5799/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/externalformat/CsvBasedCarbonTableSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/externalformat/CsvBasedCarbonTableSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/externalformat/CsvBasedCarbonTableSuite.scala
index 7f07878..85ccc10 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/externalformat/CsvBasedCarbonTableSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/externalformat/CsvBasedCarbonTableSuite.scala
@@ -151,7 +151,7 @@ class CsvBasedCarbonTableSuite extends QueryTest
       )
     }
 
-    assert(expectedException.getMessage.contains("Currently we only support csv as external file format"))
+    assert(expectedException.getMessage.contains("Unsupported external format parquet"))
   }
 
   test("test csv based carbon table: the sequence of header does not match schema") {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/12ab5799/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
index 56e91f9..2fdbba7 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
@@ -24,6 +24,7 @@ import scala.collection.JavaConverters._
 import scala.collection.mutable
 import scala.collection.mutable.ListBuffer
 
+import org.apache.commons.lang3.StringUtils
 import org.apache.spark.SparkContext
 import org.apache.spark.sql.SQLContext
 import org.apache.spark.sql.catalyst.TableIdentifier
@@ -42,7 +43,7 @@ import org.apache.carbondata.core.metadata.schema._
 import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, RelationIdentifier, TableInfo, TableSchema}
 import org.apache.carbondata.core.metadata.schema.table.column.{ColumnSchema, ParentColumnTableRelation}
 import org.apache.carbondata.core.service.impl.ColumnUniqueIdGenerator
-import org.apache.carbondata.core.statusmanager.{LoadMetadataDetails, SegmentUpdateStatusManager}
+import org.apache.carbondata.core.statusmanager.{FileFormatProperties, LoadMetadataDetails, SegmentUpdateStatusManager}
 import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil, DataTypeUtil}
 import org.apache.carbondata.processing.loading.FailureCauses
 import org.apache.carbondata.processing.loading.model.CarbonLoadModel
@@ -892,8 +893,11 @@ class TableNewProcessor(cm: TableModel) {
     tableInfo.setFactTable(tableSchema)
     val format = cm.tableProperties.get(CarbonCommonConstants.FORMAT)
     if (format.isDefined) {
-      if (!format.get.equalsIgnoreCase("csv")) {
-        CarbonException.analysisException(s"Currently we only support csv as external file format")
+      if (!FileFormatProperties.isExternalFormatSupported(format.get)) {
+        CarbonException.analysisException(
+          s"Unsupported external format ${format.get}, currently carbondata only support" +
+          s" ${FileFormatProperties.getSupportedExternalFormat.asScala.mkString(", ")}" +
+          s" as external file format")
       }
       tableInfo.setFormat(format.get)
       val formatProperties = cm.tableProperties.filter(pair =>


[40/50] [abbrv] carbondata git commit: [CARBONDATA-2815][Doc] Add documentation for spilling memory and datamap rebuild

Posted by ja...@apache.org.
[CARBONDATA-2815][Doc] Add documentation for spilling memory and datamap rebuild

Add documentation for: 1. spilling unsafe memory for data loading; 2. datamap rebuild for index datamap
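
A hedged sketch of using the newly documented spill setting from application code: the property key is taken from the diff below, while the use of CarbonProperties.addProperty and the chosen value are assumptions for illustration.

import org.apache.carbondata.core.util.CarbonProperties

// spill roughly half of the in-memory sort pages to disk when unsafe memory
// is insufficient (only effective for LOCAL_SORT / BATCH_SORT data loads)
CarbonProperties.getInstance()
  .addProperty("carbon.load.sortmemory.spill.percentage", "50")

// an index datamap created with deferred rebuild stays disabled until it is
// rebuilt manually via the REBUILD DATAMAP command; see
// docs/datamap/datamap-management.md for the exact statement syntax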

This closes #2604


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/6d6a5b2e
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/6d6a5b2e
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/6d6a5b2e

Branch: refs/heads/external-format
Commit: 6d6a5b2eb7eb30a39438019ddfed48dacd14a06f
Parents: 12725b7
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Thu Aug 2 22:39:49 2018 +0800
Committer: chenliang613 <ch...@huawei.com>
Committed: Sat Aug 4 08:53:53 2018 +0800

----------------------------------------------------------------------
 docs/configuration-parameters.md   |  3 ++-
 docs/datamap/datamap-management.md | 16 ++++++++++++----
 2 files changed, 14 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/6d6a5b2e/docs/configuration-parameters.md
----------------------------------------------------------------------
diff --git a/docs/configuration-parameters.md b/docs/configuration-parameters.md
index 77cf230..eee85e2 100644
--- a/docs/configuration-parameters.md
+++ b/docs/configuration-parameters.md
@@ -69,7 +69,8 @@ This section provides the details of all the configurations required for CarbonD
 | carbon.options.bad.record.path |  | Specifies the HDFS path where bad records are stored. By default the value is Null. This path must to be configured by the user if bad record logger is enabled or bad record action redirect. | |
 | carbon.enable.vector.reader | true | This parameter increases the performance of select queries as it fetch columnar batch of size 4*1024 rows instead of fetching data row by row. | |
 | carbon.blockletgroup.size.in.mb | 64 MB | The data are read as a group of blocklets which are called blocklet groups. This parameter specifies the size of the blocklet group. Higher value results in better sequential IO access.The minimum value is 16MB, any value lesser than 16MB will reset to the default value (64MB). |  |
-| carbon.task.distribution | block | **block**: Setting this value will launch one task per block. This setting is suggested in case of concurrent queries and queries having big shuffling scenarios. **custom**: Setting this value will group the blocks and distribute it uniformly to the available resources in the cluster. This enhances the query performance but not suggested in case of concurrent queries and queries having big shuffling scenarios. **blocklet**: Setting this value will launch one task per blocklet. This setting is suggested in case of concurrent queries and queries having big shuffling scenarios. **merge_small_files**: Setting this value will merge all the small partitions to a size of (128 MB is the default value of "spark.sql.files.maxPartitionBytes",it is configurable) during querying. The small partitions are combined to a map task to reduce the number of read task. This enhances the performance. | | 
+| carbon.task.distribution | block | **block**: Setting this value will launch one task per block. This setting is suggested in case of concurrent queries and queries having big shuffling scenarios. **custom**: Setting this value will group the blocks and distribute it uniformly to the available resources in the cluster. This enhances the query performance but not suggested in case of concurrent queries and queries having big shuffling scenarios. **blocklet**: Setting this value will launch one task per blocklet. This setting is suggested in case of concurrent queries and queries having big shuffling scenarios. **merge_small_files**: Setting this value will merge all the small partitions to a size of (128 MB is the default value of "spark.sql.files.maxPartitionBytes",it is configurable) during querying. The small partitions are combined to a map task to reduce the number of read task. This enhances the performance. | |
+| carbon.load.sortmemory.spill.percentage | 0 | If we use unsafe memory during data loading, this configuration will be used to control the behavior of spilling inmemory pages to disk. Internally in Carbondata, during sorting carbondata will sort data in pages and add them in unsafe memory. If the memory is insufficient, carbondata will spill the pages to disk and generate sort temp file. This configuration controls how many pages in memory will be spilled to disk based size. The size can be calculated by multiplying this configuration value with 'carbon.sort.storage.inmemory.size.inmb'. For example, default value 0 means that no pages in unsafe memory will be spilled and all the newly sorted data will be spilled to disk; Value 50 means that if the unsafe memory is insufficient, about half of pages in the unsafe memory will be spilled to disk while value 100 means that almost all pages in unsafe memory will be spilled. **Note**: This configuration only works for 'LOCAL_SORT' and 'BATCH_SORT' and the actual spilling behavior may slightly be different in each data loading. | Integer values between 0 and 100 |
 
 * **Compaction Configuration**
   

http://git-wip-us.apache.org/repos/asf/carbondata/blob/6d6a5b2e/docs/datamap/datamap-management.md
----------------------------------------------------------------------
diff --git a/docs/datamap/datamap-management.md b/docs/datamap/datamap-management.md
index 01bb69f..1695a23 100644
--- a/docs/datamap/datamap-management.md
+++ b/docs/datamap/datamap-management.md
@@ -22,13 +22,13 @@ Currently, there are 5 DataMap implementation in CarbonData.
 | timeseries       | time dimension rollup table.             | event_time, xx_granularity, please refer to [Timeseries DataMap](https://github.com/apache/carbondata/blob/master/docs/datamap/timeseries-datamap-guide.md) | Automatic        |
 | mv               | multi-table pre-aggregate table,         | No DMPROPERTY is required                | Manual           |
 | lucene           | lucene indexing for text column          | index_columns to specifying the index columns | Manual/Automatic |
-| bloom            | bloom filter for high cardinality column, geospatial column | index_columns to specifying the index columns | Manual/Automatic |
+| bloomfilter      | bloom filter for high cardinality column, geospatial column | index_columns to specifying the index columns | Manual/Automatic |
 
 ## DataMap Management
 
 There are two kinds of management semantic for DataMap.
 
-1. Autmatic Refresh: Create datamap without `WITH DEFERED REBUILD` in the statement
+1. Automatic Refresh: Create datamap without `WITH DEFERED REBUILD` in the statement, which is by default.
 2. Manual Refresh: Create datamap with `WITH DEFERED REBUILD` in the statement
 
 ### Automatic Refresh
@@ -51,15 +51,23 @@ If user do want to perform above operations on the main table, user can first dr
 
 If user drop the main table, the datamap will be dropped immediately too.
 
+We do recommend you to use this management for index datamap.
+
 ### Manual Refresh
 
 When user creates a datamap specifying maunal refresh semantic, the datamap is created with status *disabled* and query will NOT use this datamap until user can issue REBUILD DATAMAP command to build the datamap. For every REBUILD DATAMAP command, system will trigger a full rebuild of the datamap. After rebuild is done, system will change datamap status to *enabled*, so that it can be used in query rewrite.
 
-For every new data loading, data update, delete, the related datamap will be made *disabled*.
+For every new data loading, data update, delete, the related datamap will be made *disabled*,
+which means that the following queries will not benefit from the datamap before it becomes *enabled* again.
 
 If the main table is dropped by user, the related datamap will be dropped immediately.
 
-*Note: If you are creating a datamap on external table, you need to do manual managment of the datamap.*
+**Note**:
++ If you are creating a datamap on external table, you need to do manual management of the datamap.
++ For index datamap such as BloomFilter datamap, there is no need to do manual refresh.
+ By default it is automatic refresh,
+ which means its data will get refreshed immediately after the datamap is created or the main table is loaded.
+ Manual refresh on this datamap will has no impact.
 
 
 


[09/50] [abbrv] carbondata git commit: [HOTFIX] Removed file existence check to improve dataMap loading performance

Posted by ja...@apache.org.
[HOTFIX] Removed file existence check to improve dataMap loading performance

Problem
DataMap loading performance degraded after adding file existence check.

Analysis
When the carbonIndex file is read and the map from carbondata file path to its metadata info is prepared, the physical existence of every file is checked, which in the case of an HDFS file system is a namenode call. This degrades the dataMap loading performance. The check had been added to avoid failures in the following case:

Handle the IUD scenario where, after a delete operation, the carbondata file is deleted but its entry still exists in the index file.
Fix
Modified the code to check for physical file existence only when an IUD operation has happened on the table (a simplified sketch follows below).
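
A simplified, hypothetical sketch of the new behaviour (not the actual BlockletDataMapUtil code): the map is built without an up-front existence check per block, and a block whose file is already gone simply gets no entry.

import java.io.File

// hypothetical stand-in for BlockMetaInfo creation: returns None when the
// block file no longer exists (e.g. removed by an IUD or clean-files operation)
def createBlockMetaInfo(path: String): Option[Long] = {
  val f = new File(path)
  if (f.exists()) Some(f.length()) else None
}

// no blanket existence check while building the map; missing blocks are skipped
def buildBlockMetaInfoMap(blockPaths: Seq[String]): Map[String, Long] =
  blockPaths.flatMap(p => createBlockMetaInfo(p).map(size => p -> size)).toMap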

This closes #2560


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/fd747a3e
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/fd747a3e
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/fd747a3e

Branch: refs/heads/external-format
Commit: fd747a3eacee7196bc290e3ee17333627bf485a9
Parents: 3b9efed
Author: manishgupta88 <to...@gmail.com>
Authored: Fri Jul 27 14:11:11 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Mon Jul 30 13:03:35 2018 +0530

----------------------------------------------------------------------
 .../carbondata/core/util/BlockletDataMapUtil.java   | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/fd747a3e/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java b/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
index 86e9f9c..db41e73 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/BlockletDataMapUtil.java
@@ -115,8 +115,14 @@ public class BlockletDataMapUtil {
         CarbonTable.updateTableByTableInfo(carbonTable, carbonTable.getTableInfo());
       }
       String blockPath = footer.getBlockInfo().getTableBlockInfo().getFilePath();
-      if (null == blockMetaInfoMap.get(blockPath) && FileFactory.isFileExist(blockPath)) {
-        blockMetaInfoMap.put(blockPath, createBlockMetaInfo(fileNameToMetaInfoMapping, blockPath));
+      if (null == blockMetaInfoMap.get(blockPath)) {
+        BlockMetaInfo blockMetaInfo = createBlockMetaInfo(fileNameToMetaInfoMapping, blockPath);
+        // if blockMetaInfo is null that means the file has been deleted from the file system.
+        // This can happen in case IUD scenarios where after deleting or updating the data the
+        // complete block is deleted but the entry still exists in index or merge index file
+        if (null != blockMetaInfo) {
+          blockMetaInfoMap.put(blockPath, blockMetaInfo);
+        }
       }
     }
     return blockMetaInfoMap;
@@ -152,10 +158,14 @@ public class BlockletDataMapUtil {
   }
 
   private static BlockMetaInfo createBlockMetaInfo(
-      Map<String, BlockMetaInfo> fileNameToMetaInfoMapping, String carbonDataFile) {
+      Map<String, BlockMetaInfo> fileNameToMetaInfoMapping, String carbonDataFile)
+      throws IOException {
     FileFactory.FileType fileType = FileFactory.getFileType(carbonDataFile);
     switch (fileType) {
       case LOCAL:
+        if (!FileFactory.isFileExist(carbonDataFile)) {
+          return null;
+        }
         CarbonFile carbonFile = FileFactory.getCarbonFile(carbonDataFile, fileType);
         return new BlockMetaInfo(new String[] { "localhost" }, carbonFile.getSize());
       default:


[13/50] [abbrv] carbondata git commit: [CARBONDATA-2801]Added documentation for flat folder

Posted by ja...@apache.org.
[CARBONDATA-2801]Added documentation for flat folder

[CARBONDATA-2801]Added documentation for flat folder

This closes #2582


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/790cde87
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/790cde87
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/790cde87

Branch: refs/heads/external-format
Commit: 790cde876f00c345dbdedd071276f2f6d6fbf7ec
Parents: 7b53890
Author: ravipesala <ra...@gmail.com>
Authored: Mon Jul 30 18:15:32 2018 +0530
Committer: chenliang613 <ch...@huawei.com>
Committed: Tue Jul 31 22:11:58 2018 +0800

----------------------------------------------------------------------
 docs/data-management-on-carbondata.md | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/790cde87/docs/data-management-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/data-management-on-carbondata.md b/docs/data-management-on-carbondata.md
index 27cdab6..28bc7d3 100644
--- a/docs/data-management-on-carbondata.md
+++ b/docs/data-management-on-carbondata.md
@@ -240,16 +240,18 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
 	 ```
 	 
    - **Caching at Block or Blocklet Level**
+   
      This feature allows you to maintain the cache at Block level, resulting in optimized usage of the memory. The memory consumption is high if the Blocklet level caching is maintained as a Block can have multiple Blocklet.
 	 
 	 Following are the valid values for CACHE_LEVEL:
-	 * Configuration for caching in driver at Block level (default value).
+	 
+	 *Configuration for caching in driver at Block level (default value).*
 	 
 	 ```
 	 CACHE_LEVEL= ‘BLOCK’
 	 ```
 	 
-	 * Configuration for caching in driver at Blocklet level.
+	 *Configuration for caching in driver at Blocklet level.*
 	 
 	 ```
 	 CACHE_LEVEL= ‘BLOCKLET’
@@ -284,6 +286,20 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
 	 ALTER TABLE employee SET TBLPROPERTIES (‘CACHE_LEVEL’=’Blocklet’)
 	 ```
 	 
+	 - **Support Flat folder same as Hive/Parquet**
+	 
+	  This feature allows all carbondata and index files to keep directy under tablepath. Currently all carbondata/carbonindex files written under tablepath/Fact/Part0/Segment_NUM folder and it is not same as hive/parquet folder structure. This feature makes all files written will be directly under tablepath, it does not maintain any segment folder structure.This is useful for interoperability between the execution engines and plugin with other execution engines like hive or presto becomes easier.
+	  
+	  Following table property enables this feature and default value is false.
+	  ```
+	   'flat_folder'='true'
+	  ``` 
+	  Example:
+	  ```
+	  CREATE TABLE employee (name String, city String, id int) STORED BY ‘carbondata’ TBLPROPERTIES ('flat_folder'='true')
+	  ```
+	  
+	 
 ## CREATE TABLE AS SELECT
   This function allows user to create a Carbon table from any of the Parquet/Hive/Carbon table. This is beneficial when the user wants to create Carbon table from any other Parquet/Hive table and use the Carbon query engine to query and achieve better query results for cases where Carbon is faster than other file formats. Also this feature can be used for backing up the data.
 


[06/50] [abbrv] carbondata git commit: [CARBONDATA-2784][CARBONDATA-2786][SDK writer] Fixed:Forever blocking wait with more than 21 batch of data

Posted by ja...@apache.org.
[CARBONDATA-2784][CARBONDATA-2786][SDK writer] Fixed:Forever blocking wait with more than 21 batch of data

problem: [CARBONDATA-2784]
[SDK writer] Forever blocking wait with more than 21 batches of data, when the consumer is dead due to a data loading exception (bad record / out of memory)

root cause:
When the consumer dies due to a data loading exception, the writer is forcefully closed, but queue.clear() cleared only a snapshot of the entries (10 batches) and close was set to true only after that. In the window between clear() and close = true, more than 10 batches of data can again be put into the queue; for the 11th batch, queue.put() blocks forever because the consumer is dead.

Solution:
Set close = true before clearing the queue. This stops write() from adding more batches to the queue (a simplified sketch follows below).
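
A stripped-down sketch of the ordering the fix relies on; it is not the real CarbonOutputIteratorWrapper (which batches rows before queueing), only an illustration of why close must be published before the queue is drained.

import java.util.concurrent.ArrayBlockingQueue

class OutputIteratorSketch[T] {
  private val queue = new ArrayBlockingQueue[T](10)
  @volatile private var close = false

  // once close is visible the producer stops adding rows; otherwise put()
  // may block when the queue is full and the consumer is dead
  def write(row: T): Unit =
    if (!close) queue.put(row)

  def forceClose(): Unit = {
    close = true   // 1) stop write() from refilling the queue
    queue.clear()  // 2) then drain it to unblock a producer stuck in put()
  }
}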

problem [CARBONDATA-2786] NPE when SDK writer tries to write a file

solution and cause:
In CarbonProperties.java (changed in #2387), after systemLocation = getStorePath();
the null validation for systemLocation is missing. It can be null in the SDK case,
because a store location is not applicable for the SDK. Add a null validation.

This closes #2561


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/fc8510a1
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/fc8510a1
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/fc8510a1

Branch: refs/heads/external-format
Commit: fc8510a112eaeab951b4bfc2c2a45bda45b6d757
Parents: 0e45f3a
Author: ajantha-bhat <aj...@gmail.com>
Authored: Thu Jul 26 00:35:36 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Sun Jul 29 19:57:43 2018 +0530

----------------------------------------------------------------------
 .../org/apache/carbondata/core/util/CarbonProperties.java    | 6 ++++--
 .../loading/iterator/CarbonOutputIteratorWrapper.java        | 8 ++++++--
 2 files changed, 10 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/fc8510a1/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
index 004eb74..8a91a43 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
@@ -1532,8 +1532,10 @@ public final class CarbonProperties {
     if (systemLocation == null) {
       systemLocation = getStorePath();
     }
-    systemLocation = CarbonUtil.checkAndAppendFileSystemURIScheme(systemLocation);
-    systemLocation = FileFactory.getUpdatedFilePath(systemLocation);
+    if (systemLocation != null) {
+      systemLocation = CarbonUtil.checkAndAppendFileSystemURIScheme(systemLocation);
+      systemLocation = FileFactory.getUpdatedFilePath(systemLocation);
+    }
     return systemLocation + CarbonCommonConstants.FILE_SEPARATOR + "_system";
   }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/fc8510a1/processing/src/main/java/org/apache/carbondata/processing/loading/iterator/CarbonOutputIteratorWrapper.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/iterator/CarbonOutputIteratorWrapper.java b/processing/src/main/java/org/apache/carbondata/processing/loading/iterator/CarbonOutputIteratorWrapper.java
index deb628c..a00b562 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/iterator/CarbonOutputIteratorWrapper.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/iterator/CarbonOutputIteratorWrapper.java
@@ -94,9 +94,13 @@ public class CarbonOutputIteratorWrapper extends CarbonIterator<Object[]> {
     }
     try {
       if (isForceClose) {
-        // unblock the queue.put on the other thread and clear the queue.
-        queue.clear();
+        // first make close is set to true, when force close happens because of dead consumer.
+        // so that, write() method will stop taking input rows.
         close = true;
+        // once write() method stops taking input rows, clear the queue.
+        // If queue is cleared before close is set to true, then queue will be again filled
+        // by .write() and it can go to blocking put() forever as consumer is dead.
+        queue.clear();
         return;
       }
       // below code will ensure that the last RowBatch is consumed properly


[42/50] [abbrv] carbondata git commit: [CARBONDATA-2750] Updated documentation on Local Dictionary Support

Posted by ja...@apache.org.
[CARBONDATA-2750] Updated documentation on Local Dictionary Support

Updated documentation on Local Dictionary support. Changed the default so that local dictionary is disabled (false) for the carbondata table.
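
For reference, a hedged sketch of enabling local dictionary explicitly now that it is disabled by default; the helper method, table and column names are assumptions, and the property keys follow the table in the diff below.

import org.apache.spark.sql.SparkSession

// `spark` is assumed to be a CarbonSession-enabled SparkSession
def createTableWithLocalDictionary(spark: SparkSession): Unit = {
  spark.sql(
    """
      | CREATE TABLE IF NOT EXISTS product_sales (
      |   product_name STRING,
      |   city STRING,
      |   quantity INT)
      | STORED BY 'carbondata'
      | TBLPROPERTIES (
      |   'LOCAL_DICTIONARY_ENABLE' = 'true',
      |   'LOCAL_DICTIONARY_THRESHOLD' = '10000',
      |   'LOCAL_DICTIONARY_INCLUDE' = 'product_name',
      |   'LOCAL_DICTIONARY_EXCLUDE' = 'city')
    """.stripMargin)
}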

This closes #2590


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/40571b84
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/40571b84
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/40571b84

Branch: refs/heads/external-format
Commit: 40571b8466e27c0e6d234359f537a28da96d8583
Parents: e26a742
Author: praveenmeenakshi56 <pr...@gmail.com>
Authored: Tue Jul 31 15:49:10 2018 +0530
Committer: chenliang613 <ch...@huawei.com>
Committed: Mon Aug 6 16:49:55 2018 +0800

----------------------------------------------------------------------
 docs/data-management-on-carbondata.md | 96 ++++++++++++++++++++----------
 1 file changed, 63 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/40571b84/docs/data-management-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/data-management-on-carbondata.md b/docs/data-management-on-carbondata.md
index 7cf6123..0df9643 100644
--- a/docs/data-management-on-carbondata.md
+++ b/docs/data-management-on-carbondata.md
@@ -144,42 +144,55 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
      ```
 
    - **Local Dictionary Configuration**
-     
-     Local Dictionary is generated only for no-dictionary string/varchar datatype columns. It helps in:
-     1. Getting more compression on dimension columns with less cardinality.
-     2. Filter queries and full scan queries on No-dictionary columns with local dictionary will be faster as filter will be done on encoded data.
-     3. Reducing the store size and memory footprint as only unique values will be stored as part of local dictionary and corresponding data will be stored as encoded data.
    
-     By default, Local Dictionary will be enabled and generated for all no-dictionary string/varchar datatype columns.
-          
-     Users will be able to pass following properties in create table command: 
+   Columns for which dictionary is not generated needs more storage space and in turn more IO. Also since more data will have to be read during query, query performance also would suffer.Generating dictionary per blocklet for such columns would help in saving storage space and assist in improving query performance as carbondata is optimized for handling dictionary encoded columns more effectively.Generating dictionary internally per blocklet is termed as local dictionary. Please refer to [File structure of Carbondata](../file-structure-of-carbondata.md) for understanding about the file structure of carbondata and meaning of terms like blocklet.
+   
+   Local Dictionary helps in:
+   1. Getting more compression.
+   2. Filter queries and full scan queries will be faster as filter will be done on encoded data.
+   3. Reducing the store size and memory footprint as only unique values will be stored as part of local dictionary and corresponding data will be stored as encoded data.
+   4. Getting higher IO throughput.
+ 
+   **NOTE:** 
+   
+   * Following Data Types are Supported for Local Dictionary:
+      * STRING
+      * VARCHAR
+      * CHAR
+
+   * Following Data Types are not Supported for Local Dictionary: 
+      * SMALLINT
+      * INTEGER
+      * BIGINT
+      * DOUBLE
+      * DECIMAL
+      * TIMESTAMP
+      * DATE
+      * BOOLEAN
+   
+   * In case of multi-level complex dataType columns, primitive string/varchar/char columns are considered for local dictionary generation.
+   
+   Local dictionary will have to be enabled explicitly during create table or by enabling the system property 'carbon.local.dictionary.enable'. By default, Local Dictionary will be disabled for the carbondata table.
+    
+   Local Dictionary can be configured using the following properties during create table command: 
           
-     | Properties | Default value | Description |
-     | ---------- | ------------- | ----------- |
-     | LOCAL_DICTIONARY_ENABLE | false | By default, local dictionary will not be enabled for the table |
-     | LOCAL_DICTIONARY_THRESHOLD | 10000 | The maximum cardinality for local dictionary generation (range- 1000 to 100000) |
-     | LOCAL_DICTIONARY_INCLUDE | all no-dictionary string/varchar columns | Columns for which Local Dictionary is generated. |
-     | LOCAL_DICTIONARY_EXCLUDE | none | Columns for which Local Dictionary is not generated |
+   | Properties | Default value | Description |
+   | ---------- | ------------- | ----------- |
+   | LOCAL_DICTIONARY_ENABLE | false | Whether to enable local dictionary generation. **NOTE:** If this property is defined, it will override the value configured at system level by 'carbon.local.dictionary.enable' |
+   | LOCAL_DICTIONARY_THRESHOLD | 10000 | The maximum cardinality of a column upto which carbondata can try to generate local dictionary (maximum - 100000) |
+   | LOCAL_DICTIONARY_INCLUDE | string/varchar/char columns| Columns for which Local Dictionary has to be generated.**NOTE:** Those string/varchar/char columns which are added into DICTIONARY_INCLUDE option will not be considered for local dictionary generation.|
+   | LOCAL_DICTIONARY_EXCLUDE | none | Columns for which Local Dictionary need not be generated. |
         
-      **NOTE:**  If the cardinality exceeds the threshold, this column will not use local dictionary encoding. And in this case, the data loading performance will decrease since there is a rollback procedure for local dictionary encoding.
-      
-      **Calculating Memory Usage for Local Dictionary:**
-      
-      Encoded data and Actual data are both stored when Local Dictionary is enabled.
-      Suppose 'x' columns are configured for Local Dictionary generation out of a total of 'y' string/varchar columns. 
-      
-      Total size will be 
-      
-      Memory size(y-x) + ((4 bytes * number of rows) * x) + (Local Dictionary size of x columns)
-      
-      Local Dictionary size = ((memory occupied by each unique value * cardinality of the column) * number of columns)
-      
-      **Bad Records Path:**
-      
-      This property is used to specify the location where bad records would be written.
-      
-      ```TBLPROPERTIES('BAD_RECORDS_PATH'='/opt/badrecords'')```
-      
+   **Fallback behavior:** 
+   
+   * When the cardinality of a column exceeds the threshold, it triggers a fallback and the generated dictionary will be reverted and data loading will be continued without dictionary encoding.
+   
+   **NOTE:** When fallback is triggered, the data loading performance will decrease as encoded data will be discarded and the actual data is written to the temporary sort files.
+   
+   **The cost for Local Dictionary:**
+   
+   The memory footprint will increase when local dictionary is configured as actual data will have to be stored along with dictionary encoded data.
+       
 ### Example:
  
    ```
@@ -195,7 +208,13 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
      TBLPROPERTIES('LOCAL_DICTIONARY_ENABLE'='true','LOCAL_DICTIONARY_THRESHOLD'='1000',
      'LOCAL_DICTIONARY_INCLUDE'='column1','LOCAL_DICTIONARY_EXCLUDE'='column2')
    ```
+
+   **NOTE:** 
    
+   * We recommend to use Local Dictionary when cardinality is high but is distributed across multiple loads
+   * On a large cluster, decoding data can become a bottleneck for global dictionary as there will be many remote reads. In this scenario, it is better to use Local Dictionary.
+   * When cardinality is less, but loads are repetitive, it is better to use global dictionary as local dictionary generates multiple dictionary files at blocklet level increasing redundancy.
+
    - **Caching Min/Max Value for Required Columns**
      By default, CarbonData caches min and max values of all the columns in schema.  As the load increases, the memory required to hold the min and max values increases considerably. This feature enables you to configure min and max values only for the required columns, resulting in optimized memory usage. 
 	 
@@ -558,6 +577,9 @@ Users can specify which columns to include and exclude for local dictionary gene
     ```
    ALTER TABLE tablename UNSET TBLPROPERTIES('LOCAL_DICTIONARY_ENABLE','LOCAL_DICTIONARY_THRESHOLD','LOCAL_DICTIONARY_INCLUDE','LOCAL_DICTIONARY_EXCLUDE')
     ```
+    
+   **NOTE:** For old tables, by default, local dictionary is disabled. If user wants local dictionary for these tables, user can enable/disable local dictionary for new data at their discretion. 
+   This can be achieved by using the alter table set command.
 
 ### DROP TABLE
   
@@ -784,6 +806,14 @@ Users can specify which columns to include and exclude for local dictionary gene
   * Since Bad Records Path can be specified in create, load and carbon properties. 
   Therefore, value specified in load will have the highest priority, and value specified in carbon properties will have the least priority.
 
+   **Bad Records Path:**
+        
+   This property is used to specify the location where bad records would be written.
+        
+   ```
+   TBLPROPERTIES('BAD_RECORDS_PATH'='/opt/badrecords'')
+   ```
+        
   Example:
 
   ```


[30/50] [abbrv] carbondata git commit: [HOTFIX][PR 2575] Fixed modular plan creation only if valid datamaps are available

Posted by ja...@apache.org.
[HOTFIX][PR 2575] Fixed modular plan creation only if valid datamaps are available

The update query fails on a Spark 2.2 cluster when the MV jars are available, because the catalogs
are not empty if datamaps have been created for other tables as well, so isValidPlan() inside MVAnalyzerRule returns true.

This closes #2579
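The scenario boils down to the test added by this commit; a condensed sketch of it (same statements, shortened data) looks like this:

```scala
// Condensed from the MVCreateTestCase test below: an UPDATE on a table without any MV datamap,
// run while MV jars are on the classpath and datamaps exist for other tables.
sql("create table mvtable1(name string, age int, salary int) stored by 'carbondata'")
sql("insert into mvtable1 select 'n1',12,12")
sql("insert into mvtable1 select 'n3',12,12")
// Before this fix, MVAnalyzerRule could treat this plan as rewritable and the update failed on Spark 2.2.
sql("update mvtable1 set(name) = ('updatedName')").show()
sql("select count(*) from mvtable1 where name = 'updatedName'").show()  // expected: 2
```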


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/f52c1338
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/f52c1338
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/f52c1338

Branch: refs/heads/external-format
Commit: f52c13380828ac7f388273cb2460971fc1a5eed1
Parents: b483a57
Author: rahul <ra...@knoldus.in>
Authored: Mon Jul 30 12:01:49 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Thu Aug 2 16:46:16 2018 +0530

----------------------------------------------------------------------
 .../carbondata/mv/rewrite/MVCreateTestCase.scala      | 14 ++++++++++++++
 1 file changed, 14 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/f52c1338/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
index 6adb14e..0b96202 100644
--- a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
@@ -18,6 +18,7 @@ package org.apache.carbondata.mv.rewrite
 
 import java.io.File
 
+import org.apache.spark.sql.Row
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.execution.datasources.LogicalRelation
 import org.apache.spark.sql.test.util.QueryTest
@@ -885,6 +886,19 @@ class MVCreateTestCase extends QueryTest with BeforeAndAfterAll {
     sql("drop datamap if exists datamap_subqry")
   }
 
+  test("basic scenario") {
+
+    sql("drop table if exists mvtable1")
+    sql("create table mvtable1(name string,age int,salary int) stored by 'carbondata'")
+    sql(" insert into mvtable1 select 'n1',12,12")
+    sql("  insert into mvtable1 select 'n1',12,12")
+    sql(" insert into mvtable1 select 'n3',12,12")
+    sql(" insert into mvtable1 select 'n4',12,12")
+    sql("update mvtable1 set(name) = ('updatedName')").show()
+    checkAnswer(sql("select count(*) from mvtable1 where name = 'updatedName'"),Seq(Row(4)))
+    sql("drop table if exists mvtable1")
+  }
+
   def verifyMVDataMap(logicalPlan: LogicalPlan, dataMapName: String): Boolean = {
     val tables = logicalPlan collect {
       case l: LogicalRelation => l.catalogTable.get


[26/50] [abbrv] carbondata git commit: [CARBONDATA-2796][32K]Fix data loading problem when table has complex column and long string column

Posted by ja...@apache.org.
[CARBONDATA-2796][32K]Fix data loading problem when table has complex column and long string column

Currently, both the varchar column and the complex column assume they are the last member of the noDictionary group when converting a carbon row from raw format to the 3-parted format. Since they need to be processed in different ways, an exception occurs if the column is handled in the wrong way.

To fix this, we mark the complex columns explicitly, just like the varchar columns, and keep the order of the noDictionary group as: normal dimensions, then varchar, then complex.

This closes #2577
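The new test case added below exercises exactly this column layout; a condensed sketch of it:

```scala
// Condensed from the new test below: normal, long-string (varchar) and complex (array) columns together,
// so the noDictionary group ordering (normal, then varchar, then complex) is exercised at load time.
sql(
  """
    | CREATE TABLE varchar_complex_table
    | (m1 int, arr1 array<string>, varchar1 string, s1 string, varchar2 string, arr2 array<string>)
    | STORED BY 'carbondata'
    | TBLPROPERTIES('long_string_columns'='varchar1,varchar2')
  """.stripMargin)
sql("INSERT INTO TABLE varchar_complex_table VALUES(1,'ar1.0$ar1.1','longstr10','normal string1','longstr11','ar2.0$ar2.1')")
sql("SELECT * FROM varchar_complex_table WHERE varchar1 = 'longstr10'").show()
```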


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/11fb422d
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/11fb422d
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/11fb422d

Branch: refs/heads/external-format
Commit: 11fb422d9d03a5354d746d134bd25ed65e6ca736
Parents: af98410
Author: Manhua <ke...@qq.com>
Authored: Mon Jul 30 15:07:37 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Wed Aug 1 22:38:13 2018 +0800

----------------------------------------------------------------------
 .../complexType/TestComplexDataType.scala       |  7 ----
 .../LocalDictionarySupportAlterTableTest.scala  | 14 +++----
 .../LocalDictionarySupportCreateTableTest.scala |  6 +--
 .../VarcharDataTypesBasicTestCase.scala         | 33 +++++++++++++++++
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala | 15 ++++++--
 .../command/carbonTableSchemaCommon.scala       |  8 +---
 .../loading/row/IntermediateSortTempRow.java    | 26 +++++++++----
 .../loading/sort/SortStepRowHandler.java        | 39 +++++++++++++-------
 .../sort/sortdata/TableFieldStat.java           | 31 +++++++++++-----
 .../store/CarbonFactDataHandlerModel.java       |  6 +--
 10 files changed, 125 insertions(+), 60 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/11fb422d/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
index 1ad7889..8527380 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
@@ -45,7 +45,6 @@ class TestComplexDataType extends QueryTest with BeforeAndAfterAll {
       "create table table1 (roll string,person Struct<detail:int>) stored by " +
       "'carbondata'")
     sql("insert into table1 values('abc',1)")
-    sql("select roll,person,roll,person.detail from table1").show(false)
     checkAnswer(sql("select roll,person,person.detail from table1"),
       Seq(Row("abc", Row(1), 1)))
     checkAnswer(sql("select person,person.detail from table1"),
@@ -60,7 +59,6 @@ class TestComplexDataType extends QueryTest with BeforeAndAfterAll {
       "create table table1 (roll string,person array<int>) stored by " +
       "'carbondata'")
     sql("insert into table1 values('abc','1$2$3')")
-    sql("select roll,person,roll,person from table1").show(false)
     checkAnswer(sql("select roll,person from table1"),
       Seq(Row("abc", mutable.WrappedArray.make(Array(1, 2, 3)))))
   }
@@ -99,8 +97,6 @@ class TestComplexDataType extends QueryTest with BeforeAndAfterAll {
       "create table table1 (roll int,person Struct<detail:array<string>>) stored by " +
       "'carbondata'")
     sql("insert into table1 values(1,'abc:bcd')")
-    //    sql("select person from table1").show(false)
-    sql("select person.detail[0] from table1").show(false)
     checkAnswer(sql("select person.detail[0] from table1"), Seq(Row("abc")))
     checkAnswer(sql("select person.detail[1] from table1"), Seq(Row("bcd")))
     checkAnswer(sql("select roll,person from table1"),
@@ -164,7 +160,6 @@ class TestComplexDataType extends QueryTest with BeforeAndAfterAll {
       "'carbondata'")
     sql("insert into table1 values(1,'2018/01/01')")
     checkExistence(sql("select person from table1"), true, "2018-01-01 00:00:00.0")
-    sql("select person,roll,person.detail from table1").show(false)
     checkAnswer(sql("select person,roll,person.detail from table1"),
       Seq(Row(Row(Timestamp.valueOf("2018-01-01 00:00:00.0")), 1,
         Timestamp.valueOf("2018-01-01 00:00:00.0"))))
@@ -227,7 +222,6 @@ class TestComplexDataType extends QueryTest with BeforeAndAfterAll {
       "'carbondata'")
     sql("insert into table1 values(1,20)")
     checkExistence(sql("select person from table1"), true, "20")
-    sql("select person,person.detail from table1").show(false)
     checkAnswer(sql("select person,roll,person.detail from table1"), Seq(Row(Row(20), 1, 20)))
     checkExistence(sql("select person.detail from table1"), true, "20")
     checkAnswer(sql("select roll,person from table1"), Seq(Row(1, Row(20))))
@@ -252,7 +246,6 @@ class TestComplexDataType extends QueryTest with BeforeAndAfterAll {
       "'carbondata'")
     sql("insert into table1 values(1,true)")
     checkExistence(sql("select person from table1"), true, "true")
-    sql("select person,person.detail from table1").show(false)
     checkAnswer(sql("select person,roll,person.detail from table1"), Seq(Row(Row(true), 1, true)))
     checkExistence(sql("select person.detail from table1"), true, "true")
     checkAnswer(sql("select roll,person from table1"), Seq(Row(1, Row(true))))

http://git-wip-us.apache.org/repos/asf/carbondata/blob/11fb422d/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala
index 373b309..24af99e 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala
@@ -1235,7 +1235,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Exclude")) match {
-      case Some(row) => assert(row.get(1).toString.contains("st.val.si,name"))
+      case Some(row) => assert(row.get(1).toString.contains("name,st.val.si"))
       case None => assert(false)
     }
   }
@@ -1260,7 +1260,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
-      case Some(row) => assert(row.get(1).toString.contains("st.val.si,name"))
+      case Some(row) => assert(row.get(1).toString.contains("name,st.val.si"))
       case None => assert(false)
     }
   }
@@ -1284,7 +1284,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Exclude")) match {
-      case Some(row) => assert(row.get(1).toString.contains("st.val.sd,name"))
+      case Some(row) => assert(row.get(1).toString.contains("name,st.val.sd"))
       case None => assert(false)
     }
   }
@@ -1309,7 +1309,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Exclude")) match {
-      case Some(row) => assert(row.get(1).toString.contains("h,st.val.sd,name"))
+      case Some(row) => assert(row.get(1).toString.contains("h,name,st.val.sd"))
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
@@ -1338,7 +1338,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Exclude")) match {
-      case Some(row) => assert(row.get(1).toString.contains("h,st.sd,st.sh.val,name"))
+      case Some(row) => assert(row.get(1).toString.contains("h,name,st.sd,st.sh.val"))
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
@@ -1367,7 +1367,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Exclude")) match {
-      case Some(row) => assert(row.get(1).toString.contains("h,st.sd,name"))
+      case Some(row) => assert(row.get(1).toString.contains("h,name,st.sd"))
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
@@ -1396,7 +1396,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Exclude")) match {
-      case Some(row) => assert(row.get(1).toString.contains("h,st.val,name"))
+      case Some(row) => assert(row.get(1).toString.contains("h,name,st.val"))
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/11fb422d/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
index 52c18d0..a02d3ef 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
@@ -2364,7 +2364,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
-      case Some(row) => assert(row.get(1).toString.contains("st.val.si,name") && !row.get(1).toString.contains("city"))
+      case Some(row) => assert(row.get(1).toString.contains("name,st.val.si") && !row.get(1).toString.contains("city"))
       case None => assert(false)
     }
   }
@@ -2387,7 +2387,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Exclude")) match {
-      case Some(row) => assert(row.get(1).toString.contains("st.val.si,name") &&
+      case Some(row) => assert(row.get(1).toString.contains("name,st.val.si") &&
                                !row.get(1).toString.contains("city"))
       case None => assert(false)
     }
@@ -2416,7 +2416,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
-      case Some(row) => assert(row.get(1).toString.contains("st.val.si,name") &&
+      case Some(row) => assert(row.get(1).toString.contains("name,st.val.si") &&
                                !row.get(1).toString.contains("city"))
       case None => assert(false)
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/11fb422d/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/longstring/VarcharDataTypesBasicTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/longstring/VarcharDataTypesBasicTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/longstring/VarcharDataTypesBasicTestCase.scala
index 4aa7062..b607d07 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/longstring/VarcharDataTypesBasicTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/longstring/VarcharDataTypesBasicTestCase.scala
@@ -31,6 +31,8 @@ import org.apache.carbondata.core.metadata.CarbonMetadata
 import org.apache.carbondata.core.metadata.datatype.DataTypes
 import org.apache.carbondata.core.util.CarbonProperties
 
+import scala.collection.mutable
+
 class VarcharDataTypesBasicTestCase extends QueryTest with BeforeAndAfterEach with BeforeAndAfterAll {
   private val longStringTable = "long_string_table"
   private val inputDir = s"$resourcesPath${File.separator}varchartype${File.separator}"
@@ -318,6 +320,37 @@ class VarcharDataTypesBasicTestCase extends QueryTest with BeforeAndAfterEach wi
     sql(s"DROP DATAMAP IF EXISTS $datamapName ON TABLE $longStringTable")
   }
 
+  test("create table with varchar column and complex column") {
+    sql("DROP TABLE IF EXISTS varchar_complex_table")
+    sql("""
+        | CREATE TABLE varchar_complex_table
+        | (m1 int,arr1 array<string>,varchar1 string,s1 string,varchar2 string,arr2 array<string>)
+        | STORED BY 'carbondata'
+        | TBLPROPERTIES('long_string_columns'='varchar1,varchar2')
+        | """.stripMargin)
+    sql(
+      """
+        | INSERT INTO TABLE varchar_complex_table
+        | VALUES(1,'ar1.0$ar1.1','longstr10','normal string1','longstr11','ar2.0$ar2.1'),
+        | (2,'ar1.2$ar1.3','longstr20','normal string2','longstr21','ar2.2$ar2.3')
+        | """.stripMargin)
+    checkAnswer(
+      sql("SELECT * FROM varchar_complex_table where varchar1='longstr10'"),
+      Seq(Row(1,mutable.WrappedArray.make(Array("ar1.0","ar1.1")),"longstr10","normal string1",
+        "longstr11",mutable.WrappedArray.make(Array("ar2.0","ar2.1")))))
+    checkAnswer(
+      sql(
+        """
+          |SELECT varchar1,arr2,s1,m1,varchar2,arr1
+          |FROM varchar_complex_table
+          |WHERE arr1[1]='ar1.3'
+          |""".stripMargin),
+      Seq(Row("longstr20",mutable.WrappedArray.make(Array("ar2.2","ar2.3")),"normal string2",2,
+        "longstr21",mutable.WrappedArray.make(Array("ar1.2","ar1.3")))))
+
+    sql("DROP TABLE IF EXISTS varchar_complex_table")
+  }
+
     // ignore this test in CI, because it will need at least 4GB memory to run successfully
   ignore("Exceed 2GB per column page for varchar datatype") {
     deleteFile(inputFile_2g_column_page)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/11fb422d/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index 2599b3f..bb68ec5 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -225,17 +225,24 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
 
   protected val escapedIdentifier = "`([^`]+)`".r
 
-  private def reorderDimensions(dims: Seq[Field]): Seq[Field] = {
-    var complexDimensions: Seq[Field] = Seq()
+  private def reorderDimensions(dims: Seq[Field], varcharCols: Seq[String]): Seq[Field] = {
     var dimensions: Seq[Field] = Seq()
+    var varcharDimensions: Seq[Field] = Seq()
+    var complexDimensions: Seq[Field] = Seq()
     dims.foreach { dimension =>
       dimension.dataType.getOrElse("NIL") match {
         case "Array" => complexDimensions = complexDimensions :+ dimension
         case "Struct" => complexDimensions = complexDimensions :+ dimension
+        case "String" =>
+          if (varcharCols.exists(dimension.column.equalsIgnoreCase)) {
+            varcharDimensions = varcharDimensions :+ dimension
+          } else {
+            dimensions = dimensions :+ dimension
+          }
         case _ => dimensions = dimensions :+ dimension
       }
     }
-    dimensions ++ complexDimensions
+    dimensions ++ varcharDimensions ++ complexDimensions
   }
 
   /**
@@ -415,7 +422,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
       dbName,
       tableName,
       tableProperties.toMap,
-      reorderDimensions(dims.map(f => normalizeType(f)).map(f => addParent(f))),
+      reorderDimensions(dims.map(f => normalizeType(f)).map(f => addParent(f)), varcharColumns),
       msrs.map(f => normalizeType(f)),
       Option(sortKeyDims),
       Option(varcharColumns),

http://git-wip-us.apache.org/repos/asf/carbondata/blob/11fb422d/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
index a61f94f..4a99ac7 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
@@ -689,12 +689,8 @@ class TableNewProcessor(cm: TableModel) {
         }
       }
     }
-    // dimensions that are not varchar
-    cm.dimCols.filter(field => !cm.varcharCols.get.contains(field.column))
-      .foreach(addDimensionCol(_))
-    // dimensions that are varchar
-    cm.dimCols.filter(field => cm.varcharCols.get.contains(field.column))
-      .foreach(addDimensionCol(_))
+    // add all dimensions
+    cm.dimCols.foreach(addDimensionCol(_))
 
     // check whether the column is a local dictionary column and set in column schema
     if (null != cm.tableProperties) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/11fb422d/processing/src/main/java/org/apache/carbondata/processing/loading/row/IntermediateSortTempRow.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/row/IntermediateSortTempRow.java b/processing/src/main/java/org/apache/carbondata/processing/loading/row/IntermediateSortTempRow.java
index 8bec099..47b419e 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/row/IntermediateSortTempRow.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/row/IntermediateSortTempRow.java
@@ -54,13 +54,15 @@ public class IntermediateSortTempRow {
   /**
    * deserialize from bytes array to get the no sort fields
    * @param outDictNoSort stores the dict & no-sort fields
-   * @param outNoDictNoSortAndVarcharDims stores the no-dict & no-sort fields,
- *                                    including complex and varchar fields
+   * @param outNoDictNoSort stores all no-dict & no-sort fields,
+   *                        including complex and varchar fields
    * @param outMeasures stores the measure fields
    * @param dataTypes data type for the measure
+   * @param varcharDimCnt number of varchar column
+   * @param complexDimCnt number of complex column
    */
-  public void unpackNoSortFromBytes(int[] outDictNoSort, byte[][] outNoDictNoSortAndVarcharDims,
-      Object[] outMeasures, DataType[] dataTypes, int varcharDimCnt) {
+  public void unpackNoSortFromBytes(int[] outDictNoSort, byte[][] outNoDictNoSort,
+      Object[] outMeasures, DataType[] dataTypes, int varcharDimCnt, int complexDimCnt) {
     ByteBuffer rowBuffer = ByteBuffer.wrap(noSortDimsAndMeasures);
     // read dict_no_sort
     int dictNoSortCnt = outDictNoSort.length;
@@ -68,13 +70,13 @@ public class IntermediateSortTempRow {
       outDictNoSort[i] = rowBuffer.getInt();
     }
 
-    // read no_dict_no_sort (including complex)
-    int noDictNoSortCnt = outNoDictNoSortAndVarcharDims.length - varcharDimCnt;
+    // read no_dict_no_sort
+    int noDictNoSortCnt = outNoDictNoSort.length - varcharDimCnt - complexDimCnt;
     for (int i = 0; i < noDictNoSortCnt; i++) {
       short len = rowBuffer.getShort();
       byte[] bytes = new byte[len];
       rowBuffer.get(bytes);
-      outNoDictNoSortAndVarcharDims[i] = bytes;
+      outNoDictNoSort[i] = bytes;
     }
 
     // read varchar dims
@@ -82,7 +84,15 @@ public class IntermediateSortTempRow {
       int len = rowBuffer.getInt();
       byte[] bytes = new byte[len];
       rowBuffer.get(bytes);
-      outNoDictNoSortAndVarcharDims[i + noDictNoSortCnt] = bytes;
+      outNoDictNoSort[i + noDictNoSortCnt] = bytes;
+    }
+
+    // read complex dims
+    for (int i = 0; i < complexDimCnt; i++) {
+      short len = rowBuffer.getShort();
+      byte[] bytes = new byte[len];
+      rowBuffer.get(bytes);
+      outNoDictNoSort[i + noDictNoSortCnt + varcharDimCnt] = bytes;
     }
 
     // read measure

http://git-wip-us.apache.org/repos/asf/carbondata/blob/11fb422d/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java
index 1c6d8b2..0118e4d 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java
@@ -47,6 +47,7 @@ public class SortStepRowHandler implements Serializable {
   private int noDictSortDimCnt = 0;
   private int noDictNoSortDimCnt = 0;
   private int varcharDimCnt = 0;
+  private int complexDimCnt = 0;
   private int measureCnt;
 
   // indices for dict & sort dimension columns
@@ -55,9 +56,10 @@ public class SortStepRowHandler implements Serializable {
   private int[] dictNoSortDimIdx;
   // indices for no-dict & sort dimension columns
   private int[] noDictSortDimIdx;
-  // indices for no-dict & no-sort dimension columns, including complex columns
+  // indices for no-dict & no-sort dimension columns, excluding complex/varchar columns
   private int[] noDictNoSortDimIdx;
   private int[] varcharDimIdx;
+  private int[] complexDimIdx;
   // indices for measure columns
   private int[] measureIdx;
 
@@ -73,12 +75,14 @@ public class SortStepRowHandler implements Serializable {
     this.noDictSortDimCnt = tableFieldStat.getNoDictSortDimCnt();
     this.noDictNoSortDimCnt = tableFieldStat.getNoDictNoSortDimCnt();
     this.varcharDimCnt = tableFieldStat.getVarcharDimCnt();
+    this.complexDimCnt = tableFieldStat.getComplexDimCnt();
     this.measureCnt = tableFieldStat.getMeasureCnt();
     this.dictSortDimIdx = tableFieldStat.getDictSortDimIdx();
     this.dictNoSortDimIdx = tableFieldStat.getDictNoSortDimIdx();
     this.noDictSortDimIdx = tableFieldStat.getNoDictSortDimIdx();
     this.noDictNoSortDimIdx = tableFieldStat.getNoDictNoSortDimIdx();
     this.varcharDimIdx = tableFieldStat.getVarcharDimIdx();
+    this.complexDimIdx = tableFieldStat.getComplexDimIdx();
     this.measureIdx = tableFieldStat.getMeasureIdx();
     this.dataTypes = tableFieldStat.getMeasureDataType();
   }
@@ -104,8 +108,8 @@ public class SortStepRowHandler implements Serializable {
     try {
       int[] dictDims
           = new int[this.dictSortDimCnt + this.dictNoSortDimCnt];
-      byte[][] nonDictArray
-          = new byte[this.noDictSortDimCnt + this.noDictNoSortDimCnt + this.varcharDimCnt][];
+      byte[][] nonDictArray = new byte[this.noDictSortDimCnt + this.noDictNoSortDimCnt
+                                       + this.varcharDimCnt + this.complexDimCnt ][];
       Object[] measures = new Object[this.measureCnt];
 
       // convert dict & data
@@ -131,6 +135,10 @@ public class SortStepRowHandler implements Serializable {
       for (int idx = 0; idx < this.varcharDimCnt; idx++) {
         nonDictArray[idxAcc++] = (byte[]) row[this.varcharDimIdx[idx]];
       }
+      // convert complex dims
+      for (int idx = 0; idx < this.complexDimCnt; idx++) {
+        nonDictArray[idxAcc++] = (byte[]) row[this.complexDimIdx[idx]];
+      }
 
       // convert measure data
       for (int idx = 0; idx < this.measureCnt; idx++) {
@@ -152,18 +160,17 @@ public class SortStepRowHandler implements Serializable {
    * @return 3-parted row
    */
   public Object[] convertIntermediateSortTempRowTo3Parted(IntermediateSortTempRow sortTempRow) {
-    int[] dictDims
-        = new int[this.dictSortDimCnt + this.dictNoSortDimCnt];
-    byte[][] noDictArray
-        = new byte[this.noDictSortDimCnt + this.noDictNoSortDimCnt + this.varcharDimCnt][];
+    int[] dictDims = new int[this.dictSortDimCnt + this.dictNoSortDimCnt];
+    byte[][] noDictArray = new byte[this.noDictSortDimCnt + this.noDictNoSortDimCnt
+                                    + this.varcharDimCnt + this.complexDimCnt][];
 
     int[] dictNoSortDims = new int[this.dictNoSortDimCnt];
-    byte[][] noDictNoSortAndVarcharDims
-        = new byte[this.noDictNoSortDimCnt + this.varcharDimCnt][];
+    byte[][] noDictNoSortAndVarcharComplexDims
+        = new byte[this.noDictNoSortDimCnt + this.varcharDimCnt + this.complexDimCnt][];
     Object[] measures = new Object[this.measureCnt];
 
-    sortTempRow.unpackNoSortFromBytes(dictNoSortDims, noDictNoSortAndVarcharDims, measures,
-        this.dataTypes, this.varcharDimCnt);
+    sortTempRow.unpackNoSortFromBytes(dictNoSortDims, noDictNoSortAndVarcharComplexDims, measures,
+        this.dataTypes, this.varcharDimCnt, this.complexDimCnt);
 
     // dict dims
     System.arraycopy(sortTempRow.getDictSortDims(), 0 , dictDims,
@@ -174,8 +181,8 @@ public class SortStepRowHandler implements Serializable {
     // no dict dims, including complex
     System.arraycopy(sortTempRow.getNoDictSortDims(), 0,
         noDictArray, 0, this.noDictSortDimCnt);
-    System.arraycopy(noDictNoSortAndVarcharDims, 0, noDictArray,
-        this.noDictSortDimCnt, this.noDictNoSortDimCnt + this.varcharDimCnt);
+    System.arraycopy(noDictNoSortAndVarcharComplexDims, 0, noDictArray,
+        this.noDictSortDimCnt, this.noDictNoSortDimCnt + this.varcharDimCnt + this.complexDimCnt);
 
     // measures are already here
 
@@ -445,6 +452,12 @@ public class SortStepRowHandler implements Serializable {
       rowBuffer.putInt(bytes.length);
       rowBuffer.put(bytes);
     }
+    // convert complex dims
+    for (int idx = 0; idx < this.complexDimCnt; idx++) {
+      byte[] bytes = (byte[]) row[this.complexDimIdx[idx]];
+      rowBuffer.putShort((short) bytes.length);
+      rowBuffer.put(bytes);
+    }
 
     // convert measure
     Object tmpValue;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/11fb422d/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/TableFieldStat.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/TableFieldStat.java b/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/TableFieldStat.java
index 094bd83..353ddb4 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/TableFieldStat.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/TableFieldStat.java
@@ -33,8 +33,10 @@ public class TableFieldStat implements Serializable {
   private int dictSortDimCnt = 0;
   private int dictNoSortDimCnt = 0;
   private int noDictSortDimCnt = 0;
-  // for columns that are no_dict_dim and no_sort_dim and complex, except the varchar dims
+  // for columns that are no_dict_dim and no_sort_dim, except complex/varchar dims
   private int noDictNoSortDimCnt = 0;
+  // for columns that are complex data type
+  private int complexDimCnt = 0;
   // for columns that are varchar data type
   private int varcharDimCnt = 0;
   // whether sort column is of dictionary type or not
@@ -49,17 +51,19 @@ public class TableFieldStat implements Serializable {
   private int[] dictNoSortDimIdx;
   // indices for no-dict & sort dimension columns
   private int[] noDictSortDimIdx;
-  // indices for no-dict & no-sort dimension columns, including complex columns
+  // indices for no-dict & no-sort dimension columns, excluding complex/varchar columns
   private int[] noDictNoSortDimIdx;
   // indices for varchar dimension columns
   private int[] varcharDimIdx;
+  // indices for complex dimension columns
+  private int [] complexDimIdx;
   // indices for measure columns
   private int[] measureIdx;
 
   public TableFieldStat(SortParameters sortParameters) {
     int noDictDimCnt = sortParameters.getNoDictionaryCount();
-    int complexDimCnt = sortParameters.getComplexDimColCount();
     int dictDimCnt = sortParameters.getDimColCount() - noDictDimCnt;
+    this.complexDimCnt = sortParameters.getComplexDimColCount();
     this.isSortColNoDictFlags = sortParameters.getNoDictionarySortColumn();
     this.isVarcharDimFlags = sortParameters.getIsVarcharDimensionColumn();
     int sortColCnt = isSortColNoDictFlags.length;
@@ -83,8 +87,8 @@ public class TableFieldStat implements Serializable {
     this.dictSortDimIdx = new int[dictSortDimCnt];
     this.dictNoSortDimIdx = new int[dictDimCnt - dictSortDimCnt];
     this.noDictSortDimIdx = new int[noDictSortDimCnt];
-    this.noDictNoSortDimIdx = new int[noDictDimCnt + complexDimCnt - noDictSortDimCnt
-        - varcharDimCnt];
+    this.noDictNoSortDimIdx = new int[noDictDimCnt - noDictSortDimCnt - varcharDimCnt];
+    this.complexDimIdx = new int[complexDimCnt];
     this.varcharDimIdx = new int[varcharDimCnt];
     this.measureIdx = new int[measureCnt];
 
@@ -113,13 +117,13 @@ public class TableFieldStat implements Serializable {
       }
     }
     dictNoSortDimCnt = tmpDictNoSortCnt;
+    noDictNoSortDimCnt = tmpNoDictNoSortCnt;
 
     int base = isDimNoDictFlags.length;
-    // adding complex dimension columns
+    // indices for complex dimension columns
     for (int i = 0; i < complexDimCnt; i++) {
-      noDictNoSortDimIdx[tmpNoDictNoSortCnt++] = base + i;
+      complexDimIdx[i] = base + i;
     }
-    noDictNoSortDimCnt = tmpNoDictNoSortCnt;
 
     base += complexDimCnt;
     // indices for measure columns
@@ -144,6 +148,10 @@ public class TableFieldStat implements Serializable {
     return noDictNoSortDimCnt;
   }
 
+  public int getComplexDimCnt() {
+    return complexDimCnt;
+  }
+
   public int getVarcharDimCnt() {
     return varcharDimCnt;
   }
@@ -180,6 +188,10 @@ public class TableFieldStat implements Serializable {
     return noDictNoSortDimIdx;
   }
 
+  public int[] getComplexDimIdx() {
+    return complexDimIdx;
+  }
+
   public int[] getVarcharDimIdx() {
     return varcharDimIdx;
   }
@@ -196,12 +208,13 @@ public class TableFieldStat implements Serializable {
         && dictNoSortDimCnt == that.dictNoSortDimCnt
         && noDictSortDimCnt == that.noDictSortDimCnt
         && noDictNoSortDimCnt == that.noDictNoSortDimCnt
+        && complexDimCnt == that.complexDimCnt
         && varcharDimCnt == that.varcharDimCnt
         && measureCnt == that.measureCnt;
   }
 
   @Override public int hashCode() {
     return Objects.hash(dictSortDimCnt, dictNoSortDimCnt, noDictSortDimCnt,
-        noDictNoSortDimCnt, varcharDimCnt, measureCnt);
+        noDictNoSortDimCnt, complexDimCnt, varcharDimCnt, measureCnt);
   }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/11fb422d/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java b/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java
index 7201305..26ee65a 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java
@@ -224,13 +224,12 @@ public class CarbonFactDataHandlerModel {
 
     // for dynamic page size in write step if varchar columns exist
     List<Integer> varcharDimIdxInNoDict = new ArrayList<>();
-    int dictDimCount = configuration.getDimensionCount() - configuration.getNoDictionaryCount();
     for (DataField dataField : configuration.getDataFields()) {
       CarbonColumn column = dataField.getColumn();
       if (!column.isComplex() && !dataField.hasDictionaryEncoding() &&
               column.getDataType() == DataTypes.VARCHAR) {
         // ordinal is set in CarbonTable.fillDimensionsAndMeasuresForTables()
-        varcharDimIdxInNoDict.add(column.getOrdinal() - dictDimCount);
+        varcharDimIdxInNoDict.add(column.getOrdinal() - simpleDimsCount);
       }
     }
 
@@ -319,7 +318,8 @@ public class CarbonFactDataHandlerModel {
     // for dynamic page size in write step if varchar columns exist
     List<Integer> varcharDimIdxInNoDict = new ArrayList<>();
     List<CarbonDimension> allDimensions = carbonTable.getDimensions();
-    int dictDimCount = allDimensions.size() - segmentProperties.getNumberOfNoDictionaryDimension();
+    int dictDimCount = allDimensions.size() - segmentProperties.getNumberOfNoDictionaryDimension()
+            - segmentProperties.getComplexDimensions().size();
     for (CarbonDimension dim : allDimensions) {
       if (!dim.isComplex() && !dim.hasEncoding(Encoding.DICTIONARY) &&
           dim.getDataType() == DataTypes.VARCHAR) {


[14/50] [abbrv] carbondata git commit: [CARBONDATA-2606][Complex DataType Enhancements]Fix Null result if projection column have null primitive column and struct

Posted by ja...@apache.org.
[CARBONDATA-2606][Complex DataType Enhancements]Fix Null result if projection column have null primitive column and struct

Problem:
When the actual value of a primitive data type column is null, PR#2489 moves all null values to the end of the collected row without considering the data type.

Solution:
Place the null at the end of the output only if the null value comes from a primitive child column of a complex type.

This closes #2559
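The test added in this commit shows the shape of the data involved; a condensed sketch:

```scala
// Condensed from the new test below: a null primitive column selected together with a struct column.
// Before this fix, the null in 'id' was shifted to the end of the projected row regardless of its type.
sql("create table table1 (id int, name string, structField struct<intval:int, stringval:string>) stored by 'carbondata'")
sql("insert into table1 values(null,'aaa','23$bb')")
sql("select * from table1").show()   // expected row: (null, "aaa", Row(23, "bb"))
```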


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/9a3b0b16
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/9a3b0b16
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/9a3b0b16

Branch: refs/heads/external-format
Commit: 9a3b0b16b79671a5dd9731ccb539d722c14d7c6f
Parents: 790cde8
Author: ajantha-bhat <aj...@gmail.com>
Authored: Wed Jul 25 19:21:02 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Tue Jul 31 21:19:39 2018 +0530

----------------------------------------------------------------------
 .../impl/DictionaryBasedResultCollector.java    | 57 +++++++++++++++-----
 .../complexType/TestComplexDataType.scala       |  8 +++
 2 files changed, 53 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/9a3b0b16/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
index 3184d80..1faf2fd 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java
@@ -29,6 +29,7 @@ import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionary
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.metadata.encoder.Encoding;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.scan.complextypes.StructQueryType;
 import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
 import org.apache.carbondata.core.scan.filter.GenericQueryType;
 import org.apache.carbondata.core.scan.model.ProjectionDimension;
@@ -112,11 +113,27 @@ public class DictionaryBasedResultCollector extends AbstractScannedResultCollect
    */
   @Override
   public List<Object[]> collectResultInRow(BlockletScannedResult scannedResult, int batchSize) {
-
     // scan the record and add to list
     List<Object[]> listBasedResult = new ArrayList<>(batchSize);
     int rowCounter = 0;
-
+    boolean isStructQueryType = false;
+    for (Object obj : scannedResult.complexParentIndexToQueryMap.values()) {
+      if (obj instanceof StructQueryType) {
+        //if any one of the map elements contains struct,need to shift rows if contains null.
+        isStructQueryType = true;
+        break;
+      }
+    }
+    boolean[] isComplexChildColumn = null;
+    if (isStructQueryType) {
+      // need to identify complex child columns for shifting rows if contains null
+      isComplexChildColumn = new boolean[queryDimensions.length + queryMeasures.length];
+      for (ProjectionDimension dimension : queryDimensions) {
+        if (null != dimension.getDimension().getComplexParentDimension()) {
+          isComplexChildColumn[dimension.getOrdinal()] = true;
+        }
+      }
+    }
     while (scannedResult.hasNext() && rowCounter < batchSize) {
       Object[] row = new Object[queryDimensions.length + queryMeasures.length];
       if (isDimensionExists) {
@@ -140,16 +157,8 @@ public class DictionaryBasedResultCollector extends AbstractScannedResultCollect
         continue;
       }
       fillMeasureData(scannedResult, row);
-      if (scannedResult.complexParentIndexToQueryMap.toString().contains("StructQueryType")) {
-        // If a : <b,c> and d : <e,f> are two struct and if a.b,a.c,d.e is given in the
-        // projection list,then object array will contain a,null,d as result, because for a.b,
-        // a will be filled and for a.c null will be placed.
-        // Instead place null in the end of object array and send a,d,null as result.
-        int count = 0;
-        for (int j = 0; j < row.length; j++) {
-          if (row[j] != null) row[count++] = row[j];
-        }
-        while (count < row.length) row[count++] = null;
+      if (isStructQueryType) {
+        shiftNullForStruct(row, isComplexChildColumn);
       }
       listBasedResult.add(row);
       rowCounter++;
@@ -157,6 +166,30 @@ public class DictionaryBasedResultCollector extends AbstractScannedResultCollect
     return listBasedResult;
   }
 
+  /**
+   * shift the complex column null to the end
+   *
+   * @param row
+   * @param isComplexChildColumn
+   */
+  private void shiftNullForStruct(Object[] row, boolean[] isComplexChildColumn) {
+    int count = 0;
+    // If a : <b,c> and d : <e,f> are two struct and if a.b,a.c,d.e is given in the
+    // projection list,then object array will contain a,null,d as result, because for a.b,
+    // a will be filled and for a.c null will be placed.
+    // Instead place null in the end of object array and send a,d,null as result.
+    for (int j = 0; j < row.length; j++) {
+      if (null == row[j] && !isComplexChildColumn[j]) {
+        // if it is a primitive column, don't shift the null to the end.
+        row[count++] = null;
+      } else if (null != row[j]) {
+        row[count++] = row[j];
+      }
+    }
+    // fill the skipped content
+    while (count < row.length) row[count++] = null;
+  }
+
   private void fillComplexColumnDataBufferForThisRow() {
     mergedComplexDimensionDataMap.clear();
     int noDictionaryComplexColumnIndex = 0;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/9a3b0b16/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
index 2b3cfc0..1451f7b 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
@@ -1009,4 +1009,12 @@ class TestComplexDataType extends QueryTest with BeforeAndAfterAll {
       .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
         CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
   }
+  test("test null values in primitive data type and select all data types including complex data type") {
+    sql("DROP TABLE IF EXISTS table1")
+    sql(
+      "create table table1 (id int, name string, structField struct<intval:int, stringval:string>) stored by 'carbondata'")
+    sql("insert into table1 values(null,'aaa','23$bb')")
+    checkAnswer(sql("select * from table1"),Seq(Row(null,"aaa", Row(23,"bb"))))
+  }
+
 }


[05/50] [abbrv] carbondata git commit: [HOTFIX] CreateDataMapPost Event was skipped in case of preaggregate datamap

Posted by ja...@apache.org.
[HOTFIX] CreateDataMapPost Event was skipped in case of preaggregate datamap

The CreateDataMapPostExecutionEvent was skipped when the datamap being created was a preaggregate datamap.

This closes #2562
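For context, a hedged example of the command path this touches: creating a preaggregate datamap (table, datamap and column names are hypothetical), which with this change also fires CreateDataMapPostExecutionEvent:

```scala
// Hedged sketch only; names are illustrative. With this fix the post-execution event is fired
// for preaggregate datamaps as well, not only for index datamaps.
sql(
  """
    | CREATE DATAMAP agg_sales ON TABLE sales
    | USING 'preaggregate'
    | AS SELECT city, sum(amount) FROM sales GROUP BY city
  """.stripMargin)
```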


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/0e45f3ad
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/0e45f3ad
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/0e45f3ad

Branch: refs/heads/external-format
Commit: 0e45f3ad929a6509e8b905b947614225c18b52f0
Parents: f5d3c17
Author: Jatin <ja...@knoldus.in>
Authored: Thu Jul 26 00:42:50 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Sun Jul 29 19:53:23 2018 +0530

----------------------------------------------------------------------
 .../org/apache/carbondata/events/DataMapEvents.scala  |  2 +-
 .../command/datamap/CarbonCreateDataMapCommand.scala  | 14 +++++++-------
 2 files changed, 8 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/0e45f3ad/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
index 72c980c..e601633 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/events/DataMapEvents.scala
@@ -27,7 +27,7 @@ import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
  * example: bloom datamap, Lucene datamap
  */
 case class CreateDataMapPostExecutionEvent(sparkSession: SparkSession,
-    storePath: String, tableIdentifier: TableIdentifier)
+    storePath: String, tableIdentifier: Option[TableIdentifier], dmProviderName: String)
   extends Event with CreateDataMapEventsInfo
 
 /**

http://git-wip-us.apache.org/repos/asf/carbondata/blob/0e45f3ad/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
index 336793e..c40dcb0 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
@@ -97,6 +97,8 @@ case class CarbonCreateDataMapCommand(
         "For this datamap, main table is required. Use `CREATE DATAMAP ... ON TABLE ...` ")
     }
     dataMapProvider = DataMapManager.get.getDataMapProvider(mainTable, dataMapSchema, sparkSession)
+    val systemFolderLocation: String = CarbonProperties.getInstance().getSystemFolderLocation
+    val operationContext: OperationContext = new OperationContext()
 
     // If it is index datamap, check whether the column has datamap created already
     dataMapProvider match {
@@ -128,8 +130,6 @@ case class CarbonCreateDataMapCommand(
           }
         }
 
-        val operationContext: OperationContext = new OperationContext()
-        val systemFolderLocation: String = CarbonProperties.getInstance().getSystemFolderLocation
         val createDataMapPreExecutionEvent: CreateDataMapPreExecutionEvent =
           new CreateDataMapPreExecutionEvent(sparkSession,
             systemFolderLocation, tableIdentifier.get)
@@ -137,11 +137,6 @@ case class CarbonCreateDataMapCommand(
           operationContext)
         dataMapProvider.initMeta(queryString.orNull)
         DataMapStatusManager.disableDataMap(dataMapName)
-        val createDataMapPostExecutionEvent: CreateDataMapPostExecutionEvent =
-          new CreateDataMapPostExecutionEvent(sparkSession,
-            systemFolderLocation, tableIdentifier.get)
-        OperationListenerBus.getInstance().fireEvent(createDataMapPostExecutionEvent,
-          operationContext)
       case _ =>
         if (deferredRebuild) {
           throw new MalformedDataMapCommandException(
@@ -149,6 +144,11 @@ case class CarbonCreateDataMapCommand(
         }
         dataMapProvider.initMeta(queryString.orNull)
     }
+    val createDataMapPostExecutionEvent: CreateDataMapPostExecutionEvent =
+      new CreateDataMapPostExecutionEvent(sparkSession,
+        systemFolderLocation, tableIdentifier, dmProviderName)
+    OperationListenerBus.getInstance().fireEvent(createDataMapPostExecutionEvent,
+      operationContext)
     val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
     LOGGER.audit(s"DataMap $dataMapName successfully added")
     Seq.empty


[28/50] [abbrv] carbondata git commit: [CARBONDATA-2753][Compatibility] Merge Index file not getting created with blocklet information for old store

Posted by ja...@apache.org.
[CARBONDATA-2753][Compatibility] Merge Index file not getting created with blocklet information for old store

Problem
The merge Index file is not created with blocklet information for an old store.

Analysis
In a legacy store (version <= 1.1), blocklet information is not written into the carbon index files. When a merge Index file is created for such a store using the Alter DDL command, it should be written with blocklet information so that the old store is upgraded to the new store layout. This does not happen because the Alter DDL command flow does not pass the flag to read the carbondata file footer as true.

Fix
Pass the flag to read the carbondata file footer as true while creating the merge Index file through the Alter DDL command (see the sketch below).
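
To make the fix concrete, the corrected invocation in the Alter DDL flow looks like the following. This is a condensed sketch of the call that appears in MergeIndexEventListener in the diff below, not an additional change:

    CommonUtil.mergeIndexFiles(
      sparkContext = sparkSession.sparkContext,
      segmentIds = validSegmentIds,
      segmentFileNameToSegmentIdMap = segmentFileNameMap,
      tablePath = carbonMainTable.getTablePath,
      carbonTable = carbonMainTable,
      mergeIndexProperty = true,
      // legacy store (<= 1.1): index files carry no blocklet entries, so the
      // carbondata file footers must be read while writing the merged index
      readFileFooterFromCarbonDataFile = true)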

This closes #2593


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/625a2efa
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/625a2efa
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/625a2efa

Branch: refs/heads/external-format
Commit: 625a2efa752234f04f227b2800181d309e8b3bb4
Parents: 77642cf
Author: manishgupta88 <to...@gmail.com>
Authored: Wed Aug 1 14:24:52 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Aug 2 10:04:25 2018 +0530

----------------------------------------------------------------------
 .../blockletindex/SegmentIndexFileStore.java      |  2 +-
 .../sql/events/MergeIndexEventListener.scala      | 18 ++++++++++++------
 2 files changed, 13 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/625a2efa/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/SegmentIndexFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/SegmentIndexFileStore.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/SegmentIndexFileStore.java
index 16910ac..c4e7f7a 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/SegmentIndexFileStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/SegmentIndexFileStore.java
@@ -405,7 +405,7 @@ public class SegmentIndexFileStore {
       // get the index header
       org.apache.carbondata.format.IndexHeader indexHeader = indexReader.readIndexHeader();
       DataFileFooterConverter fileFooterConverter = new DataFileFooterConverter();
-      String filePath = indexFile.getCanonicalPath();
+      String filePath = FileFactory.getUpdatedFilePath(indexFile.getCanonicalPath());
       String parentPath =
           filePath.substring(0, filePath.lastIndexOf(CarbonCommonConstants.FILE_SEPARATOR));
       while (indexReader.hasNext()) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/625a2efa/integration/spark2/src/main/scala/org/apache/spark/sql/events/MergeIndexEventListener.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/events/MergeIndexEventListener.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/events/MergeIndexEventListener.scala
index dff3424..5bff9aa 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/events/MergeIndexEventListener.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/events/MergeIndexEventListener.scala
@@ -118,12 +118,18 @@ class MergeIndexEventListener extends OperationEventListener with Logging {
                   .put(loadMetadataDetails.getLoadName,
                     String.valueOf(loadMetadataDetails.getLoadStartTime))
               })
-              CommonUtil.mergeIndexFiles(sparkSession.sparkContext,
-                validSegmentIds,
-                segmentFileNameMap,
-                carbonMainTable.getTablePath,
-                carbonMainTable,
-                true)
+              // in case of merge index file creation using Alter DDL command
+              // readFileFooterFromCarbonDataFile flag should be true. This flag is check for legacy
+              // store (store <= 1.1 version) and create merge Index file as per new store so that
+              // old store is also upgraded to new store
+              CommonUtil.mergeIndexFiles(
+                sparkContext = sparkSession.sparkContext,
+                segmentIds = validSegmentIds,
+                segmentFileNameToSegmentIdMap = segmentFileNameMap,
+                tablePath = carbonMainTable.getTablePath,
+                carbonTable = carbonMainTable,
+                mergeIndexProperty = true,
+                readFileFooterFromCarbonDataFile = true)
               // clear Block dataMap Cache
               clearBlockDataMapCache(carbonMainTable, validSegmentIds)
               val requestMessage = "Compaction request completed for table "


[27/50] [abbrv] carbondata git commit: [CARBONDATA-2478] Added datamap-developer-guide.md file to Readme.md

Posted by ja...@apache.org.
[CARBONDATA-2478] Added datamap-developer-guide.md file to Readme.md

[CARBONDATA-2478] Added datamap-developer-guide.md file to Readme.md

This closes #2305


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/77642cff
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/77642cff
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/77642cff

Branch: refs/heads/external-format
Commit: 77642cfff219687f621a2d2d24fbcbdb7e8d8c52
Parents: 11fb422
Author: vandana <va...@gmail.com>
Authored: Mon May 14 15:46:19 2018 +0530
Committer: chenliang613 <ch...@huawei.com>
Committed: Thu Aug 2 11:10:57 2018 +0800

----------------------------------------------------------------------
 README.md | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/77642cff/README.md
----------------------------------------------------------------------
diff --git a/README.md b/README.md
index d8f7226..d76b080 100644
--- a/README.md
+++ b/README.md
@@ -37,9 +37,9 @@ Spark2.2:
 </a>
 ## Features
 CarbonData file format is a columnar store in HDFS, it has many features that a modern columnar format has, such as splittable, compression schema ,complex data type etc, and CarbonData has following unique features:
-* Stores data along with index: it can significantly accelerate query performance and reduces the I/O scans and CPU resources, where there are filters in the query.  CarbonData index consists of multiple level of indices, a processing framework can leverage this index to reduce the task it needs to schedule and process, and it can also do skip scan in more finer grain unit (called blocklet) in task side scanning instead of scanning the whole file. 
-* Operable encoded data :Through supporting efficient compression and global encoding schemes, can query on compressed/encoded data, the data can be converted just before returning the results to the users, which is "late materialized". 
-* Supports for various use cases with one single Data format : like interactive OLAP-style query, Sequential Access (big scan), Random Access (narrow scan). 
+* Stores data along with index: it can significantly accelerate query performance and reduces the I/O scans and CPU resources, where there are filters in the query.  CarbonData index consists of multiple level of indices, a processing framework can leverage this index to reduce the task it needs to schedule and process, and it can also do skip scan in more finer grain unit (called blocklet) in task side scanning instead of scanning the whole file.
+* Operable encoded data :Through supporting efficient compression and global encoding schemes, can query on compressed/encoded data, the data can be converted just before returning the results to the users, which is "late materialized".
+* Supports for various use cases with one single Data format : like interactive OLAP-style query, Sequential Access (big scan), Random Access (narrow scan).
 
 ## Building CarbonData
 CarbonData is built using Apache Maven, to [build CarbonData](https://github.com/apache/carbondata/blob/master/build)
@@ -53,6 +53,7 @@ CarbonData is built using Apache Maven, to [build CarbonData](https://github.com
 * [Configuring Carbondata](https://github.com/apache/carbondata/blob/master/docs/configuration-parameters.md)
 * [Streaming Ingestion](https://github.com/apache/carbondata/blob/master/docs/streaming-guide.md)
 * [SDK Guide](https://github.com/apache/carbondata/blob/master/docs/sdk-guide.md)
+* [DataMap Developer Guide](https://github.com/apache/carbondata/blob/master/docs/datamap-developer-guide.md)
 * [CarbonData Pre-aggregate DataMap](https://github.com/apache/carbondata/blob/master/docs/datamap/preaggregate-datamap-guide.md)
 * [CarbonData Timeseries DataMap](https://github.com/apache/carbondata/blob/master/docs/datamap/timeseries-datamap-guide.md)
 * [FAQ](https://github.com/apache/carbondata/blob/master/docs/faq.md)


[31/50] [abbrv] carbondata git commit: [HOTFIX][PR 2575] Fixed modular plan creation only if valid datamaps are available

Posted by ja...@apache.org.
[HOTFIX][PR 2575] Fixed modular plan creation only if valid datamaps are available

An update query fails on a spark-2.2 cluster when the MV jars are on the classpath: if an MV datamap has been created for any other table, the summary dataset catalogs are not empty, so isValidPlan() inside MVAnalyzerRule returns true even for plans that no datamap applies to (see the sketch below).
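
In essence, the rule should only attempt modular plan matching when at least one enabled MV datamap is built on a table that actually occurs in the query plan. A simplified sketch of the tightened check, using the method names introduced in the diff below (illustrative only, not the complete implementation):

    def isValidPlan(plan: LogicalPlan, catalog: SummaryDatasetCatalog): Boolean = {
      if (plan.isInstanceOf[Command] || plan.isInstanceOf[DeserializeToObject]) {
        false
      } else {
        // tables referenced by the query
        val tablesInPlan = plan.collect { case l: LogicalRelation => l.catalogTable }
        // only enabled datamaps are considered
        val enabledDataMaps = catalog.listAllValidSchema()
        // candidate only if not already rewritten and some enabled datamap's
        // parent table appears among the plan's relations
        !isDataMapReplaced(enabledDataMaps, tablesInPlan) &&
        isDataMapExists(enabledDataMaps, tablesInPlan)
      }
    }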

This closes #2579


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/b65bf9bc
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/b65bf9bc
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/b65bf9bc

Branch: refs/heads/external-format
Commit: b65bf9bc7104cbcfad1277c99090853d9e7b0386
Parents: f52c133
Author: ravipesala <ra...@gmail.com>
Authored: Mon Jul 30 15:00:00 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Thu Aug 2 16:52:21 2018 +0530

----------------------------------------------------------------------
 .../carbondata/core/datamap/DataMapCatalog.java |  4 +-
 .../carbondata/mv/datamap/MVAnalyzerRule.scala  | 57 ++++++++++++++++----
 .../mv/rewrite/SummaryDatasetCatalog.scala      |  9 +++-
 .../mv/rewrite/MVCreateTestCase.scala           |  4 ++
 4 files changed, 60 insertions(+), 14 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/b65bf9bc/core/src/main/java/org/apache/carbondata/core/datamap/DataMapCatalog.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapCatalog.java b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapCatalog.java
index 89f2838..5dd4871 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapCatalog.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapCatalog.java
@@ -38,10 +38,10 @@ public interface DataMapCatalog<T> {
   void unregisterSchema(String dataMapName);
 
   /**
-   * List all registered schema catalogs
+   * List all registered valid schema catalogs
    * @return
    */
-  T[] listAllSchema();
+  T[] listAllValidSchema();
 
   /**
    * It reloads/removes all registered schema catalogs

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b65bf9bc/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVAnalyzerRule.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVAnalyzerRule.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVAnalyzerRule.scala
index 483780f..9e0f8e5 100644
--- a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVAnalyzerRule.scala
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVAnalyzerRule.scala
@@ -16,8 +16,11 @@
  */
 package org.apache.carbondata.mv.datamap
 
+import scala.collection.JavaConverters._
+
 import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.catalyst.analysis.{UnresolvedAlias, UnresolvedAttribute}
+import org.apache.spark.sql.catalyst.catalog.CatalogTable
 import org.apache.spark.sql.catalyst.expressions.{Alias, ScalaUDF}
 import org.apache.spark.sql.catalyst.plans.logical.{Command, DeserializeToObject, LogicalPlan}
 import org.apache.spark.sql.catalyst.rules.Rule
@@ -79,27 +82,59 @@ class MVAnalyzerRule(sparkSession: SparkSession) extends Rule[LogicalPlan] {
     }
   }
 
+  /**
+   * Whether the plan is valid for doing modular plan matching and datamap replacing.
+   */
   def isValidPlan(plan: LogicalPlan, catalog: SummaryDatasetCatalog): Boolean = {
-    !plan.isInstanceOf[Command] && !isDataMapExists(plan, catalog.listAllSchema()) &&
-    !plan.isInstanceOf[DeserializeToObject]
+    if (!plan.isInstanceOf[Command]  && !plan.isInstanceOf[DeserializeToObject]) {
+      val catalogs = extractCatalogs(plan)
+      !isDataMapReplaced(catalog.listAllValidSchema(), catalogs) &&
+      isDataMapExists(catalog.listAllValidSchema(), catalogs)
+    } else {
+      false
+    }
+
   }
   /**
    * Check whether datamap table already updated in the query.
    *
-   * @param plan
-   * @param mvs
-   * @return
+   * @param mvdataSetArray Array of available mvdataset which include modular plans
+   * @return Boolean whether already datamap replaced in the plan or not
    */
-  def isDataMapExists(plan: LogicalPlan, mvs: Array[SummaryDataset]): Boolean = {
-    val catalogs = plan collect {
-      case l: LogicalRelation => l.catalogTable
-    }
-    catalogs.isEmpty || catalogs.exists { c =>
-      mvs.exists { mv =>
+  def isDataMapReplaced(
+      mvdataSetArray: Array[SummaryDataset],
+      catalogs: Seq[Option[CatalogTable]]): Boolean = {
+    catalogs.exists { c =>
+      mvdataSetArray.exists { mv =>
         val identifier = mv.dataMapSchema.getRelationIdentifier
         identifier.getTableName.equals(c.get.identifier.table) &&
         identifier.getDatabaseName.equals(c.get.database)
       }
     }
   }
+
+  /**
+   * Check whether any suitable datamaps(like datamap which parent tables are present in the plan)
+   * exists for this plan.
+   *
+   * @param mvs
+   * @return
+   */
+  def isDataMapExists(mvs: Array[SummaryDataset], catalogs: Seq[Option[CatalogTable]]): Boolean = {
+    catalogs.exists { c =>
+      mvs.exists { mv =>
+        mv.dataMapSchema.getParentTables.asScala.exists { identifier =>
+          identifier.getTableName.equals(c.get.identifier.table) &&
+          identifier.getDatabaseName.equals(c.get.database)
+        }
+      }
+    }
+  }
+
+  private def extractCatalogs(plan: LogicalPlan): Seq[Option[CatalogTable]] = {
+    val catalogs = plan collect {
+      case l: LogicalRelation => l.catalogTable
+    }
+    catalogs
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b65bf9bc/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/SummaryDatasetCatalog.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/SummaryDatasetCatalog.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/SummaryDatasetCatalog.scala
index 210ff65..026d6b7 100644
--- a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/SummaryDatasetCatalog.scala
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/rewrite/SummaryDatasetCatalog.scala
@@ -152,7 +152,14 @@ private[mv] class SummaryDatasetCatalog(sparkSession: SparkSession)
   }
 
 
-  override def listAllSchema(): Array[SummaryDataset] = summaryDatasets.toArray
+  override def listAllValidSchema(): Array[SummaryDataset] = {
+    val statusDetails = DataMapStatusManager.getEnabledDataMapStatusDetails
+    // Only select the enabled datamaps for the query.
+    val enabledDataSets = summaryDatasets.filter { p =>
+      statusDetails.exists(_.getDataMapName.equalsIgnoreCase(p.dataMapSchema.getDataMapName))
+    }
+    enabledDataSets.toArray
+  }
 
   /**
    * API for test only

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b65bf9bc/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
index 0b96202..9f834a9 100644
--- a/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
+++ b/datamap/mv/core/src/test/scala/org/apache/carbondata/mv/rewrite/MVCreateTestCase.scala
@@ -889,7 +889,10 @@ class MVCreateTestCase extends QueryTest with BeforeAndAfterAll {
   test("basic scenario") {
 
     sql("drop table if exists mvtable1")
+    sql("drop table if exists mvtable2")
     sql("create table mvtable1(name string,age int,salary int) stored by 'carbondata'")
+    sql("create table mvtable2(name string,age int,salary int) stored by 'carbondata'")
+    sql("create datamap MV11 using 'mv' as select name from mvtable2")
     sql(" insert into mvtable1 select 'n1',12,12")
     sql("  insert into mvtable1 select 'n1',12,12")
     sql(" insert into mvtable1 select 'n3',12,12")
@@ -897,6 +900,7 @@ class MVCreateTestCase extends QueryTest with BeforeAndAfterAll {
     sql("update mvtable1 set(name) = ('updatedName')").show()
     checkAnswer(sql("select count(*) from mvtable1 where name = 'updatedName'"),Seq(Row(4)))
     sql("drop table if exists mvtable1")
+    sql("drop table if exists mvtable2")
   }
 
   def verifyMVDataMap(logicalPlan: LogicalPlan, dataMapName: String): Boolean = {


[46/50] [abbrv] carbondata git commit: [CARBONDATA-2836]Fixed data loading performance issue

Posted by ja...@apache.org.
[CARBONDATA-2836]Fixed data loading performance issue

Problem: Data loading takes more time when the number of records is high (3.5 billion records).

Root Cause: During the final merge, the sort temp row conversion is done in the main thread, which makes the final step slower.

Solution: Move the conversion logic to the pre-fetch thread so it runs in parallel. This is done only for the final merge; the intermediate merge does not need to convert the no-sort columns (see the sketch below).
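
Conceptually, the change gives the sort temp file reader a flag that decides whether the packed no-sort dimensions and measures are unpacked at read time. The following is an illustrative Scala sketch of that decision only; the actual implementation is the Java code in the diff below, and the names (sortStepRowHandler, readWithNoSortFieldConvert, readWithoutNoSortFieldConvert) are taken from that diff:

    // convertNoSortFields is true only for the final merge, where rows must
    // reach the writer fully converted; the conversion then happens in the
    // pre-fetch thread. The intermediate merge keeps the no-sort dimensions
    // and measures packed as raw bytes.
    def readRow(stream: DataInputStream, convertNoSortFields: Boolean): IntermediateSortTempRow =
      if (convertNoSortFields) {
        sortStepRowHandler.readWithNoSortFieldConvert(stream)     // final merge
      } else {
        sortStepRowHandler.readWithoutNoSortFieldConvert(stream)  // intermediate merge
      }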

This closes #2611


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/f27efb3e
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/f27efb3e
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/f27efb3e

Branch: refs/heads/external-format
Commit: f27efb3e3757619ebdd822da8b6ab9737602de7b
Parents: b9e5106
Author: kumarvishal09 <ku...@gmail.com>
Authored: Mon Aug 6 19:00:27 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Tue Aug 7 17:02:07 2018 +0530

----------------------------------------------------------------------
 .../loading/row/IntermediateSortTempRow.java    | 109 ++---------
 .../loading/sort/SortStepRowHandler.java        | 194 ++++++++++++++++---
 .../sort/unsafe/UnsafeCarbonRowPage.java        |  14 +-
 .../holder/UnsafeFinalMergePageHolder.java      |   3 +
 .../unsafe/holder/UnsafeInmemoryHolder.java     |   1 +
 .../holder/UnsafeSortTempFileChunkHolder.java   |  19 +-
 .../merger/UnsafeIntermediateFileMerger.java    |   3 +-
 .../UnsafeSingleThreadFinalSortFilesMerger.java |   5 +-
 .../sort/sortdata/IntermediateFileMerger.java   |   3 +-
 .../SingleThreadFinalSortFilesMerger.java       |   2 +-
 .../sort/sortdata/SortTempFileChunkHolder.java  |  19 +-
 11 files changed, 234 insertions(+), 138 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/f27efb3e/processing/src/main/java/org/apache/carbondata/processing/loading/row/IntermediateSortTempRow.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/row/IntermediateSortTempRow.java b/processing/src/main/java/org/apache/carbondata/processing/loading/row/IntermediateSortTempRow.java
index 47b419e..1ad7879 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/row/IntermediateSortTempRow.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/row/IntermediateSortTempRow.java
@@ -16,12 +16,6 @@
  */
 package org.apache.carbondata.processing.loading.row;
 
-import java.nio.ByteBuffer;
-
-import org.apache.carbondata.core.metadata.datatype.DataType;
-import org.apache.carbondata.core.metadata.datatype.DataTypes;
-import org.apache.carbondata.core.util.DataTypeUtil;
-
 /**
  * During sort procedure, each row will be written to sort temp file in this logic format.
  * an intermediate sort temp row consists 3 parts:
@@ -30,7 +24,16 @@ import org.apache.carbondata.core.util.DataTypeUtil;
 public class IntermediateSortTempRow {
   private int[] dictSortDims;
   private byte[][] noDictSortDims;
+  /**
+   * this will be used for intermediate merger when
+   * no sort field and measure field will not be
+   * used for sorting
+   */
   private byte[] noSortDimsAndMeasures;
+  /**
+   * for final merger keep the measures
+   */
+  private Object[] measures;
 
   public IntermediateSortTempRow(int[] dictSortDims, byte[][] noDictSortDims,
       byte[] noSortDimsAndMeasures) {
@@ -39,10 +42,21 @@ public class IntermediateSortTempRow {
     this.noSortDimsAndMeasures = noSortDimsAndMeasures;
   }
 
+  public IntermediateSortTempRow(int[] dictSortDims, byte[][] noDictSortDims,
+      Object[] measures) {
+    this.dictSortDims = dictSortDims;
+    this.noDictSortDims = noDictSortDims;
+    this.measures = measures;
+  }
+
   public int[] getDictSortDims() {
     return dictSortDims;
   }
 
+  public Object[] getMeasures() {
+    return measures;
+  }
+
   public byte[][] getNoDictSortDims() {
     return noDictSortDims;
   }
@@ -50,87 +64,4 @@ public class IntermediateSortTempRow {
   public byte[] getNoSortDimsAndMeasures() {
     return noSortDimsAndMeasures;
   }
-
-  /**
-   * deserialize from bytes array to get the no sort fields
-   * @param outDictNoSort stores the dict & no-sort fields
-   * @param outNoDictNoSort stores all no-dict & no-sort fields,
-   *                        including complex and varchar fields
-   * @param outMeasures stores the measure fields
-   * @param dataTypes data type for the measure
-   * @param varcharDimCnt number of varchar column
-   * @param complexDimCnt number of complex column
-   */
-  public void unpackNoSortFromBytes(int[] outDictNoSort, byte[][] outNoDictNoSort,
-      Object[] outMeasures, DataType[] dataTypes, int varcharDimCnt, int complexDimCnt) {
-    ByteBuffer rowBuffer = ByteBuffer.wrap(noSortDimsAndMeasures);
-    // read dict_no_sort
-    int dictNoSortCnt = outDictNoSort.length;
-    for (int i = 0; i < dictNoSortCnt; i++) {
-      outDictNoSort[i] = rowBuffer.getInt();
-    }
-
-    // read no_dict_no_sort
-    int noDictNoSortCnt = outNoDictNoSort.length - varcharDimCnt - complexDimCnt;
-    for (int i = 0; i < noDictNoSortCnt; i++) {
-      short len = rowBuffer.getShort();
-      byte[] bytes = new byte[len];
-      rowBuffer.get(bytes);
-      outNoDictNoSort[i] = bytes;
-    }
-
-    // read varchar dims
-    for (int i = 0; i < varcharDimCnt; i++) {
-      int len = rowBuffer.getInt();
-      byte[] bytes = new byte[len];
-      rowBuffer.get(bytes);
-      outNoDictNoSort[i + noDictNoSortCnt] = bytes;
-    }
-
-    // read complex dims
-    for (int i = 0; i < complexDimCnt; i++) {
-      short len = rowBuffer.getShort();
-      byte[] bytes = new byte[len];
-      rowBuffer.get(bytes);
-      outNoDictNoSort[i + noDictNoSortCnt + varcharDimCnt] = bytes;
-    }
-
-    // read measure
-    int measureCnt = outMeasures.length;
-    DataType tmpDataType;
-    Object tmpContent;
-    for (short idx = 0 ; idx < measureCnt; idx++) {
-      if ((byte) 0 == rowBuffer.get()) {
-        outMeasures[idx] = null;
-        continue;
-      }
-
-      tmpDataType = dataTypes[idx];
-      if (DataTypes.BOOLEAN == tmpDataType) {
-        if ((byte) 1 == rowBuffer.get()) {
-          tmpContent = true;
-        } else {
-          tmpContent = false;
-        }
-      } else if (DataTypes.SHORT == tmpDataType) {
-        tmpContent = rowBuffer.getShort();
-      } else if (DataTypes.INT == tmpDataType) {
-        tmpContent = rowBuffer.getInt();
-      } else if (DataTypes.LONG == tmpDataType) {
-        tmpContent = rowBuffer.getLong();
-      } else if (DataTypes.DOUBLE == tmpDataType) {
-        tmpContent = rowBuffer.getDouble();
-      } else if (DataTypes.isDecimal(tmpDataType)) {
-        short len = rowBuffer.getShort();
-        byte[] decimalBytes = new byte[len];
-        rowBuffer.get(decimalBytes);
-        tmpContent = DataTypeUtil.byteToBigDecimal(decimalBytes);
-      } else {
-        throw new IllegalArgumentException("Unsupported data type: " + tmpDataType);
-      }
-      outMeasures[idx] = tmpContent;
-    }
-  }
-
-
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f27efb3e/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java
index 0118e4d..f6fc3ca 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java
@@ -160,49 +160,62 @@ public class SortStepRowHandler implements Serializable {
    * @return 3-parted row
    */
   public Object[] convertIntermediateSortTempRowTo3Parted(IntermediateSortTempRow sortTempRow) {
-    int[] dictDims = new int[this.dictSortDimCnt + this.dictNoSortDimCnt];
-    byte[][] noDictArray = new byte[this.noDictSortDimCnt + this.noDictNoSortDimCnt
-                                    + this.varcharDimCnt + this.complexDimCnt][];
-
-    int[] dictNoSortDims = new int[this.dictNoSortDimCnt];
-    byte[][] noDictNoSortAndVarcharComplexDims
-        = new byte[this.noDictNoSortDimCnt + this.varcharDimCnt + this.complexDimCnt][];
-    Object[] measures = new Object[this.measureCnt];
-
-    sortTempRow.unpackNoSortFromBytes(dictNoSortDims, noDictNoSortAndVarcharComplexDims, measures,
-        this.dataTypes, this.varcharDimCnt, this.complexDimCnt);
+    Object[] out = new Object[3];
+    NonDictionaryUtil
+        .prepareOutObj(out, sortTempRow.getDictSortDims(), sortTempRow.getNoDictSortDims(),
+            sortTempRow.getMeasures());
+    return out;
+  }
 
-    // dict dims
-    System.arraycopy(sortTempRow.getDictSortDims(), 0 , dictDims,
-        0, this.dictSortDimCnt);
-    System.arraycopy(dictNoSortDims, 0, dictDims,
-        this.dictSortDimCnt, this.dictNoSortDimCnt);;
+  /**
+   * Read intermediate sort temp row from InputStream.
+   * This method is used during the intermediate merge sort phase to read row from sort temp file.
+   *
+   * @param inputStream input stream
+   * @return a row that contains three parts
+   * @throws IOException if error occrus while reading from stream
+   */
+  public IntermediateSortTempRow readWithoutNoSortFieldConvert(
+      DataInputStream inputStream) throws IOException {
+    int[] dictSortDims = new int[this.dictSortDimCnt];
+    byte[][] noDictSortDims = new byte[this.noDictSortDimCnt][];
 
-    // no dict dims, including complex
-    System.arraycopy(sortTempRow.getNoDictSortDims(), 0,
-        noDictArray, 0, this.noDictSortDimCnt);
-    System.arraycopy(noDictNoSortAndVarcharComplexDims, 0, noDictArray,
-        this.noDictSortDimCnt, this.noDictNoSortDimCnt + this.varcharDimCnt + this.complexDimCnt);
+    // read dict & sort dim data
+    for (int idx = 0; idx < this.dictSortDimCnt; idx++) {
+      dictSortDims[idx] = inputStream.readInt();
+    }
 
-    // measures are already here
+    // read no-dict & sort data
+    for (int idx = 0; idx < this.noDictSortDimCnt; idx++) {
+      short len = inputStream.readShort();
+      byte[] bytes = new byte[len];
+      inputStream.readFully(bytes);
+      noDictSortDims[idx] = bytes;
+    }
 
-    Object[] holder = new Object[3];
-    NonDictionaryUtil.prepareOutObj(holder, dictDims, noDictArray, measures);
-    return holder;
+    // read no-dict dims & measures
+    int len = inputStream.readInt();
+    byte[] noSortDimsAndMeasures = new byte[len];
+    inputStream.readFully(noSortDimsAndMeasures);
+    // keeping no sort fields and measure in pack byte array as it will not participate in sort
+    return new IntermediateSortTempRow(dictSortDims, noDictSortDims, noSortDimsAndMeasures);
   }
 
   /**
    * Read intermediate sort temp row from InputStream.
-   * This method is used during the merge sort phase to read row from sort temp file.
+   * This method is used during the final merge sort phase to read row from sort temp file and
+   * merged sort temp file.
    *
    * @param inputStream input stream
    * @return a row that contains three parts
    * @throws IOException if error occrus while reading from stream
    */
-  public IntermediateSortTempRow readIntermediateSortTempRowFromInputStream(
+  public IntermediateSortTempRow readWithNoSortFieldConvert(
       DataInputStream inputStream) throws IOException {
-    int[] dictSortDims = new int[this.dictSortDimCnt];
-    byte[][] noDictSortDims = new byte[this.noDictSortDimCnt][];
+    int[] dictSortDims = new int[this.dictSortDimCnt + this.dictNoSortDimCnt];
+    byte[][] noDictSortDims =
+        new byte[this.noDictSortDimCnt + this.noDictNoSortDimCnt + this.varcharDimCnt
+            + this.complexDimCnt][];
 
     // read dict & sort dim data
     for (int idx = 0; idx < this.dictSortDimCnt; idx++) {
@@ -221,8 +234,80 @@ public class SortStepRowHandler implements Serializable {
     int len = inputStream.readInt();
     byte[] noSortDimsAndMeasures = new byte[len];
     inputStream.readFully(noSortDimsAndMeasures);
+    Object[] measure = new Object[this.measureCnt];
+    // unpack the no sort fields and measure fields
+    unpackNoSortFromBytes(noSortDimsAndMeasures, dictSortDims, noDictSortDims, measure);
+    return new IntermediateSortTempRow(dictSortDims, noDictSortDims,measure);
+  }
 
-    return new IntermediateSortTempRow(dictSortDims, noDictSortDims, noSortDimsAndMeasures);
+  private void unpackNoSortFromBytes(byte[] noSortDimsAndMeasures, int[] dictDims,
+      byte[][] noDictDims, Object[] measures) {
+    ByteBuffer rowBuffer = ByteBuffer.wrap(noSortDimsAndMeasures);
+    // read dict_no_sort
+    for (int i = dictSortDimCnt; i < dictDims.length; i++) {
+      dictDims[i] = rowBuffer.getInt();
+    }
+
+    int noDictIndex = noDictSortDimCnt;
+    // read no_dict_no_sort
+    for (int i = 0; i < noDictNoSortDimCnt; i++) {
+      short len = rowBuffer.getShort();
+      byte[] bytes = new byte[len];
+      rowBuffer.get(bytes);
+      noDictDims[noDictIndex++] = bytes;
+    }
+
+    // read varchar dims
+    for (int i = 0; i < varcharDimCnt; i++) {
+      int len = rowBuffer.getInt();
+      byte[] bytes = new byte[len];
+      rowBuffer.get(bytes);
+      noDictDims[noDictIndex++] = bytes;
+    }
+
+    // read complex dims
+    for (int i = 0; i < complexDimCnt; i++) {
+      short len = rowBuffer.getShort();
+      byte[] bytes = new byte[len];
+      rowBuffer.get(bytes);
+      noDictDims[noDictIndex++] = bytes;
+    }
+
+    // read measure
+    int measureCnt = measures.length;
+    DataType tmpDataType;
+    Object tmpContent;
+    for (short idx = 0 ; idx < measureCnt; idx++) {
+      if ((byte) 0 == rowBuffer.get()) {
+        measures[idx] = null;
+        continue;
+      }
+
+      tmpDataType = dataTypes[idx];
+      if (DataTypes.BOOLEAN == tmpDataType) {
+        if ((byte) 1 == rowBuffer.get()) {
+          tmpContent = true;
+        } else {
+          tmpContent = false;
+        }
+      } else if (DataTypes.SHORT == tmpDataType) {
+        tmpContent = rowBuffer.getShort();
+      } else if (DataTypes.INT == tmpDataType) {
+        tmpContent = rowBuffer.getInt();
+      } else if (DataTypes.LONG == tmpDataType) {
+        tmpContent = rowBuffer.getLong();
+      } else if (DataTypes.DOUBLE == tmpDataType) {
+        tmpContent = rowBuffer.getDouble();
+      } else if (DataTypes.isDecimal(tmpDataType)) {
+        short len = rowBuffer.getShort();
+        byte[] decimalBytes = new byte[len];
+        rowBuffer.get(decimalBytes);
+        tmpContent = DataTypeUtil.byteToBigDecimal(decimalBytes);
+      } else {
+        throw new IllegalArgumentException("Unsupported data type: " + tmpDataType);
+      }
+      measures[idx] = tmpContent;
+    }
   }
 
   /**
@@ -298,7 +383,7 @@ public class SortStepRowHandler implements Serializable {
    * @param address address of the row
    * @return intermediate sort temp row
    */
-  public IntermediateSortTempRow readIntermediateSortTempRowFromUnsafeMemory(Object baseObject,
+  public IntermediateSortTempRow readFromMemoryWithoutNoSortFieldConvert(Object baseObject,
       long address) {
     int size = 0;
 
@@ -333,6 +418,51 @@ public class SortStepRowHandler implements Serializable {
   }
 
   /**
+   * Read intermediate sort temp row from unsafe memory.
+   * This method is used during merge sort phase for off-heap sort.
+   *
+   * @param baseObject base object of memory block
+   * @param address address of the row
+   * @return intermediate sort temp row
+   */
+  public IntermediateSortTempRow readRowFromMemoryWithNoSortFieldConvert(Object baseObject,
+      long address) {
+    int size = 0;
+
+    int[] dictSortDims = new int[this.dictSortDimCnt + this.dictNoSortDimCnt];
+    byte[][] noDictSortDims =
+        new byte[this.noDictSortDimCnt + this.noDictNoSortDimCnt + this.varcharDimCnt
+            + this.complexDimCnt][];
+
+    // read dict & sort dim
+    for (int idx = 0; idx < dictSortDimCnt; idx++) {
+      dictSortDims[idx] = CarbonUnsafe.getUnsafe().getInt(baseObject, address + size);
+      size += 4;
+    }
+
+    // read no-dict & sort dim
+    for (int idx = 0; idx < this.noDictSortDimCnt; idx++) {
+      short length = CarbonUnsafe.getUnsafe().getShort(baseObject, address + size);
+      size += 2;
+      byte[] bytes = new byte[length];
+      CarbonUnsafe.getUnsafe().copyMemory(baseObject, address + size,
+          bytes, CarbonUnsafe.BYTE_ARRAY_OFFSET, length);
+      size += length;
+      noDictSortDims[idx] = bytes;
+    }
+
+    // read no-sort dims & measures
+    int len = CarbonUnsafe.getUnsafe().getInt(baseObject, address + size);
+    size += 4;
+    byte[] noSortDimsAndMeasures = new byte[len];
+    CarbonUnsafe.getUnsafe().copyMemory(baseObject, address + size,
+        noSortDimsAndMeasures, CarbonUnsafe.BYTE_ARRAY_OFFSET, len);
+    Object[] measures = new Object[measureCnt];
+    unpackNoSortFromBytes(noSortDimsAndMeasures, dictSortDims, noDictSortDims, measures);
+    return new IntermediateSortTempRow(dictSortDims, noDictSortDims, measures);
+  }
+
+  /**
    * Write intermediate sort temp row directly from unsafe memory to stream.
    * This method is used at the late beginning of the sort phase to write in-memory pages
    * to sort temp file. Comparing with reading intermediate sort temp row from memory and then
@@ -429,6 +559,8 @@ public class SortStepRowHandler implements Serializable {
     return size;
   }
 
+
+
   /**
    * Pack to no-sort fields to byte array
    *

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f27efb3e/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/UnsafeCarbonRowPage.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/UnsafeCarbonRowPage.java b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/UnsafeCarbonRowPage.java
index 7ea5cb3..45cfa13 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/UnsafeCarbonRowPage.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/UnsafeCarbonRowPage.java
@@ -49,6 +49,7 @@ public class UnsafeCarbonRowPage {
 
   private TableFieldStat tableFieldStat;
   private SortStepRowHandler sortStepRowHandler;
+  private boolean convertNoSortFields;
 
   public UnsafeCarbonRowPage(TableFieldStat tableFieldStat, MemoryBlock memoryBlock,
       boolean saveToDisk, long taskId) {
@@ -88,8 +89,13 @@ public class UnsafeCarbonRowPage {
    * @return one row
    */
   public IntermediateSortTempRow getRow(long address) {
-    return sortStepRowHandler.readIntermediateSortTempRowFromUnsafeMemory(
-        dataBlock.getBaseObject(), address);
+    if (convertNoSortFields) {
+      return sortStepRowHandler
+          .readRowFromMemoryWithNoSortFieldConvert(dataBlock.getBaseObject(), address);
+    } else {
+      return sortStepRowHandler
+          .readFromMemoryWithoutNoSortFieldConvert(dataBlock.getBaseObject(), address);
+    }
   }
 
   /**
@@ -146,4 +152,8 @@ public class UnsafeCarbonRowPage {
   public enum MemoryManagerType {
     UNSAFE_MEMORY_MANAGER, UNSAFE_SORT_MEMORY_MANAGER
   }
+
+  public void setReadConvertedNoSortField() {
+    this.convertNoSortFields = true;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f27efb3e/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeFinalMergePageHolder.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeFinalMergePageHolder.java b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeFinalMergePageHolder.java
index c966ff2..102b057 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeFinalMergePageHolder.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeFinalMergePageHolder.java
@@ -49,6 +49,9 @@ public class UnsafeFinalMergePageHolder implements SortTempChunkHolder {
     this.mergedAddresses = merger.getMergedAddresses();
     this.rowPageIndexes = merger.getRowPageIndexes();
     this.rowPages = merger.getUnsafeCarbonRowPages();
+    for (UnsafeCarbonRowPage rowPage: rowPages) {
+      rowPage.setReadConvertedNoSortField();
+    }
     LOGGER.info("Processing unsafe inmemory rows page with size : " + actualSize);
     this.comparator = new IntermediateSortTempRowComparator(noDictSortColumnMapping);
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f27efb3e/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryHolder.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryHolder.java b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryHolder.java
index 8b4b550..02ffd68 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryHolder.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeInmemoryHolder.java
@@ -46,6 +46,7 @@ public class UnsafeInmemoryHolder implements SortTempChunkHolder {
     LOGGER.info("Processing unsafe inmemory rows page with size : " + actualSize);
     this.comparator = new IntermediateSortTempRowComparator(
         rowPage.getTableFieldStat().getIsSortColNoDictFlags());
+    this.rowPage.setReadConvertedNoSortField();
   }
 
   public boolean hasNext() {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f27efb3e/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
index 86b7ac8..7c3c056 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/holder/UnsafeSortTempFileChunkHolder.java
@@ -96,10 +96,12 @@ public class UnsafeSortTempFileChunkHolder implements SortTempChunkHolder {
   private TableFieldStat tableFieldStat;
   private SortStepRowHandler sortStepRowHandler;
   private Comparator<IntermediateSortTempRow> comparator;
+  private boolean convertNoSortFields;
   /**
    * Constructor to initialize
    */
-  public UnsafeSortTempFileChunkHolder(File tempFile, SortParameters parameters) {
+  public UnsafeSortTempFileChunkHolder(File tempFile, SortParameters parameters,
+      boolean convertNoSortFields) {
     // set temp file
     this.tempFile = tempFile;
     this.readBufferSize = parameters.getBufferSize();
@@ -108,6 +110,7 @@ public class UnsafeSortTempFileChunkHolder implements SortTempChunkHolder {
     this.sortStepRowHandler = new SortStepRowHandler(tableFieldStat);
     this.executorService = Executors.newFixedThreadPool(1);
     comparator = new IntermediateSortTempRowComparator(parameters.getNoDictionarySortColumn());
+    this.convertNoSortFields = convertNoSortFields;
     initialize();
   }
 
@@ -162,7 +165,11 @@ public class UnsafeSortTempFileChunkHolder implements SortTempChunkHolder {
       fillDataForPrefetch();
     } else {
       try {
-        this.returnRow = sortStepRowHandler.readIntermediateSortTempRowFromInputStream(stream);
+        if (convertNoSortFields) {
+          this.returnRow = sortStepRowHandler.readWithNoSortFieldConvert(stream);
+        } else {
+          this.returnRow = sortStepRowHandler.readWithoutNoSortFieldConvert(stream);
+        }
         this.numberOfObjectRead++;
       } catch (IOException e) {
         throw new CarbonSortKeyAndGroupByException("Problems while reading row", e);
@@ -210,9 +217,11 @@ public class UnsafeSortTempFileChunkHolder implements SortTempChunkHolder {
       throws IOException {
     IntermediateSortTempRow[] holders = new IntermediateSortTempRow[expected];
     for (int i = 0; i < expected; i++) {
-      IntermediateSortTempRow holder
-          = sortStepRowHandler.readIntermediateSortTempRowFromInputStream(stream);
-      holders[i] = holder;
+      if (convertNoSortFields) {
+        holders[i] = sortStepRowHandler.readWithNoSortFieldConvert(stream);
+      } else {
+        holders[i] = sortStepRowHandler.readWithoutNoSortFieldConvert(stream);
+      }
     }
     this.numberOfObjectRead += expected;
     return holders;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f27efb3e/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/merger/UnsafeIntermediateFileMerger.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/merger/UnsafeIntermediateFileMerger.java b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/merger/UnsafeIntermediateFileMerger.java
index c5b215e..0a12eda 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/merger/UnsafeIntermediateFileMerger.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/merger/UnsafeIntermediateFileMerger.java
@@ -209,7 +209,8 @@ public class UnsafeIntermediateFileMerger implements Callable<Void> {
 
     for (File tempFile : intermediateFiles) {
       // create chunk holder
-      sortTempFileChunkHolder = new UnsafeSortTempFileChunkHolder(tempFile, mergerParameters);
+      sortTempFileChunkHolder =
+          new UnsafeSortTempFileChunkHolder(tempFile, mergerParameters, false);
 
       sortTempFileChunkHolder.readRow();
       this.totalNumberOfRecords += sortTempFileChunkHolder.numberOfRows();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f27efb3e/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/merger/UnsafeSingleThreadFinalSortFilesMerger.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/merger/UnsafeSingleThreadFinalSortFilesMerger.java b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/merger/UnsafeSingleThreadFinalSortFilesMerger.java
index 6defeb7..2dd2f31 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/merger/UnsafeSingleThreadFinalSortFilesMerger.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/unsafe/merger/UnsafeSingleThreadFinalSortFilesMerger.java
@@ -143,7 +143,7 @@ public class UnsafeSingleThreadFinalSortFilesMerger extends CarbonIterator<Objec
       for (final File file : filesToMergeSort) {
 
         SortTempChunkHolder sortTempFileChunkHolder =
-            new UnsafeSortTempFileChunkHolder(file, parameters);
+            new UnsafeSortTempFileChunkHolder(file, parameters, true);
 
         // initialize
         sortTempFileChunkHolder.readRow();
@@ -197,8 +197,7 @@ public class UnsafeSingleThreadFinalSortFilesMerger extends CarbonIterator<Objec
    */
   public Object[] next() {
     if (hasNext()) {
-      IntermediateSortTempRow sortTempRow = getSortedRecordFromFile();
-      return sortStepRowHandler.convertIntermediateSortTempRowTo3Parted(sortTempRow);
+      return sortStepRowHandler.convertIntermediateSortTempRowTo3Parted(getSortedRecordFromFile());
     } else {
       throw new NoSuchElementException("No more elements to return");
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f27efb3e/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/IntermediateFileMerger.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/IntermediateFileMerger.java b/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/IntermediateFileMerger.java
index 364515c..35563d0 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/IntermediateFileMerger.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/IntermediateFileMerger.java
@@ -206,7 +206,8 @@ public class IntermediateFileMerger implements Callable<Void> {
     for (File tempFile : intermediateFiles) {
       // create chunk holder
       sortTempFileChunkHolder =
-          new SortTempFileChunkHolder(tempFile, mergerParameters, mergerParameters.getTableName());
+          new SortTempFileChunkHolder(tempFile, mergerParameters, mergerParameters.getTableName(),
+              false);
 
       // initialize
       sortTempFileChunkHolder.initialize();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f27efb3e/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SingleThreadFinalSortFilesMerger.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SingleThreadFinalSortFilesMerger.java b/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SingleThreadFinalSortFilesMerger.java
index 09c1920..646969a 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SingleThreadFinalSortFilesMerger.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SingleThreadFinalSortFilesMerger.java
@@ -176,7 +176,7 @@ public class SingleThreadFinalSortFilesMerger extends CarbonIterator<Object[]> {
         @Override public Void call() throws CarbonSortKeyAndGroupByException {
             // create chunk holder
             SortTempFileChunkHolder sortTempFileChunkHolder =
-                new SortTempFileChunkHolder(tempFile, sortParameters, tableName);
+                new SortTempFileChunkHolder(tempFile, sortParameters, tableName, true);
           try {
             // initialize
             sortTempFileChunkHolder.initialize();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/f27efb3e/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SortTempFileChunkHolder.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SortTempFileChunkHolder.java b/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SortTempFileChunkHolder.java
index 7e221a7..e39fe1d 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SortTempFileChunkHolder.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/sort/sortdata/SortTempFileChunkHolder.java
@@ -98,6 +98,7 @@ public class SortTempFileChunkHolder implements Comparable<SortTempFileChunkHold
   private TableFieldStat tableFieldStat;
   private SortStepRowHandler sortStepRowHandler;
   private Comparator<IntermediateSortTempRow> comparator;
+  private boolean convertToActualField;
   /**
    * Constructor to initialize
    *
@@ -105,7 +106,8 @@ public class SortTempFileChunkHolder implements Comparable<SortTempFileChunkHold
    * @param sortParameters
    * @param tableName
    */
-  public SortTempFileChunkHolder(File tempFile, SortParameters sortParameters, String tableName) {
+  public SortTempFileChunkHolder(File tempFile, SortParameters sortParameters, String tableName,
+      boolean convertToActualField) {
     // set temp file
     this.tempFile = tempFile;
     this.readBufferSize = sortParameters.getBufferSize();
@@ -116,6 +118,7 @@ public class SortTempFileChunkHolder implements Comparable<SortTempFileChunkHold
         tableFieldStat.getIsSortColNoDictFlags());
     this.executorService = Executors
         .newFixedThreadPool(1, new CarbonThreadFactory("SafeSortTempChunkHolderPool:" + tableName));
+    this.convertToActualField = convertToActualField;
   }
 
   /**
@@ -168,7 +171,11 @@ public class SortTempFileChunkHolder implements Comparable<SortTempFileChunkHold
       fillDataForPrefetch();
     } else {
       try {
-        this.returnRow = sortStepRowHandler.readIntermediateSortTempRowFromInputStream(stream);
+        if (convertToActualField) {
+          this.returnRow = sortStepRowHandler.readWithNoSortFieldConvert(stream);
+        } else {
+          this.returnRow = sortStepRowHandler.readWithoutNoSortFieldConvert(stream);
+        }
         this.numberOfObjectRead++;
       } catch (IOException e) {
         throw new CarbonSortKeyAndGroupByException("Problem while reading rows", e);
@@ -214,9 +221,11 @@ public class SortTempFileChunkHolder implements Comparable<SortTempFileChunkHold
   private IntermediateSortTempRow[] readBatchedRowFromStream(int expected) throws IOException {
     IntermediateSortTempRow[] holders = new IntermediateSortTempRow[expected];
     for (int i = 0; i < expected; i++) {
-      IntermediateSortTempRow holder
-          = sortStepRowHandler.readIntermediateSortTempRowFromInputStream(stream);
-      holders[i] = holder;
+      if (convertToActualField) {
+        holders[i] = sortStepRowHandler.readWithNoSortFieldConvert(stream);
+      } else {
+        holders[i] = sortStepRowHandler.readWithoutNoSortFieldConvert(stream);
+      }
     }
     this.numberOfObjectRead += expected;
     return holders;


[04/50] [abbrv] carbondata git commit: [HOTFIX] Fixed random test failure

Posted by ja...@apache.org.
[HOTFIX] Fixed random test failure

Fixed random test failure

This closes #2553


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/f5d3c17b
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/f5d3c17b
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/f5d3c17b

Branch: refs/heads/external-format
Commit: f5d3c17b7f969244b4a161ce7ec4ac8451943eb8
Parents: 4d95dfc
Author: mohammadshahidkhan <mo...@gmail.com>
Authored: Wed Jul 25 12:14:38 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Sun Jul 29 18:44:46 2018 +0530

----------------------------------------------------------------------
 .../lucene/LuceneFineGrainDataMapSuite.scala    | 43 ++++++++++----------
 1 file changed, 22 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/f5d3c17b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
index 08e44f7..a380f04 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
@@ -587,24 +587,28 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
 
     sql("EXPLAIN SELECT * FROM main WHERE TEXT_MATCH('name:bob')").show(false)
     val rows = sql("EXPLAIN SELECT * FROM main WHERE TEXT_MATCH('name:bob')").collect()
-
-    assertResult(
-      """== CarbonData Profiler ==
-        |Table Scan on main
-        | - total blocklets: 1
-        | - filter: TEXT_MATCH('name:bob')
-        | - pruned by Main DataMap
-        |    - skipped blocklets: 0
-        | - pruned by FG DataMap
-        |    - name: dm
-        |    - provider: lucene
-        |    - skipped blocklets: 1
-        |""".stripMargin)(rows(0).getString(0))
-
-    LuceneFineGrainDataMapSuite.deleteFile(file1)
-    sql("drop datamap dm on table main")
-    CarbonProperties.getInstance().addProperty(
-      CarbonCommonConstants.BLOCKLET_SIZE, CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL)
+    // sometimes the plan comparison is failing even in case of both the plan being same.
+    // once the failure happens the dropped datamap is not getting executed
+    // and due to this other test cases also failing.
+    try {
+      assertResult(
+        """== CarbonData Profiler ==
+          |Table Scan on main
+          | - total blocklets: 1
+          | - filter: TEXT_MATCH('name:bob')
+          | - pruned by Main DataMap
+          |    - skipped blocklets: 0
+          | - pruned by FG DataMap
+          |    - name: dm
+          |    - provider: lucene
+          |    - skipped blocklets: 1
+          |""".stripMargin)(rows(0).getString(0))
+    } finally {
+      LuceneFineGrainDataMapSuite.deleteFile(file1)
+      sql("drop datamap dm on table main")
+      CarbonProperties.getInstance().addProperty(
+        CarbonCommonConstants.BLOCKLET_SIZE, CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL)
+    }
   }
 
   test("test lucene datamap creation for blocked features") {
@@ -910,9 +914,6 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
     sql("use default")
     sql("drop database if exists lucene cascade")
     CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.CARBON_SYSTEM_FOLDER_LOCATION,
-        CarbonProperties.getStorePath)
-    CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_LUCENE_INDEX_STOP_WORDS,
         CarbonCommonConstants.CARBON_LUCENE_INDEX_STOP_WORDS_DEFAULT)
     CarbonProperties.getInstance()


[41/50] [abbrv] carbondata git commit: [CARBONDATA-2795] Add documentation for S3

Posted by ja...@apache.org.
[CARBONDATA-2795] Add documentation for S3

This closes #2576


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/e26a742c
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/e26a742c
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/e26a742c

Branch: refs/heads/external-format
Commit: e26a742cb7031cdf0dde3601a4d21616661f4b73
Parents: 6d6a5b2
Author: kunal642 <ku...@gmail.com>
Authored: Sun Jul 29 21:44:22 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Sun Aug 5 16:51:47 2018 +0530

----------------------------------------------------------------------
 docs/configuration-parameters.md                |  7 +-
 docs/data-management-on-carbondata.md           |  8 ++
 docs/datamap/bloomfilter-datamap-guide.md       | 17 ++++
 docs/datamap/datamap-management.md              | 17 ++++
 docs/datamap/lucene-datamap-guide.md            | 17 ++++
 docs/datamap/preaggregate-datamap-guide.md      | 17 ++++
 docs/datamap/timeseries-datamap-guide.md        | 17 ++++
 docs/s3-guide.md                                | 91 ++++++++++++++++++++
 docs/sdk-guide.md                               | 17 ++++
 docs/streaming-guide.md                         | 17 ++++
 .../sql/CarbonDatasourceHadoopRelation.scala    |  2 +
 .../org/apache/spark/sql/CarbonSource.scala     |  2 +
 12 files changed, 228 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/e26a742c/docs/configuration-parameters.md
----------------------------------------------------------------------
diff --git a/docs/configuration-parameters.md b/docs/configuration-parameters.md
index eee85e2..46b8bd0 100644
--- a/docs/configuration-parameters.md
+++ b/docs/configuration-parameters.md
@@ -109,7 +109,12 @@ This section provides the details of all the configurations required for CarbonD
 |---------------------------------------------|--------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | carbon.sort.file.write.buffer.size | 16384 | File write buffer size used during sorting. Minimum allowed buffer size is 10240 byte and Maximum allowed buffer size is 10485760 byte. |
 | carbon.lock.type | LOCALLOCK | This configuration specifies the type of lock to be acquired during concurrent operations on table. There are following types of lock implementation: - LOCALLOCK: Lock is created on local file system as file. This lock is useful when only one spark driver (thrift server) runs on a machine and no other CarbonData spark application is launched concurrently. - HDFSLOCK: Lock is created on HDFS file system as file. This lock is useful when multiple CarbonData spark applications are launched and no ZooKeeper is running on cluster and HDFS supports file based locking. |
-| carbon.lock.path | TABLEPATH | This configuration specifies the path where lock files have to be created. Recommended to configure zookeeper lock type or configure HDFS lock path(to this property) in case of S3 file system as locking is not feasible on S3.
+| carbon.lock.path | TABLEPATH | Locks on the files are used to prevent concurrent operation from modifying the same files. This 
+configuration specifies the path where lock files have to be created. Recommended to configure 
+HDFS lock path(to this property) in case of S3 file system as locking is not feasible on S3. 
+**Note:** If this property is not set to HDFS location for S3 store, then there is a possibility 
+of data corruption because multiple data manipulation calls might try to update the status file 
+and as lock is not acquired before updation data might get overwritten. |
 | carbon.sort.intermediate.files.limit | 20 | Minimum number of intermediate files after which merged sort can be started (minValue = 2, maxValue=50). |
 | carbon.block.meta.size.reserved.percentage | 10 | Space reserved in percentage for writing block meta data in CarbonData file. |
 | carbon.csv.read.buffersize.byte | 1048576 | csv reading buffer size. |

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e26a742c/docs/data-management-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/data-management-on-carbondata.md b/docs/data-management-on-carbondata.md
index 41fd513..7cf6123 100644
--- a/docs/data-management-on-carbondata.md
+++ b/docs/data-management-on-carbondata.md
@@ -174,6 +174,12 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
       
       Local Dictionary size = ((memory occupied by each unique value * cardinality of the column) * number of columns)
       
+      **Bad Records Path:**
+      
+      This property is used to specify the location where bad records would be written.
+      
+      ```TBLPROPERTIES('BAD_RECORDS_PATH'='/opt/badrecords'')```
+      
 ### Example:
  
    ```
@@ -775,6 +781,8 @@ Users can specify which columns to include and exclude for local dictionary gene
   * If the IGNORE option is used, then bad records are neither loaded nor written to the separate CSV file.
   * In loaded data, if all records are bad records, the BAD_RECORDS_ACTION is invalid and the load operation fails.
   * The default maximum number of characters per column is 32000. If there are more than 32000 characters in a column, please refer to *String longer than 32000 characters* section.
+  * Since Bad Records Path can be specified in create, load and carbon properties. 
+  Therefore, value specified in load will have the highest priority, and value specified in carbon properties will have the least priority.
 
   Example:
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e26a742c/docs/datamap/bloomfilter-datamap-guide.md
----------------------------------------------------------------------
diff --git a/docs/datamap/bloomfilter-datamap-guide.md b/docs/datamap/bloomfilter-datamap-guide.md
index ccbcabe..92810f8 100644
--- a/docs/datamap/bloomfilter-datamap-guide.md
+++ b/docs/datamap/bloomfilter-datamap-guide.md
@@ -1,3 +1,20 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more 
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership. 
+    The ASF licenses this file to you under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with 
+    the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software 
+    distributed under the License is distributed on an "AS IS" BASIS, 
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and 
+    limitations under the License.
+-->
+
 # CarbonData BloomFilter DataMap (Alpha Feature)
 
 * [DataMap Management](#datamap-management)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e26a742c/docs/datamap/datamap-management.md
----------------------------------------------------------------------
diff --git a/docs/datamap/datamap-management.md b/docs/datamap/datamap-management.md
index 1695a23..23f1517 100644
--- a/docs/datamap/datamap-management.md
+++ b/docs/datamap/datamap-management.md
@@ -1,3 +1,20 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more 
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership. 
+    The ASF licenses this file to you under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with 
+    the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software 
+    distributed under the License is distributed on an "AS IS" BASIS, 
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and 
+    limitations under the License.
+-->
+
 # CarbonData DataMap Management
 
 ## Overview

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e26a742c/docs/datamap/lucene-datamap-guide.md
----------------------------------------------------------------------
diff --git a/docs/datamap/lucene-datamap-guide.md b/docs/datamap/lucene-datamap-guide.md
index 119b609..06cd194 100644
--- a/docs/datamap/lucene-datamap-guide.md
+++ b/docs/datamap/lucene-datamap-guide.md
@@ -1,3 +1,20 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more 
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership. 
+    The ASF licenses this file to you under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with 
+    the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software 
+    distributed under the License is distributed on an "AS IS" BASIS, 
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and 
+    limitations under the License.
+-->
+
 # CarbonData Lucene DataMap (Alpha Feature)
   
 * [DataMap Management](#datamap-management)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e26a742c/docs/datamap/preaggregate-datamap-guide.md
----------------------------------------------------------------------
diff --git a/docs/datamap/preaggregate-datamap-guide.md b/docs/datamap/preaggregate-datamap-guide.md
index d85f527..ff4c28e 100644
--- a/docs/datamap/preaggregate-datamap-guide.md
+++ b/docs/datamap/preaggregate-datamap-guide.md
@@ -1,3 +1,20 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more 
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership. 
+    The ASF licenses this file to you under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with 
+    the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software 
+    distributed under the License is distributed on an "AS IS" BASIS, 
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and 
+    limitations under the License.
+-->
+
 # CarbonData Pre-aggregate DataMap
   
 * [Quick Example](#quick-example)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e26a742c/docs/datamap/timeseries-datamap-guide.md
----------------------------------------------------------------------
diff --git a/docs/datamap/timeseries-datamap-guide.md b/docs/datamap/timeseries-datamap-guide.md
index 15ca3fc..135188d 100644
--- a/docs/datamap/timeseries-datamap-guide.md
+++ b/docs/datamap/timeseries-datamap-guide.md
@@ -1,3 +1,20 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more 
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership. 
+    The ASF licenses this file to you under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with 
+    the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software 
+    distributed under the License is distributed on an "AS IS" BASIS, 
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and 
+    limitations under the License.
+-->
+
 # CarbonData Timeseries DataMap
 
 * [Timeseries DataMap Introduction](#timeseries-datamap-intoduction)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e26a742c/docs/s3-guide.md
----------------------------------------------------------------------
diff --git a/docs/s3-guide.md b/docs/s3-guide.md
new file mode 100644
index 0000000..2f4dfa9
--- /dev/null
+++ b/docs/s3-guide.md
@@ -0,0 +1,91 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more 
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership. 
+    The ASF licenses this file to you under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with 
+    the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software 
+    distributed under the License is distributed on an "AS IS" BASIS, 
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and 
+    limitations under the License.
+-->
+
+#S3 Guide (Alpha Feature 1.4.1)
+
+Object storage is the recommended storage format in cloud as it can support storing large data 
+files. S3 APIs are widely used for accessing object stores. This can be 
+used to store or retrieve data on Amazon cloud, Huawei Cloud(OBS) or on any other object
+ stores conforming to S3 API.
+Storing data in cloud is advantageous as there are no restrictions on the size of 
+data and the data can be accessed from anywhere at any time.
+Carbondata can support any Object Storage that conforms to Amazon S3 API.
+Carbondata relies on Hadoop provided S3 filesystem APIs to access Object stores.
+
+#Writing to Object Storage
+
+To store carbondata files onto Object Store, `carbon.storelocation` property will have 
+to be configured with Object Store path in CarbonProperties file. 
+
+For example:
+```
+carbon.storelocation=s3a://mybucket/carbonstore.
+```
+
+If the existing store location cannot be changed or only specific tables need to be stored 
+onto cloud object store, it can be done so by specifying the `location` option in the create 
+table DDL command.
+
+For example:
+
+```
+CREATE TABLE IF NOT EXISTS db1.table1(col1 string, col2 int) STORED AS carbondata LOCATION 's3a://mybucket/carbonstore'
+``` 
+
+For more details on create table, Refer [data-management-on-carbondata](./data-management-on-carbondata.md#create-table)
+
+#Authentication
+
+Authentication properties will have to be configured to store the carbondata files on to S3 location. 
+
+Authentication properties can be set in any of the following ways:
+1. Set authentication properties in core-site.xml, refer 
+[hadoop authentication document](https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/index.html#Authentication_properties)
+
+2. Set authentication properties in spark-defaults.conf.
+
+Example
+```
+spark.hadoop.fs.s3a.secret.key=123
+spark.hadoop.fs.s3a.access.key=456
+```
+
+3. Pass authentication properties with spark-submit as configuration.
+
+Example:
+```
+./bin/spark-submit --master yarn --conf spark.hadoop.fs.s3a.secret.key=123 --conf spark.hadoop.fs
+.s3a.access.key=456 --class=
+```  
+
+4. Set authentication properties to hadoop configuration object in sparkContext.
+
+Example:
+```
+sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.secret.key", "123")
+sparkSession.sparkContext.hadoopConfiguration.set("fs.s3a.access.key","456")
+```
+
+#Recommendations
+
+1. Object Storage like S3 does not support file leasing mechanism(supported by HDFS) that is 
+required to take locks which ensure consistency between concurrent operations therefore, it is 
+recommended to set the configurable lock path property([carbon.lock.path](https://github.com/apache/carbondata/blob/master/docs/configuration-parameters.md#miscellaneous-configuration))
+ to a HDFS directory.
+2. Concurrent data manipulation operations are not supported. Object stores follow eventual
+consistency semantics, i.e., any put request might take some time to be reflected when listing.
+As a result, data reads are not guaranteed to be consistent or up to date.

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e26a742c/docs/sdk-guide.md
----------------------------------------------------------------------
diff --git a/docs/sdk-guide.md b/docs/sdk-guide.md
index c7bff59..e592aa5 100644
--- a/docs/sdk-guide.md
+++ b/docs/sdk-guide.md
@@ -1,3 +1,20 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more 
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership. 
+    The ASF licenses this file to you under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with 
+    the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software 
+    distributed under the License is distributed on an "AS IS" BASIS, 
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and 
+    limitations under the License.
+-->
+
 # SDK Guide
 In the carbon jars package, there exist a carbondata-store-sdk-x.x.x-SNAPSHOT.jar, including SDK writer and reader.
 # SDK Writer

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e26a742c/docs/streaming-guide.md
----------------------------------------------------------------------
diff --git a/docs/streaming-guide.md b/docs/streaming-guide.md
index a9284e6..32d24dc 100644
--- a/docs/streaming-guide.md
+++ b/docs/streaming-guide.md
@@ -1,3 +1,20 @@
+<!--
+    Licensed to the Apache Software Foundation (ASF) under one or more 
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership. 
+    The ASF licenses this file to you under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with 
+    the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software 
+    distributed under the License is distributed on an "AS IS" BASIS, 
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and 
+    limitations under the License.
+-->
+
 # CarbonData Streaming Ingestion
 
 ## Quick example

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e26a742c/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
index 3ce8c8c..b5842a9 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
@@ -38,6 +38,7 @@ import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.scan.expression.Expression
 import org.apache.carbondata.core.scan.expression.logical.AndExpression
 import org.apache.carbondata.hadoop.CarbonProjection
+import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil
 import org.apache.carbondata.spark.rdd.{CarbonScanRDD, SparkReadSupport}
 
 case class CarbonDatasourceHadoopRelation(
@@ -55,6 +56,7 @@ case class CarbonDatasourceHadoopRelation(
     caseInsensitiveMap("tablename"))
   lazy val databaseName: String = carbonTable.getDatabaseName
   lazy val tableName: String = carbonTable.getTableName
+  CarbonInputFormatUtil.setS3Configurations(sparkSession.sessionState.newHadoopConf())
   CarbonSession.updateSessionInfoToCurrentThread(sparkSession)
 
   @transient lazy val carbonRelation: CarbonRelation =

http://git-wip-us.apache.org/repos/asf/carbondata/blob/e26a742c/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala
index 0d13d4c..b162294 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSource.scala
@@ -44,6 +44,7 @@ import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
 import org.apache.carbondata.core.metadata.schema.SchemaEvolutionEntry
 import org.apache.carbondata.core.metadata.schema.table.TableInfo
 import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil}
+import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil
 import org.apache.carbondata.spark.CarbonOption
 import org.apache.carbondata.spark.util.CarbonScalaUtil
 import org.apache.carbondata.streaming.{CarbonStreamException, CarbonStreamingQueryListener, StreamSinkFactory}
@@ -328,6 +329,7 @@ object CarbonSource {
         .contains("true")
       tableInfo.setTransactionalTable(isTransactionalTable)
       if (isTransactionalTable && !metaStore.isReadFromHiveMetaStore) {
+        CarbonInputFormatUtil.setS3Configurations(sparkSession.sessionState.newHadoopConf())
         // save to disk
         metaStore.saveToDisk(tableInfo, properties("tablePath"))
         // remove schema string from map as we don't store carbon schema to hive metastore


[10/50] [abbrv] carbondata git commit: [CARBONDATA-2749][dataload] In HDFS, an empty tablestatus file is written during dataload, IUD or compaction when disk is full.

Posted by ja...@apache.org.
[CARBONDATA-2749][dataload] In HDFS, an empty tablestatus file is written
during dataload, IUD or compaction when disk is full.

Problem:
When a failure happens because the disk is full during load, IUD or compaction,
the tablestatus.tmp file written by the atomic file operation remains empty,
and in the finally block this empty tablestatus.tmp file is renamed to the
actual tablestatus file. This leaves an empty tablestatus file; once that
happens the tablestatus file cannot be read back and the already loaded data
can no longer be used.

Solution:
If the failure happens during the write, the temporary file rename in the
finally block must be skipped.
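
A minimal sketch of the guarded write-then-rename pattern introduced by this fix
(hypothetical, simplified Java; the real implementation is AtomicFileOperationsImpl
with the new setFailed() hook, as shown in the diff below):

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;

// Hypothetical, simplified names; in CarbonData the callers are classes such as
// SegmentStatusManager, which mark the operation failed before rethrowing.
final class AtomicStatusFileWriter {

  private boolean failed = false;

  void write(File target, String content) throws IOException {
    File temp = new File(target.getPath() + ".tmp");
    try (Writer writer = new BufferedWriter(new FileWriter(temp))) {
      writer.write(content);            // may stop half way when the disk is full
    } catch (IOException e) {
      failed = true;                    // equivalent of AtomicFileOperations.setFailed()
      throw e;
    } finally {
      finish(target, temp);
    }
  }

  private void finish(File target, File temp) throws IOException {
    if (!failed) {
      // only replace the real tablestatus file when the temp file was written completely
      if (!temp.renameTo(target)) {
        throw new IOException("temporary file renaming failed, src=" + temp + ", dest=" + target);
      }
    } else {
      // skip the rename so the previous, valid tablestatus file is preserved
      temp.delete();
    }
  }
}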

This closes #2517


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/76285717
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/76285717
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/76285717

Branch: refs/heads/external-format
Commit: 76285717c0ecd963b86fcdb13c63606822bcea3b
Parents: fd747a3
Author: mohammadshahidkhan <mo...@gmail.com>
Authored: Tue Jul 17 16:59:35 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Mon Jul 30 14:51:26 2018 +0530

----------------------------------------------------------------------
 .../status/DiskBasedDataMapStatusProvider.java  |  1 +
 .../AtomicFileOperationS3Impl.java              |  5 +-
 .../fileoperations/AtomicFileOperations.java    |  2 +
 .../AtomicFileOperationsImpl.java               | 24 ++++++++--
 .../core/metadata/SegmentFileStore.java         |  4 ++
 .../statusmanager/SegmentStatusManager.java     |  5 ++
 .../SegmentUpdateStatusManager.java             |  2 +
 .../hadoop/testutil/StoreCreator.java           |  4 ++
 .../processing/util/CarbonLoaderUtil.java       | 50 ++++----------------
 9 files changed, 50 insertions(+), 47 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/76285717/core/src/main/java/org/apache/carbondata/core/datamap/status/DiskBasedDataMapStatusProvider.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/status/DiskBasedDataMapStatusProvider.java b/core/src/main/java/org/apache/carbondata/core/datamap/status/DiskBasedDataMapStatusProvider.java
index 22a7f6d..d42c98a 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/status/DiskBasedDataMapStatusProvider.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/status/DiskBasedDataMapStatusProvider.java
@@ -174,6 +174,7 @@ public class DiskBasedDataMapStatusProvider implements DataMapStatusStorageProvi
       brWriter.write(metadataInstance);
     } catch (IOException ioe) {
       LOG.error("Error message: " + ioe.getLocalizedMessage());
+      fileWrite.setFailed();
       throw ioe;
     } finally {
       if (null != brWriter) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/76285717/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperationS3Impl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperationS3Impl.java b/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperationS3Impl.java
index 71730f0..f5311cb 100644
--- a/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperationS3Impl.java
+++ b/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperationS3Impl.java
@@ -44,7 +44,6 @@ class AtomicFileOperationS3Impl implements AtomicFileOperations {
   AtomicFileOperationS3Impl(String filePath) {
     this.filePath = filePath;
   }
-
   @Override public DataInputStream openForRead() throws IOException {
     return FileFactory.getDataInputStream(filePath, FileFactory.getFileType(filePath));
   }
@@ -61,4 +60,8 @@ class AtomicFileOperationS3Impl implements AtomicFileOperations {
     dataOutStream = FileFactory.getDataOutputStream(filePath, fileType);
     return dataOutStream;
   }
+
+  @Override public void setFailed() {
+    // no implementation required
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/76285717/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperations.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperations.java b/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperations.java
index ffaa1b1..a641a49 100644
--- a/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperations.java
+++ b/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperations.java
@@ -28,4 +28,6 @@ public interface AtomicFileOperations {
   void close() throws IOException;
 
   DataOutputStream openForWrite(FileWriteOperation operation) throws IOException;
+
+  void setFailed();
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/76285717/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperationsImpl.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperationsImpl.java b/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperationsImpl.java
index af2456a..f9f8647 100644
--- a/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperationsImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/fileoperations/AtomicFileOperationsImpl.java
@@ -21,6 +21,8 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 
+import org.apache.carbondata.common.logging.LogService;
+import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
@@ -29,6 +31,11 @@ import org.apache.carbondata.core.util.CarbonUtil;
 
 class AtomicFileOperationsImpl implements AtomicFileOperations {
 
+  /**
+   * Logger instance
+   */
+  private static final LogService LOGGER =
+      LogServiceFactory.getLogService(AtomicFileOperationsImpl.class.getName());
   private String filePath;
 
   private FileType fileType;
@@ -36,6 +43,7 @@ class AtomicFileOperationsImpl implements AtomicFileOperations {
   private String tempWriteFilePath;
 
   private DataOutputStream dataOutStream;
+  private boolean setFailed;
 
   AtomicFileOperationsImpl(String filePath, FileType fileType) {
     this.filePath = filePath;
@@ -70,12 +78,20 @@ class AtomicFileOperationsImpl implements AtomicFileOperations {
     if (null != dataOutStream) {
       CarbonUtil.closeStream(dataOutStream);
       CarbonFile tempFile = FileFactory.getCarbonFile(tempWriteFilePath, fileType);
-      if (!tempFile.renameForce(filePath)) {
-        throw new IOException("temporary file renaming failed, src="
-            + tempFile.getPath() + ", dest=" + filePath);
+      if (!this.setFailed) {
+        if (!tempFile.renameForce(filePath)) {
+          throw new IOException(
+              "temporary file renaming failed, src=" + tempFile.getPath() + ", dest=" + filePath);
+        }
+      } else {
+        LOGGER.warn("The temporary file renaming skipped due to I/O error, deleting file "
+            + tempWriteFilePath);
+        tempFile.delete();
       }
     }
-
   }
 
+  @Override public void setFailed() {
+    this.setFailed = true;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/76285717/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
index 32f6155..67e58d1 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/SegmentFileStore.java
@@ -229,6 +229,10 @@ public class SegmentFileStore {
       String metadataInstance = gsonObjectToWrite.toJson(segmentFile);
       brWriter.write(metadataInstance);
       brWriter.flush();
+    } catch (IOException ie) {
+      LOGGER.error("Error message: " + ie.getLocalizedMessage());
+      fileWrite.setFailed();
+      throw ie;
     } finally {
       CarbonUtil.closeStreams(brWriter);
       fileWrite.close();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/76285717/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java
index d5b456c..daf54a0 100755
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java
@@ -512,6 +512,7 @@ public class SegmentStatusManager {
       brWriter.write(metadataInstance);
     } catch (IOException ioe) {
       LOG.error("Error message: " + ioe.getLocalizedMessage());
+      fileWrite.setFailed();
       throw ioe;
     } finally {
       if (null != brWriter) {
@@ -881,6 +882,10 @@ public class SegmentStatusManager {
 
       String metadataInstance = gsonObjectToWrite.toJson(listOfLoadFolderDetails.toArray());
       brWriter.write(metadataInstance);
+    } catch (IOException ie) {
+      LOG.error("Error message: " + ie.getLocalizedMessage());
+      writeOperation.setFailed();
+      throw ie;
     } finally {
       try {
         if (null != brWriter) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/76285717/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
index 5d5e8b0..c3daac5 100644
--- a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentUpdateStatusManager.java
@@ -724,6 +724,8 @@ public class SegmentUpdateStatusManager {
       brWriter.write(metadataInstance);
     } catch (IOException ioe) {
       LOG.error("Error message: " + ioe.getLocalizedMessage());
+      fileWrite.setFailed();
+      throw ioe;
     } finally {
       if (null != brWriter) {
         brWriter.flush();

http://git-wip-us.apache.org/repos/asf/carbondata/blob/76285717/hadoop/src/main/java/org/apache/carbondata/hadoop/testutil/StoreCreator.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/testutil/StoreCreator.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/testutil/StoreCreator.java
index 6e6a65b..c113228 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/testutil/StoreCreator.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/testutil/StoreCreator.java
@@ -449,6 +449,10 @@ public class StoreCreator {
 
       String metadataInstance = gsonObjectToWrite.toJson(listOfLoadFolderDetails.toArray());
       brWriter.write(metadataInstance);
+    } catch (IOException ioe) {
+      LOG.error("Error message: " + ioe.getLocalizedMessage());
+      writeOperation.setFailed();
+      throw ioe;
     } finally {
       try {
         if (null != brWriter) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/76285717/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java
----------------------------------------------------------------------
diff --git a/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java
index 272abec..19353d1 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/util/CarbonLoaderUtil.java
@@ -16,14 +16,17 @@
  */
 package org.apache.carbondata.processing.util;
 
-import java.io.BufferedWriter;
-import java.io.DataOutputStream;
 import java.io.IOException;
-import java.io.OutputStreamWriter;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
-import java.nio.charset.Charset;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 import org.apache.carbondata.common.logging.LogService;
 import org.apache.carbondata.common.logging.LogServiceFactory;
@@ -40,9 +43,6 @@ import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
 import org.apache.carbondata.core.datastore.filesystem.CarbonFileFilter;
 import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.datastore.impl.FileFactory.FileType;
-import org.apache.carbondata.core.fileoperations.AtomicFileOperationFactory;
-import org.apache.carbondata.core.fileoperations.AtomicFileOperations;
-import org.apache.carbondata.core.fileoperations.FileWriteOperation;
 import org.apache.carbondata.core.locks.CarbonLockUtil;
 import org.apache.carbondata.core.locks.ICarbonLock;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
@@ -62,7 +62,6 @@ import org.apache.carbondata.processing.merger.NodeMultiBlockRelation;
 
 import static org.apache.carbondata.core.enums.EscapeSequences.*;
 
-import com.google.gson.Gson;
 import org.apache.commons.lang3.StringUtils;
 
 public final class CarbonLoaderUtil {
@@ -397,39 +396,6 @@ public final class CarbonLoaderUtil {
     loadMetadataDetails.setLoadStartTime(loadStartTime);
   }
 
-  public static void writeLoadMetadata(AbsoluteTableIdentifier identifier,
-      List<LoadMetadataDetails> listOfLoadFolderDetails) throws IOException {
-    String dataLoadLocation = CarbonTablePath.getTableStatusFilePath(identifier.getTablePath());
-
-    DataOutputStream dataOutputStream;
-    Gson gsonObjectToWrite = new Gson();
-    BufferedWriter brWriter = null;
-
-    AtomicFileOperations writeOperation =
-        AtomicFileOperationFactory.getAtomicFileOperations(dataLoadLocation);
-
-    try {
-      dataOutputStream = writeOperation.openForWrite(FileWriteOperation.OVERWRITE);
-      brWriter = new BufferedWriter(new OutputStreamWriter(dataOutputStream,
-              Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET)));
-
-      String metadataInstance = gsonObjectToWrite.toJson(listOfLoadFolderDetails.toArray());
-      brWriter.write(metadataInstance);
-    } finally {
-      try {
-        if (null != brWriter) {
-          brWriter.flush();
-        }
-      } catch (Exception e) {
-        LOGGER.error("error in  flushing ");
-
-      }
-      CarbonUtil.closeStreams(brWriter);
-      writeOperation.close();
-    }
-
-  }
-
   public static boolean isValidEscapeSequence(String escapeChar) {
     return escapeChar.equalsIgnoreCase(NEW_LINE.getName()) ||
         escapeChar.equalsIgnoreCase(CARRIAGE_RETURN.getName()) ||


[03/50] [abbrv] carbondata git commit: [CARBONDATA-2775] Adaptive encoding fails for Unsafe OnHeap if target datatype is SHORT_INT

Posted by ja...@apache.org.
[CARBONDATA-2775] Adaptive encoding fails for Unsafe OnHeap if target datatype is SHORT_INT

Problem:
[CARBONDATA-2775] Adaptive encoding fails for Unsafe OnHeap if the target data type is SHORT_INT.

Solution: If ENABLE_OFFHEAP_SORT = false is set in the carbon properties, UnsafeFixLengthColumnPage.java uses a different compression logic instead of raw compression. In that case the page row-count calculation must also handle the SHORT_INT data type.
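
A minimal sketch of the affected row-count computation (illustrative only, assuming
SHORT_INT's 3-byte width; the actual method is UnsafeFixLengthColumnPage.getPageRowCount(),
patched in the diff below):

// Illustrative sketch: derive the number of rows on a fixed-length page from the
// total byte length and the per-value width of the page's data type.
static int pageRowCount(int totalLength, String dataType) {
  switch (dataType) {
    case "BYTE":      return totalLength / 1;
    case "SHORT":     return totalLength / 2;
    case "SHORT_INT": return totalLength / 3;   // the branch that was missing before this fix
    case "INT":       return totalLength / 4;
    case "LONG":      return totalLength / 8;
    default:
      throw new UnsupportedOperationException("unsupported data type: " + dataType);
  }
}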

This closes #2546


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/4d95dfcf
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/4d95dfcf
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/4d95dfcf

Branch: refs/heads/external-format
Commit: 4d95dfcff2895ce0aed8ba6f75ce9946ae5172af
Parents: 8d3e8b8
Author: ajantha-bhat <aj...@gmail.com>
Authored: Tue Jul 24 12:33:47 2018 +0530
Committer: kumarvishal09 <ku...@gmail.com>
Committed: Sun Jul 29 11:52:30 2018 +0530

----------------------------------------------------------------------
 .../page/UnsafeFixLengthColumnPage.java         |  2 +
 ...UnsafeHeapColumnPageForComplexDataType.scala | 61 ++++++++++++++++++++
 2 files changed, 63 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/4d95dfcf/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeFixLengthColumnPage.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeFixLengthColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeFixLengthColumnPage.java
index bcb74c0..f75deb6 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeFixLengthColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeFixLengthColumnPage.java
@@ -495,6 +495,8 @@ public class UnsafeFixLengthColumnPage extends ColumnPage {
       return totalLength / ByteUtil.SIZEOF_BYTE;
     } else if (dataType == DataTypes.SHORT) {
       return totalLength / ByteUtil.SIZEOF_SHORT;
+    } else if (dataType == DataTypes.SHORT_INT) {
+      return totalLength / ByteUtil.SIZEOF_SHORT_INT;
     } else if (dataType == DataTypes.INT) {
       return totalLength / ByteUtil.SIZEOF_INT;
     } else if (dataType == DataTypes.LONG) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/4d95dfcf/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingUnsafeHeapColumnPageForComplexDataType.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingUnsafeHeapColumnPageForComplexDataType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingUnsafeHeapColumnPageForComplexDataType.scala
new file mode 100644
index 0000000..acf75c1
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestAdaptiveEncodingUnsafeHeapColumnPageForComplexDataType.scala
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.integration.spark.testsuite.complexType
+
+import java.io.File
+
+import org.apache.spark.sql.test.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+/**
+ * Test class of Adaptive Encoding UnSafe Column Page with Complex Data type
+ *
+ */
+
+class TestAdaptiveEncodingUnsafeHeapColumnPageForComplexDataType
+  extends QueryTest with BeforeAndAfterAll with TestAdaptiveComplexType {
+
+  override def beforeAll(): Unit = {
+
+    new File(CarbonProperties.getInstance().getSystemFolderLocation).delete()
+    sql("DROP TABLE IF EXISTS adaptive")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE,
+        "true")
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_OFFHEAP_SORT,
+        "false")
+  }
+
+  override def afterAll(): Unit = {
+    sql("DROP TABLE IF EXISTS adaptive")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE,
+        "true")
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_OFFHEAP_SORT,
+        "true")
+  }
+
+
+}


[08/50] [abbrv] carbondata git commit: [CARBONDATA-2794]Distinct count fails on ArrayOfStruct

Posted by ja...@apache.org.
[CARBONDATA-2794]Distinct count fails on ArrayOfStruct

This PR fixes the Code Generator Error thrown when a select query contains more than one count(distinct) on ArrayOfStruct columns together with a group by clause.

This closes #2573


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/3b9efed6
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/3b9efed6
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/3b9efed6

Branch: refs/heads/external-format
Commit: 3b9efed6282e504cb5c872caf9b70f4f08c0f93b
Parents: 91837a6
Author: Indhumathi27 <in...@gmail.com>
Authored: Fri Jul 27 23:39:50 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Sun Jul 29 21:33:04 2018 +0530

----------------------------------------------------------------------
 .../spark/testsuite/complexType/TestComplexDataType.scala | 10 ++++++++++
 .../apache/spark/sql/optimizer/CarbonLateDecodeRule.scala |  2 +-
 2 files changed, 11 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/3b9efed6/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
index e2884d8..2b3cfc0 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexDataType.scala
@@ -675,6 +675,16 @@ class TestComplexDataType extends QueryTest with BeforeAndAfterAll {
         CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
   }
 
+  test("test arrayofstruct with count(distinct)") {
+    sql("DROP TABLE IF EXISTS test")
+    sql("create table test(cus_id string,array_of_struct array<struct<id:int,country:string," +
+        "state:string,city:string>>) stored by 'carbondata'")
+    sql("insert into test values('cus_01','123:abc:mno:xyz$1234:abc1:mno1:xyz1')")
+    checkAnswer(sql("select array_of_struct.state[0],count(distinct array_of_struct.id[0]) as count_country," +
+      "count(distinct array_of_struct.state[0]) as count_city from test group by array_of_struct" +
+      ".state[0]"), Seq(Row("mno", 1, 1)))
+  }
+
   test("test struct complex type with filter") {
     sql("DROP TABLE IF EXISTS test")
     sql("create table test(id int,a struct<b:int,c:int>) stored by 'carbondata'")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/3b9efed6/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
index 9738ab0..ccdf034 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonLateDecodeRule.scala
@@ -743,7 +743,7 @@ class CarbonLateDecodeRule extends Rule[LogicalPlan] with PredicateHelper {
             attrName.contains(n)
           })
         case ar : ArrayType =>
-          attrName.contains(a.name + "[")
+          attrName.contains(a.name + "[") || attrName.contains(a.name + ".")
         case _ => false
       }
     }


[44/50] [abbrv] carbondata git commit: [CARBONDATA-2809][DataMap] Block rebuilding for bloom/lucene and preagg datamap

Posted by ja...@apache.org.
[CARBONDATA-2809][DataMap] Block rebuilding for bloom/lucene and preagg datamap

Manual refresh currently works correctly only for MV; it has bugs with other
types of datamap such as preaggregate, timeseries, lucene and bloomfilter, so
'deferred rebuild' as well as the rebuild command are blocked for those
datamap types.

Fix bugs in deferred rebuild for MV.

MV datamaps are rebuilt in deferred mode regardless of whether the deferred
flag is set.
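
A minimal Java-style sketch of how the block can be enforced at DDL time
(hypothetical, simplified names except supportRebuild(); the actual checks live in
the Scala commands CarbonCreateDataMapCommand and CarbonDataMapRebuildCommand shown
in the diff below):

// Hypothetical guard; only the supportRebuild() method mirrors the new API in
// DataMapProvider, everything else is illustrative.
interface DataMapProviderSketch {
  // only the MV provider returns true in this patch
  boolean supportRebuild();
}

final class DeferredRebuildValidator {
  static void validate(String dataMapName, String providerName,
      boolean deferredRebuildRequested, DataMapProviderSketch provider) {
    if (deferredRebuildRequested && !provider.supportRebuild()) {
      // same message the new test case in LuceneFineGrainDataMapSuite expects
      throw new IllegalArgumentException(
          "DEFERRED REBUILD is not supported on this datamap " + dataMapName
              + " with provider " + providerName);
    }
  }
}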

This closes #2594


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/abcd4f6e
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/abcd4f6e
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/abcd4f6e

Branch: refs/heads/external-format
Commit: abcd4f6e23f8b8a9ac543e7dfad01cff40bf4ae1
Parents: b702a1b
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Mon Aug 6 19:26:04 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Tue Aug 7 18:12:07 2018 +0800

----------------------------------------------------------------------
 .../core/datamap/DataMapProvider.java           |  1 +
 .../core/datamap/dev/DataMapFactory.java        |  6 +++
 .../mv/datamap/MVDataMapProvider.scala          |  2 +
 docs/datamap/datamap-management.md              | 32 ++++++++-------
 .../lucene/LuceneFineGrainDataMapSuite.scala    | 33 +++++++++++++--
 ...eneFineGrainDataMapWithSearchModeSuite.scala |  2 +-
 .../preaggregate/TestPreAggCreateCommand.scala  | 10 -----
 .../preaggregate/TestPreAggregateLoad.scala     | 36 +++++++++++++++++
 .../testsuite/datamap/TestDataMapCommand.scala  |  1 -
 .../testsuite/datamap/TestDataMapStatus.scala   |  2 +
 .../datamap/IndexDataMapProvider.java           |  5 +++
 .../datamap/PreAggregateDataMapProvider.java    |  5 +++
 .../datamap/CarbonCreateDataMapCommand.scala    | 21 +++++++---
 .../datamap/CarbonDataMapRebuildCommand.scala   | 19 +++++++--
 .../datamap/CarbonDataMapShowCommand.scala      | 11 ++++-
 .../bloom/BloomCoarseGrainDataMapSuite.scala    | 42 +++++++++++++++++---
 16 files changed, 182 insertions(+), 46 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/core/src/main/java/org/apache/carbondata/core/datamap/DataMapProvider.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapProvider.java b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapProvider.java
index 086a1c0..cc05d31 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapProvider.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapProvider.java
@@ -125,4 +125,5 @@ public abstract class DataMapProvider {
 
   public abstract DataMapFactory getDataMapFactory();
 
+  public abstract boolean supportRebuild();
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
index 67f82b2..de8dc58 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
@@ -173,4 +173,10 @@ public abstract class DataMapFactory<T extends DataMap> {
     return false;
   }
 
+  /**
+   * whether this datamap support rebuild
+   */
+  public boolean supportRebuild() {
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVDataMapProvider.scala
----------------------------------------------------------------------
diff --git a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVDataMapProvider.scala b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVDataMapProvider.scala
index 2aba23e..7108bf8 100644
--- a/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVDataMapProvider.scala
+++ b/datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVDataMapProvider.scala
@@ -122,4 +122,6 @@ class MVDataMapProvider(
   override def getDataMapFactory: DataMapFactory[_ <: DataMap[_ <: Blocklet]] = {
     throw new UnsupportedOperationException
   }
+
+  override def supportRebuild(): Boolean = true
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/docs/datamap/datamap-management.md
----------------------------------------------------------------------
diff --git a/docs/datamap/datamap-management.md b/docs/datamap/datamap-management.md
index 23f1517..b5d1aaa 100644
--- a/docs/datamap/datamap-management.md
+++ b/docs/datamap/datamap-management.md
@@ -1,17 +1,17 @@
 <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more 
+    Licensed to the Apache Software Foundation (ASF) under one or more
     contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership. 
+    this work for additional information regarding copyright ownership.
     The ASF licenses this file to you under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with 
+    (the "License"); you may not use this file except in compliance with
     the License.  You may obtain a copy of the License at
 
       http://www.apache.org/licenses/LICENSE-2.0
 
-    Unless required by applicable law or agreed to in writing, software 
-    distributed under the License is distributed on an "AS IS" BASIS, 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and 
+    See the License for the specific language governing permissions and
     limitations under the License.
 -->
 
@@ -31,26 +31,30 @@ DataMap can be created using following DDL
     SELECT statement
 ```
 
-Currently, there are 5 DataMap implementation in CarbonData.
+Currently, there are 5 DataMap implementations in CarbonData.
 
 | DataMap Provider | Description                              | DMPROPERTIES                             | Management       |
 | ---------------- | ---------------------------------------- | ---------------------------------------- | ---------------- |
 | preaggregate     | single table pre-aggregate table         | No DMPROPERTY is required                | Automatic        |
-| timeseries       | time dimension rollup table.             | event_time, xx_granularity, please refer to [Timeseries DataMap](https://github.com/apache/carbondata/blob/master/docs/datamap/timeseries-datamap-guide.md) | Automatic        |
-| mv               | multi-table pre-aggregate table,         | No DMPROPERTY is required                | Manual           |
-| lucene           | lucene indexing for text column          | index_columns to specifying the index columns | Manual/Automatic |
-| bloomfilter      | bloom filter for high cardinality column, geospatial column | index_columns to specifying the index columns | Manual/Automatic |
+| timeseries       | time dimension rollup table              | event_time, xx_granularity, please refer to [Timeseries DataMap](https://github.com/apache/carbondata/blob/master/docs/datamap/timeseries-datamap-guide.md) | Automatic        |
+| mv               | multi-table pre-aggregate table          | No DMPROPERTY is required                | Manual           |
+| lucene           | lucene indexing for text column          | index_columns to specifying the index columns | Automatic |
+| bloomfilter      | bloom filter for high cardinality column, geospatial column | index_columns to specifying the index columns | Automatic |
 
 ## DataMap Management
 
 There are two kinds of management semantic for DataMap.
 
-1. Automatic Refresh: Create datamap without `WITH DEFERED REBUILD` in the statement, which is by default.
-2. Manual Refresh: Create datamap with `WITH DEFERED REBUILD` in the statement
+1. Automatic Refresh: Create datamap without `WITH DEFERRED REBUILD` in the statement, which is by default.
+2. Manual Refresh: Create datamap with `WITH DEFERRED REBUILD` in the statement
+
+**CAUTION:**
+Manual refresh currently works reliably only for MV; it has known bugs with the other datamap types in Carbondata 1.4.1, so this option is blocked for them in this version.
+If a user creates an MV datamap without specifying `WITH DEFERRED REBUILD`, carbondata will give a warning and treat the datamap as deferred rebuild.
 
 ### Automatic Refresh
 
-When user creates a datamap on the main table without using `WITH DEFERED REBUILD` syntax, the datamap will be managed by system automatically.
+When user creates a datamap on the main table without using `WITH DEFERRED REBUILD` syntax, the datamap will be managed by system automatically.
 For every data load to the main table, the system will immediately trigger a load to the datamap automatically. These two data loads (to the main table and to the datamap) are executed in a transactional manner, meaning that either both succeed or neither does.
 
 The data loading to the datamap is incremental, based on the Segment concept, avoiding an expensive total rebuild.
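
To make the two refresh modes concrete, here is a minimal sketch in the style of the Spark SQL test suites; the table, column and datamap names are illustrative and not part of this patch:

```
// Automatic refresh (default): the bloom datamap is loaded together with
// every load of the main table, in the same transaction.
sql(
  s"""
     | CREATE DATAMAP bloom_on_id ON TABLE sales
     | USING 'bloomfilter'
     | DMPROPERTIES('INDEX_COLUMNS'='id')
   """.stripMargin)

// Manual refresh: only MV supports WITH DEFERRED REBUILD in 1.4.1; the
// datamap stays disabled until it is rebuilt explicitly.
sql(
  s"""
     | CREATE DATAMAP sales_by_city
     | USING 'mv'
     | WITH DEFERRED REBUILD
     | AS SELECT city, sum(revenue) FROM sales GROUP BY city
   """.stripMargin)
sql("REBUILD DATAMAP sales_by_city")
```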

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
index 54cad00..e7bd366 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
@@ -132,7 +132,34 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
     sql("drop datamap dm on table datamap_test")
   }
 
-  test("test lucene rebuild data map") {
+  // for CARBONDATA-2820, we will first block deferred rebuild for lucene
+  test("test block rebuild for lucene") {
+    val deferredRebuildException = intercept[MalformedDataMapCommandException] {
+      sql(
+        s"""
+           | CREATE DATAMAP dm ON TABLE datamap_test
+           | USING 'lucene'
+           | WITH DEFERRED REBUILD
+           | DMProperties('INDEX_COLUMNS'='city')
+      """.stripMargin)
+    }
+    assert(deferredRebuildException.getMessage.contains(
+      s"DEFERRED REBUILD is not supported on this datamap dm with provider lucene"))
+
+    sql(
+      s"""
+         | CREATE DATAMAP dm ON TABLE datamap_test
+         | USING 'lucene'
+         | DMProperties('INDEX_COLUMNS'='city')
+      """.stripMargin)
+    val exception = intercept[MalformedDataMapCommandException] {
+      sql(s"REBUILD DATAMAP dm ON TABLE datamap_test")
+    }
+    sql("drop datamap dm on table datamap_test")
+    assert(exception.getMessage.contains("Non-lazy datamap dm does not support rebuild"))
+  }
+
+  ignore("test lucene rebuild data map") {
     sql("DROP TABLE IF EXISTS datamap_test4")
     sql(
       """
@@ -658,7 +685,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
     assert(ex6.getMessage.contains("Delete operation is not supported"))
   }
 
-  test("test lucene fine grain multiple data map on table") {
+  ignore("test lucene fine grain multiple data map on table") {
     sql("DROP TABLE IF EXISTS datamap_test5")
     sql(
       """
@@ -691,7 +718,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
     sql("DROP TABLE IF EXISTS datamap_test5")
   }
 
-  test("test lucene fine grain datamap rebuild") {
+  ignore("test lucene fine grain datamap rebuild") {
     sql("DROP TABLE IF EXISTS datamap_test5")
     sql(
       """

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapWithSearchModeSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapWithSearchModeSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapWithSearchModeSuite.scala
index e6a2a36..0ac6e72 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapWithSearchModeSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapWithSearchModeSuite.scala
@@ -133,7 +133,7 @@ class LuceneFineGrainDataMapWithSearchModeSuite extends QueryTest with BeforeAnd
     sql("DROP TABLE IF EXISTS datamap_test_table")
   }
 
-  test("test lucene fine grain datamap rebuild") {
+  ignore("test lucene fine grain datamap rebuild") {
     sql("DROP TABLE IF EXISTS datamap_test5")
     sql(
       """

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
index ddfb9e7..f0c335d 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggCreateCommand.scala
@@ -437,16 +437,6 @@ class TestPreAggCreateCommand extends QueryTest with BeforeAndAfterAll {
     }
   }
 
-  test("test pre agg datamap with deferred rebuild") {
-    val e = intercept[MalformedDataMapCommandException] {
-      sql("create datamap failure on table PreAggMain1 " +
-          "using 'preaggregate' " +
-          "with deferred rebuild " +
-          "as select a as a1,sum(b) as sum from PreAggMain1 group by a")
-    }
-    assert(e.getMessage.contains("DEFERRED REBUILD is not supported on this DataMap"))
-  }
-
   // TODO: Need to Fix
   ignore("test creation of multiple preaggregate of same name concurrently") {
     sql("DROP TABLE IF EXISTS tbl_concurr")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala
index 55994e8..818dd7c 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/preaggregate/TestPreAggregateLoad.scala
@@ -964,4 +964,40 @@ test("check load and select for avg double datatype") {
       .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
   }
 
+  test("test deferred rebuild is not supported for preagg") {
+    val baseTable = "maintable"
+    val preagg = "preaggtable"
+    sql(s"DROP TABLE IF EXISTS $baseTable")
+    sql(
+      s"""
+        | CREATE TABLE $baseTable(id int, name string, city string, age int)
+        | STORED BY 'org.apache.carbondata.format'
+      """.stripMargin)
+    val deferredRebuildException = intercept[MalformedDataMapCommandException] {
+      sql(
+        s"""
+           | CREATE DATAMAP $preagg ON TABLE $baseTable
+           | USING 'preaggregate'
+           | WITH DEFERRED REBUILD
+           | AS select id, sum(age) from $baseTable group by id
+       """.stripMargin)
+    }
+    assert(deferredRebuildException.getMessage.contains(
+      s"DEFERRED REBUILD is not supported on this datamap $preagg with provider preaggregate"))
+
+    sql(
+      s"""
+         | CREATE DATAMAP $preagg ON TABLE $baseTable
+         | USING 'preaggregate'
+         | AS select id, sum(age) from $baseTable group by id
+       """.stripMargin)
+    sql(s"LOAD DATA LOCAL INPATH '$testData' into table $baseTable")
+    checkExistence(sql(s"SHOW DATAMAP ON TABLE $baseTable"), true, preagg, "preaggregate")
+    val exception = intercept[MalformedDataMapCommandException] {
+      sql(s"REBUILD DATAMAP $preagg ON TABLE $baseTable").show()
+    }
+    LOGGER.error(s"XU ${exception.getMessage}")
+    assert(exception.getMessage.contains(s"Non-lazy datamap $preagg does not support rebuild"))
+    sql(s"DROP TABLE IF EXISTS $baseTable")
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapCommand.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapCommand.scala
index afca3b2..8ebed1f 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapCommand.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapCommand.scala
@@ -225,7 +225,6 @@ class TestDataMapCommand extends QueryTest with BeforeAndAfterAll {
     sql(
       s"""
          | create datamap $datamapName3 on table $tableName using 'bloomfilter'
-         | with deferred rebuild
          | DMPROPERTIES ('index_columns'='c')
        """.stripMargin)
     var result = sql(s"show datamap on table $tableName").cache()

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapStatus.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapStatus.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapStatus.scala
index f1c9432..fec2279 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapStatus.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapStatus.scala
@@ -310,4 +310,6 @@ class TestDataMapFactory(
       }
     }
   }
+
+  override def supportRebuild(): Boolean = true
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/integration/spark2/src/main/java/org/apache/carbondata/datamap/IndexDataMapProvider.java
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/java/org/apache/carbondata/datamap/IndexDataMapProvider.java b/integration/spark2/src/main/java/org/apache/carbondata/datamap/IndexDataMapProvider.java
index cb5a1b1..487148d 100644
--- a/integration/spark2/src/main/java/org/apache/carbondata/datamap/IndexDataMapProvider.java
+++ b/integration/spark2/src/main/java/org/apache/carbondata/datamap/IndexDataMapProvider.java
@@ -127,4 +127,9 @@ public class IndexDataMapProvider extends DataMapProvider {
   public DataMapFactory getDataMapFactory() {
     return dataMapFactory;
   }
+
+  @Override
+  public boolean supportRebuild() {
+    return dataMapFactory.supportRebuild();
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java b/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java
index 099d65d..233c41f 100644
--- a/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java
+++ b/integration/spark2/src/main/java/org/apache/carbondata/datamap/PreAggregateDataMapProvider.java
@@ -104,4 +104,9 @@ public class PreAggregateDataMapProvider extends DataMapProvider {
   public DataMapFactory getDataMapFactory() {
     throw new UnsupportedOperationException();
   }
+
+  @Override
+  public boolean supportRebuild() {
+    return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
index 1e4c2c3..17376a9 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonCreateDataMapCommand.scala
@@ -44,9 +44,10 @@ case class CarbonCreateDataMapCommand(
     dmProperties: Map[String, String],
     queryString: Option[String],
     ifNotExistsSet: Boolean = false,
-    deferredRebuild: Boolean = false)
+    var deferredRebuild: Boolean = false)
   extends AtomicRunnableCommand {
 
+  private val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
   private var dataMapProvider: DataMapProvider = _
   private var mainTable: CarbonTable = _
   private var dataMapSchema: DataMapSchema = _
@@ -89,6 +90,13 @@ case class CarbonCreateDataMapCommand(
 
     val property = dmProperties.map(x => (x._1.trim, x._2.trim)).asJava
     val javaMap = new java.util.HashMap[String, String](property)
+    // for MV, it is deferred rebuild by default and cannot be non-deferred rebuild
+    if (dataMapSchema.getProviderName.equalsIgnoreCase(DataMapClassProvider.MV.getShortName)) {
+      if (!deferredRebuild) {
+        LOGGER.warn(s"DEFERRED REBUILD is enabled by default for MV datamap $dataMapName")
+      }
+      deferredRebuild = true
+    }
     javaMap.put(DataMapProperty.DEFERRED_REBUILD, deferredRebuild.toString)
     dataMapSchema.setProperties(javaMap)
 
@@ -97,6 +105,12 @@ case class CarbonCreateDataMapCommand(
         "For this datamap, main table is required. Use `CREATE DATAMAP ... ON TABLE ...` ")
     }
     dataMapProvider = DataMapManager.get.getDataMapProvider(mainTable, dataMapSchema, sparkSession)
+    if (deferredRebuild && !dataMapProvider.supportRebuild()) {
+      throw new MalformedDataMapCommandException(
+        s"DEFERRED REBUILD is not supported on this datamap $dataMapName" +
+        s" with provider ${dataMapSchema.getProviderName}")
+    }
+
     val systemFolderLocation: String = CarbonProperties.getInstance().getSystemFolderLocation
     val operationContext: OperationContext = new OperationContext()
 
@@ -138,10 +152,6 @@ case class CarbonCreateDataMapCommand(
         dataMapProvider.initMeta(queryString.orNull)
         DataMapStatusManager.disableDataMap(dataMapName)
       case _ =>
-        if (deferredRebuild) {
-          throw new MalformedDataMapCommandException(
-            "DEFERRED REBUILD is not supported on this DataMap")
-        }
         dataMapProvider.initMeta(queryString.orNull)
     }
     val createDataMapPostExecutionEvent: CreateDataMapPostExecutionEvent =
@@ -149,7 +159,6 @@ case class CarbonCreateDataMapCommand(
         systemFolderLocation, tableIdentifier, dmProviderName)
     OperationListenerBus.getInstance().fireEvent(createDataMapPostExecutionEvent,
       operationContext)
-    val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
     LOGGER.audit(s"DataMap $dataMapName successfully added")
     Seq.empty
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRebuildCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRebuildCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRebuildCommand.scala
index f3db6ca..0c85fe1 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRebuildCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapRebuildCommand.scala
@@ -21,10 +21,10 @@ import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.execution.command.DataCommand
 
-import org.apache.carbondata.core.datamap.{DataMapRegistry, DataMapStoreManager}
+import org.apache.carbondata.common.exceptions.sql.MalformedDataMapCommandException
 import org.apache.carbondata.core.datamap.status.DataMapStatusManager
 import org.apache.carbondata.core.util.CarbonProperties
-import org.apache.carbondata.datamap.{DataMapManager, IndexDataMapRebuildRDD}
+import org.apache.carbondata.datamap.DataMapManager
 import org.apache.carbondata.events.{UpdateDataMapPostExecutionEvent, _}
 
 /**
@@ -36,7 +36,19 @@ case class CarbonDataMapRebuildCommand(
     tableIdentifier: Option[TableIdentifier]) extends DataCommand {
 
   override def processData(sparkSession: SparkSession): Seq[Row] = {
-    val schema = DataMapStoreManager.getInstance().getDataMapSchema(dataMapName)
+    import scala.collection.JavaConverters._
+    val schemaOption = CarbonDataMapShowCommand(tableIdentifier).getAllDataMaps(sparkSession)
+      .asScala
+      .find(p => p.getDataMapName.equalsIgnoreCase(dataMapName))
+    if (schemaOption.isEmpty) {
+      throw new MalformedDataMapCommandException(
+        s"Datamap with name $dataMapName does not exist on table ${tableIdentifier.get.table}")
+    }
+    val schema = schemaOption.get
+    if (!schema.isLazy) {
+      throw new MalformedDataMapCommandException(
+        s"Non-lazy datamap $dataMapName does not support rebuild")
+    }
 
     val table = tableIdentifier match {
       case Some(identifier) =>
@@ -47,6 +59,7 @@ case class CarbonDataMapRebuildCommand(
           schema.getRelationIdentifier.getTableName
         )(sparkSession)
     }
+
     val provider = DataMapManager.get().getDataMapProvider(table, schema, sparkSession)
     provider.rebuild()
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapShowCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapShowCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapShowCommand.scala
index 3ee8e67..b583a30 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapShowCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/datamap/CarbonDataMapShowCommand.scala
@@ -47,6 +47,13 @@ case class CarbonDataMapShowCommand(tableIdentifier: Option[TableIdentifier])
   }
 
   override def processData(sparkSession: SparkSession): Seq[Row] = {
+    convertToRow(getAllDataMaps(sparkSession))
+  }
+
+  /**
+   * get all datamaps for this table, including preagg, index datamaps and mv
+   */
+  def getAllDataMaps(sparkSession: SparkSession): util.List[DataMapSchema] = {
     val dataMapSchemaList: util.List[DataMapSchema] = new util.ArrayList[DataMapSchema]()
     tableIdentifier match {
       case Some(table) =>
@@ -59,10 +66,10 @@ case class CarbonDataMapShowCommand(tableIdentifier: Option[TableIdentifier])
         if (!indexSchemas.isEmpty) {
           dataMapSchemaList.addAll(indexSchemas)
         }
-        convertToRow(dataMapSchemaList)
       case _ =>
-        convertToRow(DataMapStoreManager.getInstance().getAllDataMapSchemas)
+        dataMapSchemaList.addAll(DataMapStoreManager.getInstance().getAllDataMapSchemas)
     }
+    dataMapSchemaList
   }
 
   private def convertToRow(schemaList: util.List[DataMapSchema]) = {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/abcd4f6e/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala b/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala
index 1d57268..cca1b67 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala
@@ -177,7 +177,39 @@ class BloomCoarseGrainDataMapSuite extends QueryTest with BeforeAndAfterAll with
     sql(s"DROP TABLE IF EXISTS $bloomDMSampleTable")
   }
 
-  test("test create bloom datamap and REBUILD DATAMAP") {
+  // for CARBONDATA-2820, we will first block deferred rebuild for bloom
+  test("test block deferred rebuild for bloom") {
+    sql(
+      s"""
+         | CREATE TABLE $bloomDMSampleTable(id INT, name STRING, city STRING, age INT,
+         | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
+         | STORED BY 'carbondata' TBLPROPERTIES('table_blocksize'='128')
+         |  """.stripMargin)
+    val deferredRebuildException = intercept[MalformedDataMapCommandException] {
+      sql(
+        s"""
+           | CREATE DATAMAP $dataMapName ON TABLE $bloomDMSampleTable
+           | USING 'bloomfilter'
+           | WITH DEFERRED REBUILD
+           | DMProperties('INDEX_COLUMNS'='city,id', 'BLOOM_SIZE'='640000')
+      """.stripMargin)
+    }
+    assert(deferredRebuildException.getMessage.contains(
+      s"DEFERRED REBUILD is not supported on this datamap $dataMapName with provider bloomfilter"))
+
+    sql(
+      s"""
+         | CREATE DATAMAP $dataMapName ON TABLE $bloomDMSampleTable
+         | USING 'bloomfilter'
+         | DMProperties('INDEX_COLUMNS'='city,id', 'BLOOM_SIZE'='640000')
+      """.stripMargin)
+    val exception = intercept[MalformedDataMapCommandException] {
+      sql(s"REBUILD DATAMAP $dataMapName ON TABLE $bloomDMSampleTable")
+    }
+    assert(exception.getMessage.contains(s"Non-lazy datamap $dataMapName does not support rebuild"))
+  }
+
+  ignore("test create bloom datamap and REBUILD DATAMAP") {
     sql(
       s"""
          | CREATE TABLE $normalTable(id INT, name STRING, city STRING, age INT,
@@ -219,7 +251,7 @@ class BloomCoarseGrainDataMapSuite extends QueryTest with BeforeAndAfterAll with
     sql(s"DROP TABLE IF EXISTS $bloomDMSampleTable")
   }
 
-  test("test create bloom datamap with DEFERRED REBUILD, query hit datamap") {
+  ignore("test create bloom datamap with DEFERRED REBUILD, query hit datamap") {
     sql(
       s"""
          | CREATE TABLE $normalTable(id INT, name STRING, city STRING, age INT,
@@ -297,7 +329,7 @@ class BloomCoarseGrainDataMapSuite extends QueryTest with BeforeAndAfterAll with
     sql(s"DROP TABLE IF EXISTS $bloomDMSampleTable")
   }
 
-  test("test create bloom datamap with DEFERRED REBUILD, query not hit datamap") {
+  ignore("test create bloom datamap with DEFERRED REBUILD, query not hit datamap") {
     sql(
       s"""
          | CREATE TABLE $normalTable(id INT, name STRING, city STRING, age INT,
@@ -466,7 +498,7 @@ class BloomCoarseGrainDataMapSuite extends QueryTest with BeforeAndAfterAll with
     sql(
       s"""
          | CREATE DATAMAP $dataMapName ON TABLE $normalTable
-         | USING 'bloomfilter' WITH DEFERRED REBUILD
+         | USING 'bloomfilter'
          | DMProperties( 'INDEX_COLUMNS'='city,id', 'BLOOM_SIZE'='640000')
       """.stripMargin)
     val exception: MalformedCarbonCommandException = intercept[MalformedCarbonCommandException] {
@@ -487,7 +519,6 @@ class BloomCoarseGrainDataMapSuite extends QueryTest with BeforeAndAfterAll with
       s"""
          | CREATE DATAMAP $dataMapName ON TABLE $normalTable
          | USING 'bloomfilter'
-         | WITH DEFERRED REBUILD
          | DMProperties('INDEX_COLUMNS'='city,id', 'BLOOM_SIZE'='640000')
       """.stripMargin)
     val exception: MalformedCarbonCommandException = intercept[MalformedCarbonCommandException] {
@@ -544,7 +575,6 @@ class BloomCoarseGrainDataMapSuite extends QueryTest with BeforeAndAfterAll with
         s"""
            | CREATE DATAMAP $dataMapName ON TABLE $normalTable
            | USING 'bloomfilter'
-           | WITH DEFERRED REBUILD
            | DMProperties('INDEX_COLUMNS'='city,id', 'BLOOM_SIZE'='640000')
            | """.stripMargin)
     }


[39/50] [abbrv] carbondata git commit: [Documentation] Editorial review comment fixed

Posted by ja...@apache.org.
[Documentation] Editorial review comment fixed

Minor issues fixed (spelling, syntax, and missing info)

This closes #2603


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/12725b75
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/12725b75
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/12725b75

Branch: refs/heads/external-format
Commit: 12725b75c7133971cc8a29d343def55ebd273c85
Parents: 9336924
Author: sgururajshetty <sg...@gmail.com>
Authored: Thu Aug 2 19:57:31 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Fri Aug 3 18:50:23 2018 +0530

----------------------------------------------------------------------
 docs/configuration-parameters.md          |  2 +-
 docs/data-management-on-carbondata.md     | 39 ++++++++++++++------------
 docs/datamap/bloomfilter-datamap-guide.md | 12 ++++----
 docs/datamap/lucene-datamap-guide.md      |  2 +-
 docs/datamap/timeseries-datamap-guide.md  |  2 +-
 docs/sdk-guide.md                         |  8 +++---
 6 files changed, 34 insertions(+), 31 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/12725b75/docs/configuration-parameters.md
----------------------------------------------------------------------
diff --git a/docs/configuration-parameters.md b/docs/configuration-parameters.md
index 6e4dea5..77cf230 100644
--- a/docs/configuration-parameters.md
+++ b/docs/configuration-parameters.md
@@ -140,7 +140,7 @@ This section provides the details of all the configurations required for CarbonD
 | carbon.enableMinMax | true | Min max is feature added to enhance query performance. To disable this feature, set it false. |
 | carbon.dynamicallocation.schedulertimeout | 5 | Specifies the maximum time (unit in seconds) the scheduler can wait for executor to be active. Minimum value is 5 sec and maximum value is 15 sec. |
 | carbon.scheduler.minregisteredresourcesratio | 0.8 | Specifies the minimum resource (executor) ratio needed for starting the block distribution. The default value is 0.8, which indicates 80% of the requested resource is allocated for starting block distribution.  The minimum value is 0.1 min and the maximum value is 1.0. | 
-| carbon.search.enabled | false | If set to true, it will use CarbonReader to do distributed scan directly instead of using compute framework like spark, thus avoiding limitation of compute framework like SQL optimizer and task scheduling overhead. |
+| carbon.search.enabled (Alpha Feature) | false | If set to true, it will use CarbonReader to do distributed scan directly instead of using compute framework like spark, thus avoiding limitation of compute framework like SQL optimizer and task scheduling overhead. |
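
For reference, switching this property on programmatically is a one-liner; a minimal sketch, assuming the documented property key above (everything else is illustrative):

```
import org.apache.carbondata.core.util.CarbonProperties

// Enable the experimental CarbonReader-based distributed scan path.
CarbonProperties.getInstance()
  .addProperty("carbon.search.enabled", "true")
```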
 
 * **Global Dictionary Configurations**
   

http://git-wip-us.apache.org/repos/asf/carbondata/blob/12725b75/docs/data-management-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/data-management-on-carbondata.md b/docs/data-management-on-carbondata.md
index 836fff9..41fd513 100644
--- a/docs/data-management-on-carbondata.md
+++ b/docs/data-management-on-carbondata.md
@@ -87,6 +87,25 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
      * BATCH_SORT: It increases the load performance but decreases the query performance if identified blocks > parallelism.
      * GLOBAL_SORT: It increases the query performance, especially high concurrent point query.
        And if you care about loading resources isolation strictly, because the system uses the spark GroupBy to sort data, the resource can be controlled by spark. 
+	 
+	### Example:
+
+   ```
+    CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
+                                   productNumber INT,
+                                   productName STRING,
+                                   storeCity STRING,
+                                   storeProvince STRING,
+                                   productCategory STRING,
+                                   productBatch STRING,
+                                   saleQuantity INT,
+                                   revenue INT)
+    STORED BY 'carbondata'
+    TBLPROPERTIES ('SORT_COLUMNS'='productName,storeCity',
+                   'SORT_SCOPE'='NO_SORT')
+   ```
+   
+   **NOTE:** CarbonData also supports "using carbondata". Find example code at [SparkSessionExample](https://github.com/apache/carbondata/blob/master/examples/spark2/src/main/scala/org/apache/carbondata/examples/SparkSessionExample.scala) in the CarbonData repo.
  
    - **Table Block Size Configuration**
 
@@ -170,23 +189,6 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
      TBLPROPERTIES('LOCAL_DICTIONARY_ENABLE'='true','LOCAL_DICTIONARY_THRESHOLD'='1000',
      'LOCAL_DICTIONARY_INCLUDE'='column1','LOCAL_DICTIONARY_EXCLUDE'='column2')
    ```
-### Example:
-
-   ```
-    CREATE TABLE IF NOT EXISTS productSchema.productSalesTable (
-                                   productNumber INT,
-                                   productName STRING,
-                                   storeCity STRING,
-                                   storeProvince STRING,
-                                   productCategory STRING,
-                                   productBatch STRING,
-                                   saleQuantity INT,
-                                   revenue INT)
-    STORED BY 'carbondata'
-    TBLPROPERTIES ('SORT_COLUMNS'='productName,storeCity',
-                   'SORT_SCOPE'='NO_SORT')
-   ```
-  **NOTE:** CarbonData also supports "using carbondata". Find example code at [SparkSessionExample](https://github.com/apache/carbondata/blob/master/examples/spark2/src/main/scala/org/apache/carbondata/examples/SparkSessionExample.scala) in the CarbonData repo.
    
    - **Caching Min/Max Value for Required Columns**
      By default, CarbonData caches min and max values of all the columns in schema.  As the load increases, the memory required to hold the min and max values increases considerably. This feature enables you to configure min and max values only for the required columns, resulting in optimized memory usage. 
@@ -210,7 +212,7 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
 	 COLUMN_META_CACHE=’col1,col2,col3,…’
 	 ```
 	 
-	 Columns to be cached can be specifies either while creating tale or after creation of the table.
+	 Columns to be cached can be specified either while creating table or after creation of the table.
 	 During create table operation; specify the columns to be cached in table properties.
 	 
 	 Syntax:
@@ -574,6 +576,7 @@ Users can specify which columns to include and exclude for local dictionary gene
   ```
   REFRESH TABLE dbcarbon.productSalesTable
   ```
+  
   **NOTE:** 
   * The new database name and the old database name should be same.
   * Before executing this command the old table schema and data should be copied into the new database location.
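
For the COLUMN_META_CACHE property touched above, a minimal sketch of the two ways of setting it, in the style of the Spark SQL test suites (table and column names are illustrative):

```
// Cache min/max values only for the columns that are actually filtered on,
// specified at create time ...
sql(
  s"""
     | CREATE TABLE cache_demo(id INT, name STRING, city STRING)
     | STORED BY 'carbondata'
     | TBLPROPERTIES('COLUMN_META_CACHE'='id,name')
   """.stripMargin)

// ... or changed later on an existing table.
sql("ALTER TABLE cache_demo SET TBLPROPERTIES('COLUMN_META_CACHE'='city')")
```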

http://git-wip-us.apache.org/repos/asf/carbondata/blob/12725b75/docs/datamap/bloomfilter-datamap-guide.md
----------------------------------------------------------------------
diff --git a/docs/datamap/bloomfilter-datamap-guide.md b/docs/datamap/bloomfilter-datamap-guide.md
index 8955cde..ccbcabe 100644
--- a/docs/datamap/bloomfilter-datamap-guide.md
+++ b/docs/datamap/bloomfilter-datamap-guide.md
@@ -1,4 +1,4 @@
-# CarbonData BloomFilter DataMap (Alpha feature in 1.4.0)
+# CarbonData BloomFilter DataMap (Alpha Feature)
 
 * [DataMap Management](#datamap-management)
 * [BloomFilter Datamap Introduction](#bloomfilter-datamap-introduction)
@@ -41,10 +41,10 @@ Disable Datamap
 
 ## BloomFilter DataMap Introduction
 A Bloom filter is a space-efficient probabilistic data structure that is used to test whether an element is a member of a set.
-Carbondata introduce BloomFilter as an index datamap to enhance the performance of querying with precise value.
+Carbondata introduced BloomFilter as an index datamap to enhance the performance of querying with precise value.
 It is well suited for queries that do a precise match on high cardinality columns (such as Name/ID).
 Internally, CarbonData maintains a BloomFilter per blocklet for each index column to indicate that whether a value of the column is in this blocklet.
-Just like the other datamaps, BloomFilter datamap is managed ablong with main tables by CarbonData.
+Just like the other datamaps, BloomFilter datamap is managed along with main tables by CarbonData.
 User can create BloomFilter datamap on specified columns with specified BloomFilter configurations such as size and probability.
 
 For instance, main table called **datamap_test** which is defined as:
@@ -83,9 +83,9 @@ User can create BloomFilter datamap using the Create DataMap DDL:
 
 | Property | Is Required | Default Value | Description |
 |-------------|----------|--------|---------|
-| INDEX_COLUMNS | YES |  | Carbondata will generate BloomFilter index on these columns. Queries on there columns are usually like 'COL = VAL'. |
-| BLOOM_SIZE | NO | 640000 | This value is internally used by BloomFilter as the number of expected insertions, it will affects the size of BloomFilter index. Since each blocklet has a BloomFilter here, so the default value is the approximate distinct index values in a blocklet assuming that each blocklet contains 20 pages and each page contains 32000 records. The value should be an integer. |
-| BLOOM_FPP | NO | 0.00001 | This value is internally used by BloomFilter as the False-Positive Probability, it will affects the size of bloomfilter index as well as the number of hash functions for the BloomFilter. The value should be in range (0, 1). In one test scenario, a 96GB TPCH customer table with bloom_size=320000 and bloom_fpp=0.00001 will result in 18 false positive samples. |
+| INDEX_COLUMNS | YES |  | Carbondata will generate BloomFilter index on these columns. Queries on these columns are usually like 'COL = VAL'. |
+| BLOOM_SIZE | NO | 640000 | This value is internally used by BloomFilter as the number of expected insertions, it will affect the size of BloomFilter index. Since each blocklet has a BloomFilter here, so the default value is the approximate distinct index values in a blocklet assuming that each blocklet contains 20 pages and each page contains 32000 records. The value should be an integer. |
+| BLOOM_FPP | NO | 0.00001 | This value is internally used by BloomFilter as the False-Positive Probability, it will affect the size of bloomfilter index as well as the number of hash functions for the BloomFilter. The value should be in the range (0, 1). In one test scenario, a 96GB TPCH customer table with bloom_size=320000 and bloom_fpp=0.00001 will result in 18 false positive samples. |
 | BLOOM_COMPRESS | NO | true | Whether to compress the BloomFilter index files. |
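
Putting the three tuning properties from the table together, a hedged example against the **datamap_test** table mentioned earlier (the values are simply the documented defaults):

```
sql(
  s"""
     | CREATE DATAMAP dm_city ON TABLE datamap_test
     | USING 'bloomfilter'
     | DMPROPERTIES('INDEX_COLUMNS'='city,id',
     |              'BLOOM_SIZE'='640000',
     |              'BLOOM_FPP'='0.00001',
     |              'BLOOM_COMPRESS'='true')
   """.stripMargin)
```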
 
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/12725b75/docs/datamap/lucene-datamap-guide.md
----------------------------------------------------------------------
diff --git a/docs/datamap/lucene-datamap-guide.md b/docs/datamap/lucene-datamap-guide.md
index 5f7a2e4..119b609 100644
--- a/docs/datamap/lucene-datamap-guide.md
+++ b/docs/datamap/lucene-datamap-guide.md
@@ -1,4 +1,4 @@
-# CarbonData Lucene DataMap (Alpha feature in 1.4.0)
+# CarbonData Lucene DataMap (Alpha Feature)
   
 * [DataMap Management](#datamap-management)
 * [Lucene Datamap](#lucene-datamap-introduction)

http://git-wip-us.apache.org/repos/asf/carbondata/blob/12725b75/docs/datamap/timeseries-datamap-guide.md
----------------------------------------------------------------------
diff --git a/docs/datamap/timeseries-datamap-guide.md b/docs/datamap/timeseries-datamap-guide.md
index bea5286..15ca3fc 100644
--- a/docs/datamap/timeseries-datamap-guide.md
+++ b/docs/datamap/timeseries-datamap-guide.md
@@ -4,7 +4,7 @@
 * [Compaction](#compacting-pre-aggregate-tables)
 * [Data Management](#data-management-with-pre-aggregate-tables)
 
-## Timeseries DataMap Introduction (Alpha feature in 1.3.0)
+## Timeseries DataMap Introduction (Alpha Feature)
 Timeseries DataMap is a pre-aggregate table implementation based on the 'pre-aggregate' DataMap.
 Difference is that Timeseries DataMap has built-in understanding of time hierarchy and
 levels: year, month, day, hour, minute, so that it supports automatic roll-up in time dimension 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/12725b75/docs/sdk-guide.md
----------------------------------------------------------------------
diff --git a/docs/sdk-guide.md b/docs/sdk-guide.md
index 562269e..c7bff59 100644
--- a/docs/sdk-guide.md
+++ b/docs/sdk-guide.md
@@ -130,7 +130,7 @@ public class TestSdkJson {
        testJsonSdkWriter();
    }
    
-   public void testJsonSdkWriter() throws InvalidLoadOptionException {
+   public static void testJsonSdkWriter() throws InvalidLoadOptionException {
     String path = "./target/testJsonSdkWriter";
 
     Field[] fields = new Field[2];
@@ -297,7 +297,7 @@ public CarbonWriterBuilder persistSchemaFile(boolean persist);
 *               by default it is system time in nano seconds.
 * @return updated CarbonWriterBuilder
 */
-public CarbonWriterBuilder taskNo(String taskNo);
+public CarbonWriterBuilder taskNo(long taskNo);
 ```
 
 ```
@@ -340,7 +340,7 @@ public CarbonWriterBuilder withLoadOptions(Map<String, String> options);
 * @throws IOException
 * @throws InvalidLoadOptionException
 */
-public CarbonWriter buildWriterForCSVInput() throws IOException, InvalidLoadOptionException;
+public CarbonWriter buildWriterForCSVInput(org.apache.carbondata.sdk.file.Schema schema) throws IOException, InvalidLoadOptionException;
 ```
 
 ```  
@@ -351,7 +351,7 @@ public CarbonWriter buildWriterForCSVInput() throws IOException, InvalidLoadOpti
 * @throws IOException
 * @throws InvalidLoadOptionException
 */
-public CarbonWriter buildWriterForAvroInput() throws IOException, InvalidLoadOptionException;
+public CarbonWriter buildWriterForAvroInput(org.apache.avro.Schema schema) throws IOException, InvalidLoadOptionException;
 ```
 
 ```

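The effect of the signature changes above is easiest to see in a small writer sketch; it assumes the SDK classes shown elsewhere in the guide (Field, Schema, DataTypes) and an illustrative output path:

```
import org.apache.carbondata.core.metadata.datatype.DataTypes
import org.apache.carbondata.sdk.file.{CarbonWriter, Field, Schema}

// The Schema is now passed to buildWriterForCSVInput, and taskNo takes a long.
val fields = Array(
  new Field("name", DataTypes.STRING),
  new Field("age", DataTypes.INT))
val writer = CarbonWriter.builder()
  .outputPath("./target/sdkWriterOutput")
  .taskNo(1L)
  .buildWriterForCSVInput(new Schema(fields))
writer.write(Array("robot", "10"))
writer.close()
```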

[43/50] [abbrv] carbondata git commit: [CARBONDATA-2829][CARBONDATA-2832] Fix creating merge index on older V1 V2 store

Posted by ja...@apache.org.
[CARBONDATA-2829][CARBONDATA-2832] Fix creating merge index on older V1 V2 store

Block merge index creation for old store versions (V1 and V2)

This closes #2608


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/b702a1b0
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/b702a1b0
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/b702a1b0

Branch: refs/heads/external-format
Commit: b702a1b01414308de710c1d1471a064184843c37
Parents: 40571b8
Author: dhatchayani <dh...@gmail.com>
Authored: Mon Aug 6 12:15:26 2018 +0530
Committer: manishgupta88 <to...@gmail.com>
Committed: Tue Aug 7 14:10:44 2018 +0530

----------------------------------------------------------------------
 .../management/CarbonAlterTableCompactionCommand.scala    | 10 ++++++++++
 1 file changed, 10 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/b702a1b0/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
index a4adbbb..e0b0547 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAlterTableCompactionCommand.scala
@@ -37,6 +37,7 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.exception.ConcurrentOperationException
 import org.apache.carbondata.core.locks.{CarbonLockFactory, LockUsage}
+import org.apache.carbondata.core.metadata.ColumnarFormatVersion
 import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, TableInfo}
 import org.apache.carbondata.core.mutate.CarbonUpdateUtil
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager
@@ -122,6 +123,15 @@ case class CarbonAlterTableCompactionCommand(
           "Unsupported alter operation on carbon table: Merge index is not supported on streaming" +
           " table")
       }
+      val version = CarbonUtil.getFormatVersion(table)
+      val isOlderVersion = version == ColumnarFormatVersion.V1 ||
+                           version == ColumnarFormatVersion.V2
+      if (isOlderVersion) {
+        throw new MalformedCarbonCommandException(
+          "Unsupported alter operation on carbon table: Merge index is not supported on V1 V2 " +
+          "store segments")
+      }
+
       val alterTableMergeIndexEvent: AlterTableMergeIndexEvent =
         AlterTableMergeIndexEvent(sparkSession, table, alterTableModel)
       OperationListenerBus.getInstance


[22/50] [abbrv] carbondata git commit: Problem: Insert into select is failing as both are running as single task, both are sharing the same taskcontext and resources are cleared once if any one of the RDD(Select query's ScanRDD) is completed, so the othe

Posted by ja...@apache.org.
Problem:
Insert into select is failing because both the select query's ScanRDD and the LoadRDD run in a single task and share the same TaskContext; the resources are cleared as soon as either RDD completes, so the other RDD crashes when it tries to access the already-freed memory.

Solution:
Check whether any other RDD is sharing the same task context. If so, do not clear the resources at that point; the other RDD sharing the context should clear the memory once the task is finished.
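
In code, this check boils down to inspecting the completion listeners already registered on the shared TaskContext before deciding who frees the unsafe memory; the sketch below condenses the CarbonScanRDD change in this patch (the helper name is illustrative):

```
import scala.collection.mutable.ArrayBuffer

import org.apache.spark.TaskContext
import org.apache.spark.util.{CarbonReflectionUtils, TaskCompletionListener}

import org.apache.carbondata.spark.rdd.InsertTaskCompletionListener

// The scan side frees the shared unsafe memory only if no insert listener
// is registered on the same TaskContext, i.e. no load is sharing the task.
def scanShouldFreeMemory(context: TaskContext): Boolean = {
  val listeners = CarbonReflectionUtils.getField("onCompleteCallbacks", context)
    .asInstanceOf[ArrayBuffer[TaskCompletionListener]]
  !listeners.exists(_.isInstanceOf[InsertTaskCompletionListener])
}
```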

This closes #2591


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/de924606
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/de924606
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/de924606

Branch: refs/heads/external-format
Commit: de92460665bafb403a4b90b513a9136b6f1fb34c
Parents: 3816e90
Author: dhatchayani <dh...@gmail.com>
Authored: Tue Jul 31 23:11:49 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Wed Aug 1 19:10:56 2018 +0530

----------------------------------------------------------------------
 .../executor/impl/AbstractQueryExecutor.java    | 11 ++-
 .../carbondata/core/scan/model/QueryModel.java  | 11 +++
 .../carbondata/spark/rdd/CarbonScanRDD.scala    | 64 +++++--------
 .../rdd/InsertTaskCompletionListener.scala      | 33 +++++++
 .../spark/rdd/NewCarbonDataLoadRDD.scala        |  3 +-
 .../spark/rdd/QueryTaskCompletionListener.scala | 94 ++++++++++++++++++++
 6 files changed, 168 insertions(+), 48 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/de924606/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
index 5b67921..f87e46e 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
@@ -87,6 +87,9 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
    */
   protected QueryExecutorProperties queryProperties;
 
+  // whether to clear/free unsafe memory or not
+  private boolean freeUnsafeMemory;
+
   /**
    * query result iterator which will execute the query
    * and give the result
@@ -114,6 +117,7 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
         queryModel.getQueryId());
     LOGGER.info("Query will be executed on table: " + queryModel.getAbsoluteTableIdentifier()
         .getCarbonTableIdentifier().getTableName());
+    this.freeUnsafeMemory = queryModel.isFreeUnsafeMemory();
     // Initializing statistics list to record the query statistics
     // creating copy on write to handle concurrent scenario
     queryProperties.queryStatisticsRecorder = queryModel.getStatisticsRecorder();
@@ -641,8 +645,11 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
         exceptionOccurred = e;
       }
     }
-    // clear all the unsafe memory used for the given task ID
-    UnsafeMemoryManager.INSTANCE.freeMemoryAll(ThreadLocalTaskInfo.getCarbonTaskInfo().getTaskId());
+    // clear all the unsafe memory used for the given task ID only if it is neccessary to be cleared
+    if (freeUnsafeMemory) {
+      UnsafeMemoryManager.INSTANCE
+          .freeMemoryAll(ThreadLocalTaskInfo.getCarbonTaskInfo().getTaskId());
+    }
     if (null != queryProperties.executorService) {
       // In case of limit query when number of limit records is already found so executors
       // must stop all the running execution otherwise it will keep running and will hit

http://git-wip-us.apache.org/repos/asf/carbondata/blob/de924606/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModel.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModel.java b/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModel.java
index 55dafb9..31c7a86 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModel.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModel.java
@@ -114,6 +114,9 @@ public class QueryModel {
    */
   private boolean isFG;
 
+  // whether to clear/free unsafe memory or not
+  private boolean freeUnsafeMemory = true;
+
   private QueryModel(CarbonTable carbonTable) {
     tableBlockInfos = new ArrayList<TableBlockInfo>();
     invalidSegmentIds = new ArrayList<>();
@@ -390,4 +393,12 @@ public class QueryModel {
         projection.getDimensions().size() + projection.getMeasures().size(),
         filterExpressionResolverTree.getFilterExpression().toString());
   }
+
+  public boolean isFreeUnsafeMemory() {
+    return freeUnsafeMemory;
+  }
+
+  public void setFreeUnsafeMemory(boolean freeUnsafeMemory) {
+    this.freeUnsafeMemory = freeUnsafeMemory;
+  }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/de924606/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
index 67ea332..6b43999 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
@@ -38,6 +38,7 @@ import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.execution.SQLExecution
 import org.apache.spark.sql.profiler.{GetPartition, Profiler, QueryTaskEnd}
 import org.apache.spark.sql.util.SparkSQLUtil.sessionState
+import org.apache.spark.util.{CarbonReflectionUtils, TaskCompletionListener}
 
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonCommonConstantsInternal}
@@ -457,17 +458,29 @@ class CarbonScanRDD[T: ClassTag](
         }
       }
 
+      // create a statistics recorder
+      val recorder = CarbonTimeStatisticsFactory.createExecutorRecorder(model.getQueryId())
+      model.setStatisticsRecorder(recorder)
+
+      // TODO: rewrite this logic to call free memory in FailureListener on failures. On success,
+      // no memory leak should be there, resources should be freed on success completion.
+      val listeners = CarbonReflectionUtils.getField("onCompleteCallbacks", context)
+        .asInstanceOf[ArrayBuffer[TaskCompletionListener]]
+      val isAdded = listeners.exists(p => p.isInstanceOf[InsertTaskCompletionListener])
+      model.setFreeUnsafeMemory(!isAdded)
       // add task completion before calling initialize as initialize method will internally call
       // for usage of unsafe method for processing of one blocklet and if there is any exception
       // while doing that the unsafe memory occupied for that task will not get cleared
-      context.addTaskCompletionListener { _ =>
-        closeReader.apply()
-        close()
-        logStatistics(executionId, taskId, queryStartTime, model.getStatisticsRecorder, split)
+      context.addTaskCompletionListener { new QueryTaskCompletionListener(!isAdded,
+        reader,
+        inputMetricsStats,
+        executionId,
+        taskId,
+        queryStartTime,
+        model.getStatisticsRecorder,
+        split,
+        queryId)
       }
-      // create a statistics recorder
-      val recorder = CarbonTimeStatisticsFactory.createExecutorRecorder(model.getQueryId())
-      model.setStatisticsRecorder(recorder)
       // initialize the reader
       reader.initialize(inputSplit, attemptContext)
 
@@ -625,43 +638,6 @@ class CarbonScanRDD[T: ClassTag](
     format
   }
 
-  def logStatistics(
-      executionId: String,
-      taskId: Long,
-      queryStartTime: Long,
-      recorder: QueryStatisticsRecorder,
-      split: Partition
-  ): Unit = {
-    if (null != recorder) {
-      val queryStatistic = new QueryStatistic()
-      queryStatistic.addFixedTimeStatistic(QueryStatisticsConstants.EXECUTOR_PART,
-        System.currentTimeMillis - queryStartTime)
-      recorder.recordStatistics(queryStatistic)
-      // print executor query statistics for each task_id
-      val statistics = recorder.statisticsForTask(taskId, queryStartTime)
-      if (statistics != null && executionId != null) {
-        Profiler.invokeIfEnable {
-          val inputSplit = split.asInstanceOf[CarbonSparkPartition].split.value
-          inputSplit.calculateLength()
-          val size = inputSplit.getLength
-          val files = inputSplit.getAllSplits.asScala.map { s =>
-            s.getSegmentId + "/" + s.getPath.getName
-          }.toArray[String]
-          Profiler.send(
-            QueryTaskEnd(
-              executionId.toLong,
-              queryId,
-              statistics.getValues,
-              size,
-              files
-            )
-          )
-        }
-      }
-      recorder.logStatisticsForTask(statistics)
-    }
-  }
-
   /**
    * This method will check and remove InExpression from filterExpression to prevent the List
    * Expression values from serializing and deserializing on executor

http://git-wip-us.apache.org/repos/asf/carbondata/blob/de924606/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/InsertTaskCompletionListener.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/InsertTaskCompletionListener.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/InsertTaskCompletionListener.scala
new file mode 100644
index 0000000..9439ae5
--- /dev/null
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/InsertTaskCompletionListener.scala
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.rdd
+
+import org.apache.spark.TaskContext
+import org.apache.spark.util.TaskCompletionListener
+
+import org.apache.carbondata.core.util.ThreadLocalTaskInfo
+import org.apache.carbondata.processing.loading.DataLoadExecutor
+import org.apache.carbondata.spark.util.CommonUtil
+
+class InsertTaskCompletionListener(dataLoadExecutor: DataLoadExecutor)
+  extends TaskCompletionListener {
+  override def onTaskCompletion(context: TaskContext): Unit = {
+    dataLoadExecutor.close()
+    CommonUtil.clearUnsafeMemory(ThreadLocalTaskInfo.getCarbonTaskInfo.getTaskId)
+  }
+}

http://git-wip-us.apache.org/repos/asf/carbondata/blob/de924606/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
index 6b136bc..3848bad 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
@@ -370,8 +370,7 @@ class NewDataFrameLoaderRDD[K, V](
         loader.initialize()
         val executor = new DataLoadExecutor
         // in case of success, failure or cancelation clear memory and stop execution
-        context.addTaskCompletionListener { context => executor.close()
-          CommonUtil.clearUnsafeMemory(ThreadLocalTaskInfo.getCarbonTaskInfo.getTaskId)}
+        context.addTaskCompletionListener (new InsertTaskCompletionListener(executor))
         executor.execute(model, loader.storeLocation, recordReaders.toArray)
       } catch {
         case e: NoRetryException =>

http://git-wip-us.apache.org/repos/asf/carbondata/blob/de924606/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/QueryTaskCompletionListener.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/QueryTaskCompletionListener.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/QueryTaskCompletionListener.scala
new file mode 100644
index 0000000..e4cb3f8
--- /dev/null
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/QueryTaskCompletionListener.scala
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.rdd
+
+import scala.collection.JavaConverters._
+
+import org.apache.hadoop.mapreduce.RecordReader
+import org.apache.spark.{Partition, TaskContext}
+import org.apache.spark.sql.profiler.{Profiler, QueryTaskEnd}
+import org.apache.spark.util.TaskCompletionListener
+
+import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.memory.UnsafeMemoryManager
+import org.apache.carbondata.core.stats.{QueryStatistic, QueryStatisticsConstants, QueryStatisticsRecorder}
+import org.apache.carbondata.core.util.{TaskMetricsMap, ThreadLocalTaskInfo}
+import org.apache.carbondata.spark.InitInputMetrics
+
+class QueryTaskCompletionListener(freeMemory: Boolean,
+    var reader: RecordReader[Void, Object],
+    inputMetricsStats: InitInputMetrics, executionId: String, taskId: Int, queryStartTime: Long,
+    queryStatisticsRecorder: QueryStatisticsRecorder, split: Partition, queryId: String)
+  extends TaskCompletionListener {
+  override def onTaskCompletion(context: TaskContext): Unit = {
+    if (reader != null) {
+      try {
+        reader.close()
+      } catch {
+        case e: Exception =>
+          LogServiceFactory.getLogService(this.getClass.getCanonicalName).error(e)
+      }
+      reader = null
+    }
+    TaskMetricsMap.getInstance().updateReadBytes(Thread.currentThread().getId)
+    inputMetricsStats.updateAndClose()
+    logStatistics(executionId, taskId, queryStartTime, queryStatisticsRecorder, split)
+    if (freeMemory) {
+      UnsafeMemoryManager.INSTANCE
+        .freeMemoryAll(ThreadLocalTaskInfo.getCarbonTaskInfo.getTaskId)
+    }
+  }
+
+  def logStatistics(
+      executionId: String,
+      taskId: Long,
+      queryStartTime: Long,
+      recorder: QueryStatisticsRecorder,
+      split: Partition
+  ): Unit = {
+    if (null != recorder) {
+      val queryStatistic = new QueryStatistic()
+      queryStatistic.addFixedTimeStatistic(QueryStatisticsConstants.EXECUTOR_PART,
+        System.currentTimeMillis - queryStartTime)
+      recorder.recordStatistics(queryStatistic)
+      // print executor query statistics for each task_id
+      val statistics = recorder.statisticsForTask(taskId, queryStartTime)
+      if (statistics != null && executionId != null) {
+        Profiler.invokeIfEnable {
+          val inputSplit = split.asInstanceOf[CarbonSparkPartition].split.value
+          inputSplit.calculateLength()
+          val size = inputSplit.getLength
+          val files = inputSplit.getAllSplits.asScala.map { s =>
+            s.getSegmentId + "/" + s.getPath.getName
+          }.toArray[String]
+          Profiler.send(
+            QueryTaskEnd(
+              executionId.toLong,
+              queryId,
+              statistics.getValues,
+              size,
+              files
+            )
+          )
+        }
+      }
+      recorder.logStatisticsForTask(statistics)
+    }
+  }
+}
+


[16/50] [abbrv] carbondata git commit: [HotFix][CARBONDATA-2788][BloomDataMap] Fix bugs in incorrect query result with bloom datamap

Posted by ja...@apache.org.
[HotFix][CARBONDATA-2788][BloomDataMap] Fix bugs in incorrect query result with bloom datamap

This PR solves two problems that affect the correctness of queries on the bloom datamap.

Revert PR2539
After reviewing the code, we found that the modification in PR2539 is not needed, so we revert it.

Overflow bug in the blocklet count
CarbonData stores the blocklet count of each block in a byte, so when a block contains more than 127 blocklets the value
overflows the byte range. Here we change the data type to short.
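
As a rough, self-contained illustration in plain Java (the class and variable names below are made up for this example and are not CarbonData code): a signed byte only reaches 127, so counting 128 or more blocklets in a byte wraps around to a negative value, while packing the counts as shorts into a ByteBuffer, as the patch does, keeps them intact.

import java.nio.ByteBuffer;

public class BlockletCountOverflowDemo {
  public static void main(String[] args) {
    int blockletsInBlock = 130;                    // more than a signed byte can represent
    byte overflowed = (byte) blockletsInBlock;
    System.out.println("as byte: " + overflowed);  // prints -126, the real count is lost

    // store the counts as shorts instead, two bytes per block
    short[] counts = {130, 4, 7};
    ByteBuffer buffer = ByteBuffer.allocate(counts.length * 2);
    for (short c : counts) {
      buffer.putShort(c);
    }
    buffer.rewind();
    while (buffer.hasRemaining()) {
      System.out.println("as short: " + buffer.getShort());  // 130, 4, 7 recovered intact
    }
  }
}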

For cache_level=block, after pruning by the main BlockDataMap, the blockletNo in Blocklet is -1, which indicates that the following procedure will scan
the whole block -- all the blocklets in the block. So, when intersecting with the pruned result from BloomDataMap, we
need to take care of these blocklets. In this implementation, we keep a blocklet from the BloomDataMap result only if its block exists in the BlockDataMap result.
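
A minimal sketch of that intersection rule in plain Java, using a made-up SimpleBlocklet type rather than the real ExtendedBlocklet API: a blocklet pruned by the BloomDataMap is kept only when its containing block survived block-level pruning, so the finer-grained bloom result is preserved even though the block-level result only says "whole block".

import java.util.ArrayList;
import java.util.List;

// Simplified, hypothetical type: a blocklet is identified by its block (file) path
// plus a blocklet id; -1 means "the whole block".
class SimpleBlocklet {
  final String blockPath;
  final short blockletId;
  SimpleBlocklet(String blockPath, short blockletId) {
    this.blockPath = blockPath;
    this.blockletId = blockletId;
  }
}

public class BlockLevelIntersectionDemo {
  // keep a bloom-pruned blocklet only if its containing block is in the block-level result
  static List<SimpleBlocklet> intersect(List<SimpleBlocklet> blockLevelPruned,
      List<SimpleBlocklet> bloomPruned) {
    List<SimpleBlocklet> result = new ArrayList<>();
    for (SimpleBlocklet fromBloom : bloomPruned) {
      for (SimpleBlocklet fromBlock : blockLevelPruned) {
        if (fromBlock.blockPath.equals(fromBloom.blockPath)) {
          result.add(fromBloom);   // keep the finer-grained bloom result
          break;
        }
      }
    }
    return result;
  }

  public static void main(String[] args) {
    List<SimpleBlocklet> blockLevel = new ArrayList<>();
    blockLevel.add(new SimpleBlocklet("part-0-0.carbondata", (short) -1)); // whole block kept
    List<SimpleBlocklet> bloom = new ArrayList<>();
    bloom.add(new SimpleBlocklet("part-0-0.carbondata", (short) 1));
    bloom.add(new SimpleBlocklet("part-0-0.carbondata", (short) 2));
    bloom.add(new SimpleBlocklet("part-1-0.carbondata", (short) 0)); // its block was pruned away
    System.out.println(intersect(blockLevel, bloom).size());          // prints 2
  }
}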

This closes #2565


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/1cea4d33
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/1cea4d33
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/1cea4d33

Branch: refs/heads/external-format
Commit: 1cea4d33ff9096fab5d38a1403e1e78c2fa2d6dc
Parents: 34ca021
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Thu Jul 26 23:22:58 2018 +0800
Committer: manishgupta88 <to...@gmail.com>
Committed: Wed Aug 1 10:40:07 2018 +0530

----------------------------------------------------------------------
 .../indexstore/blockletindex/BlockDataMap.java  | 24 +++++++---
 .../blockletindex/BlockletDataMapFactory.java   |  2 +-
 .../hadoop/api/CarbonInputFormat.java           | 28 ++++++++++--
 .../lucene/LuceneFineGrainDataMapSuite.scala    | 14 +++---
 .../datamap/IndexDataMapRebuildRDD.scala        | 10 ++++-
 .../BloomCoarseGrainDataMapFunctionSuite.scala  | 46 +++++++++++++++++++-
 .../bloom/BloomCoarseGrainDataMapSuite.scala    |  2 +-
 7 files changed, 104 insertions(+), 22 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/1cea4d33/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
index 82006c3..f4bb58e 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
@@ -17,6 +17,7 @@
 package org.apache.carbondata.core.indexstore.blockletindex;
 
 import java.io.*;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.BitSet;
 import java.util.List;
@@ -58,7 +59,6 @@ import org.apache.carbondata.core.util.CarbonUtil;
 import org.apache.carbondata.core.util.DataFileFooterConverter;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 
-import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.fs.Path;
 
@@ -248,8 +248,8 @@ public class BlockDataMap extends CoarseGrainDataMap
     byte[][] blockMinValues = null;
     byte[][] blockMaxValues = null;
     DataMapRowImpl summaryRow = null;
-    List<Byte> blockletCountInEachBlock = new ArrayList<>(indexInfo.size());
-    byte totalBlockletsInOneBlock = 0;
+    List<Short> blockletCountInEachBlock = new ArrayList<>(indexInfo.size());
+    short totalBlockletsInOneBlock = 0;
     boolean isLastFileFooterEntryNeedToBeAdded = false;
     CarbonRowSchema[] schema = getFileFooterEntrySchema();
     for (DataFileFooter fileFooter : indexInfo) {
@@ -318,13 +318,22 @@ public class BlockDataMap extends CoarseGrainDataMap
               blockMinValues, blockMaxValues);
       blockletCountInEachBlock.add(totalBlockletsInOneBlock);
     }
-    byte[] blockletCount = ArrayUtils
-        .toPrimitive(blockletCountInEachBlock.toArray(new Byte[blockletCountInEachBlock.size()]));
+    byte[] blockletCount = convertRowCountFromShortToByteArray(blockletCountInEachBlock);
     // blocklet count index is the last index
     summaryRow.setByteArray(blockletCount, taskSummarySchema.length - 1);
     return summaryRow;
   }
 
+  private byte[] convertRowCountFromShortToByteArray(List<Short> blockletCountInEachBlock) {
+    int bufferSize = blockletCountInEachBlock.size() * 2;
+    ByteBuffer byteBuffer = ByteBuffer.allocate(bufferSize);
+    for (Short blockletCount : blockletCountInEachBlock) {
+      byteBuffer.putShort(blockletCount);
+    }
+    byteBuffer.rewind();
+    return byteBuffer.array();
+  }
+
   protected void setLocations(String[] locations, DataMapRow row, int ordinal)
       throws UnsupportedEncodingException {
     // Add location info
@@ -696,7 +705,7 @@ public class BlockDataMap extends CoarseGrainDataMap
       relativeBlockletId = (short) absoluteBlockletId;
     } else {
       int diff = absoluteBlockletId;
-      byte[] blockletRowCountForEachBlock = getBlockletRowCountForEachBlock();
+      ByteBuffer byteBuffer = ByteBuffer.wrap(getBlockletRowCountForEachBlock());
       // Example: absoluteBlockletID = 17, blockletRowCountForEachBlock = {4,3,2,5,7}
       // step1: diff = 17-4, diff = 13
       // step2: diff = 13-3, diff = 10
@@ -704,7 +713,8 @@ public class BlockDataMap extends CoarseGrainDataMap
       // step4: diff = 8-5, diff = 3
       // step5: diff = 3-7, diff = -4 (satisfies <= 0)
       // step6: relativeBlockletId = -4+7, relativeBlockletId = 3 (4th index starting from 0)
-      for (byte blockletCount : blockletRowCountForEachBlock) {
+      while (byteBuffer.hasRemaining()) {
+        short blockletCount = byteBuffer.getShort();
         diff = diff - blockletCount;
         if (diff < 0) {
           relativeBlockletId = (short) (diff + blockletCount);

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1cea4d33/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
index 643cc45..4dd78ee 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
@@ -71,7 +71,7 @@ public class BlockletDataMapFactory extends CoarseGrainDataMapFactory
   /**
    * variable for cache level BLOCKLET
    */
-  private static final String CACHE_LEVEL_BLOCKLET = "BLOCKLET";
+  public static final String CACHE_LEVEL_BLOCKLET = "BLOCKLET";
 
   public static final DataMapSchema DATA_MAP_SCHEMA =
       new DataMapSchema(NAME, BlockletDataMapFactory.class.getName());

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1cea4d33/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
----------------------------------------------------------------------
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
index 356dd5a..eeb3ae8 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
@@ -37,6 +37,7 @@ import org.apache.carbondata.core.datamap.dev.expr.DataMapWrapperSimpleInfo;
 import org.apache.carbondata.core.exception.InvalidConfigurationException;
 import org.apache.carbondata.core.indexstore.ExtendedBlocklet;
 import org.apache.carbondata.core.indexstore.PartitionSpec;
+import org.apache.carbondata.core.indexstore.blockletindex.BlockletDataMapFactory;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.metadata.ColumnarFormatVersion;
 import org.apache.carbondata.core.metadata.schema.PartitionInfo;
@@ -53,6 +54,7 @@ import org.apache.carbondata.core.scan.model.QueryModelBuilder;
 import org.apache.carbondata.core.stats.QueryStatistic;
 import org.apache.carbondata.core.stats.QueryStatisticsConstants;
 import org.apache.carbondata.core.stats.QueryStatisticsRecorder;
+import org.apache.carbondata.core.util.BlockletDataMapUtil;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.CarbonTimeStatisticsFactory;
 import org.apache.carbondata.core.util.CarbonUtil;
@@ -462,8 +464,7 @@ m filterExpression
       }
       // since index datamap prune in segment scope,
       // the result need to intersect with previous pruned result
-      prunedBlocklets = (List) CollectionUtils.intersection(
-          cgPrunedBlocklets, prunedBlocklets);
+      prunedBlocklets = intersectFilteredBlocklets(carbonTable, prunedBlocklets, cgPrunedBlocklets);
       ExplainCollector.recordCGDataMapPruning(
           DataMapWrapperSimpleInfo.fromDataMapWrapper(cgDataMapExprWrapper),
           prunedBlocklets.size());
@@ -482,8 +483,8 @@ m filterExpression
             resolver, segmentIds, fgDataMapExprWrapper, dataMapJob, partitionsToPrune);
         // note that the 'fgPrunedBlocklets' has extra datamap related info compared with
         // 'prunedBlocklets', so the intersection should keep the elements in 'fgPrunedBlocklets'
-        prunedBlocklets = (List) CollectionUtils.intersection(fgPrunedBlocklets,
-            prunedBlocklets);
+        prunedBlocklets = intersectFilteredBlocklets(carbonTable, prunedBlocklets,
+            fgPrunedBlocklets);
         ExplainCollector.recordFGDataMapPruning(
             DataMapWrapperSimpleInfo.fromDataMapWrapper(fgDataMapExprWrapper),
             prunedBlocklets.size());
@@ -492,6 +493,25 @@ m filterExpression
     return prunedBlocklets;
   }
 
+  private List<ExtendedBlocklet> intersectFilteredBlocklets(CarbonTable carbonTable,
+      List<ExtendedBlocklet> previousDataMapPrunedBlocklets,
+      List<ExtendedBlocklet> otherDataMapPrunedBlocklets) {
+    List<ExtendedBlocklet> prunedBlocklets = null;
+    if (BlockletDataMapUtil.isCacheLevelBlock(
+        carbonTable, BlockletDataMapFactory.CACHE_LEVEL_BLOCKLET)) {
+      prunedBlocklets = new ArrayList<>();
+      for (ExtendedBlocklet otherBlocklet : otherDataMapPrunedBlocklets) {
+        if (previousDataMapPrunedBlocklets.contains(otherBlocklet)) {
+          prunedBlocklets.add(otherBlocklet);
+        }
+      }
+    } else {
+      prunedBlocklets = (List) CollectionUtils
+          .intersection(otherDataMapPrunedBlocklets, previousDataMapPrunedBlocklets);
+    }
+    return prunedBlocklets;
+  }
+
   /**
    * Prune the segments from the already pruned blocklets.
    * @param segments

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1cea4d33/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
index a380f04..54cad00 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapSuite.scala
@@ -371,7 +371,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
       """
         | CREATE TABLE datamap_test_table(id INT, name STRING, city STRING, age INT)
         | STORED BY 'carbondata'
-        | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='GLOBAL_SORT')
+        | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='GLOBAL_SORT', 'CACHE_LEVEL'='BLOCKLET')
       """.stripMargin)
     sql(
       s"""
@@ -571,7 +571,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
       """
         | CREATE TABLE main(id INT, name STRING, city STRING, age INT)
         | STORED BY 'carbondata'
-        | TBLPROPERTIES('SORT_COLUMNS'='city,name')
+        | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'CACHE_LEVEL'='BLOCKLET')
       """.stripMargin)
     sql(
       s"""
@@ -664,7 +664,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
       """
         | CREATE TABLE datamap_test5(id INT, name STRING, city STRING, age INT)
         | STORED BY 'carbondata'
-        | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT')
+        | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT', 'CACHE_LEVEL'='BLOCKLET')
       """.stripMargin)
     sql(
       s"""
@@ -803,7 +803,7 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
     sql("drop table if exists table_stop")
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.CARBON_LUCENE_INDEX_STOP_WORDS, "false")
-    sql("create table table_stop(suggestion string,goal string) stored by 'carbondata'")
+    sql("create table table_stop(suggestion string,goal string) stored by 'carbondata' TBLPROPERTIES('CACHE_LEVEL'='BLOCKLET')")
     sql(
       "create datamap stop_dm on table table_stop using 'lucene' DMPROPERTIES('index_columns'='suggestion')")
     sql("insert into table_stop select 'The is the stop word','abcde'")
@@ -821,13 +821,15 @@ class LuceneFineGrainDataMapSuite extends QueryTest with BeforeAndAfterAll {
       """
         | CREATE TABLE datamap_test4(id INT, name STRING, city STRING, age INT)
         | STORED BY 'carbondata'
-        | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT', 'autorefreshdatamap' = 'false')
+        | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT',
+        | 'autorefreshdatamap' = 'false', 'CACHE_LEVEL'='BLOCKLET')
       """.stripMargin)
     sql(
       """
         | CREATE TABLE datamap_copy(id INT, name STRING, city STRING, age INT)
         | STORED BY 'carbondata'
-        | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT', 'autorefreshdatamap' = 'false')
+        | TBLPROPERTIES('SORT_COLUMNS'='city,name', 'SORT_SCOPE'='LOCAL_SORT',
+        | 'autorefreshdatamap' = 'false', 'CACHE_LEVEL'='BLOCKLET')
       """.stripMargin)
     sql("insert into datamap_test4 select 1,'name','city',20")
     sql("insert into datamap_test4 select 2,'name1','city1',20")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1cea4d33/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala b/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
index e4d5b26..2d684bf 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
@@ -357,13 +357,21 @@ class IndexDataMapRebuildRDD[K, V](
         // skip clear datamap and we will do this adter rebuild
         reader.setSkipClearDataMapAtClose(true)
 
+        // Note that blockletId in rowWithPosition does not work properly,
+        // here we use another way to generate it.
+        var blockletId = 0
+        var firstRow = true
         while (reader.nextKeyValue()) {
           val rowWithPosition = reader.getCurrentValue
           val size = rowWithPosition.length
-          val blockletId = rowWithPosition(size - 3).asInstanceOf[Int]
           val pageId = rowWithPosition(size - 2).asInstanceOf[Int]
           val rowId = rowWithPosition(size - 1).asInstanceOf[Int]
 
+          if (!firstRow && pageId == 0 && rowId == 0) {
+            blockletId = blockletId + 1
+          } else {
+            firstRow = false
+          }
           refresher.addRow(blockletId, pageId, rowId, rowWithPosition)
         }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1cea4d33/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFunctionSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFunctionSuite.scala b/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFunctionSuite.scala
index 92943b2..496a506 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFunctionSuite.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFunctionSuite.scala
@@ -1,14 +1,15 @@
 package org.apache.carbondata.datamap.bloom
 
 import java.io.File
+import java.util.{Random, UUID}
 
 import org.apache.commons.io.FileUtils
-import org.apache.spark.sql.CarbonEnv
+import org.apache.spark.sql.{CarbonEnv, SaveMode}
 import org.apache.spark.sql.test.Spark2TestQueryExecutor
 import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
 
-import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonV3DataFormatConstants}
 import org.apache.carbondata.core.datamap.status.DataMapStatusManager
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.core.util.path.CarbonTablePath
@@ -790,6 +791,47 @@ class BloomCoarseGrainDataMapFunctionSuite  extends QueryTest with BeforeAndAfte
     assert(FileUtils.listFiles(FileUtils.getFile(datamapPath), Array("bloomindex"), true).asScala.nonEmpty)
   }
 
+  // two blocklets in one block are hit by bloom datamap while block cache level hit this block
+  test("CARBONDATA-2788: enable block cache level and bloom datamap") {
+    // minimum per page is 2000 rows
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.BLOCKLET_SIZE, "2000")
+    // minimum per blocklet is 16MB
+    CarbonProperties.getInstance().addProperty(CarbonV3DataFormatConstants.BLOCKLET_SIZE_IN_MB, "16")
+    // these lines will result in 3 blocklets in one block and bloom will hit at least 2 of them
+    val lines = 100000
+    sql("drop table if exists test_rcd").collect()
+    val r = new Random()
+    import sqlContext.implicits._
+    val df = sqlContext.sparkContext.parallelize(1 to lines)
+      .map(x => ("No." + r.nextInt(10000), "country" + x % 10000, "city" + x % 10000, x % 10000,
+        UUID.randomUUID().toString, UUID.randomUUID().toString, UUID.randomUUID().toString,
+        UUID.randomUUID().toString, UUID.randomUUID().toString, UUID.randomUUID().toString,
+        UUID.randomUUID().toString, UUID.randomUUID().toString, UUID.randomUUID().toString,
+        UUID.randomUUID().toString, UUID.randomUUID().toString, UUID.randomUUID().toString))
+      .toDF("ID", "country", "city", "population",
+        "random1", "random2", "random3",
+        "random4", "random5", "random6",
+        "random7", "random8", "random9",
+        "random10", "random11", "random12")
+    df.write
+      .format("carbondata")
+      .option("tableName", "test_rcd")
+      .option("SORT_COLUMNS", "id")
+      .option("SORT_SCOPE", "LOCAL_SORT")
+      .mode(SaveMode.Overwrite)
+      .save()
+
+    val withoutBloom = sql("select count(*) from test_rcd where city = 'city40'").collect().toSeq
+    sql("CREATE DATAMAP dm_rcd ON TABLE test_rcd " +
+        "USING 'bloomfilter' DMPROPERTIES " +
+        "('INDEX_COLUMNS' = 'city', 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.00001')")
+    checkAnswer(sql("select count(*) from test_rcd where city = 'city40'"), withoutBloom)
+
+    sql("drop table if exists test_rcd").collect()
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.BLOCKLET_SIZE,
+      CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL)
+  }
+
   override def afterAll(): Unit = {
     deleteFile(bigFile)
     sql(s"DROP TABLE IF EXISTS $normalTable")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1cea4d33/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala b/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala
index 7871518..1d57268 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapSuite.scala
@@ -563,7 +563,7 @@ class BloomCoarseGrainDataMapSuite extends QueryTest with BeforeAndAfterAll with
          | CREATE TABLE $bloomDMSampleTable(id INT, name STRING, city STRING, age INT,
          | s1 STRING, s2 STRING, s3 STRING, s4 STRING, s5 STRING, s6 STRING, s7 STRING, s8 STRING)
          | STORED BY 'carbondata' TBLPROPERTIES('table_blocksize'='128',
-         | 'DICTIONARY_INCLUDE'='s1,s2')
+         | 'DICTIONARY_INCLUDE'='s1,s2', 'CACHE_LEVEL'='BLOCKLET')
          |  """.stripMargin)
 
     // load data into table (segment0)


[47/50] [abbrv] carbondata git commit: [CARBONDATA-2585] Fix local dictionary for both table level and system level property based on priority

Posted by ja...@apache.org.
[CARBONDATA-2585] Fix local dictionary for both table level and system level property based on priority

Added a system level property for local dictionary support.
Property 'carbon.local.dictionary.enable' can be set to true/false to enable/disable local dictionary at system level.
If table level property LOCAL_DICTIONARY_ENABLE is configured, then Local Dictionary generation will be considered based on the table level property irrespective of the system level property.
If not, then the System level property 'carbon.local.dictionary.enable' value will be considered for local dictionary generation.

By default, both 'carbon.local.dictionary.enable' and LOCAL_DICTIONARY_ENABLE are false (Local Dictionary generation is disabled).
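
A minimal sketch of the priority rule in plain Java, assuming ordinary maps and properties instead of the real CarbonProperties and table schema APIs: the table level value wins when it is configured, otherwise the system level value is used, and the overall default is false.

import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

public class LocalDictionaryPriorityDemo {
  static boolean isLocalDictionaryEnabled(Map<String, String> tableProperties,
      Properties systemProperties) {
    String tableLevel = tableProperties.get("local_dictionary_enable");
    if (tableLevel != null) {
      // table level property wins when it is configured
      return Boolean.parseBoolean(tableLevel);
    }
    // otherwise fall back to the system level property; default is false
    return Boolean.parseBoolean(
        systemProperties.getProperty("carbon.local.dictionary.enable", "false"));
  }

  public static void main(String[] args) {
    Properties system = new Properties();
    system.setProperty("carbon.local.dictionary.enable", "true");
    Map<String, String> table = new HashMap<>();
    table.put("local_dictionary_enable", "false");
    // table level "false" overrides system level "true"
    System.out.println(isLocalDictionaryEnabled(table, system)); // prints false
  }
}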

This closes #2605


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/78438451
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/78438451
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/78438451

Branch: refs/heads/external-format
Commit: 78438451b98f9224972d5c6c1bda2413cd5426ca
Parents: f27efb3
Author: akashrn5 <ak...@gmail.com>
Authored: Thu Aug 2 20:20:48 2018 +0530
Committer: Jacky Li <ja...@qq.com>
Committed: Tue Aug 7 20:39:45 2018 +0800

----------------------------------------------------------------------
 .../core/constants/CarbonCommonConstants.java   |  5 ++
 .../carbondata/core/util/CarbonProperties.java  |  9 ++
 .../LocalDictionarySupportAlterTableTest.scala  | 53 +++++++++++
 .../LocalDictionarySupportCreateTableTest.scala | 35 +++++++-
 .../carbondata/spark/util/CarbonScalaUtil.scala | 74 ++++++++++++++-
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala | 95 ++++----------------
 .../command/carbonTableSchemaCommon.scala       | 55 ++++++++++--
 .../scala/org/apache/spark/sql/CarbonEnv.scala  |  3 +-
 .../sql/parser/CarbonSparkSqlParserUtil.scala   |  8 +-
 9 files changed, 243 insertions(+), 94 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/78438451/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index e480007..f2b9308 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -932,6 +932,11 @@ public final class CarbonCommonConstants {
   public static final String LOCAL_DICTIONARY_ENABLE_DEFAULT = "false";
 
   /**
+   * System property to enable or disable local dictionary generation
+   */
+  public static final String LOCAL_DICTIONARY_SYSTEM_ENABLE = "carbon.local.dictionary.enable";
+
+  /**
    * Threshold value for local dictionary
    */
   public static final String LOCAL_DICTIONARY_THRESHOLD = "local_dictionary_threshold";

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78438451/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
index 8a91a43..c3a4934 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
@@ -875,6 +875,15 @@ public final class CarbonProperties {
   }
 
   /**
+   * This method will be used to add a new property which need not be serialized
+   *
+   * @param key
+   */
+  public void addNonSerializableProperty(String key, String value) {
+    carbonProperties.setProperty(key, value);
+  }
+
+  /**
    * Remove the specified key in property
    */
   public CarbonProperties removeProperty(String key) {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78438451/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala
index 24af99e..38ecde8 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala
@@ -21,6 +21,8 @@ import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 
 import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
 
 class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfterAll{
 
@@ -1405,7 +1407,58 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     }
   }
 
+  test("test alter table add column system level property and table level property") {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.LOCAL_DICTIONARY_SYSTEM_ENABLE, "false")
+    sql("drop table if exists local1")
+    sql(
+      """
+        | CREATE TABLE local1(id int, name string, city string, age int)
+        | STORED BY 'org.apache.carbondata.format' tblproperties('local_dictionary_enable'='true',
+        | 'local_dictionary_threshold'='20000','local_dictionary_include'='city','no_inverted_index'='name')
+      """.stripMargin)
+    sql("alter table local1 add columns (alt string) tblproperties('local_dictionary_include'='alt')")
+    val descLoc = sql("describe formatted local1").collect
+    descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
+      case Some(row) => assert(row.get(1).toString.contains("20000"))
+      case None => assert(false)
+    }
+    descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
+      case Some(row) => assert(row.get(1).toString.contains("true"))
+      case None => assert(false)
+    }
+    descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
+      case Some(row) => assert(row.get(1).toString.contains("city,alt"))
+      case None => assert(false)
+    }
+  }
+
+  test("test alter table add column system level property") {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.LOCAL_DICTIONARY_SYSTEM_ENABLE, "false")
+    sql("drop table if exists local1")
+    sql(
+      """
+        | CREATE TABLE local1(id int, name string, city string, age int)
+        | STORED BY 'org.apache.carbondata.format' tblproperties
+        | ('local_dictionary_threshold'='20000','local_dictionary_include'='city',
+        | 'no_inverted_index'='name')
+      """.stripMargin)
+    // exception will not be thrown as validation is not done, because table level local
+    // dictionary property is not configured, but system level it is configured false
+    sql(
+      "alter table local1 add columns (alt int)")
+    val descLoc = sql("describe formatted local1").collect
+    descLoc.find(_.get(0).toString.contains("Local Dictionary Enable")) match {
+      case Some(row) => assert(row.get(1).toString.contains("false"))
+      case None => assert(false)
+    }
+  }
+
   override protected def afterAll(): Unit = {
     sql("DROP TABLE IF EXISTS LOCAL1")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.LOCAL_DICTIONARY_SYSTEM_ENABLE,
+        CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE_DEFAULT)
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78438451/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
index a02d3ef..6162cd8 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
@@ -21,6 +21,8 @@ import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 
 import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
 
 class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfterAll {
 
@@ -67,7 +69,6 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
 
   test("test local dictionary custom configurations for local dict columns _002") {
     sql("drop table if exists local1")
-
     intercept[MalformedCarbonCommandException] {
       sql(
         """
@@ -2426,7 +2427,39 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
     }
   }
 
+
+  test("test local dictionary for system level configuration") {
+    sql("drop table if exists local1")
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.LOCAL_DICTIONARY_SYSTEM_ENABLE, "false")
+    // should not throw exception as system level it is false and table level is not configured
+      sql(
+        """
+          | CREATE TABLE local1(id int, name string, city string, age int)
+          | STORED BY 'org.apache.carbondata.format'
+          | tblproperties('local_dictionary_include'='name,name')
+        """.stripMargin)
+  }
+
+  test("test local dictionary for system level configuration and table level priority") {
+    sql("drop table if exists local1")
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.LOCAL_DICTIONARY_SYSTEM_ENABLE, "false")
+    // exception is thrown as table level local dictionary is enabled, so include columns are validated
+    intercept[MalformedCarbonCommandException] {
+      sql(
+        """
+        | CREATE TABLE local1(id int, name string, city string, age int)
+        | STORED BY 'org.apache.carbondata.format'
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name,name')
+      """.
+          stripMargin)
+      }
+  }
+
+
   override protected def afterAll(): Unit = {
     sql("DROP TABLE IF EXISTS LOCAL1")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.LOCAL_DICTIONARY_SYSTEM_ENABLE,
+        CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE_DEFAULT)
   }
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78438451/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
index 28cd7ef..9e76021 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
@@ -31,7 +31,7 @@ import org.apache.spark.SparkException
 import org.apache.spark.sql._
 import org.apache.spark.sql.catalyst.catalog.CatalogTablePartition
 import org.apache.spark.sql.catalyst.util.DateTimeUtils
-import org.apache.spark.sql.execution.command.{DataTypeInfo, UpdateTableModel}
+import org.apache.spark.sql.execution.command.{Field, UpdateTableModel}
 import org.apache.spark.sql.types._
 import org.apache.spark.util.CarbonReflectionUtils
 
@@ -641,6 +641,78 @@ object CarbonScalaUtil {
     }
   }
 
+  /**
+   * This method validates all the child columns of complex column recursively to check whether
+   * any of the child column is of string dataType or not
+   *
+   * @param field
+   */
+  def validateChildColumnsRecursively(field: Field): Boolean = {
+    if (field.children.isDefined && null != field.children.get) {
+      field.children.get.exists { childColumn =>
+        if (childColumn.children.isDefined && null != childColumn.children.get) {
+          validateChildColumnsRecursively(childColumn)
+        } else {
+          childColumn.dataType.get.equalsIgnoreCase("string")
+        }
+      }
+    } else {
+      false
+    }
+  }
+
+  /**
+   * This method validates the local dictionary configured columns
+   *
+   * @param fields
+   * @param tableProperties
+   */
+  def validateLocalConfiguredDictionaryColumns(fields: Seq[Field],
+      tableProperties: mutable.Map[String, String], localDictColumns: Seq[String]): Unit = {
+    var dictIncludeColumns: Seq[String] = Seq[String]()
+
+    // validate the local dict columns
+    CarbonScalaUtil.validateLocalDictionaryColumns(tableProperties, localDictColumns)
+    // check if the column specified exists in table schema
+    localDictColumns.foreach { distCol =>
+      if (!fields.exists(x => x.column.equalsIgnoreCase(distCol.trim))) {
+        val errormsg = "LOCAL_DICTIONARY_INCLUDE/LOCAL_DICTIONARY_EXCLUDE column: " + distCol.trim +
+                       " does not exist in table. Please check the DDL."
+        throw new MalformedCarbonCommandException(errormsg)
+      }
+    }
+
+    // check if column is other than STRING or VARCHAR datatype
+    localDictColumns.foreach { dictColm =>
+      if (fields
+        .exists(x => x.column.equalsIgnoreCase(dictColm) &&
+                     !x.dataType.get.equalsIgnoreCase("STRING") &&
+                     !x.dataType.get.equalsIgnoreCase("VARCHAR") &&
+                     !x.dataType.get.equalsIgnoreCase("STRUCT") &&
+                     !x.dataType.get.equalsIgnoreCase("ARRAY"))) {
+        val errormsg = "LOCAL_DICTIONARY_INCLUDE/LOCAL_DICTIONARY_EXCLUDE column: " +
+                       dictColm.trim +
+                       " is not a string/complex/varchar datatype column. LOCAL_DICTIONARY_COLUMN" +
+                       " should be no dictionary string/complex/varchar datatype column." +
+                       "Please check the DDL."
+        throw new MalformedCarbonCommandException(errormsg)
+      }
+    }
+
+    // Validate whether any of the child columns of complex dataType column is a string column
+    localDictColumns.foreach { dictColm =>
+      if (fields
+        .exists(x => x.column.equalsIgnoreCase(dictColm) && x.children.isDefined &&
+                     null != x.children.get &&
+                     !validateChildColumnsRecursively(x))) {
+        val errMsg =
+          s"None of the child columns of complex dataType column $dictColm specified in " +
+          "local_dictionary_include are not of string dataType."
+        throw new MalformedCarbonCommandException(errMsg)
+      }
+    }
+  }
+
   def isStringDataType(dataType: DataType): Boolean = {
     dataType == StringType
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78438451/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index bb68ec5..12999d0 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -306,9 +306,12 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
           CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE_DEFAULT)
       }
     } else if (!isAlterFlow) {
-      // if LOCAL_DICTIONARY_ENABLE is not defined, consider the default value which is true
-      tableProperties.put(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE,
-        CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE_DEFAULT)
+      // if LOCAL_DICTIONARY_ENABLE is not defined, try to get from system level property
+      tableProperties
+        .put(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE,
+          CarbonProperties.getInstance()
+            .getProperty(CarbonCommonConstants.LOCAL_DICTIONARY_SYSTEM_ENABLE,
+              CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE_DEFAULT))
     }
 
     // validate the local dictionary threshold property if defined
@@ -328,9 +331,9 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
     // is enabled, else it is not validated
     // if it is preaggregate flow no need to validate anything, as all the properties will be
     // inherited from parent table
-    if (!(tableProperties.get(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE).isDefined &&
+    if ((tableProperties.get(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE).isDefined &&
           tableProperties(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE).trim
-            .equalsIgnoreCase("false")) && !isPreAggFlow || isAlterFlow) {
+            .equalsIgnoreCase("true")) && !isPreAggFlow) {
       var localDictIncludeColumns: Seq[String] = Seq[String]()
       var localDictExcludeColumns: Seq[String] = Seq[String]()
       val isLocalDictIncludeDefined = tableProperties
@@ -343,13 +346,19 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
         localDictIncludeColumns =
           tableProperties(CarbonCommonConstants.LOCAL_DICTIONARY_INCLUDE).split(",").map(_.trim)
         // validate all the local dictionary include columns
-        validateLocalDictionaryColumns(fields, tableProperties, localDictIncludeColumns)
+        CarbonScalaUtil
+          .validateLocalConfiguredDictionaryColumns(fields,
+            tableProperties,
+            localDictIncludeColumns)
       }
       if (isLocalDictExcludeDefined) {
         localDictExcludeColumns =
           tableProperties(CarbonCommonConstants.LOCAL_DICTIONARY_EXCLUDE).split(",").map(_.trim)
         // validate all the local dictionary exclude columns
-        validateLocalDictionaryColumns(fields, tableProperties, localDictExcludeColumns)
+        CarbonScalaUtil
+          .validateLocalConfiguredDictionaryColumns(fields,
+            tableProperties,
+            localDictExcludeColumns)
       }
 
       // validate if both local dictionary include and exclude contains same column
@@ -435,78 +444,6 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   }
 
   /**
-   * This method validates all the child columns of complex column recursively to check whether
-   * any of the child column is of string dataType or not
-   *
-   * @param field
-   */
-  def validateChildColumnsRecursively(field: Field): Boolean = {
-    if (field.children.isDefined && null != field.children.get) {
-      field.children.get.exists { childColumn =>
-        if (childColumn.children.isDefined && null != childColumn.children.get) {
-          validateChildColumnsRecursively(childColumn)
-        } else {
-          childColumn.dataType.get.equalsIgnoreCase("string")
-        }
-      }
-    } else {
-      false
-    }
-  }
-
-  /**
-   * This method validates the local dictionary configured columns
-   *
-   * @param fields
-   * @param tableProperties
-   */
-  private def validateLocalDictionaryColumns(fields: Seq[Field],
-      tableProperties: Map[String, String], localDictColumns: Seq[String]): Unit = {
-    var dictIncludeColumns: Seq[String] = Seq[String]()
-
-    // validate the local dict columns
-    CarbonScalaUtil.validateLocalDictionaryColumns(tableProperties, localDictColumns)
-    // check if the column specified exists in table schema
-    localDictColumns.foreach { distCol =>
-      if (!fields.exists(x => x.column.equalsIgnoreCase(distCol.trim))) {
-        val errormsg = "LOCAL_DICTIONARY_INCLUDE/LOCAL_DICTIONARY_EXCLUDE column: " + distCol.trim +
-                       " does not exist in table. Please check the DDL."
-        throw new MalformedCarbonCommandException(errormsg)
-      }
-    }
-
-    // check if column is other than STRING or VARCHAR datatype
-    localDictColumns.foreach { dictColm =>
-      if (fields
-        .exists(x => x.column.equalsIgnoreCase(dictColm) &&
-                     !x.dataType.get.equalsIgnoreCase("STRING") &&
-                     !x.dataType.get.equalsIgnoreCase("VARCHAR") &&
-                     !x.dataType.get.equalsIgnoreCase("STRUCT") &&
-                     !x.dataType.get.equalsIgnoreCase("ARRAY"))) {
-        val errormsg = "LOCAL_DICTIONARY_INCLUDE/LOCAL_DICTIONARY_EXCLUDE column: " +
-                       dictColm.trim +
-                       " is not a string/complex/varchar datatype column. LOCAL_DICTIONARY_COLUMN" +
-                       " should be no dictionary string/complex/varchar datatype column." +
-                       "Please check the DDL."
-        throw new MalformedCarbonCommandException(errormsg)
-      }
-    }
-
-    // Validate whether any of the child columns of complex dataType column is a string column
-    localDictColumns.foreach { dictColm =>
-      if (fields
-        .exists(x => x.column.equalsIgnoreCase(dictColm) && x.children.isDefined &&
-                     null != x.children.get &&
-                     !validateChildColumnsRecursively(x))) {
-        val errMsg =
-          s"None of the child columns of complex dataType column $dictColm specified in " +
-          "local_dictionary_include are not of string dataType."
-        throw new MalformedCarbonCommandException(errMsg)
-      }
-    }
-  }
-
-  /**
    * This method validates the long string columns, will check:
    * 1.the column in tblproperty long_string_columns must be in table fields.
    * 2.the column datatype in tblproperty long_string_columns should be string.

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78438451/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
index 4a99ac7..1b48c08 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
@@ -39,10 +39,8 @@ import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
 import org.apache.carbondata.core.metadata.datatype.{DataType, DataTypes, DecimalType}
 import org.apache.carbondata.core.metadata.encoder.Encoding
 import org.apache.carbondata.core.metadata.schema._
-import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, RelationIdentifier,
-  TableInfo, TableSchema}
-import org.apache.carbondata.core.metadata.schema.table.column.{ColumnSchema,
-  ParentColumnTableRelation}
+import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, RelationIdentifier, TableInfo, TableSchema}
+import org.apache.carbondata.core.metadata.schema.table.column.{ColumnSchema, ParentColumnTableRelation}
 import org.apache.carbondata.core.service.impl.ColumnUniqueIdGenerator
 import org.apache.carbondata.core.statusmanager.{LoadMetadataDetails, SegmentUpdateStatusManager}
 import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil, DataTypeUtil}
@@ -50,7 +48,7 @@ import org.apache.carbondata.processing.loading.FailureCauses
 import org.apache.carbondata.processing.loading.model.CarbonLoadModel
 import org.apache.carbondata.processing.merger.CompactionType
 import org.apache.carbondata.spark.CarbonSparkFactory
-import org.apache.carbondata.spark.util.DataTypeConverterUtil
+import org.apache.carbondata.spark.util.{CarbonScalaUtil, DataTypeConverterUtil}
 
 case class TableModel(
     ifNotExistsSet: Boolean,
@@ -378,19 +376,58 @@ class AlterTableColumnSchemaGenerator(
       }
     }
 
+    val isLocalDictEnabledForMainTable = tableSchema.getTableProperties
+      .get(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE)
+
+    val alterMutableTblProperties: scala.collection.mutable.Map[String, String] = mutable
+      .Map(alterTableModel.tableProperties.toSeq: _*)
+
+    // if local dictionary is enabled, then validate include and exclude columns if defined
+    if (null != isLocalDictEnabledForMainTable && isLocalDictEnabledForMainTable.toBoolean) {
+      var localDictIncludeColumns: Seq[String] = Seq[String]()
+      var localDictExcludeColumns: Seq[String] = Seq[String]()
+      // validate local dictionary include columns if defined
+      if (alterTableModel.tableProperties.get(CarbonCommonConstants.LOCAL_DICTIONARY_INCLUDE)
+        .isDefined) {
+        localDictIncludeColumns =
+          alterTableModel.tableProperties(CarbonCommonConstants.LOCAL_DICTIONARY_INCLUDE).split(",")
+            .map(_.trim)
+        CarbonScalaUtil
+          .validateLocalDictionaryColumns(alterMutableTblProperties, localDictIncludeColumns)
+        CarbonScalaUtil
+          .validateLocalConfiguredDictionaryColumns(
+            alterTableModel.dimCols ++ alterTableModel.msrCols,
+            alterMutableTblProperties,
+            localDictIncludeColumns)
+      }
+
+      // validate local dictionary exclude columns if defined
+      if (alterTableModel.tableProperties.get(CarbonCommonConstants.LOCAL_DICTIONARY_EXCLUDE)
+        .isDefined) {
+        localDictExcludeColumns =
+          alterTableModel.tableProperties(CarbonCommonConstants.LOCAL_DICTIONARY_EXCLUDE).split(",")
+            .map(_.trim)
+        CarbonScalaUtil
+          .validateLocalDictionaryColumns(alterMutableTblProperties, localDictExcludeColumns)
+        CarbonScalaUtil
+          .validateLocalConfiguredDictionaryColumns(
+            alterTableModel.dimCols ++ alterTableModel.msrCols,
+            alterMutableTblProperties,
+            localDictExcludeColumns)
+      }
+
+      // validate if both local dictionary include and exclude contains same column
+      CarbonScalaUtil.validateDuplicateLocalDictIncludeExcludeColmns(alterMutableTblProperties)
 
-    if (alterTableModel.tableProperties != null) {
       CarbonUtil
         .setLocalDictColumnsToWrapperSchema(newCols.asJava,
           alterTableModel.tableProperties.asJava,
-          tableSchema.getTableProperties.get(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE))
+          isLocalDictEnabledForMainTable)
     }
 
     val includeExcludeColOfMainTable = getLocalDictColumnList(tableSchema.getTableProperties
       .asScala,
       columnsWithoutNewCols)
-    val alterMutableTblProperties: scala.collection.mutable.Map[String, String] = mutable
-      .Map(alterTableModel.tableProperties.toSeq: _*)
     val includeExcludeColOfAlterTable = getLocalDictColumnList(alterMutableTblProperties,
       newCols.to[mutable.ListBuffer])
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78438451/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
index 074568d..7f26888 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonEnv.scala
@@ -116,7 +116,8 @@ class CarbonEnv {
 
           CarbonMetaStoreFactory.createCarbonMetaStore(sparkSession.conf)
         }
-        CarbonProperties.getInstance.addProperty(CarbonCommonConstants.IS_DRIVER_INSTANCE, "true")
+        CarbonProperties.getInstance
+          .addNonSerializableProperty(CarbonCommonConstants.IS_DRIVER_INSTANCE, "true")
         initialized = true
       }
     }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/78438451/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
index 9c0a099..9752535 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSparkSqlParserUtil.scala
@@ -27,8 +27,7 @@ import org.apache.spark.sql.catalyst.parser.ParserUtils.operationNotAllowed
 import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 import org.apache.spark.sql.execution.command.{PartitionerField, TableModel, TableNewProcessor}
-import org.apache.spark.sql.execution.command.table.{CarbonCreateTableAsSelectCommand,
-CarbonCreateTableCommand}
+import org.apache.spark.sql.execution.command.table.{CarbonCreateTableAsSelectCommand, CarbonCreateTableCommand}
 import org.apache.spark.sql.types.StructField
 
 import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
@@ -37,6 +36,7 @@ import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
 import org.apache.carbondata.core.metadata.datatype.DataTypes
 import org.apache.carbondata.core.metadata.schema.SchemaReader
+import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.carbondata.core.util.path.CarbonTablePath
 import org.apache.carbondata.spark.CarbonOption
 import org.apache.carbondata.spark.util.{CarbonScalaUtil, CommonUtil}
@@ -164,7 +164,9 @@ object CarbonSparkSqlParserUtil {
       if (null == isLocalDic_enabled) {
         table.getFactTable.getTableProperties
           .put(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE,
-            CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE_DEFAULT)
+            CarbonProperties.getInstance()
+              .getProperty(CarbonCommonConstants.LOCAL_DICTIONARY_SYSTEM_ENABLE,
+                CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE_DEFAULT))
       }
       isLocalDic_enabled = table.getFactTable.getTableProperties
         .get(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE)
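
A minimal sketch of the new system-level fallback, assuming the usual CarbonProperties singleton; only the constant names come from the diff above, and the value "true" is just an example:

import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties

// If CREATE TABLE does not set LOCAL_DICTIONARY_ENABLE, the parser now falls back to this
// system-level property before using LOCAL_DICTIONARY_ENABLE_DEFAULT.
CarbonProperties.getInstance()
  .addProperty(CarbonCommonConstants.LOCAL_DICTIONARY_SYSTEM_ENABLE, "true")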


[24/50] [abbrv] carbondata git commit: [CARBONDATA-2800][Doc] Add useful tips about bloomfilter datamap

Posted by ja...@apache.org.
[CARBONDATA-2800][Doc] Add useful tips about bloomfilter datamap

add useful tips about bloomfilter datamap

This closes #2581


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/a302cd1c
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/a302cd1c
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/a302cd1c

Branch: refs/heads/external-format
Commit: a302cd1cef6c48c667eacea01df5cb6c75e7685f
Parents: f9b02a5
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Mon Jul 30 20:32:05 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Wed Aug 1 22:10:59 2018 +0800

----------------------------------------------------------------------
 docs/datamap/bloomfilter-datamap-guide.md | 27 +++++++++++++++++++++++++-
 docs/useful-tips-on-carbondata.md         |  4 ++++
 2 files changed, 30 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/a302cd1c/docs/datamap/bloomfilter-datamap-guide.md
----------------------------------------------------------------------
diff --git a/docs/datamap/bloomfilter-datamap-guide.md b/docs/datamap/bloomfilter-datamap-guide.md
index 2dba3dc..8955cde 100644
--- a/docs/datamap/bloomfilter-datamap-guide.md
+++ b/docs/datamap/bloomfilter-datamap-guide.md
@@ -5,6 +5,7 @@
 * [Loading Data](#loading-data)
 * [Querying Data](#querying-data)
 * [Data Management](#data-management-with-bloomfilter-datamap)
+* [Useful Tips](#useful-tips)
 
 #### DataMap Management
 Creating BloomFilter DataMap
@@ -102,4 +103,28 @@ which will show the transformed logical plan, and thus user can check whether th
 If the datamap does not prune blocklets well, you can try to increase the value of property `BLOOM_SIZE` and decrease the value of property `BLOOM_FPP`.
 
 ## Data Management With BloomFilter DataMap
-Data management with BloomFilter datamap has no difference with that on Lucene datamap. You can refer to the corresponding section in `CarbonData BloomFilter DataMap`.
+Data management with BloomFilter datamap has no difference with that on Lucene datamap.
+You can refer to the corresponding section in `CarbonData Lucene DataMap`.
+
+## Useful Tips
++ BloomFilter DataMap is suggested to be created on high cardinality columns
 whose query conditions are usually simple `equal` or `in` filters,
 such as 'col1=XX', 'col1 in (XX, YY)'.
++ We can create multiple BloomFilter datamaps on one table,
+ but we recommend creating one BloomFilter datamap that contains multiple index columns,
+ because the data loading and query performance will be better.
++ `BLOOM_FPP` is only the value expected by the user; the actual FPP may be worse.
+ If the BloomFilter datamap does not work well,
+ you can try to increase `BLOOM_SIZE` and decrease `BLOOM_FPP` at the same time.
+ Notice that bigger `BLOOM_SIZE` will increase the size of index file
+ and smaller `BLOOM_FPP` will increase runtime calculation while performing query.
++ '0' skipped blocklets for the BloomFilter datamap in the explain output indicates that
 the BloomFilter datamap does not prune better than the Main datamap.
 (For example, since the data is not ordered, a specific value may be contained in many blocklets. In this case, bloom may not work better than the Main DataMap.)
 If this occurs very often, it means that the current BloomFilter is useless and you can disable or drop it.
 Sometimes we cannot see any pruning result for the BloomFilter datamap in the explain output;
 this indicates that the previous datamap has already pruned all the blocklets and there is no need to continue pruning.
++ In some scenarios, the BloomFilter datamap may not enhance the query performance significantly
+ but if it can reduce the number of spark tasks,
 there is still a chance that the BloomFilter datamap can enhance the performance of concurrent queries.
++ Note that BloomFilter datamap will decrease the data loading performance and may cause slight storage expansion (for the datamap index file).
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/carbondata/blob/a302cd1c/docs/useful-tips-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/useful-tips-on-carbondata.md b/docs/useful-tips-on-carbondata.md
index d00f785..b4e3bd3 100644
--- a/docs/useful-tips-on-carbondata.md
+++ b/docs/useful-tips-on-carbondata.md
@@ -125,6 +125,10 @@
     TBLPROPERTIES ('SORT_COLUMNS'='Dime_1, HOST, MSISDN')
   ```
 
+  **NOTE:**
+  + BloomFilter can be created to enhance performance for queries with precise equal/in conditions. You can find more information about it in BloomFilter datamap [document](https://github.com/apache/carbondata/blob/master/docs/datamap/bloomfilter-datamap-guide.md).
+
+
 ## Configuration for Optimizing Data Loading performance for Massive Data
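
A minimal sketch that follows the tips above (one BloomFilter datamap covering several high-cardinality columns queried with equal/in filters), assuming a SparkSession `spark` configured for CarbonData; the table, datamap and column names are made up:

import org.apache.spark.sql.SparkSession

// One datamap over several high-cardinality columns; BLOOM_SIZE and BLOOM_FPP are the
// tuning knobs described above (values here are only examples).
def createBloomDataMap(spark: SparkSession): Unit = {
  spark.sql(
    """
      | CREATE DATAMAP dm_bloom ON TABLE sales
      | USING 'bloomfilter'
      | DMPROPERTIES('INDEX_COLUMNS'='user_id,order_id',
      | 'BLOOM_SIZE'='640000', 'BLOOM_FPP'='0.00001')
    """.stripMargin)
}

Bigger BLOOM_SIZE and smaller BLOOM_FPP improve pruning at the cost of index file size and query-time calculation, as the guide notes.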
 
 


[02/50] [abbrv] carbondata git commit: [CARBONDATA-2753][Compatibility] Row count of page is calculated wrong for old store(V2 store)

Posted by ja...@apache.org.
[CARBONDATA-2753][Compatibility] Row count of page is calculated wrong for old store(V2 store)

The row count of a page is calculated incorrectly for the old (V2) store.


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/8d3e8b82
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/8d3e8b82
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/8d3e8b82

Branch: refs/heads/external-format
Commit: 8d3e8b82cbb0d75c66219119c281ed910ac185e6
Parents: c79fc90
Author: dhatchayani <dh...@gmail.com>
Authored: Wed Jul 25 14:41:58 2018 +0530
Committer: kumarvishal09 <ku...@gmail.com>
Committed: Sun Jul 29 11:47:25 2018 +0530

----------------------------------------------------------------------
 .../blockletindex/BlockletDataRefNode.java        | 18 +++++++++++++-----
 .../scan/scanner/impl/BlockletFullScanner.java    |  9 +--------
 2 files changed, 14 insertions(+), 13 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/8d3e8b82/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java
index a11ae8d..5681528 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java
@@ -61,18 +61,26 @@ public class BlockletDataRefNode implements DataRefNode {
       int numberOfPagesCompletelyFilled = detailInfo.getRowCount();
       // no. of rows to a page is 120000 in V2 and 32000 in V3, same is handled to get the number
       // of pages filled
-      if (blockInfo.getVersion() == ColumnarFormatVersion.V2) {
+      int lastPageRowCount;
+      int fullyFilledRowsCount;
+      if (blockInfo.getVersion() == ColumnarFormatVersion.V2
+          || blockInfo.getVersion() == ColumnarFormatVersion.V1) {
         numberOfPagesCompletelyFilled /=
             CarbonVersionConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT_V2;
+        lastPageRowCount = detailInfo.getRowCount()
+            % CarbonVersionConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT_V2;
+        fullyFilledRowsCount =
+            CarbonVersionConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT_V2;
       } else {
         numberOfPagesCompletelyFilled /=
             CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT;
+        lastPageRowCount = detailInfo.getRowCount()
+            % CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT;
+        fullyFilledRowsCount =
+            CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT;
       }
-      int lastPageRowCount = detailInfo.getRowCount()
-          % CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT;
       for (int i = 0; i < numberOfPagesCompletelyFilled; i++) {
-        pageRowCount[i] =
-            CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT;
+        pageRowCount[i] = fullyFilledRowsCount;
       }
       if (lastPageRowCount > 0) {
         pageRowCount[pageRowCount.length - 1] = lastPageRowCount;

http://git-wip-us.apache.org/repos/asf/carbondata/blob/8d3e8b82/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/BlockletFullScanner.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/BlockletFullScanner.java b/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/BlockletFullScanner.java
index c3d4df8..f61a8b1 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/BlockletFullScanner.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/scanner/impl/BlockletFullScanner.java
@@ -19,7 +19,6 @@ package org.apache.carbondata.core.scan.scanner.impl;
 import java.io.IOException;
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
-import org.apache.carbondata.core.constants.CarbonV3DataFormatConstants;
 import org.apache.carbondata.core.datastore.DataRefNode;
 import org.apache.carbondata.core.datastore.chunk.DimensionColumnPage;
 import org.apache.carbondata.core.datastore.chunk.impl.DimensionRawColumnChunk;
@@ -123,13 +122,7 @@ public class BlockletFullScanner implements BlockletScanner {
     if (numberOfRows == null) {
       numberOfRows = new int[rawBlockletColumnChunks.getDataBlock().numberOfPages()];
       for (int i = 0; i < numberOfRows.length; i++) {
-        numberOfRows[i] =
-            CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT;
-      }
-      int lastPageSize = rawBlockletColumnChunks.getDataBlock().numRows()
-          % CarbonV3DataFormatConstants.NUMBER_OF_ROWS_PER_BLOCKLET_COLUMN_PAGE_DEFAULT;
-      if (lastPageSize > 0) {
-        numberOfRows[numberOfRows.length - 1] = lastPageSize;
+        numberOfRows[i] = rawBlockletColumnChunks.getDataBlock().getPageRowCount(i);
       }
     }
     scannedResult.setPageFilteredRowCount(numberOfRows);
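
A minimal sketch, not the CarbonData API, of the per-page row-count arithmetic this patch fixes: the page size must match the store version instead of always assuming the V3 default.

// Rows per page: 120000 for V1/V2 stores, 32000 for V3 stores (the defaults named in the diff).
def pageRowCounts(totalRows: Int, rowsPerPage: Int): Array[Int] = {
  val fullPages = totalRows / rowsPerPage      // completely filled pages
  val lastPageRows = totalRows % rowsPerPage   // rows left over for the trailing page, if any
  val counts = Array.fill(fullPages)(rowsPerPage)
  if (lastPageRows > 0) counts :+ lastPageRows else counts
}

pageRowCounts(250000, 120000)  // V2 page size: Array(120000, 120000, 10000)
pageRowCounts(250000, 32000)   // V3 page size: seven pages of 32000 plus a last page of 26000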


[34/50] [abbrv] carbondata git commit: [Documentation] [Unsafe Configuration] Added carbon.unsafe.driver.working.memory.in.mb parameter to differentiate between driver and executor unsafe memory

Posted by ja...@apache.org.
[Documentation] [Unsafe Configuration] Added carbon.unsafe.driver.working.memory.in.mb parameter to differentiate between driver and executor unsafe memory

Added carbon.unsafe.driver.working.memory.in.mb parameter to differentiate between driver and executor unsafe memory

In production scenarios the driver memory is usually smaller than the executor memory. Unsafe memory is now used for caching the block/blocklet dataMap in the driver, but the unsafe memory size configured for executors was also being applied to the driver, which is not a good idea.
Therefore the driver and executor unsafe memory need to be configured separately.
Spark follows the same approach: it provides separate parameters, spark.yarn.driver.memoryOverhead and spark.yarn.executor.memoryOverhead, to control driver and executor memory overhead.

This closes #2595


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/a2928e31
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/a2928e31
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/a2928e31

Branch: refs/heads/external-format
Commit: a2928e314a4c45dd35923d7d29b75508e401dd3f
Parents: 7e93d7b
Author: manishgupta88 <to...@gmail.com>
Authored: Wed Aug 1 19:38:30 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Thu Aug 2 17:07:16 2018 +0530

----------------------------------------------------------------------
 .../core/constants/CarbonCommonConstants.java   |  4 ++++
 .../core/memory/UnsafeMemoryManager.java        | 25 ++++++++++++++++----
 docs/configuration-parameters.md                |  2 ++
 3 files changed, 27 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/a2928e31/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index 6d7215e..e480007 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -1276,6 +1276,10 @@ public final class CarbonCommonConstants {
   @CarbonProperty
   public static final String UNSAFE_WORKING_MEMORY_IN_MB = "carbon.unsafe.working.memory.in.mb";
   public static final String UNSAFE_WORKING_MEMORY_IN_MB_DEFAULT = "512";
+
+  @CarbonProperty
+  public static final String UNSAFE_DRIVER_WORKING_MEMORY_IN_MB =
+      "carbon.unsafe.driver.working.memory.in.mb";
   /**
    * Sorts the data in batches and writes the batch data to store with index file.
    */

http://git-wip-us.apache.org/repos/asf/carbondata/blob/a2928e31/core/src/main/java/org/apache/carbondata/core/memory/UnsafeMemoryManager.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/memory/UnsafeMemoryManager.java b/core/src/main/java/org/apache/carbondata/core/memory/UnsafeMemoryManager.java
index 2115f82..9133f0f 100644
--- a/core/src/main/java/org/apache/carbondata/core/memory/UnsafeMemoryManager.java
+++ b/core/src/main/java/org/apache/carbondata/core/memory/UnsafeMemoryManager.java
@@ -41,11 +41,28 @@ public class UnsafeMemoryManager {
           CarbonCommonConstants.ENABLE_OFFHEAP_SORT_DEFAULT));
   private static Map<Long,Set<MemoryBlock>> taskIdToMemoryBlockMap;
   static {
-    long size;
+    long size = 0L;
     try {
-      size = Long.parseLong(CarbonProperties.getInstance()
-          .getProperty(CarbonCommonConstants.UNSAFE_WORKING_MEMORY_IN_MB,
-              CarbonCommonConstants.UNSAFE_WORKING_MEMORY_IN_MB_DEFAULT));
+      // check if driver unsafe memory is configured and JVM process is in driver. In that case
+      // initialize unsafe memory configured for driver
+      boolean isDriver = Boolean.parseBoolean(CarbonProperties.getInstance()
+          .getProperty(CarbonCommonConstants.IS_DRIVER_INSTANCE, "false"));
+      boolean initializedWithUnsafeDriverMemory = false;
+      if (isDriver) {
+        String driverUnsafeMemorySize = CarbonProperties.getInstance()
+            .getProperty(CarbonCommonConstants.UNSAFE_DRIVER_WORKING_MEMORY_IN_MB);
+        if (null != driverUnsafeMemorySize) {
+          size = Long.parseLong(CarbonProperties.getInstance()
+              .getProperty(CarbonCommonConstants.UNSAFE_DRIVER_WORKING_MEMORY_IN_MB,
+                  CarbonCommonConstants.UNSAFE_WORKING_MEMORY_IN_MB_DEFAULT));
+          initializedWithUnsafeDriverMemory = true;
+        }
+      }
+      if (!initializedWithUnsafeDriverMemory) {
+        size = Long.parseLong(CarbonProperties.getInstance()
+            .getProperty(CarbonCommonConstants.UNSAFE_WORKING_MEMORY_IN_MB,
+                CarbonCommonConstants.UNSAFE_WORKING_MEMORY_IN_MB_DEFAULT));
+      }
     } catch (Exception e) {
       size = Long.parseLong(CarbonCommonConstants.UNSAFE_WORKING_MEMORY_IN_MB_DEFAULT);
       LOGGER.info("Wrong memory size given, "

http://git-wip-us.apache.org/repos/asf/carbondata/blob/a2928e31/docs/configuration-parameters.md
----------------------------------------------------------------------
diff --git a/docs/configuration-parameters.md b/docs/configuration-parameters.md
index b614918..6e4dea5 100644
--- a/docs/configuration-parameters.md
+++ b/docs/configuration-parameters.md
@@ -40,6 +40,8 @@ This section provides the details of all the configurations required for the Car
 | carbon.streaming.segment.max.size | 1024000000 | This parameter defines the maximum size of the streaming segment. Setting this parameter to appropriate value will avoid impacting the streaming ingestion. The value is in bytes.|
 | carbon.query.show.datamaps | true | If this parameter value is set to true, show tables command will list all the tables including datatmaps(eg: Preaggregate table), else datamaps will be excluded from the table list. |
 | carbon.segment.lock.files.preserve.hours | 48 | This property value indicates the number of hours the segment lock files will be preserved after dataload. These lock files will be deleted with the clean command after the configured number of hours. |
+| carbon.unsafe.working.memory.in.mb | 512 | Specifies the size of executor unsafe working memory. Used for sorting data, storing column pages, etc. This value is expressed in MB. |
+| carbon.unsafe.driver.working.memory.in.mb | 512 | Specifies the size of driver unsafe working memory. Used for storing block or blocklet datamap cache. If not configured then carbon.unsafe.working.memory.in.mb value is considered. This value is expressed in MB. |
 
 ##  Performance Configuration
 This section provides the details of all the configurations required for CarbonData Performance Optimization.
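
A minimal sketch of setting the two limits from application code, assuming the usual CarbonProperties singleton; the property names are the ones documented above, and the sizes are only examples:

import org.apache.carbondata.core.util.CarbonProperties

// Executor-side unsafe working memory (also the driver fallback when no driver value is set).
CarbonProperties.getInstance()
  .addProperty("carbon.unsafe.working.memory.in.mb", "2048")
// Driver-side unsafe working memory, used for the block/blocklet datamap cache.
CarbonProperties.getInstance()
  .addProperty("carbon.unsafe.driver.working.memory.in.mb", "512")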


[15/50] [abbrv] carbondata git commit: [CARBONDATA-2585]disable local dictionary by default

Posted by ja...@apache.org.
[CARBONDATA-2585]disable local dictionary by default

Disable local dictionary generation by default (LOCAL_DICTIONARY_ENABLE_DEFAULT is now "false").

This closes #2570


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/34ca0214
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/34ca0214
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/34ca0214

Branch: refs/heads/external-format
Commit: 34ca021423cff8aa2dd03f98f99ea2c46a5aa9a6
Parents: 9a3b0b1
Author: akashrn5 <ak...@gmail.com>
Authored: Fri Jul 27 12:31:48 2018 +0530
Committer: ravipesala <ra...@gmail.com>
Committed: Tue Jul 31 21:35:26 2018 +0530

----------------------------------------------------------------------
 .../core/constants/CarbonCommonConstants.java   |  2 +-
 docs/data-management-on-carbondata.md           |  2 +-
 ...CreateTableWithLocalDictionaryTestCase.scala | 70 ++++++++------------
 .../TestNonTransactionalCarbonTable.scala       | 43 ++----------
 ...ransactionalCarbonTableWithComplexType.scala | 10 +--
 .../describeTable/TestDescribeTable.scala       |  4 +-
 .../LocalDictionarySupportAlterTableTest.scala  | 52 +++++++--------
 .../LocalDictionarySupportCreateTableTest.scala | 58 ++++++++--------
 .../LocalDictionarySupportLoadTableTest.scala   | 14 ++--
 .../table/CarbonDescribeFormattedCommand.scala  |  3 +-
 10 files changed, 98 insertions(+), 160 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/34ca0214/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index 8bf22c9..6d7215e 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -929,7 +929,7 @@ public final class CarbonCommonConstants {
   /**
    * default value for local dictionary generation
    */
-  public static final String LOCAL_DICTIONARY_ENABLE_DEFAULT = "true";
+  public static final String LOCAL_DICTIONARY_ENABLE_DEFAULT = "false";
 
   /**
    * Threshold value for local dictionary

http://git-wip-us.apache.org/repos/asf/carbondata/blob/34ca0214/docs/data-management-on-carbondata.md
----------------------------------------------------------------------
diff --git a/docs/data-management-on-carbondata.md b/docs/data-management-on-carbondata.md
index 28bc7d3..6aaaaa3 100644
--- a/docs/data-management-on-carbondata.md
+++ b/docs/data-management-on-carbondata.md
@@ -137,7 +137,7 @@ This tutorial is going to introduce all commands and data operations on CarbonDa
           
      | Properties | Default value | Description |
      | ---------- | ------------- | ----------- |
-     | LOCAL_DICTIONARY_ENABLE | true | By default, local dictionary will be enabled for the table | 
+     | LOCAL_DICTIONARY_ENABLE | false | By default, local dictionary will not be enabled for the table | 
      | LOCAL_DICTIONARY_THRESHOLD | 10000 | The maximum cardinality for local dictionary generation (range- 1000 to 100000) |
      | LOCAL_DICTIONARY_INCLUDE | all no-dictionary string/varchar columns | Columns for which Local Dictionary is generated. |
      | LOCAL_DICTIONARY_EXCLUDE | none | Columns for which Local Dictionary is not generated |
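
Since the default is now false, a minimal sketch of explicitly enabling local dictionary for a table, the same pattern the updated tests below use; it assumes a SparkSession `spark` configured for CarbonData, with illustrative table and column names:

import org.apache.spark.sql.SparkSession

// Local dictionary must now be requested explicitly per table.
def createTableWithLocalDict(spark: SparkSession): Unit = {
  spark.sql(
    """
      | CREATE TABLE local1(id INT, name STRING, city STRING, age INT)
      | STORED BY 'carbondata'
      | TBLPROPERTIES('LOCAL_DICTIONARY_ENABLE'='true', 'LOCAL_DICTIONARY_INCLUDE'='name')
    """.stripMargin)
}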

http://git-wip-us.apache.org/repos/asf/carbondata/blob/34ca0214/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/CreateTableWithLocalDictionaryTestCase.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/CreateTableWithLocalDictionaryTestCase.scala b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/CreateTableWithLocalDictionaryTestCase.scala
index b6ecef9..43d5956 100644
--- a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/CreateTableWithLocalDictionaryTestCase.scala
+++ b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/CreateTableWithLocalDictionaryTestCase.scala
@@ -40,10 +40,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
 
     val descLoc = sql("describe formatted local1").collect
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
-    }
-    descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
-      case Some(row) => assert(row.get(1).toString.contains("10000"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
     }
   }
 
@@ -53,7 +50,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_include'='name')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name')
       """.
         stripMargin)
     val descFormatted1 = sql("describe formatted local1").collect
@@ -65,21 +62,6 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
     }
   }
 
-  test(
-    "test local dictionary custom configurations for local dict columns _002")
-  {
-    sql("drop table if exists local1")
-
-    intercept[MalformedCarbonCommandException] {
-      sql(
-        """
-          | CREATE TABLE local1(id int, name string, city string, age int)
-          | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_include'='name,name')
-        """.stripMargin)
-    }
-  }
-
   test("test local dictionary custom configurations for local dict columns _003") {
     sql("drop table if exists local1")
     val exception = intercept[MalformedCarbonCommandException] {
@@ -87,7 +69,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_include'='')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_include'='')
         """.
           stripMargin)
     }
@@ -104,7 +86,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_include'='abc')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_include'='abc')
         """.
           stripMargin)
     }
@@ -121,7 +103,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_include'='id')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_include'='id')
         """.
           stripMargin)
     }
@@ -139,7 +121,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('dictionary_include'='name','local_dictionary_include'='name')
+          | tblproperties('local_dictionary_enable'='true','dictionary_include'='name','local_dictionary_include'='name')
         """.
           stripMargin)
     }
@@ -151,7 +133,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='20000')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='20000')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -170,7 +152,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='-100')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='-100')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -186,7 +168,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='21474874811')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='21474874811')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -202,7 +184,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -218,7 +200,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='hello')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='hello')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -235,7 +217,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='20000','local_dictionary_include'='name')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='20000','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -258,7 +240,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='-100','local_dictionary_include'='name')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='-100','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -281,7 +263,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='','local_dictionary_include'='name')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -304,7 +286,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='vdslv','local_dictionary_include'='name')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='vdslv','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -328,7 +310,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='20000','local_dictionary_include'='name,name')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='20000','local_dictionary_include'='name,name')
         """.stripMargin)
     }
   }
@@ -342,7 +324,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='20000','local_dictionary_include'=' ')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='20000','local_dictionary_include'=' ')
         """.stripMargin)
     }
   }
@@ -356,7 +338,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='20000','local_dictionary_include'='hello')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='20000','local_dictionary_include'='hello')
         """.stripMargin)
     }
   }
@@ -370,7 +352,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='20000','local_dictionary_include'='name',
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='20000','local_dictionary_include'='name',
           | 'dictionary_include'='name')
         """.stripMargin)
     }
@@ -385,7 +367,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='','local_dictionary_include'='name,name')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='','local_dictionary_include'='name,name')
         """.stripMargin)
     }
   }
@@ -399,7 +381,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='-100','local_dictionary_include'='Hello')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='-100','local_dictionary_include'='Hello')
         """.stripMargin)
     }
   }
@@ -413,7 +395,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='23213497321591234324',
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='23213497321591234324',
           | 'local_dictionary_include'='name','dictionary_include'='name')
         """.stripMargin)
     }
@@ -1496,7 +1478,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
     val descLoc = sql("describe formatted local1").collect
 
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
     }
     descLoc.find(_.get(0).toString.contains("SORT_SCOPE")) match {
       case Some(row) => assert(row.get(1).toString.contains("global_sort"))
@@ -1516,7 +1498,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
     val descLoc = sql("describe formatted local1").collect
 
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
     }
     descLoc.find(_.get(0).toString.contains("SORT_SCOPE")) match {
       case Some(row) => assert(row.get(1).toString.contains("batch_sort"))
@@ -1535,7 +1517,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
     val descLoc = sql("describe formatted local1").collect
 
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
     }
     descLoc.find(_.get(0).toString.contains("SORT_SCOPE")) match {
       case Some(row) => assert(row.get(1).toString.contains("no_sort"))
@@ -1554,7 +1536,7 @@ class CreateTableWithLocalDictionaryTestCase extends QueryTest with BeforeAndAft
     val descLoc = sql("describe formatted local1").collect
 
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
     }
     descLoc.find(_.get(0).toString.contains("SORT_SCOPE")) match {
       case Some(row) => assert(row.get(1).toString.contains("local_sort"))

http://git-wip-us.apache.org/repos/asf/carbondata/blob/34ca0214/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index 62c3df6..8a1d465 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -2328,15 +2328,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
          |'$writerPath' """.stripMargin)
     val descLoc = sql("describe formatted sdkTable").collect
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
-      case None => assert(false)
-    }
-    descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
-      case Some(row) => assert(row.get(1).toString.contains("10000"))
-      case None => assert(false)
-    }
-    descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
-      case Some(row) => assert(row.get(1).toString.contains("name,surname"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
       case None => assert(false)
     }
     FileUtils.deleteDirectory(new File(writerPath))
@@ -2357,15 +2349,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
          |'$writerPath' """.stripMargin)
     val descLoc = sql("describe formatted sdkTable").collect
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
-      case None => assert(false)
-    }
-    descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
-      case Some(row) => assert(row.get(1).toString.contains("10000"))
-      case None => assert(false)
-    }
-    descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
-      case Some(row) => assert(row.get(1).toString.contains("name,surname"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
       case None => assert(false)
     }
     FileUtils.deleteDirectory(new File(writerPath))
@@ -2386,15 +2370,7 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
          |'$writerPath' """.stripMargin)
     val descLoc = sql("describe formatted sdkTable").collect
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
-      case None => assert(false)
-    }
-    descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
-      case Some(row) => assert(row.get(1).toString.contains("10000"))
-      case None => assert(false)
-    }
-    descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
-      case Some(row) => assert(row.get(1).toString.contains("name,surname"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
       case None => assert(false)
     }
     FileUtils.deleteDirectory(new File(writerPath))
@@ -2416,21 +2392,12 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     FileUtils.deleteDirectory(new File(writerPath))
     sql("insert into sdkTable select 's1','s2',23 ")
     assert(FileFactory.getCarbonFile(writerPath).exists())
-    assert(testUtil.checkForLocalDictionary(testUtil.getDimRawChunk(0,writerPath)))
+    assert(!testUtil.checkForLocalDictionary(testUtil.getDimRawChunk(0,writerPath)))
     val descLoc = sql("describe formatted sdkTable").collect
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
       case None => assert(false)
     }
-    descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
-      case Some(row) => assert(row.get(1).toString.contains("10000"))
-      case None => assert(false)
-    }
-    descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
-      case Some(row) => assert(row.get(1).toString.contains("name,surname"))
-      case None => assert(false)
-    }
-
     checkAnswer(sql("select count(*) from sdkTable"), Seq(Row(1)))
     FileUtils.deleteDirectory(new File(writerPath))
   }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/34ca0214/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala
index 7593f38..62ba03e 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableWithComplexType.scala
@@ -218,15 +218,7 @@ class TestNonTransactionalCarbonTableWithComplexType extends QueryTest with Befo
     sql("describe formatted localComplex").show(30, false)
     val descLoc = sql("describe formatted localComplex").collect
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
-      case None => assert(false)
-    }
-    descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
-      case Some(row) => assert(row.get(1).toString.contains("10000"))
-      case None => assert(false)
-    }
-    descLoc.find(_.get(0).toString.contains("Local Dictionary Include")) match {
-      case Some(row) => assert(row.get(1).toString.contains("name,val1.val2.street,val1.val2.city,val1.val2.WindSpeed,val1.val2.year"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
       case None => assert(false)
     }
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/34ca0214/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/describeTable/TestDescribeTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/describeTable/TestDescribeTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/describeTable/TestDescribeTable.scala
index 93f1736..d0d89aa 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/describeTable/TestDescribeTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/describeTable/TestDescribeTable.scala
@@ -51,10 +51,10 @@ class TestDescribeTable extends QueryTest with BeforeAndAfterAll {
   test("test describe formatted table desc1") {
 
     val resultCol = Seq("", "", "##Detailed Column property", "##Detailed Table Information", "ADAPTIVE", "CARBON Store Path", "Comment", "Database Name", "Last Update Time",
-    "SORT_COLUMNS", "SORT_SCOPE", "CACHE_LEVEL", "Streaming", "Table Block Size", "Local Dictionary Enabled", "Local Dictionary Include", "Local Dictionary Threshold","Table Data Size", "Table Index Size", "Table Name", "dec2col1", "dec2col2", "dec2col3", "dec2col4")
+    "SORT_COLUMNS", "SORT_SCOPE", "CACHE_LEVEL", "Streaming", "Table Block Size", "Local Dictionary Enabled","Table Data Size", "Table Index Size", "Table Name", "dec2col1", "dec2col2", "dec2col3", "dec2col4")
     val resultRow: Seq[Row] = resultCol map(propName => Row(f"$propName%-36s"))
     checkAnswer(sql("desc formatted DESC1").select("col_name"), resultRow)
-    assert(sql("desc formatted desc1").count() == 24)
+    assert(sql("desc formatted desc1").count() == 22)
   }
 
   test("test describe formatted for partition table") {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/34ca0214/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala
index 47a58ae..373b309 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportAlterTableTest.scala
@@ -232,7 +232,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'org.apache.carbondata.format' tblproperties('local_dictionary_threshold'='300000')
+        | STORED BY 'org.apache.carbondata.format' tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='300000')
       """.stripMargin)
     val descLoc = sql("describe formatted local1").collect
     descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
@@ -243,7 +243,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'org.apache.carbondata.format' tblproperties('local_dictionary_threshold'='500')
+        | STORED BY 'org.apache.carbondata.format' tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='500')
       """.stripMargin)
     val descLoc1 = sql("describe formatted local1").collect
     descLoc1.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
@@ -278,12 +278,12 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     }
   }
 
-  test("test local dictionary foer varchar datatype columns") {
+  test("test local dictionary for varchar datatype columns") {
     sql("drop table if exists local1")
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'org.apache.carbondata.format' tblproperties('local_dictionary_include'='city',
+        | STORED BY 'org.apache.carbondata.format' tblproperties('local_dictionary_enable'='true','local_dictionary_include'='city',
         | 'LONG_STRING_COLUMNS'='city')
       """.stripMargin)
     val descLoc = sql("describe formatted local1").collect
@@ -302,7 +302,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata'
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -325,7 +325,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_threshold'='300000')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='300000')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -340,7 +340,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_threshold'='300000')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='300000')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -367,7 +367,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata'
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -393,7 +393,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata'
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -418,7 +418,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata'
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -447,7 +447,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_include'='name')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -473,7 +473,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_include'='name')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -499,7 +499,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_include'='name')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -529,7 +529,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_include'='name')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -551,7 +551,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_include'='name')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -570,7 +570,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_include'='name')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -589,7 +589,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('dictionary_include'='name')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','dictionary_include'='name')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -608,7 +608,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_exclude'='name')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_exclude'='name')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -637,7 +637,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_exclude'='name')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_exclude'='name')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -656,7 +656,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('dictionary_include'='name')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','dictionary_include'='name')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -675,7 +675,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_include'='name')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -694,7 +694,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_include'='name')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -745,7 +745,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_include'='name','local_dictionary_exclude'='city')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name','local_dictionary_exclude'='city')
       """.stripMargin)
 
     sql("alter table local1 unset tblproperties('local_dictionary_exclude')")
@@ -878,7 +878,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql("alter table local1 unset tblproperties('local_dictionary_enable')")
     val descLoc2 = sql("describe formatted local1").collect
     descLoc2.find(_.get(0).toString.contains("Local Dictionary Enable")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
       case None => assert(false)
     }
   }
@@ -888,7 +888,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int,add string)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_include'='city')
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_include'='city')
       """.stripMargin)
 
     val descLoc1 = sql("describe formatted local1").collect
@@ -909,7 +909,7 @@ class LocalDictionarySupportAlterTableTest extends QueryTest with BeforeAndAfter
     sql(
       """
         | CREATE TABLE local1(id int, name string, city string, age int,add string)
-        | STORED BY 'carbondata' tblproperties('local_dictionary_include'='city',
+        | STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true','local_dictionary_include'='city',
         | 'local_dictionary_exclude'='name')
       """.stripMargin)
 

http://git-wip-us.apache.org/repos/asf/carbondata/blob/34ca0214/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
index eec1582..52c18d0 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportCreateTableTest.scala
@@ -40,11 +40,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
 
     val descLoc = sql("describe formatted local1").collect
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
-      case None => assert(false)
-    }
-    descLoc.find(_.get(0).toString.contains("Local Dictionary Threshold")) match {
-      case Some(row) => assert(row.get(1).toString.contains("10000"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
       case None => assert(false)
     }
   }
@@ -55,7 +51,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_include'='name')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name')
       """.
         stripMargin)
     val descFormatted1 = sql("describe formatted local1").collect
@@ -77,7 +73,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_include'='name,name')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_include'='name,name')
         """.stripMargin)
     }
   }
@@ -89,7 +85,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_include'='')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_include'='')
         """.
           stripMargin)
     }
@@ -106,7 +102,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_include'='abc')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_include'='abc')
         """.
           stripMargin)
     }
@@ -123,7 +119,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_include'='id')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_include'='id')
         """.
           stripMargin)
     }
@@ -142,7 +138,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('dictionary_include'='name','local_dictionary_include'='name')
+          | tblproperties('local_dictionary_enable'='true','dictionary_include'='name','local_dictionary_include'='name')
         """.
           stripMargin)
     }
@@ -154,7 +150,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='20000')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='20000')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -174,7 +170,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='-100')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='-100')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -190,7 +186,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='21474874811')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='21474874811')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -206,7 +202,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -222,7 +218,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='hello')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='hello')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -239,7 +235,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='20000','local_dictionary_include'='name')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='20000','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -264,7 +260,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='-100','local_dictionary_include'='name')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='-100','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -289,7 +285,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='','local_dictionary_include'='name')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -314,7 +310,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('local_dictionary_threshold'='vdslv','local_dictionary_include'='name')
+        | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='vdslv','local_dictionary_include'='name')
       """.stripMargin)
 
     val descLoc = sql("describe formatted local1").collect
@@ -340,7 +336,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='20000','local_dictionary_include'='name,
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='20000','local_dictionary_include'='name,
           | name')
         """.stripMargin)
     }
@@ -354,7 +350,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='20000','local_dictionary_include'=' ')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='20000','local_dictionary_include'=' ')
         """.stripMargin)
     }
   }
@@ -367,7 +363,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='20000','local_dictionary_include'='hello')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='20000','local_dictionary_include'='hello')
         """.stripMargin)
     }
   }
@@ -380,7 +376,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='20000','local_dictionary_include'='name',
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='20000','local_dictionary_include'='name',
           | 'dictionary_include'='name')
         """.stripMargin)
     }
@@ -394,7 +390,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='','local_dictionary_include'='name,name')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='','local_dictionary_include'='name,name')
         """.stripMargin)
     }
   }
@@ -407,7 +403,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='-100','local_dictionary_include'='Hello')
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='-100','local_dictionary_include'='Hello')
         """.stripMargin)
     }
   }
@@ -420,7 +416,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
         """
           | CREATE TABLE local1(id int, name string, city string, age int)
           | STORED BY 'org.apache.carbondata.format'
-          | tblproperties('local_dictionary_threshold'='23213497321591234324',
+          | tblproperties('local_dictionary_enable'='true','local_dictionary_threshold'='23213497321591234324',
           | 'local_dictionary_include'='name','dictionary_include'='name')
         """.stripMargin)
     }
@@ -1504,7 +1500,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
       """
         | CREATE TABLE local1(id int, name string, city string, age int)
         | STORED BY 'org.apache.carbondata.format'
-        | tblproperties('dictionary_include'='city','sort_scope'='global_sort',
+        | tblproperties('local_dictionary_enable'='true','dictionary_include'='city','sort_scope'='global_sort',
         | 'sort_columns'='city,name')
       """.stripMargin)
 
@@ -1533,7 +1529,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
     val descLoc = sql("describe formatted local1").collect
 
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("SORT_SCOPE")) match {
@@ -1554,7 +1550,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
     val descLoc = sql("describe formatted local1").collect
 
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("SORT_SCOPE")) match {
@@ -1575,7 +1571,7 @@ class LocalDictionarySupportCreateTableTest extends QueryTest with BeforeAndAfte
     val descLoc = sql("describe formatted local1").collect
 
     descLoc.find(_.get(0).toString.contains("Local Dictionary Enabled")) match {
-      case Some(row) => assert(row.get(1).toString.contains("true"))
+      case Some(row) => assert(row.get(1).toString.contains("false"))
       case None => assert(false)
     }
     descLoc.find(_.get(0).toString.contains("SORT_SCOPE")) match {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/34ca0214/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportLoadTableTest.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportLoadTableTest.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportLoadTableTest.scala
index a3ab851..59586c0 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportLoadTableTest.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/localdictionary/LocalDictionarySupportLoadTableTest.scala
@@ -65,14 +65,14 @@ class LocalDictionarySupportLoadTableTest extends QueryTest with BeforeAndAfterA
     sql("drop table if exists local2")
     sql(
       "CREATE TABLE local2(name string) STORED BY 'carbondata' tblproperties" +
-      "('local_dictionary_threshold'='9001','local_dictionary_include'='name')")
+      "('local_dictionary_enable'='true','local_dictionary_threshold'='9001','local_dictionary_include'='name')")
     sql("load data inpath '" + file1 + "' into table local2 OPTIONS('header'='false')")
     assert(checkForLocalDictionary(getDimRawChunk(0)))
   }
 
   test("test successful local dictionary generation for default configs") {
     sql("drop table if exists local2")
-    sql("CREATE TABLE local2(name string) STORED BY 'carbondata'")
+    sql("CREATE TABLE local2(name string) STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true')")
     sql("load data inpath '" + file1 + "' into table local2 OPTIONS('header'='false')")
     assert(checkForLocalDictionary(getDimRawChunk(0)))
   }
@@ -90,7 +90,7 @@ class LocalDictionarySupportLoadTableTest extends QueryTest with BeforeAndAfterA
     sql("drop table if exists local2")
     sql(
       "CREATE TABLE local2(name string) STORED BY 'carbondata' tblproperties" +
-      "('dictionary_exclude'='name')")
+      "('local_dictionary_enable'='true','dictionary_exclude'='name')")
     sql("load data inpath '" + file1 + "' into table local2 OPTIONS('header'='false')")
     assert(checkForLocalDictionary(getDimRawChunk(0)))
   }
@@ -108,7 +108,7 @@ class LocalDictionarySupportLoadTableTest extends QueryTest with BeforeAndAfterA
     sql("drop table if exists local2")
     sql(
       "CREATE TABLE local2(name string) STORED BY 'carbondata' tblproperties" +
-      "('local_dictionary_include'='name','local_dictionary_threshold'='300000')")
+      "('local_dictionary_enable'='true','local_dictionary_include'='name','local_dictionary_threshold'='300000')")
     sql("load data inpath '" + file1 + "' into table local2 OPTIONS('header'='false')")
     assert(checkForLocalDictionary(getDimRawChunk(0)))
   }
@@ -117,7 +117,7 @@ class LocalDictionarySupportLoadTableTest extends QueryTest with BeforeAndAfterA
     sql("drop table if exists local2")
     sql(
       "CREATE TABLE local2(name string, age string) STORED BY 'carbondata' tblproperties" +
-      "('local_dictionary_include'='name', 'local_dictionary_exclude'='age')")
+      "('local_dictionary_enable'='true','local_dictionary_include'='name', 'local_dictionary_exclude'='age')")
     sql("insert into table local2 values('vishal', '30')")
     assert(checkForLocalDictionary(getDimRawChunk(0)))
     assert(!checkForLocalDictionary(getDimRawChunk(1)))
@@ -127,7 +127,7 @@ class LocalDictionarySupportLoadTableTest extends QueryTest with BeforeAndAfterA
     sql("drop table if exists local2")
     sql(
       "CREATE TABLE local2(name struct<i:string,s:string>) STORED BY 'carbondata' tblproperties" +
-      "('local_dictionary_include'='name')")
+      "('local_dictionary_enable'='true','local_dictionary_include'='name')")
     sql("load data inpath '" + file2 +
         "' into table local2 OPTIONS('header'='false','COMPLEX_DELIMITER_LEVEL_1'='$', " +
         "'COMPLEX_DELIMITER_LEVEL_2'=':')")
@@ -138,7 +138,7 @@ class LocalDictionarySupportLoadTableTest extends QueryTest with BeforeAndAfterA
 
   test("test to validate local dictionary values"){
     sql("drop table if exists local2")
-    sql("CREATE TABLE local2(name string) STORED BY 'carbondata'")
+    sql("CREATE TABLE local2(name string) STORED BY 'carbondata' tblproperties('local_dictionary_enable'='true')")
     sql("load data inpath '" + resourcesPath + "/localdictionary.csv" + "' into table local2")
     val dimRawChunk = getDimRawChunk(0)
     val dictionaryData = Array("vishal", "kumar", "akash", "praveen", "brijoo")

http://git-wip-us.apache.org/repos/asf/carbondata/blob/34ca0214/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
index 41dfea5..9b9e8bd 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
@@ -160,7 +160,8 @@ private[sql] case class CarbonDescribeFormattedCommand(
         }
       }
     } else {
-      results ++= Seq(("Local Dictionary Enabled", "false", ""))
+      results ++=
+      Seq(("Local Dictionary Enabled", CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE_DEFAULT, ""))
     }
 
     /**


[35/50] [abbrv] carbondata git commit: [CARBONDATA-2812] Implement freeMemory for complex pages

Posted by ja...@apache.org.
[CARBONDATA-2812] Implement freeMemory for complex pages

Problem:
The memory used by the ColumnPageWrapper (for complex data types) is never
released, so loads and queries require more memory than necessary.

Solution:
Clear the used memory in the freeMemory method.

This closes #2599


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/f2e898ac
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/f2e898ac
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/f2e898ac

Branch: refs/heads/external-format
Commit: f2e898ac585458b6c99e08c8fac0e47bec93fee0
Parents: a2928e3
Author: dhatchayani <dh...@gmail.com>
Authored: Thu Aug 2 08:30:32 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Thu Aug 2 17:49:26 2018 +0530

----------------------------------------------------------------------
 .../core/datastore/chunk/store/ColumnPageWrapper.java           | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/f2e898ac/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java
----------------------------------------------------------------------
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java
index 180b3a2..a5d5917 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java
@@ -163,7 +163,10 @@ public class ColumnPageWrapper implements DimensionColumnPage {
 
   @Override
   public void freeMemory() {
-
+    if (null != columnPage) {
+      columnPage.freeMemory();
+      columnPage = null;
+    }
   }
 
   public boolean isAdaptiveComplexPrimitive() {


[29/50] [abbrv] carbondata git commit: [CARBONDATA-2792][schema restructure] Create external table fails post schema restructure.

Posted by ja...@apache.org.
[CARBONDATA-2792][schema restructure] Create external table fails post schema restructure.

Problem
Once the table schema is restructured (a column is dropped and a new column is added),
the API org.apache.carbondata.spark.util.CarbonSparkUtil.getRawSchema(carbonRelation: CarbonRelation) : String,
which builds the raw schema string of the visible columns in ascending order of their
schema ordinal, throws ArrayIndexOutOfBoundsException while creating an external table.
The API prepares the array of raw column schemas of the visible columns in ascending order
of schema ordinal. It uses the schemaOrdinal as the array index, so a column whose
schemaOrdinal is larger than the visible column count causes the ArrayIndexOutOfBoundsException.

Solution
1. Filter the visible and valid columns.
2. Sort the columns based on the schema ordinal.
3. Prepare the raw column schema based on each column's index in the sorted list.
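
A condensed sketch of the failing scenario, distilled from the test added in this
patch (table names and the store location are illustrative, and the usual QueryTest
helpers sql/storeLocation are assumed):

    sql("CREATE TABLE rstest1 (c1 string, c2 int) STORED BY 'carbondata'")
    sql("ALTER TABLE rstest1 DROP COLUMNS(c2)")
    sql("ALTER TABLE rstest1 ADD COLUMNS(c4 string) " +
        "TBLPROPERTIES('DICTIONARY_EXCLUDE'='c4', 'DEFAULT.VALUE.c4'='def')")
    // Before this fix, getRawSchema threw ArrayIndexOutOfBoundsException here,
    // because the added column's schemaOrdinal exceeds the visible column count.
    sql(s"CREATE EXTERNAL TABLE rsext STORED BY 'carbondata' LOCATION '$storeLocation/rstest1'")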

This closes #2571


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/b483a574
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/b483a574
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/b483a574

Branch: refs/heads/external-format
Commit: b483a57464a860fe9dbd2d074a3e08fd59141edc
Parents: 625a2ef
Author: mohammadshahidkhan <mo...@gmail.com>
Authored: Fri Jul 27 12:51:49 2018 +0530
Committer: kunal642 <ku...@gmail.com>
Committed: Thu Aug 2 16:38:02 2018 +0530

----------------------------------------------------------------------
 .../createTable/TestCreateExternalTable.scala         | 14 ++++++++++++++
 .../carbondata/spark/util/CarbonSparkUtil.scala       | 14 +++++++++-----
 2 files changed, 23 insertions(+), 5 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/b483a574/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala
----------------------------------------------------------------------
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala
index a9b8d57..6fb24c7 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateExternalTable.scala
@@ -32,6 +32,8 @@ class TestCreateExternalTable extends QueryTest with BeforeAndAfterAll {
 
   override def beforeAll(): Unit = {
     sql("DROP TABLE IF EXISTS origin")
+    sql("drop table IF EXISTS rsext")
+    sql("drop table IF EXISTS rstest1")
     // create carbon table and insert data
     sql("CREATE TABLE origin(key INT, value STRING) STORED BY 'carbondata'")
     sql("INSERT INTO origin select 100,'spark'")
@@ -41,6 +43,8 @@ class TestCreateExternalTable extends QueryTest with BeforeAndAfterAll {
 
   override def afterAll(): Unit = {
     sql("DROP TABLE IF EXISTS origin")
+    sql("drop table IF EXISTS rsext")
+    sql("drop table IF EXISTS rstest1")
   }
 
   test("create external table with existing files") {
@@ -111,5 +115,15 @@ class TestCreateExternalTable extends QueryTest with BeforeAndAfterAll {
     }
     assert(exception.getMessage().contains("Create external table as select"))
   }
+  test("create external table with post schema resturcture") {
+    sql("create table rstest1 (c1 string,c2 int) STORED BY 'org.apache.carbondata.format'")
+    sql("Alter table rstest1 drop columns(c2)")
+    sql(
+      "Alter table rstest1 add columns(c4 string) TBLPROPERTIES('DICTIONARY_EXCLUDE'='c4', " +
+      "'DEFAULT.VALUE.c4'='def')")
+    sql(s"""CREATE EXTERNAL TABLE rsext STORED BY 'carbondata' LOCATION '$storeLocation/rstest1'""")
+    sql("insert into rsext select 'shahid', 1")
+    checkAnswer(sql("select * from rstest1"),  sql("select * from rsext"))
+  }
 
 }

http://git-wip-us.apache.org/repos/asf/carbondata/blob/b483a574/integration/spark2/src/main/scala/org/apache/carbondata/spark/util/CarbonSparkUtil.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/spark/util/CarbonSparkUtil.scala b/integration/spark2/src/main/scala/org/apache/carbondata/spark/util/CarbonSparkUtil.scala
index b9e2442..a0c0545 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/spark/util/CarbonSparkUtil.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/spark/util/CarbonSparkUtil.scala
@@ -18,13 +18,14 @@
 package org.apache.carbondata.spark.util
 
 import scala.collection.JavaConverters._
+import scala.collection.mutable
 
 import org.apache.spark.sql.hive.{CarbonMetaData, CarbonRelation, DictionaryMap}
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.metadata.encoder.Encoding
 import org.apache.carbondata.core.metadata.schema.table.{CarbonTable, TableInfo}
-import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn
+import org.apache.carbondata.core.metadata.schema.table.column.{CarbonColumn, ColumnSchema}
 import org.apache.carbondata.core.util.CarbonUtil
 
 case class TransformHolder(rdd: Any, mataData: CarbonMetaData)
@@ -87,18 +88,21 @@ object CarbonSparkUtil {
     val fields = new Array[String](
       carbonRelation.dimensionsAttr.size + carbonRelation.measureAttr.size)
     val carbonTable = carbonRelation.carbonTable
+    val columnSchemas: mutable.Buffer[ColumnSchema] = carbonTable.getTableInfo.getFactTable.
+      getListOfColumns.asScala
+      .filter(cSchema => !cSchema.isInvisible && cSchema.getSchemaOrdinal != -1).
+      sortWith(_.getSchemaOrdinal < _.getSchemaOrdinal)
+    val columnList = columnSchemas.toList.asJava
     carbonRelation.dimensionsAttr.foreach(attr => {
-      val carbonDimension = carbonTable.getDimensionByName(carbonRelation.tableName, attr.name)
       val carbonColumn = carbonTable.getColumnByName(carbonRelation.tableName, attr.name)
       val columnComment = getColumnComment(carbonColumn)
-      fields(carbonDimension.getSchemaOrdinal) =
+      fields(columnList.indexOf(carbonColumn.getColumnSchema)) =
         '`' + attr.name + '`' + ' ' + attr.dataType.catalogString + columnComment
     })
     carbonRelation.measureAttr.foreach(msrAtrr => {
-      val carbonMeasure = carbonTable.getMeasureByName(carbonRelation.tableName, msrAtrr.name)
       val carbonColumn = carbonTable.getColumnByName(carbonRelation.tableName, msrAtrr.name)
       val columnComment = getColumnComment(carbonColumn)
-      fields(carbonMeasure.getSchemaOrdinal) =
+      fields(columnList.indexOf(carbonColumn.getColumnSchema)) =
         '`' + msrAtrr.name + '`' + ' ' + msrAtrr.dataType.catalogString + columnComment
     })
     fields.mkString(",")


[37/50] [abbrv] carbondata git commit: [CARBONDATA-2802][BloomDataMap] Remove clearing cache after rebuilding index datamap

Posted by ja...@apache.org.
[CARBONDATA-2802][BloomDataMap] Remove clearing cache after rebuilding index datamap

There is no need to clear the cache after rebuilding an index datamap, for the
following reasons:

1. Currently it would clear the caches of all index datamaps, not only the one
being rebuilt.
2. The life cycle of the table data and the index datamap data is the same, so
there is no need to clear it (once the index datamap is created, or once the
main table is loaded, the datamap data is generated too -- in both scenarios
the datamap data is up to date with the main table).

This closes #2597


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/26d9f3d8
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/26d9f3d8
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/26d9f3d8

Branch: refs/heads/external-format
Commit: 26d9f3d8e4cbba1242768eec46e8b119b6678bfe
Parents: 38384cb
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Thu Aug 2 10:45:17 2018 +0800
Committer: Jacky Li <ja...@qq.com>
Committed: Fri Aug 3 11:55:24 2018 +0800

----------------------------------------------------------------------
 .../org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala      | 1 -
 1 file changed, 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/26d9f3d8/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala b/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
index 2d684bf..f92ed6c 100644
--- a/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
+++ b/integration/spark2/src/main/scala/org/apache/carbondata/datamap/IndexDataMapRebuildRDD.scala
@@ -131,7 +131,6 @@ object IndexDataMapRebuildRDD {
     if (failedSegments.nonEmpty) {
       throw new Exception(s"Failed to refresh datamap ${ schema.getDataMapName }")
     }
-    DataMapStoreManager.getInstance().clearDataMaps(tableIdentifier)
 
     val buildDataMapPostExecutionEvent = new BuildDataMapPostExecutionEvent(sparkSession,
       tableIdentifier)


[32/50] [abbrv] carbondata git commit: [CARBONDATA-2799][BloomDataMap] Fix bugs in querying with bloom datamap on preagg with dictionary column

Posted by ja...@apache.org.
[CARBONDATA-2799][BloomDataMap] Fix bugs in querying with bloom datamap on preagg with dictionary column

For a preaggregate table, if the group-by column is a dictionary column in the
parent table, the preaggregate table inherits the dictionary encoding as well
as the dictionary file from the parent table.

So for dictionary columns, during a query with the bloom datamap, we need to
convert the plain filter value to its dictionary-encoded value based on the
parent table's dictionary file.
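
A condensed sketch of the affected setup, distilled from the test added below
(table and datamap names follow the test's naming pattern; the QueryTest sql
helper is assumed):

    // parent table with a dictionary column, plus a preaggregate datamap on it
    sql("CREATE TABLE maintable (id int, name string) STORED BY 'carbondata' " +
        "TBLPROPERTIES('dictionary_include'='id')")
    sql("CREATE DATAMAP preagg_on_base ON TABLE maintable USING 'preaggregate' AS " +
        "SELECT id, count(id) FROM maintable GROUP BY id")
    // bloom datamap on the preagg child table: the indexed column inherits the
    // parent table's dictionary, so filter values must be encoded against the
    // parent's dictionary file during pruning
    sql("CREATE DATAMAP bloom_on_pre_agg ON TABLE maintable_preagg_on_base " +
        "USING 'bloomfilter' DMPROPERTIES('INDEX_COLUMNS'='maintable_id')")
    sql("SELECT id, count(id) FROM maintable WHERE id = 3 GROUP BY id")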

This closes #2580


Project: http://git-wip-us.apache.org/repos/asf/carbondata/repo
Commit: http://git-wip-us.apache.org/repos/asf/carbondata/commit/bd6abbbf
Tree: http://git-wip-us.apache.org/repos/asf/carbondata/tree/bd6abbbf
Diff: http://git-wip-us.apache.org/repos/asf/carbondata/diff/bd6abbbf

Branch: refs/heads/external-format
Commit: bd6abbbffd36b5ca0aaad9d937d401982d1d60eb
Parents: b65bf9b
Author: xuchuanyin <xu...@hust.edu.cn>
Authored: Mon Jul 30 17:50:51 2018 +0800
Committer: kunal642 <ku...@gmail.com>
Committed: Thu Aug 2 16:55:59 2018 +0530

----------------------------------------------------------------------
 .../datamap/bloom/BloomCoarseGrainDataMap.java  | 21 ++++-
 .../BloomCoarseGrainDataMapFunctionSuite.scala  | 97 ++++++++++++++++++++
 2 files changed, 117 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/carbondata/blob/bd6abbbf/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
----------------------------------------------------------------------
diff --git a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
index be531d6..71b1c55 100644
--- a/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
+++ b/datamap/bloom/src/main/java/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMap.java
@@ -47,10 +47,12 @@ import org.apache.carbondata.core.devapi.DictionaryGenerationException;
 import org.apache.carbondata.core.indexstore.Blocklet;
 import org.apache.carbondata.core.indexstore.PartitionSpec;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
+import org.apache.carbondata.core.metadata.CarbonMetadata;
 import org.apache.carbondata.core.metadata.datatype.DataType;
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.metadata.encoder.Encoding;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
+import org.apache.carbondata.core.metadata.schema.table.RelationIdentifier;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
 import org.apache.carbondata.core.scan.expression.ColumnExpression;
 import org.apache.carbondata.core.scan.expression.Expression;
@@ -108,6 +110,7 @@ public class BloomCoarseGrainDataMap extends CoarseGrainDataMap {
     for (CarbonColumn col : indexedColumn) {
       this.name2Col.put(col.getColName(), col);
     }
+    String parentTablePath = getAncestorTablePath(carbonTable);
 
     try {
       this.name2Converters = new HashMap<>(indexedColumn.size());
@@ -129,7 +132,7 @@ public class BloomCoarseGrainDataMap extends CoarseGrainDataMap {
         dataField.setTimestampFormat(tsFormat);
         FieldConverter fieldConverter = FieldEncoderFactory.getInstance()
             .createFieldEncoder(dataField, absoluteTableIdentifier, i, nullFormat, null, false,
-                localCaches[i], false, carbonTable.getTablePath());
+                localCaches[i], false, parentTablePath);
         this.name2Converters.put(indexedColumn.get(i).getColName(), fieldConverter);
       }
     } catch (IOException e) {
@@ -140,6 +143,22 @@ public class BloomCoarseGrainDataMap extends CoarseGrainDataMap {
     this.badRecordLogHolder.setLogged(false);
   }
 
+  /**
+   * recursively find the ancestor's table path. This is used for dictionary scenario
+   * where preagg will use the dictionary of the parent table.
+   */
+  private String getAncestorTablePath(CarbonTable currentTable) {
+    if (!currentTable.isChildDataMap()) {
+      return currentTable.getTablePath();
+    }
+
+    RelationIdentifier parentIdentifier =
+        currentTable.getTableInfo().getParentRelationIdentifiers().get(0);
+    CarbonTable parentTable = CarbonMetadata.getInstance().getCarbonTable(
+        parentIdentifier.getDatabaseName(), parentIdentifier.getTableName());
+    return getAncestorTablePath(parentTable);
+  }
+
   @Override
   public List<Blocklet> prune(FilterResolverIntf filterExp, SegmentProperties segmentProperties,
       List<PartitionSpec> partitions) throws IOException {

http://git-wip-us.apache.org/repos/asf/carbondata/blob/bd6abbbf/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFunctionSuite.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFunctionSuite.scala b/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFunctionSuite.scala
index 496a506..fd1345c 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFunctionSuite.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/datamap/bloom/BloomCoarseGrainDataMapFunctionSuite.scala
@@ -832,6 +832,103 @@ class BloomCoarseGrainDataMapFunctionSuite  extends QueryTest with BeforeAndAfte
       CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL)
   }
 
+  /**
+   * create bloom and preagg on base table, then create bloom on preagg table,
+   * index column and group by column is dictionary column.
+   * note that the test steps are copied from issue.
+   * In the CI env, sometime it will become timeout, so we ignore the newly added tests
+   */
+  ignore("test bloom datamap: CARBONDATA-2799 bloom datamap on preaggregate") {
+    sql(
+      s"""
+         | CREATE TABLE $normalTable (id int, name string, salary float,dob date)
+         | STORED BY 'carbondata'
+         | TBLPROPERTIES('dictionary_include'='id')
+       """.stripMargin)
+    sql(
+      s"""
+         | CREATE TABLE $bloomDMSampleTable (id int, name string, salary float,dob date)
+         | STORED BY 'carbondata'
+         | TBLPROPERTIES('dictionary_include'='id')
+       """.stripMargin)
+    (1 to 2).foreach { _ =>
+      sql(
+        s"""
+           | INSERT INTO $bloomDMSampleTable VALUES
+           | ('1', 'name1', '11.1', '2018-07-01'),
+           | ('2', 'name2', '21.1', '2018-07-02'),
+           | ('3', 'name3', '31.1', '2018-07-03'),
+           | ('4', 'name4', '41.1', '2018-07-04')
+       """.stripMargin)
+      sql(
+        s"""
+           | INSERT INTO $normalTable VALUES
+           | ('1', 'name1', '11.1', '2018-07-01'),
+           | ('2', 'name2', '21.1', '2018-07-02'),
+           | ('3', 'name3', '31.1', '2018-07-03'),
+           | ('4', 'name4', '41.1', '2018-07-04')
+       """.stripMargin)
+    }
+    sql(
+      s"""
+         | CREATE DATAMAP $dataMapName ON TABLE $bloomDMSampleTable
+         | USING 'bloomfilter'
+         | DMPROPERTIES('INDEX_COLUMNS'='id', 'BLOOM_SIZE'='320000', 'BLOOM_FPP'='0.01', 'BLOOM_COMPRESS'='TRUE')
+       """.stripMargin)
+    sql(
+      s"""
+         | INSERT INTO $bloomDMSampleTable VALUES
+         | ('1', 'name1', '11.1', '2018-07-01'),
+         | ('2', 'name2', '21.1', '2018-07-02'),
+         | ('3', 'name3', '31.1', '2018-07-03'),
+         | ('4', 'name4', '41.1', '2018-07-04')
+       """.stripMargin)
+    sql(
+      s"""
+         | INSERT INTO $normalTable VALUES
+         | ('1', 'name1', '11.1', '2018-07-01'),
+         | ('2', 'name2', '21.1', '2018-07-02'),
+         | ('3', 'name3', '31.1', '2018-07-03'),
+         | ('4', 'name4', '41.1', '2018-07-04')
+       """.stripMargin)
+    val preAggOnBase = "preagg_on_base"
+    sql(
+      s"""
+         | CREATE DATAMAP $preAggOnBase ON TABLE $bloomDMSampleTable
+         | USING 'preaggregate' AS
+         | select id, count(id) from $bloomDMSampleTable group by id
+       """.stripMargin)
+    checkAnswer(sql(s"SELECT id, count(id) from $bloomDMSampleTable where id = 3 group by id"),
+      sql(s"SELECT id, count(id) from $normalTable where id = 3 group by id"))
+
+    val bloomOnPreAgg = "bloom_on_pre_agg"
+    sql(
+      s"""
+         | CREATE DATAMAP $bloomOnPreAgg ON TABLE ${bloomDMSampleTable}_${preAggOnBase}
+         | USING 'bloomfilter'
+         | DMPROPERTIES('INDEX_COLUMNS'='${bloomDMSampleTable}_id')
+       """.stripMargin)
+    checkAnswer(sql(s"SELECT id, count(id) from $bloomDMSampleTable where id = 3 group by id"),
+      sql(s"SELECT id, count(id) from $normalTable where id = 3 group by id"))
+
+    sql(s"DROP DATAMAP $bloomOnPreAgg on table ${bloomDMSampleTable}_${preAggOnBase}")
+    checkAnswer(sql(s"SELECT id, count(id) from $bloomDMSampleTable where id = 3 group by id"),
+      sql(s"SELECT id, count(id) from $normalTable where id = 3 group by id"))
+
+    sql(
+      s"""
+         | CREATE DATAMAP $bloomOnPreAgg ON TABLE ${bloomDMSampleTable}_${preAggOnBase}
+         | USING 'bloomfilter'
+         | DMPROPERTIES('INDEX_COLUMNS'='${bloomDMSampleTable}_id')
+       """.stripMargin)
+    checkAnswer(sql(s"SELECT id, count(id) from $bloomDMSampleTable where id = 3 group by id"),
+      sql(s"SELECT id, count(id) from $normalTable where id = 3 group by id"))
+
+    sql(s"DROP DATAMAP $bloomOnPreAgg on table ${bloomDMSampleTable}_${preAggOnBase}")
+    checkAnswer(sql(s"SELECT id, count(id) from $bloomDMSampleTable where id = 3 group by id"),
+      sql(s"SELECT id, count(id) from $normalTable where id = 3 group by id"))
+  }
+
   override def afterAll(): Unit = {
     deleteFile(bigFile)
     sql(s"DROP TABLE IF EXISTS $normalTable")


[48/50] [abbrv] carbondata git commit: [CARBONDATA-2613] Support csv based carbon table

Posted by ja...@apache.org.
http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddSegmentCommand.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddSegmentCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddSegmentCommand.scala
new file mode 100644
index 0000000..e7f6c7f
--- /dev/null
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonAddSegmentCommand.scala
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.execution.command.management
+
+import java.util.UUID
+
+import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
+import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
+import org.apache.spark.sql.execution.command.AtomicRunnableCommand
+import org.apache.spark.sql.hive.CarbonRelation
+import org.apache.spark.util.FileUtils
+
+import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.datamap.status.DataMapStatusManager
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable
+import org.apache.carbondata.core.mutate.CarbonUpdateUtil
+import org.apache.carbondata.core.statusmanager.{FileFormat, LoadMetadataDetails, SegmentStatus, SegmentStatusManager}
+import org.apache.carbondata.core.util.CarbonUtil
+import org.apache.carbondata.core.util.path.CarbonTablePath
+import org.apache.carbondata.events.{OperationContext, OperationListenerBus}
+import org.apache.carbondata.processing.loading.events.LoadEvents.LoadMetadataEvent
+import org.apache.carbondata.processing.loading.model.{CarbonDataLoadSchema, CarbonLoadModel}
+import org.apache.carbondata.processing.util.CarbonLoaderUtil
+
+/**
+ * support `alter table tableName add segment location 'path'` command.
+ * It will create a segment and map the path of datafile to segment's storage
+ */
+case class CarbonAddSegmentCommand(
+    dbNameOp: Option[String],
+    tableName: String,
+    filePathFromUser: String,
+    var operationContext: OperationContext = new OperationContext) extends AtomicRunnableCommand {
+  private val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
+  var carbonTable: CarbonTable = _
+
+  override def processMetadata(sparkSession: SparkSession): Seq[Row] = {
+    val dbName = CarbonEnv.getDatabaseName(dbNameOp)(sparkSession)
+    carbonTable = {
+      val relation = CarbonEnv.getInstance(sparkSession).carbonMetastore
+        .lookupRelation(Option(dbName), tableName)(sparkSession).asInstanceOf[CarbonRelation]
+      if (relation == null) {
+        LOGGER.error(s"Add segment failed due to table $dbName.$tableName not found")
+        throw new NoSuchTableException(dbName, tableName)
+      }
+      relation.carbonTable
+    }
+
+    if (carbonTable.isHivePartitionTable) {
+      LOGGER.error("Ignore hive partition table for now")
+    }
+
+    operationContext.setProperty("isOverwrite", false)
+    if (CarbonUtil.hasAggregationDataMap(carbonTable)) {
+      val loadMetadataEvent = new LoadMetadataEvent(carbonTable, false)
+      OperationListenerBus.getInstance().fireEvent(loadMetadataEvent, operationContext)
+    }
+    Seq.empty
+  }
+
+  // will just mapping external files to segment metadata
+  override def processData(sparkSession: SparkSession): Seq[Row] = {
+    // clean up invalid segment before creating a new entry
+    SegmentStatusManager.deleteLoadsAndUpdateMetadata(carbonTable, false, null)
+    val currentLoadMetadataDetails = SegmentStatusManager.readLoadMetadata(
+      CarbonTablePath.getMetadataPath(carbonTable.getTablePath))
+    val newSegmentId = SegmentStatusManager.createNewSegmentId(currentLoadMetadataDetails).toString
+    // create new segment folder in carbon store
+    CarbonLoaderUtil.checkAndCreateCarbonDataLocation(newSegmentId, carbonTable)
+
+    val factFilePath = FileUtils.getPaths(filePathFromUser)
+
+    val uuid = if (carbonTable.isChildDataMap) {
+      Option(operationContext.getProperty("uuid")).getOrElse("").toString
+    } else if (carbonTable.hasAggregationDataMap) {
+      UUID.randomUUID().toString
+    } else {
+      ""
+    }
+    // associate segment meta with file path, files are separated with comma
+    val loadModel: CarbonLoadModel = new CarbonLoadModel
+    loadModel.setSegmentId(newSegmentId)
+    loadModel.setDatabaseName(carbonTable.getDatabaseName)
+    loadModel.setTableName(carbonTable.getTableName)
+    loadModel.setTablePath(carbonTable.getTablePath)
+    loadModel.setCarbonTransactionalTable(carbonTable.isTransactionalTable)
+    loadModel.readAndSetLoadMetadataDetails()
+    loadModel.setFactTimeStamp(CarbonUpdateUtil.readCurrentTime())
+    val loadSchema: CarbonDataLoadSchema = new CarbonDataLoadSchema(carbonTable)
+    loadModel.setCarbonDataLoadSchema(loadSchema)
+
+    val newLoadMetadataDetail: LoadMetadataDetails = new LoadMetadataDetails
+
+    // for external datasource table, there are no index files, so no need to write segment file
+
+    // update table status file
+    newLoadMetadataDetail.setSegmentFile(null)
+    newLoadMetadataDetail.setSegmentStatus(SegmentStatus.SUCCESS)
+    newLoadMetadataDetail.setLoadStartTime(loadModel.getFactTimeStamp)
+    newLoadMetadataDetail.setLoadEndTime(CarbonUpdateUtil.readCurrentTime())
+    newLoadMetadataDetail.setIndexSize("1")
+    newLoadMetadataDetail.setDataSize("1")
+    newLoadMetadataDetail.setFileFormat(FileFormat.EXTERNAL)
+    newLoadMetadataDetail.setFactFilePath(factFilePath)
+
+    val done = CarbonLoaderUtil.recordNewLoadMetadata(newLoadMetadataDetail, loadModel, true,
+      false, uuid)
+    if (!done) {
+      val errorMsg =
+        s"""
+           | Data load is failed due to table status update failure for
+           | ${loadModel.getDatabaseName}.${loadModel.getTableName}
+         """.stripMargin
+      throw new Exception(errorMsg)
+    } else {
+      DataMapStatusManager.disableAllLazyDataMaps(carbonTable)
+    }
+    Seq.empty
+  }
+}
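
For context, a minimal usage sketch of the syntax this command enables (the
parser rule follows in the next hunk; database, table and path names are
illustrative):

    // map existing external data files to a new segment of the table; only the
    // fact file path is recorded in the segment metadata, no data is rewritten
    sql("ALTER TABLE mydb.mytable ADD SEGMENT LOCATION '/path/to/data'")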

http://git-wip-us.apache.org/repos/asf/carbondata/blob/1a26ac16/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
----------------------------------------------------------------------
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
index 8eb47fc..d6691f6 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
@@ -79,7 +79,7 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
     alterPartition | datamapManagement | alterTableFinishStreaming | stream
 
   protected lazy val loadManagement: Parser[LogicalPlan] =
-    deleteLoadsByID | deleteLoadsByLoadDate | cleanFiles | loadDataNew
+    deleteLoadsByID | deleteLoadsByLoadDate | cleanFiles | loadDataNew | addSegment
 
   protected lazy val restructure: Parser[LogicalPlan] =
     alterTableModifyDataType | alterTableDropColumn | alterTableAddColumns
@@ -443,6 +443,17 @@ class CarbonSpark2SqlParser extends CarbonDDLSqlParser {
           partition = partitionSpec)
     }
 
+  /**
+   * The syntax of
+   * ALTER TABLE [dbName.]tableName ADD SEGMENT LOCATION 'path/to/data'
+   */
+  protected lazy val addSegment: Parser[LogicalPlan] =
+    ALTER ~> TABLE ~> (ident <~ ".").? ~ ident ~
+    (ADD ~> SEGMENT ~> LOCATION ~> stringLit) <~ opt(";") ^^ {
+      case dbName ~ tableName ~ filePath =>
+        CarbonAddSegmentCommand(convertDbNameToLowerCase(dbName), tableName, filePath)
+    }
+
   protected lazy val deleteLoadsByID: Parser[LogicalPlan] =
     DELETE ~> FROM ~ TABLE ~> (ident <~ ".").? ~ ident ~
     (WHERE ~> (SEGMENT ~ "." ~ ID) ~> IN ~> "(" ~> repsep(segmentId, ",")) <~ ")" ~