Posted to commits@carbondata.apache.org by ra...@apache.org on 2019/05/16 19:05:33 UTC

[carbondata] branch branch-1.5 updated (4f95559 -> ea1e86c)

This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a change to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git.


    from 4f95559  [maven-release-plugin] prepare for next development iteration
     new 9a54c88  [CARBONDATA-3341] fixed invalid NULL result in filter query
     new eeb7e3a  [CARBONDATA-3001] configurable page size in MB
     new f0e2706  [CARBONDATA-3331] Fix for external table in Show Metacache
     new d1bb3a0  [CARBONDATA-3334] fixed multiple segment file issue for partition
     new 743b843  [CARBONDATA-3353] Fixed MinMax Based Pruning for Measure column in case of Legacy store
     new 4f7f17d  [CARBONDATA-3344] Fix Drop column not present in table
     new 69b8873  [CARBONDATA-3351] Support Binary Data Type
     new d1b455f  [CARBONDATA-3348] Support alter SORT_COLUMNS property
     new 9a9c791  [CARBONDATA-3353][HOTFIX] Fixed MinMax Based Pruning for Measure column in case of Legacy store
     new 9f23d2c  [HOTFIX] support compact segments with different sort_columns
     new f7cdb47  [CARBONDATA-3359]Fix data mismatch issue for decimal column after delete operation
     new f80a28d  [CARBONDATA-3345] A growing streaming ROW_V1 carbondata file would have some InputSplits ignored
     new 7449c34  [CARBONDATA-3343] Compaction for Range Sort
     new bc80a22  [CARBONDATA-3360]fix NullPointerException in delete and clean files operation
     new f46ad43  [CARBONDATA-3369] Fix issues during concurrent execution of Create table If not exists
     new d8b0ff4  [CARBONDATA-3371] Fix ArrayIndexOutOfBoundsException of compaction after sort_columns modification
     new 7e7792e  [CARBONDATA-3375] [CARBONDATA-3376] Fix GC Overhead limit exceeded issue and partition column as range column issue
     new 4d21b6a  [DOC] Update doc for sort_columns modification
     new 251cbdc  [CARBONDATA-3362] Document update for pagesize table property scenario
     new b42f1ac  [CARBONDATA-3374] Optimize documentation and fix some spell errors.
     new 4abed04  [CARBONDATA-3377] Fix for Null pointer exception in Range Col compaction
     new ea1e86c  [CARBONDATA-3391] Count star output is wrong when BLOCKLET CACHE is enabled

The 22 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../core/constants/CarbonCommonConstants.java      |   25 +
 .../carbondata/core/datamap/DataMapFilter.java     |   89 ++
 .../carbondata/core/datamap/TableDataMap.java      |   91 +-
 .../core/datamap/dev/DataMapFactory.java           |    4 +-
 .../datamap/dev/expr/DataMapExprWrapperImpl.java   |    3 +-
 .../block/SegmentPropertiesAndSchemaHolder.java    |   13 +-
 .../core/datastore/blocklet/EncodedBlocklet.java   |   19 +
 .../datastore/chunk/store/ColumnPageWrapper.java   |    7 +-
 .../safe/AbstractNonDictionaryVectorFiller.java    |    2 +-
 .../SafeVariableLengthDimensionDataChunkStore.java |    2 +-
 .../carbondata/core/datastore/page/ColumnPage.java |   11 +-
 .../core/datastore/page/LazyColumnPage.java        |    2 +
 .../datastore/page/UnsafeVarLengthColumnPage.java  |    7 +-
 .../datastore/page/VarLengthColumnPageBase.java    |    1 +
 .../page/encoding/ColumnPageEncoderMeta.java       |    4 +-
 .../page/encoding/DefaultEncodingFactory.java      |   14 +-
 .../carbondata/core/datastore/row/CarbonRow.java   |    6 +-
 .../core/indexstore/BlockletDetailsFetcher.java    |    4 +-
 .../indexstore/blockletindex/BlockDataMap.java     |   23 +-
 .../blockletindex/BlockletDataMapFactory.java      |    2 +-
 .../blockletindex/BlockletDataRefNode.java         |    6 +-
 .../core/indexstore/schema/SchemaGenerator.java    |    2 +-
 .../core/metadata/blocklet/BlockletInfo.java       |   10 +
 .../ThriftWrapperSchemaConverterImpl.java          |    4 +
 .../datatype/{StringType.java => BinaryType.java}  |   14 +-
 .../core/metadata/datatype/DataType.java           |    2 +-
 .../core/metadata/datatype/DataTypes.java          |    4 +
 .../metadata/datatype/DecimalConverterFactory.java |   55 +-
 .../core/metadata/schema/table/CarbonTable.java    |   44 +-
 .../core/metadata/schema/table/TableInfo.java      |   23 +
 .../metadata/schema/table/TableSchemaBuilder.java  |   11 +
 .../carbondata/core/mutate/CarbonUpdateUtil.java   |   48 +-
 .../scan/executor/impl/AbstractQueryExecutor.java  |   83 +-
 .../executor/impl/QueryExecutorProperties.java     |    5 -
 .../core/scan/executor/util/QueryUtil.java         |    2 +-
 .../core/scan/executor/util/RestructureUtil.java   |   77 +-
 .../core/scan/expression/Expression.java           |   13 +
 .../scan/filter/FilterExpressionProcessor.java     |    5 +-
 .../carbondata/core/scan/filter/FilterUtil.java    |   60 +-
 .../filter/executer/IncludeFilterExecuterImpl.java |    9 +-
 .../RowLevelRangeGrtThanFiterExecuterImpl.java     |   10 +-
 ...LevelRangeGrtrThanEquaToFilterExecuterImpl.java |   10 +-
 ...wLevelRangeLessThanEqualFilterExecuterImpl.java |   10 +-
 .../RowLevelRangeLessThanFilterExecuterImpl.java   |   10 +-
 .../resolver/ConditionalFilterResolverImpl.java    |    2 +-
 .../resolver/RowLevelRangeFilterResolverImpl.java  |   40 +-
 .../core/scan/model/QueryModelBuilder.java         |   20 +-
 .../core/scan/result/BlockletScannedResult.java    |   62 +-
 .../scan/result/impl/FilterQueryScannedResult.java |   20 +-
 .../result/impl/NonFilterQueryScannedResult.java   |   59 +-
 .../iterator/ColumnDriftRawResultIterator.java     |  128 +++
 .../scan/result/iterator/RawResultIterator.java    |   12 +-
 .../result/vector/impl/CarbonColumnVectorImpl.java |    6 +-
 .../core/scan/wrappers/ByteArrayWrapper.java       |    3 +
 .../carbondata/core/util/CarbonMetadataUtil.java   |   15 +-
 .../carbondata/core/util/CarbonProperties.java     |    6 +
 .../apache/carbondata/core/util/CarbonUtil.java    |   56 +-
 .../core/util/DataFileFooterConverterV3.java       |    8 +
 .../apache/carbondata/core/util/DataTypeUtil.java  |   11 +-
 .../carbondata/core/util/path/CarbonTablePath.java |    2 +-
 .../scan/executor/util/RestructureUtilTest.java    |   11 +-
 .../carbondata/core/util/CarbonUtilTest.java       |   46 -
 .../datamap/lucene/LuceneDataMapFactoryBase.java   |    2 +-
 .../lucene/LuceneFineGrainDataMapFactory.java      |    2 +-
 docs/carbon-as-spark-datasource-guide.md           |    3 +-
 docs/ddl-of-carbondata.md                          |   51 +-
 docs/sdk-guide.md                                  |    2 +
 docs/supported-data-types-in-carbondata.md         |    2 +-
 format/src/main/thrift/carbondata.thrift           |    1 +
 format/src/main/thrift/schema.thrift               |    1 +
 .../carbondata/hadoop/api/CarbonInputFormat.java   |   29 +-
 .../hadoop/util/CarbonVectorizedRecordReader.java  |    3 +-
 .../carbondata/presto/impl/CarbonTableReader.java  |    9 +-
 .../cluster/sdv/generated/AlterTableTestCase.scala |   12 +
 integration/spark-common-test/pom.xml              |    1 -
 .../org/apache/carbondata/sdk/util/BinaryUtil.java |   88 ++
 .../src/test/resources/binaryStringNullData.csv    |    4 +
 .../src/test/resources/binarydata.csv              |    3 +
 .../src/test/resources/binarystringdata.csv        |    3 +
 .../src/test/resources/binarystringdata2.csv       |    3 +
 .../src/test/resources/decimalData.csv             |    4 +
 .../resources/jsonFiles/data/allPrimitiveType.json |    3 +-
 .../test/resources/sort_columns/alldatatype1.csv   |   13 +
 .../test/resources/sort_columns/alldatatype2.csv   |   13 +
 .../testsuite/binary/TestBinaryDataType.scala      | 1153 ++++++++++++++++++++
 .../testsuite/dataload/TestLoadDataGeneral.scala   |    4 +-
 ...ryWithColumnMetCacheAndCacheLevelProperty.scala |    2 -
 .../TestAlterTableSortColumnsProperty.scala        |  573 ++++++++++
 .../createTable/TestCreateTableIfNotExists.scala   |   36 +
 .../TestCreateTableWithPageSizeInMb.scala          |   67 ++
 .../TestNonTransactionalCarbonTable.scala          |   49 +
 .../TestNonTransactionalCarbonTableForBinary.scala |  162 +++
 ...TestNonTransactionalCarbonTableJsonWriter.scala |   37 +-
 .../dataload/TestRangeColumnDataLoad.scala         |  734 ++++++++++++-
 .../testsuite/datamap/CGDataMapTestCase.scala      |    4 +-
 .../testsuite/datamap/DataMapWriterSuite.scala     |    2 +-
 .../testsuite/datamap/FGDataMapTestCase.scala      |    4 +-
 .../testsuite/iud/DeleteCarbonTableTestCase.scala  |   17 +
 .../testsuite/sortcolumns/TestSortColumns.scala    |   14 +
 .../StandardPartitionBadRecordLoggerTest.scala     |    2 -
 .../StandardPartitionTableLoadingTestCase.scala    |    8 +
 .../org/apache/carbondata/spark/CarbonOption.scala |    2 +
 .../spark/load/DataLoadProcessBuilderOnSpark.scala |   51 +-
 .../carbondata/spark/rdd/CarbonMergerRDD.scala     |  277 ++++-
 .../carbondata/spark/rdd/CarbonScanRDD.scala       |   17 +-
 .../spark/rdd/NewCarbonDataLoadRDD.scala           |    2 +-
 .../carbondata/spark/rdd/StreamHandoffRDD.scala    |    2 +-
 .../carbondata/spark/util/CarbonScalaUtil.scala    |   21 +-
 .../apache/carbondata/spark/util/CommonUtil.scala  |  112 +-
 .../spark/util/DataTypeConverterUtil.scala         |    3 +
 .../org/apache/spark/CarbonInputMetrics.scala      |    0
 .../apache/spark/DataSkewRangePartitioner.scala    |   26 +-
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala    |   87 +-
 integration/spark-datasource/pom.xml               |    1 -
 .../converter/SparkDataTypeConverterImpl.java      |    5 +-
 .../vectorreader/VectorizedCarbonRecordReader.java |    2 +-
 .../datasources/CarbonSparkDataSourceUtil.scala    |   14 +-
 .../datasources/SparkCarbonFileFormat.scala        |    5 +-
 .../apache/spark/sql/util/SparkTypeConverter.scala |    1 +
 .../org/apache/carbondata/sdk/util/BinaryUtil.java |   89 ++
 .../SparkCarbonDataSourceBinaryTest.scala          |  544 +++++++++
 .../datasource/SparkCarbonDataSourceTest.scala     |   26 +
 .../apache/spark/sql/CarbonCatalystOperators.scala |    4 +-
 .../apache/spark/sql/CarbonDataFrameWriter.scala   |    1 +
 .../spark/sql/CarbonDatasourceHadoopRelation.scala |    1 -
 .../command/cache/CarbonShowCacheCommand.scala     |   91 +-
 .../command/management/CarbonLoadDataCommand.scala |    5 +-
 .../schema/CarbonAlterTableDropColumnCommand.scala |   14 +-
 .../command/table/CarbonCreateTableCommand.scala   |   33 +-
 .../table/CarbonDescribeFormattedCommand.scala     |    9 +-
 .../apache/spark/sql/optimizer/CarbonFilters.scala |   10 +-
 .../org/apache/spark/util/AlterTableUtil.scala     |  126 ++-
 .../sql/CarbonGetTableDetailComandTestCase.scala   |    0
 .../processing/datatypes/ArrayDataType.java        |   15 +
 .../processing/datatypes/GenericDataType.java      |    5 +
 .../processing/datatypes/PrimitiveDataType.java    |    6 +
 .../processing/datatypes/StructDataType.java       |   14 +
 .../converter/impl/BinaryFieldConverterImpl.java   |   78 ++
 .../converter/impl/FieldEncoderFactory.java        |    2 +
 .../loading/sort/SortStepRowHandler.java           |    9 +
 .../merger/CarbonCompactionExecutor.java           |   43 +-
 .../processing/merger/CarbonCompactionUtil.java    |  179 ++-
 .../merger/RowResultMergerProcessor.java           |    6 +-
 .../store/CarbonFactDataHandlerColumnar.java       |  191 +++-
 .../store/CarbonFactDataHandlerModel.java          |  106 +-
 .../carbondata/processing/store/TablePage.java     |   46 +-
 .../writer/v3/CarbonFactDataWriterImplV3.java      |    7 +
 store/sdk/pom.xml                                  |    3 +-
 .../carbondata/sdk/file/CSVCarbonWriter.java       |    2 +-
 .../apache/carbondata/sdk/file/CarbonReader.java   |    1 -
 .../carbondata/sdk/file/CarbonWriterBuilder.java   |   92 +-
 .../java/org/apache/carbondata/sdk/file/Field.java |    4 +
 .../carbondata/sdk/file/JsonCarbonWriter.java      |    3 +-
 .../org/apache/carbondata/sdk/file/RowUtil.java    |   11 +
 .../apache/carbondata/sdk/file/utils/SDKUtil.java  |   79 ++
 .../carbondata/sdk/file/CSVCarbonWriterTest.java   |   16 +-
 .../carbondata/sdk/file/CarbonReaderTest.java      |  186 +++-
 .../org/apache/carbondata/sdk/file/ImageTest.java  |  818 ++++++++++++++
 .../org/apache/carbondata/util/BinaryUtil.java     |  126 +++
 .../src/test/resources/image/carbondatalogo.jpg    |  Bin 0 -> 59099 bytes
 .../image/flowers/10686568196_b1915544a8.jpg       |  Bin 0 -> 97920 bytes
 .../image/flowers/10686568196_b1915544a8.txt       |    1 +
 .../image/flowers/10712722853_5632165b04.jpg       |  Bin 0 -> 63389 bytes
 .../image/flowers/10712722853_5632165b04.txt       |    1 +
 .../flowers/subfolder/10841136265_af473efc60.jpg   |  Bin 0 -> 62144 bytes
 .../flowers/subfolder/10841136265_af473efc60.txt   |    1 +
 .../src/test/resources/image/voc/2007_000027.jpg   |  Bin 0 -> 145493 bytes
 .../src/test/resources/image/voc/2007_000027.xml   |   63 ++
 .../src/test/resources/image/voc/2007_000032.jpg   |  Bin 0 -> 54757 bytes
 .../src/test/resources/image/voc/2007_000032.xml   |   63 ++
 .../src/test/resources/image/voc/2007_000033.jpg   |  Bin 0 -> 71205 bytes
 .../src/test/resources/image/voc/2007_000033.xml   |   51 +
 .../src/test/resources/image/voc/2007_000039.jpg   |  Bin 0 -> 64668 bytes
 .../src/test/resources/image/voc/2007_000039.xml   |   27 +
 .../src/test/resources/image/voc/2009_001444.jpg   |  Bin 0 -> 677151 bytes
 .../src/test/resources/image/voc/2009_001444.xml   |   28 +
 .../image/vocForSegmentationClass/2007_000032.jpg  |  Bin 0 -> 54757 bytes
 .../image/vocForSegmentationClass/2007_000032.png  |  Bin 0 -> 2334 bytes
 .../image/vocForSegmentationClass/2007_000033.jpg  |  Bin 0 -> 71205 bytes
 .../image/vocForSegmentationClass/2007_000033.png  |  Bin 0 -> 2814 bytes
 .../image/vocForSegmentationClass/2007_000042.jpg  |  Bin 0 -> 82847 bytes
 .../image/vocForSegmentationClass/2007_000042.png  |  Bin 0 -> 3620 bytes
 .../org/apache/carbondata/tool/CarbonCliTest.java  |   61 +-
 183 files changed, 7482 insertions(+), 916 deletions(-)
 create mode 100644 core/src/main/java/org/apache/carbondata/core/datamap/DataMapFilter.java
 copy core/src/main/java/org/apache/carbondata/core/metadata/datatype/{StringType.java => BinaryType.java} (81%)
 create mode 100644 core/src/main/java/org/apache/carbondata/core/scan/result/iterator/ColumnDriftRawResultIterator.java
 create mode 100644 integration/spark-common-test/src/test/java/org/apache/carbondata/sdk/util/BinaryUtil.java
 create mode 100644 integration/spark-common-test/src/test/resources/binaryStringNullData.csv
 create mode 100644 integration/spark-common-test/src/test/resources/binarydata.csv
 create mode 100644 integration/spark-common-test/src/test/resources/binarystringdata.csv
 create mode 100644 integration/spark-common-test/src/test/resources/binarystringdata2.csv
 create mode 100644 integration/spark-common-test/src/test/resources/decimalData.csv
 create mode 100644 integration/spark-common-test/src/test/resources/sort_columns/alldatatype1.csv
 create mode 100644 integration/spark-common-test/src/test/resources/sort_columns/alldatatype2.csv
 create mode 100644 integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/binary/TestBinaryDataType.scala
 create mode 100644 integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/alterTable/TestAlterTableSortColumnsProperty.scala
 create mode 100644 integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithPageSizeInMb.scala
 create mode 100644 integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableForBinary.scala
 rename integration/{spark2 => spark-common}/src/main/scala/org/apache/spark/CarbonInputMetrics.scala (100%)
 create mode 100644 integration/spark-datasource/src/test/java/org/apache/carbondata/sdk/util/BinaryUtil.java
 create mode 100644 integration/spark-datasource/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceBinaryTest.scala
 copy processing/src/test/resources/schemas/modifiedTime.mdt => integration/spark2/src/test/scala/org/apache/spark/sql/CarbonGetTableDetailComandTestCase.scala (100%)
 create mode 100644 processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/BinaryFieldConverterImpl.java
 create mode 100644 store/sdk/src/main/java/org/apache/carbondata/sdk/file/utils/SDKUtil.java
 create mode 100644 store/sdk/src/test/java/org/apache/carbondata/sdk/file/ImageTest.java
 create mode 100644 store/sdk/src/test/java/org/apache/carbondata/util/BinaryUtil.java
 create mode 100644 store/sdk/src/test/resources/image/carbondatalogo.jpg
 create mode 100644 store/sdk/src/test/resources/image/flowers/10686568196_b1915544a8.jpg
 create mode 100644 store/sdk/src/test/resources/image/flowers/10686568196_b1915544a8.txt
 create mode 100644 store/sdk/src/test/resources/image/flowers/10712722853_5632165b04.jpg
 create mode 100644 store/sdk/src/test/resources/image/flowers/10712722853_5632165b04.txt
 create mode 100644 store/sdk/src/test/resources/image/flowers/subfolder/10841136265_af473efc60.jpg
 create mode 100644 store/sdk/src/test/resources/image/flowers/subfolder/10841136265_af473efc60.txt
 create mode 100755 store/sdk/src/test/resources/image/voc/2007_000027.jpg
 create mode 100755 store/sdk/src/test/resources/image/voc/2007_000027.xml
 create mode 100755 store/sdk/src/test/resources/image/voc/2007_000032.jpg
 create mode 100755 store/sdk/src/test/resources/image/voc/2007_000032.xml
 create mode 100755 store/sdk/src/test/resources/image/voc/2007_000033.jpg
 create mode 100755 store/sdk/src/test/resources/image/voc/2007_000033.xml
 create mode 100755 store/sdk/src/test/resources/image/voc/2007_000039.jpg
 create mode 100755 store/sdk/src/test/resources/image/voc/2007_000039.xml
 create mode 100755 store/sdk/src/test/resources/image/voc/2009_001444.jpg
 create mode 100755 store/sdk/src/test/resources/image/voc/2009_001444.xml
 create mode 100755 store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000032.jpg
 create mode 100755 store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000032.png
 create mode 100755 store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000033.jpg
 create mode 100755 store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000033.png
 create mode 100755 store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000042.jpg
 create mode 100755 store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000042.png


[carbondata] 04/22: [CARBONDATA-3334] fixed multiple segment file issue for partition

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit d1bb3a0e1d9da21ba8493f4845f5c404b8eae56b
Author: kunal642 <ku...@gmail.com>
AuthorDate: Thu Mar 28 14:33:45 2019 +0530

    [CARBONDATA-3334] fixed multiple segment file issue for partition
    
    Problem:
    During a partition load, while writing merge index files, the FactTimestamp in the load model is changed to the current timestamp, due to which a new segment file with a merge-index entry is written.
    
    Solution:
    Set a new timestamp only if the FactTimestamp in the load model is 0L (meaning nothing has been set yet).
    
    This closes #3167
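    In essence, the fact timestamp is now assigned at most once per load and reused by the
    merge-index writer, instead of being regenerated before auto compaction. A minimal,
    self-contained sketch of that guard (LoadModel and ensureFactTimestamp are hypothetical
    stand-ins; the actual change is in CarbonLoadDataCommand.scala, shown in the diff below):

        // Hypothetical stand-in for CarbonLoadModel: 0L means no fact timestamp set yet.
        case class LoadModel(var factTimeStamp: Long = 0L)

        // Mirror of the guarded assignment: an IUD update supplies its own timestamp,
        // otherwise a timestamp is generated only when none has been set for this load.
        def ensureFactTimestamp(model: LoadModel, updatedTimeStamp: Option[Long]): Unit =
          updatedTimeStamp match {
            case Some(ts)                          => model.factTimeStamp = ts
            case None if model.factTimeStamp == 0L => model.factTimeStamp = System.currentTimeMillis()
            case _                                 => () // already set (e.g. partition flow): keep it
          }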
---
 .../standardpartition/StandardPartitionTableLoadingTestCase.scala | 8 ++++++++
 .../sql/execution/command/management/CarbonLoadDataCommand.scala  | 3 ++-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala
index 059dd2b..bee118a 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableLoadingTestCase.scala
@@ -496,6 +496,13 @@ class StandardPartitionTableLoadingTestCase extends QueryTest with BeforeAndAfte
     }
   }
 
+  test("test number of segment files should not be more than 1 per segment") {
+    sql("drop table if exists new_par")
+    sql("create table new_par(a string) partitioned by ( b int) stored by 'carbondata'")
+    sql("insert into new_par select 'k',1")
+    assert(new File(s"$storeLocation/new_par/Metadata/segments/").listFiles().size == 1)
+  }
+
 
 
   def restoreData(dblocation: String, tableName: String) = {
@@ -556,6 +563,7 @@ class StandardPartitionTableLoadingTestCase extends QueryTest with BeforeAndAfte
     sql("drop table if exists emp1")
     sql("drop table if exists restorepartition")
     sql("drop table if exists casesensitivepartition")
+    sql("drop table if exists new_par")
   }
 
 }
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
index 0c8a1df..b4ef1f0 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
@@ -805,6 +805,8 @@ case class CarbonLoadDataCommand(
       }
       if (updateModel.isDefined) {
         carbonLoadModel.setFactTimeStamp(updateModel.get.updatedTimeStamp)
+      } else if (carbonLoadModel.getFactTimeStamp == 0L) {
+        carbonLoadModel.setFactTimeStamp(System.currentTimeMillis())
       }
       // Create and ddd the segment to the tablestatus.
       CarbonLoaderUtil.readAndUpdateLoadProgressInTableMeta(carbonLoadModel, isOverwriteTable)
@@ -869,7 +871,6 @@ case class CarbonLoadDataCommand(
       }
     }
     try {
-      carbonLoadModel.setFactTimeStamp(System.currentTimeMillis())
       val compactedSegments = new util.ArrayList[String]()
       // Trigger auto compaction
       CarbonDataRDDFactory.handleSegmentMerging(


[carbondata] 16/22: [CARBONDATA-3371] Fix ArrayIndexOutOfBoundsException of compaction after sort_columns modification

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit d8b0ff47754507ed49e4ad859582678f785ddca9
Author: QiangCai <qi...@qq.com>
AuthorDate: Sun May 5 15:22:23 2019 +0800

    [CARBONDATA-3371] Fix ArrayIndexOutOfBoundsException of compaction after sort_columns modification
    
    Modification:
    
    1. SegmentPropertiesWrapper should check the column order across segments, because a
       sort_columns modification can change the column order.
    2. The dictionaryColumnChunkIndex of blockExecutionInfo should keep the projection order
       for compaction.
    3. If column drift happened, the measure should be converted back to a dimension in
       RawResultIterator.
    
    This closes #3201
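    The core of the fix is the remapping done while converting a raw row: a former measure
    that has drifted into a no-dictionary dimension still arrives in a measure slot of the
    raw row and must be copied into the dimension byte arrays. A simplified, self-contained
    sketch of that idea (the function, parameter names and toBytes converter are
    illustrative; the real logic is ColumnDriftRawResultIterator.convertRow in the diff
    below):

        // rawRow(0) holds the dimension wrapper, measures follow from index 1 onwards.
        // isColumnDrift(i) : true if no-dictionary dimension i used to be a measure
        // noDictMap(i)     : index of the value for dimension i, either in the old
        //                    no-dictionary byte arrays or in the measure part of rawRow
        def remapNoDictionary(rawRow: Array[Any],
            oldNoDictBytes: Array[Array[Byte]],
            isColumnDrift: Array[Boolean],
            noDictMap: Array[Int],
            toBytes: Any => Array[Byte]): Array[Array[Byte]] = {
          val result = new Array[Array[Byte]](isColumnDrift.length)
          for (i <- result.indices) {
            result(i) =
              if (isColumnDrift(i)) toBytes(rawRow(noDictMap(i))) // value still sits in a measure slot
              else oldNoDictBytes(noDictMap(i))                   // ordinary no-dictionary dimension
          }
          result
        }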
---
 .../block/SegmentPropertiesAndSchemaHolder.java    |  13 +--
 .../scan/executor/impl/AbstractQueryExecutor.java  |   6 +-
 .../core/scan/executor/util/QueryUtil.java         |   2 +-
 .../iterator/ColumnDriftRawResultIterator.java     | 128 +++++++++++++++++++++
 .../scan/result/iterator/RawResultIterator.java    |  12 +-
 .../core/scan/wrappers/ByteArrayWrapper.java       |   3 +
 .../TestAlterTableSortColumnsProperty.scala        |  92 ++++++++++-----
 .../carbondata/spark/rdd/StreamHandoffRDD.scala    |   2 +-
 .../merger/CarbonCompactionExecutor.java           |  20 +++-
 9 files changed, 225 insertions(+), 53 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java
index 34ce5d0..f2f2d8c 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/block/SegmentPropertiesAndSchemaHolder.java
@@ -346,15 +346,9 @@ public class SegmentPropertiesAndSchemaHolder {
       if (obj1 == null || obj2 == null || (obj1.size() != obj2.size())) {
         return false;
       }
-      List<ColumnSchema> clonedObj1 = new ArrayList<>(obj1);
-      List<ColumnSchema> clonedObj2 = new ArrayList<>(obj2);
-      clonedObj1.addAll(obj1);
-      clonedObj2.addAll(obj2);
-      sortList(clonedObj1);
-      sortList(clonedObj2);
       boolean exists = true;
       for (int i = 0; i < obj1.size(); i++) {
-        if (!clonedObj1.get(i).equalsWithStrictCheck(clonedObj2.get(i))) {
+        if (!obj1.get(i).equalsWithStrictCheck(obj2.get(i))) {
           exists = false;
           break;
         }
@@ -372,11 +366,14 @@ public class SegmentPropertiesAndSchemaHolder {
 
     @Override public int hashCode() {
       int allColumnsHashCode = 0;
+      // check column order
+      StringBuilder builder = new StringBuilder();
       for (ColumnSchema columnSchema: columnsInTable) {
         allColumnsHashCode = allColumnsHashCode + columnSchema.strictHashCode();
+        builder.append(columnSchema.getColumnUniqueId()).append(",");
       }
       return carbonTable.getAbsoluteTableIdentifier().hashCode() + allColumnsHashCode + Arrays
-          .hashCode(columnCardinality);
+          .hashCode(columnCardinality) + builder.toString().hashCode();
     }
 
     public AbsoluteTableIdentifier getTableIdentifier() {
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
index f06f5c3..6c048f3 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
@@ -605,7 +605,7 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
     // setting the size of fixed key column (dictionary column)
     blockExecutionInfo
         .setFixedLengthKeySize(getKeySize(projectDimensions, segmentProperties));
-    Set<Integer> dictionaryColumnChunkIndex = new HashSet<Integer>();
+    List<Integer> dictionaryColumnChunkIndex = new ArrayList<Integer>();
     List<Integer> noDictionaryColumnChunkIndex = new ArrayList<Integer>();
     // get the block index to be read from file for query dimension
     // for both dictionary columns and no dictionary columns
@@ -616,7 +616,9 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
         dictionaryColumnChunkIndex.toArray(new Integer[dictionaryColumnChunkIndex.size()]));
     // need to sort the dictionary column as for all dimension
     // column key will be filled based on key order
-    Arrays.sort(queryDictionaryColumnChunkIndexes);
+    if (!queryModel.isForcedDetailRawQuery()) {
+      Arrays.sort(queryDictionaryColumnChunkIndexes);
+    }
     blockExecutionInfo.setDictionaryColumnChunkIndex(queryDictionaryColumnChunkIndexes);
     // setting the no dictionary column block indexes
     blockExecutionInfo.setNoDictionaryColumnChunkIndexes(ArrayUtils.toPrimitive(
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
index 49157f9..95fbe66 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/QueryUtil.java
@@ -509,7 +509,7 @@ public class QueryUtil {
   public static void fillQueryDimensionChunkIndexes(
       List<ProjectionDimension> projectDimensions,
       Map<Integer, Integer> columnOrdinalToChunkIndexMapping,
-      Set<Integer> dictionaryDimensionChunkIndex,
+      List<Integer> dictionaryDimensionChunkIndex,
       List<Integer> noDictionaryDimensionChunkIndex) {
     for (ProjectionDimension queryDimension : projectDimensions) {
       if (CarbonUtil.hasEncoding(queryDimension.getDimension().getEncoder(), Encoding.DICTIONARY)
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/ColumnDriftRawResultIterator.java b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/ColumnDriftRawResultIterator.java
new file mode 100644
index 0000000..d3fed76
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/ColumnDriftRawResultIterator.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.scan.result.iterator;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.common.CarbonIterator;
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.keygenerator.KeyGenException;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+import org.apache.carbondata.core.metadata.encoder.Encoding;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
+import org.apache.carbondata.core.scan.executor.util.RestructureUtil;
+import org.apache.carbondata.core.scan.result.RowBatch;
+import org.apache.carbondata.core.scan.wrappers.ByteArrayWrapper;
+import org.apache.carbondata.core.util.DataTypeUtil;
+
+import org.apache.log4j.Logger;
+
+/**
+ * This is a wrapper iterator over the detail raw query iterator.
+ * This iterator will handle the processing of the raw rows.
+ * This will handle the batch results and will iterate on the batches and give single row.
+ */
+public class ColumnDriftRawResultIterator extends RawResultIterator {
+
+  // column reorder for no-dictionary column
+  private int noDictCount;
+  private int[] noDictMap;
+  // column drift
+  private boolean[] isColumnDrift;
+  private int measureCount;
+  private DataType[] measureDataTypes;
+
+  /**
+   * LOGGER
+   */
+  private static final Logger LOGGER =
+      LogServiceFactory.getLogService(ColumnDriftRawResultIterator.class.getName());
+
+  public ColumnDriftRawResultIterator(CarbonIterator<RowBatch> detailRawQueryResultIterator,
+      SegmentProperties sourceSegProperties, SegmentProperties destinationSegProperties) {
+    super(detailRawQueryResultIterator, sourceSegProperties, destinationSegProperties, false);
+    initForColumnDrift();
+    init();
+  }
+
+  private void initForColumnDrift() {
+    List<CarbonDimension> noDictDims =
+        new ArrayList<>(destinationSegProperties.getDimensions().size());
+    for (CarbonDimension dimension : destinationSegProperties.getDimensions()) {
+      if (dimension.getNumberOfChild() == 0) {
+        if (!dimension.hasEncoding(Encoding.DICTIONARY)) {
+          noDictDims.add(dimension);
+        }
+      }
+    }
+    measureCount = destinationSegProperties.getMeasures().size();
+    noDictCount = noDictDims.size();
+    isColumnDrift = new boolean[noDictCount];
+    noDictMap = new int[noDictCount];
+    measureDataTypes = new DataType[noDictCount];
+    List<CarbonMeasure> sourceMeasures = sourceSegProperties.getMeasures();
+    int tableMeasureCount = sourceMeasures.size();
+    for (int i = 0; i < noDictCount; i++) {
+      for (int j = 0; j < tableMeasureCount; j++) {
+        if (RestructureUtil.isColumnMatches(true, noDictDims.get(i), sourceMeasures.get(j))) {
+          isColumnDrift[i] = true;
+          measureDataTypes[i] = sourceMeasures.get(j).getDataType();
+          break;
+        }
+      }
+    }
+    int noDictIndex = 0;
+    // the column drift are at the end of measures
+    int measureIndex = measureCount + 1;
+    for (int i = 0; i < noDictCount; i++) {
+      if (isColumnDrift[i]) {
+        noDictMap[i] = measureIndex++;
+      } else {
+        noDictMap[i] = noDictIndex++;
+      }
+    }
+  }
+
+  @Override
+  protected Object[] convertRow(Object[] rawRow) throws KeyGenException {
+    super.convertRow(rawRow);
+    ByteArrayWrapper dimObject = (ByteArrayWrapper) rawRow[0];
+    // need move measure to dimension and return new row by current schema
+    byte[][] noDicts = dimObject.getNoDictionaryKeys();
+    byte[][] newNoDicts = new byte[noDictCount][];
+    for (int i = 0; i < noDictCount; i++) {
+      if (isColumnDrift[i]) {
+        newNoDicts[i] = DataTypeUtil
+            .getBytesDataDataTypeForNoDictionaryColumn(rawRow[noDictMap[i]], measureDataTypes[i]);
+      } else {
+        newNoDicts[i] = noDicts[noDictMap[i]];
+      }
+    }
+    ByteArrayWrapper newWrapper = new ByteArrayWrapper();
+    newWrapper.setDictionaryKey(dimObject.getDictionaryKey());
+    newWrapper.setNoDictionaryKeys(newNoDicts);
+    newWrapper.setComplexTypesKeys(dimObject.getComplexTypesKeys());
+    newWrapper.setImplicitColumnByteArray(dimObject.getImplicitColumnByteArray());
+    Object[] finalRawRow = new Object[1 + measureCount];
+    finalRawRow[0] = newWrapper;
+    System.arraycopy(rawRow, 1, finalRawRow, 1, measureCount);
+    return finalRawRow;
+  }
+}
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java
index 1febb0b..4d471b6 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/iterator/RawResultIterator.java
@@ -41,9 +41,9 @@ import org.apache.log4j.Logger;
  */
 public class RawResultIterator extends CarbonIterator<Object[]> {
 
-  private final SegmentProperties sourceSegProperties;
+  protected final SegmentProperties sourceSegProperties;
 
-  private final SegmentProperties destinationSegProperties;
+  protected final SegmentProperties destinationSegProperties;
   /**
    * Iterator of the Batch raw result.
    */
@@ -66,18 +66,18 @@ public class RawResultIterator extends CarbonIterator<Object[]> {
 
   public RawResultIterator(CarbonIterator<RowBatch> detailRawQueryResultIterator,
       SegmentProperties sourceSegProperties, SegmentProperties destinationSegProperties,
-      boolean isStreamingHandoff) {
+      boolean init) {
     this.detailRawQueryResultIterator = detailRawQueryResultIterator;
     this.sourceSegProperties = sourceSegProperties;
     this.destinationSegProperties = destinationSegProperties;
     this.executorService = Executors.newFixedThreadPool(1);
 
-    if (!isStreamingHandoff) {
+    if (init) {
       init();
     }
   }
 
-  private void init() {
+  protected void init() {
     this.prefetchEnabled = CarbonProperties.getInstance().getProperty(
         CarbonCommonConstants.CARBON_COMPACTION_PREFETCH_ENABLE,
         CarbonCommonConstants.CARBON_COMPACTION_PREFETCH_ENABLE_DEFAULT).equalsIgnoreCase("true");
@@ -193,7 +193,7 @@ public class RawResultIterator extends CarbonIterator<Object[]> {
     return this.currentRawRow;
   }
 
-  private Object[] convertRow(Object[] rawRow) throws KeyGenException {
+  protected Object[] convertRow(Object[] rawRow) throws KeyGenException {
     byte[] dims = ((ByteArrayWrapper) rawRow[0]).getDictionaryKey();
     long[] keyArray = sourceSegProperties.getDimensionKeyGenerator().getKeyArray(dims);
     byte[] covertedBytes =
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/wrappers/ByteArrayWrapper.java b/core/src/main/java/org/apache/carbondata/core/scan/wrappers/ByteArrayWrapper.java
index 1b903f7..253c21c 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/wrappers/ByteArrayWrapper.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/wrappers/ByteArrayWrapper.java
@@ -218,4 +218,7 @@ public class ByteArrayWrapper implements Comparable<ByteArrayWrapper>, Serializa
     this.implicitColumnByteArray = implicitColumnByteArray;
   }
 
+  public byte[] getImplicitColumnByteArray() {
+    return implicitColumnByteArray;
+  }
 }
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/alterTable/TestAlterTableSortColumnsProperty.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/alterTable/TestAlterTableSortColumnsProperty.scala
index bf4bae6..3e23e91 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/alterTable/TestAlterTableSortColumnsProperty.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/alterTable/TestAlterTableSortColumnsProperty.scala
@@ -32,7 +32,7 @@ class TestAlterTableSortColumnsProperty extends QueryTest with BeforeAndAfterAll
     CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
       "yyyy-MM-dd HH:mm:ss")
     CarbonProperties.getInstance()
-      .addProperty(CarbonCommonConstants.ENABLE_QUERY_STATISTICS, "true")
+      .addProperty(CarbonCommonConstants.ENABLE_QUERY_STATISTICS, "false")
     dropTable()
     prepareTable()
   }
@@ -359,6 +359,11 @@ class TestAlterTableSortColumnsProperty extends QueryTest with BeforeAndAfterAll
     loadData(tableName, baseTableName)
     checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
 
+    // alter table to change SORT_SCOPE and SORT_COLUMNS
+    sql(s"alter table $tableName set tblproperties('sort_scope'='global_sort', 'sort_columns'='charField')")
+    loadData(tableName, baseTableName)
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+
     // alter table to local_sort with new SORT_COLUMNS
     sql(s"alter table $tableName set tblproperties('sort_scope'='local_sort', 'sort_columns'='timestampField, intField, stringField')")
     loadData(tableName, baseTableName)
@@ -370,40 +375,66 @@ class TestAlterTableSortColumnsProperty extends QueryTest with BeforeAndAfterAll
     checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
 
     // alter table to change SORT_COLUMNS
-    sql(s"alter table $tableName set tblproperties('sort_columns'='smallIntField, stringField, intField')")
-    loadData(tableName, baseTableName)
-    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
-
-    // alter table to change SORT_SCOPE and SORT_COLUMNS
-    sql(s"alter table $tableName set tblproperties('sort_scope'='global_sort', 'sort_columns'='charField, bigIntField, smallIntField')")
+    sql(s"alter table $tableName set tblproperties('sort_columns'='intField, stringField')")
     loadData(tableName, baseTableName)
     checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
 
     // alter table to change SORT_SCOPE
-    sql(s"alter table $tableName set tblproperties('sort_scope'='local_sort', 'sort_columns'='charField, bigIntField, smallIntField')")
+    sql(s"alter table $tableName set tblproperties('sort_scope'='local_sort', 'sort_columns'='charField, smallIntField')")
     loadData(tableName, baseTableName)
     checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
 
+    // set input segments
+    (0 to 5).foreach { segment =>
+      sql(s"set carbon.input.segments.default.$tableName=$segment").collect()
+      sql(s"set carbon.input.segments.default.$baseTableName=$segment").collect()
+      checkAnswer(sql(s"select count(*) from $tableName"), sql(s"select count(*) from $baseTableName"))
+      checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+      checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField"))
+      checkAnswer(sql(s"select * from $tableName where intField >= 2 order by floatField"), sql(s"select * from $baseTableName where intField >= 2 order by floatField"))
+      checkAnswer(sql(s"select * from $tableName where smallIntField = 2 or intField >= 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 or intField >= 2 order by floatField"))
+    }
+    sql(s"set carbon.input.segments.default.$tableName=*").collect()
+    sql(s"set carbon.input.segments.default.$baseTableName=*").collect()
+
     // query
     checkAnswer(sql(s"select count(*) from $tableName"), sql(s"select count(*) from $baseTableName"))
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
     checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField"))
     checkAnswer(sql(s"select * from $tableName where intField >= 2 order by floatField"), sql(s"select * from $baseTableName where intField >= 2 order by floatField"))
     checkAnswer(sql(s"select * from $tableName where smallIntField = 2 or intField >= 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 or intField >= 2 order by floatField"))
 
-    // set input segments
-    (0 to 5).foreach { segment =>
-      sql(s"set carbon.input.segments.default.$tableName=$segment").show(100, false)
-      sql(s"set carbon.input.segments.default.$baseTableName=$segment").show(100, false)
-      checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField"))
-    }
-    sql(s"set carbon.input.segments.default.$tableName=*").show(100, false)
-    sql(s"set carbon.input.segments.default.$baseTableName=*").show(100, false)
-
     // delete
     sql(s"delete from $tableName where smallIntField = 2")
     sql(s"delete from $baseTableName where smallIntField = 2")
     checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
 
+    // compaction for column drift
+    sql(s"alter table $tableName set tblproperties('sort_scope'='local_sort', 'sort_columns'='charField, intField')")
+    // [Segment info]:
+    //   | sorted | dimension order(sort_columns is in [])                                                     | measure order
+    // -------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+    // 0 | false  | timestampField, dateField, stringField, varcharField, charField                            | smallIntField, intField, bigIntField, floatField, doubleField
+    // 1 | true   | [charField], timestampField, dateField, stringField, varcharField                          | smallIntField, intField, bigIntField, floatField, doubleField
+    // 2 | false  | [timestampField, intField, stringField], charField, dateField, varcharField                | smallIntField, bigIntField, floatField, doubleField
+    // 3 | false  | [stringField, intField, timestampField], charField, dateField, varcharField                | smallIntField, bigIntField, floatField, doubleField
+    // 4 | false  | [intField, stringField], timestampField, charField, dateField, varcharField                | smallIntField, bigIntField, floatField, doubleField
+    // 5 | true   | [charField, smallIntField], intField, stringField, timestampField, dateField, varcharField | bigIntField, floatField, doubleField
+    // Column drift happened, intField and smallIntField became dimension.
+    // The order of columns also changed.
+    //
+    // [Table info]:
+    //            | dimension order(sort_columns is in [])                                                     | measure order
+    // --------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+    // table      | [charField], smallIntField, intField, stringField, timestampField, dateField, varcharField | bigIntField, floatField, doubleField
+    sql(s"alter table $tableName compact 'minor'")
+    sql(s"alter table $baseTableName compact 'minor'")
+    checkAnswer(sql(s"select count(*) from $tableName"), sql(s"select count(*) from $baseTableName"))
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where intField >= 2 order by floatField"), sql(s"select * from $baseTableName where intField >= 2 order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 or intField >= 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 or intField >= 2 order by floatField"))
+
     sql(s"delete from $tableName")
     checkAnswer(sql(s"select count(*) from $tableName"), Seq(Row(0)))
     sql(s"delete from $baseTableName")
@@ -426,8 +457,8 @@ class TestAlterTableSortColumnsProperty extends QueryTest with BeforeAndAfterAll
     checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
 
     // update
-    sql(s"update $tableName set (smallIntField, intField, bigIntField, floatField, doubleField) = (smallIntField + 3, intField + 3, bigIntField + 3, floatField + 3, doubleField + 3) where smallIntField = 2").show()
-    sql(s"update $baseTableName set (smallIntField, intField, bigIntField, floatField, doubleField) = (smallIntField + 3, intField + 3, bigIntField + 3, floatField + 3, doubleField + 3) where smallIntField = 2").show()
+    sql(s"update $tableName set (smallIntField, intField, bigIntField, floatField, doubleField) = (smallIntField + 3, intField + 3, bigIntField + 3, floatField + 3, doubleField + 3) where smallIntField = 2").collect()
+    sql(s"update $baseTableName set (smallIntField, intField, bigIntField, floatField, doubleField) = (smallIntField + 3, intField + 3, bigIntField + 3, floatField + 3, doubleField + 3) where smallIntField = 2").collect()
     checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
 
     // query
@@ -438,21 +469,20 @@ class TestAlterTableSortColumnsProperty extends QueryTest with BeforeAndAfterAll
 
     // set input segments
     (6 to 11).foreach { segment =>
-      sql(s"set carbon.input.segments.default.$tableName=$segment").show(100, false)
-      sql(s"set carbon.input.segments.default.$baseTableName=$segment").show(100, false)
+      sql(s"set carbon.input.segments.default.$tableName=$segment").collect()
+      sql(s"set carbon.input.segments.default.$baseTableName=$segment").collect()
       checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField"))
     }
-    sql(s"set carbon.input.segments.default.$tableName=*").show(100, false)
-    sql(s"set carbon.input.segments.default.$baseTableName=*").show(100, false)
+    sql(s"set carbon.input.segments.default.$tableName=*").collect()
+    sql(s"set carbon.input.segments.default.$baseTableName=*").collect()
 
-    // compaction
-    sql(s"show segments for table $tableName").show(100, false)
-    sql(s"show segments for table $baseTableName").show(100, false)
+    // no_sort compaction flow for column drift
+    sql(s"alter table $tableName set tblproperties('sort_scope'='no_sort', 'sort_columns'='charField, intField')")
+    // sort_scope become no_sort
     sql(s"alter table $tableName compact 'minor'")
     sql(s"alter table $baseTableName compact 'minor'")
-    sql(s"show segments for table $tableName").show(100, false)
-    sql(s"show segments for table $baseTableName").show(100, false)
     checkAnswer(sql(s"select count(*) from $tableName"), sql(s"select count(*) from $baseTableName"))
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
     checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField"))
     checkAnswer(sql(s"select * from $tableName where intField >= 2 order by floatField"), sql(s"select * from $baseTableName where intField >= 2 order by floatField"))
     checkAnswer(sql(s"select * from $tableName where smallIntField = 2 or intField >= 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 or intField >= 2 order by floatField"))
@@ -508,6 +538,8 @@ class TestAlterTableSortColumnsProperty extends QueryTest with BeforeAndAfterAll
   }
 
   test("bloom filter") {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_QUERY_STATISTICS, "true")
     val tableName = "alter_sc_bloom"
     val dataMapName = "alter_sc_bloom_dm1"
     val baseTableName = "alter_sc_bloom_base"
@@ -523,18 +555,18 @@ class TestAlterTableSortColumnsProperty extends QueryTest with BeforeAndAfterAll
   }
 
   test("pre-aggregate") {
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_QUERY_STATISTICS, "true")
     val tableName = "alter_sc_agg"
     val dataMapName = "alter_sc_agg_dm1"
     val baseTableName = "alter_sc_agg_base"
     loadData(tableName, baseTableName)
-    sql(s"SHOW DATAMAP ON TABLE $tableName").show(100, false)
     checkExistence(sql(s"SHOW DATAMAP ON TABLE $tableName"), true, "preaggregate", dataMapName)
     checkExistence(sql(s"EXPLAIN select stringField,sum(intField) as sum from $tableName where stringField = 'abc2' group by stringField"), true, "preaggregate", dataMapName)
     checkAnswer(sql(s"select stringField,sum(intField) as sum from $tableName where stringField = 'abc2' group by stringField"), sql(s"select stringField,sum(intField) as sum from $baseTableName where stringField = 'abc2' group by stringField"))
 
     sql(s"alter table $tableName set tblproperties('sort_scope'='global_sort', 'sort_columns'='smallIntField, charField')")
     loadData(tableName, baseTableName)
-    sql(s"EXPLAIN select stringField,max(intField) as sum from $tableName where stringField = 'abc2' group by stringField").show(100, false)
     checkExistence(sql(s"EXPLAIN select stringField,max(intField) as sum from $tableName where stringField = 'abc2' group by stringField"), true, "preaggregate", dataMapName)
     checkAnswer(sql(s"select stringField,max(intField) as sum from $tableName where stringField = 'abc2' group by stringField"), sql(s"select stringField,max(intField) as sum from $baseTableName where stringField = 'abc2' group by stringField"))
   }
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala
index 31417bc..d754781 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/StreamHandoffRDD.scala
@@ -74,7 +74,7 @@ class HandoffPartition(
  */
 class StreamingRawResultIterator(
     recordReader: RecordReader[Void, Any]
-) extends RawResultIterator(null, null, null, true) {
+) extends RawResultIterator(null, null, null, false) {
 
   override def hasNext: Boolean = {
     recordReader.nextKeyValue()
diff --git a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
index d9c7be7..28f1cf4 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
@@ -37,10 +37,12 @@ import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.scan.executor.QueryExecutor;
 import org.apache.carbondata.core.scan.executor.QueryExecutorFactory;
 import org.apache.carbondata.core.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.core.scan.executor.util.RestructureUtil;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.model.QueryModel;
 import org.apache.carbondata.core.scan.model.QueryModelBuilder;
 import org.apache.carbondata.core.scan.result.RowBatch;
+import org.apache.carbondata.core.scan.result.iterator.ColumnDriftRawResultIterator;
 import org.apache.carbondata.core.scan.result.iterator.RawResultIterator;
 import org.apache.carbondata.core.scan.wrappers.IntArrayWrapper;
 import org.apache.carbondata.core.stats.QueryStatistic;
@@ -175,11 +177,19 @@ public class CarbonCompactionExecutor {
   private RawResultIterator getRawResultIterator(Configuration configuration, String segmentId,
       String task, List<TableBlockInfo> tableBlockInfoList)
       throws QueryExecutionException, IOException {
-    return new RawResultIterator(
-        executeBlockList(tableBlockInfoList, segmentId, task, configuration),
-        getSourceSegmentProperties(
-            Collections.singletonList(tableBlockInfoList.get(0).getDataFileFooter())),
-        destinationSegProperties, false);
+    SegmentProperties sourceSegmentProperties = getSourceSegmentProperties(
+        Collections.singletonList(tableBlockInfoList.get(0).getDataFileFooter()));
+    boolean hasColumnDrift = carbonTable.hasColumnDrift() &&
+        RestructureUtil.hasColumnDriftOnSegment(carbonTable, sourceSegmentProperties);
+    if (hasColumnDrift) {
+      return new ColumnDriftRawResultIterator(
+          executeBlockList(tableBlockInfoList, segmentId, task, configuration),
+          sourceSegmentProperties, destinationSegProperties);
+    } else {
+      return new RawResultIterator(
+          executeBlockList(tableBlockInfoList, segmentId, task, configuration),
+          sourceSegmentProperties, destinationSegProperties, true);
+    }
   }
 
   /**


[carbondata] 17/22: [CARBONDATA-3375] [CARBONDATA-3376] Fix GC Overhead limit exceeded issue and partition column as range column issue

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 7e7792e98e0c10e272c3fb1b1ed5821a5193288f
Author: manishnalla1994 <ma...@gmail.com>
AuthorDate: Wed May 8 18:28:21 2019 +0530

    [CARBONDATA-3375] [CARBONDATA-3376] Fix GC Overhead limit exceeded issue and partition column as range column issue
    
    Problem 1: When only a single data range is present, it is launched as one single task, which results in one executor getting overloaded.
    
    Solution: When there is only a single range, divide the splits among different tasks so that no single executor is overloaded.
    
    Problem 2: When the range column is also the partition column, compaction fails because it enters the range-column compaction flow.
    
    Solution: Added a check for partition tables when a range column is present, so that compaction goes through the old flow and passes.
    
    This closes #3210
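
    A minimal Scala sketch of the single-range split distribution idea (hypothetical names;
    the real logic lives in CarbonMergerRDD and operates on CarbonInputSplit objects and a
    task-id map, as shown in the diff below):

        object SingleRangeSplitAssignment {
          // Spread splits over up to `parallelism` tasks so one executor is not overloaded.
          def assign(splits: Seq[String], parallelism: Int): Map[String, Seq[String]] = {
            val perTask = math.ceil(splits.size.toDouble / parallelism).toInt.max(1)
            splits.grouped(perTask)            // chunk the splits
              .zipWithIndex                    // chunk index becomes the task id
              .map { case (chunk, taskId) => taskId.toString -> chunk }
              .toMap
          }

          def main(args: Array[String]): Unit = {
            val splits = (1 to 10).map(i => s"split-$i")
            assign(splits, parallelism = 4).toSeq.sortBy(_._1).foreach {
              case (task, s) => println(s"task $task -> ${s.mkString(", ")}")
            }
          }
        }
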
---
 .../dataload/TestRangeColumnDataLoad.scala         |  25 +++
 .../carbondata/spark/rdd/CarbonMergerRDD.scala     | 168 +++++++++++++--------
 2 files changed, 131 insertions(+), 62 deletions(-)

diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
index ff383f9..5d6730f 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
@@ -187,6 +187,31 @@ class TestRangeColumnDataLoad extends QueryTest with BeforeAndAfterEach with Bef
     sql("DROP TABLE IF EXISTS carbon_range_column1")
   }
 
+  test("Test compaction for range_column - Partition Column") {
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING)
+        | PARTITIONED BY (age INT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='GLOBAL_SORT', 'SORT_COLUMNS'='age, city',
+        | 'range_column'='age')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+  }
+
   test("Test compaction for range_column - 2 levels") {
     sql("DROP TABLE IF EXISTS carbon_range_column1")
     sql(
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
index e361c14..c143f93 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
@@ -296,7 +296,11 @@ class CarbonMergerRDD[K, V](
       tablePath, new CarbonTableIdentifier(databaseName, factTableName, tableId)
     )
     val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
-    val rangeColumn = carbonTable.getRangeColumn
+    var rangeColumn: CarbonColumn = null
+    if (!carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable.isHivePartitionTable) {
+      // If the table is not a partition table then only we go for range column compaction flow
+      rangeColumn = carbonTable.getRangeColumn
+    }
     val dataType: DataType = if (null != rangeColumn) {
       rangeColumn.getDataType
     } else {
@@ -386,6 +390,7 @@ class CarbonMergerRDD[K, V](
     }
     val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
     var allRanges: Array[Object] = new Array[Object](0)
+    var singleRange = false
     if (rangeColumn != null) {
       // To calculate the number of ranges to be made, min 2 ranges/tasks to be made in any case
       val numOfPartitions = Math
@@ -400,10 +405,14 @@ class CarbonMergerRDD[K, V](
         dataType)
       // If RangePartitioner does not give ranges in the case when the data is skewed with
       // a lot of null records then we take the min/max from footer and set them for tasks
-      if (null == allRanges || (allRanges.size == 1 && allRanges(0) == null)) {
+      if (null == allRanges || allRanges.size == 1) {
         allRanges = CarbonCompactionUtil.getOverallMinMax(carbonInputSplits.toList.toArray,
           rangeColumn,
           isRangeColSortCol)
+        if(allRanges(0) == allRanges(1)) {
+          // This will be true only if data has single values throughout
+          singleRange = true
+        }
       }
       LOGGER.info(s"Number of ranges:" + allRanges.size)
     }
@@ -433,75 +442,110 @@ class CarbonMergerRDD[K, V](
     val newRanges = allRanges.filter { range =>
       range != null
     }
-    carbonInputSplits.foreach { split =>
-      var dataFileFooter: DataFileFooter = null
-      if (null == rangeColumn) {
-        val taskNo = getTaskNo(split, partitionTaskMap, counter)
-        var sizeOfSplit = split.getDetailInfo.getBlockSize
-        val splitList = taskIdMapping.get(taskNo)
-        noOfBlocks += 1
+    val noOfSplitsPerTask = Math.ceil(carbonInputSplits.size / defaultParallelism)
+    var taskCount = 0
+    // In case of range column if only one data value is present then we try to
+    // divide the splits to different tasks in order to avoid single task creation
+    // and load on single executor
+    if (singleRange) {
+      carbonInputSplits.foreach { split =>
+        var dataFileFooter: DataFileFooter = null
+        try {
+          dataFileFooter = CarbonUtil.readMetadataFile(
+            CarbonInputSplit.getTableBlockInfo(split))
+        } catch {
+          case e: IOException =>
+            logError("Exception in preparing the data file footer for compaction " + e.getMessage)
+            throw e
+        }
+        // add all the column and cardinality to the map
+        CarbonCompactionUtil
+          .addColumnCardinalityToMap(columnToCardinalityMap,
+            dataFileFooter.getColumnInTable,
+            dataFileFooter.getSegmentInfo.getColumnCardinality)
+
+        var splitList = taskIdMapping.get(taskCount.toString)
+        if (null != splitList && splitList.size == noOfSplitsPerTask) {
+          taskCount = taskCount + 1
+          splitList = taskIdMapping.get(taskCount.toString)
+        }
         if (null == splitList) {
-          val splitTempList = new util.ArrayList[CarbonInputSplit]()
-          splitTempList.add(split)
-          taskIdMapping.put(taskNo, splitTempList)
-        } else {
-          splitList.add(split)
+          splitList = new util.ArrayList[CarbonInputSplit]()
+          taskIdMapping.put(taskCount.toString, splitList)
         }
+        splitList.add(split)
       }
-      // Check the cardinality of each columns and set the highest.
-      try {
-        dataFileFooter = CarbonUtil.readMetadataFile(
-          CarbonInputSplit.getTableBlockInfo(split))
-      } catch {
-        case e: IOException =>
-          logError("Exception in preparing the data file footer for compaction " + e.getMessage)
-          throw e
-      }
-      // add all the column and cardinality to the map
-      CarbonCompactionUtil
-        .addColumnCardinalityToMap(columnToCardinalityMap,
-          dataFileFooter.getColumnInTable,
-          dataFileFooter.getSegmentInfo.getColumnCardinality)
-
-      // Create taskIdMapping here for range column by reading min/max values.
-      if (null != rangeColumn) {
-        if (null == expressionMapForRangeCol) {
-          expressionMapForRangeCol = new util.HashMap[Integer, Expression]()
+    } else {
+      carbonInputSplits.foreach { split =>
+        var dataFileFooter: DataFileFooter = null
+        if (null == rangeColumn) {
+          val taskNo = getTaskNo(split, partitionTaskMap, counter)
+          var sizeOfSplit = split.getDetailInfo.getBlockSize
+          val splitList = taskIdMapping.get(taskNo)
+          noOfBlocks += 1
+          if (null == splitList) {
+            val splitTempList = new util.ArrayList[CarbonInputSplit]()
+            splitTempList.add(split)
+            taskIdMapping.put(taskNo, splitTempList)
+          } else {
+            splitList.add(split)
+          }
+        }
+        // Check the cardinality of each columns and set the highest.
+        try {
+          dataFileFooter = CarbonUtil.readMetadataFile(
+            CarbonInputSplit.getTableBlockInfo(split))
+        } catch {
+          case e: IOException =>
+            logError("Exception in preparing the data file footer for compaction " + e.getMessage)
+            throw e
         }
-        if (-1 == indexOfRangeColumn) {
-          val allColumns = dataFileFooter.getColumnInTable
-          for (i <- 0 until allColumns.size()) {
-            if (allColumns.get(i).getColumnName.equalsIgnoreCase(rangeColumn.getColName)) {
-              indexOfRangeColumn = i
+        // add all the column and cardinality to the map
+        CarbonCompactionUtil
+          .addColumnCardinalityToMap(columnToCardinalityMap,
+            dataFileFooter.getColumnInTable,
+            dataFileFooter.getSegmentInfo.getColumnCardinality)
+
+        // Create taskIdMapping here for range column by reading min/max values.
+        if (null != rangeColumn) {
+          if (null == expressionMapForRangeCol) {
+            expressionMapForRangeCol = new util.HashMap[Integer, Expression]()
+          }
+          if (-1 == indexOfRangeColumn) {
+            val allColumns = dataFileFooter.getColumnInTable
+            for (i <- 0 until allColumns.size()) {
+              if (allColumns.get(i).getColumnName.equalsIgnoreCase(rangeColumn.getColName)) {
+                indexOfRangeColumn = i
+              }
             }
           }
-        }
-        // Create ranges and add splits to the tasks
-        for (i <- 0 until (newRanges.size + 1)) {
-          if (null == expressionMapForRangeCol.get(i)) {
-            // Creating FilterExpression for the range column
-            var minVal: Object = null
-            var maxVal: Object = null
-            // For first task we will create an Or Filter and also accomodate null values
-            // For last task we will take as GreaterThan Expression of last value
-            if (i != 0) {
-              minVal = newRanges(i - 1)
+          // Create ranges and add splits to the tasks
+          for (i <- 0 until (newRanges.size + 1)) {
+            if (null == expressionMapForRangeCol.get(i)) {
+              // Creating FilterExpression for the range column
+              var minVal: Object = null
+              var maxVal: Object = null
+              // For first task we will create an Or Filter and also accomodate null values
+              // For last task we will take as GreaterThan Expression of last value
+              if (i != 0) {
+                minVal = newRanges(i - 1)
+              }
+              if (i != newRanges.size) {
+                maxVal = newRanges(i)
+              }
+              val filterExpr = CarbonCompactionUtil
+                .getFilterExpressionForRange(rangeColumn,
+                  minVal, maxVal, dataType)
+              expressionMapForRangeCol.put(i, filterExpr)
             }
-            if (i != newRanges.size) {
-              maxVal = newRanges(i)
+            var splitList = taskIdMapping.get(i.toString)
+            noOfBlocks += 1
+            if (null == splitList) {
+              splitList = new util.ArrayList[CarbonInputSplit]()
+              taskIdMapping.put(i.toString, splitList)
             }
-            val filterExpr = CarbonCompactionUtil
-              .getFilterExpressionForRange(rangeColumn,
-                minVal, maxVal, dataType)
-            expressionMapForRangeCol.put(i, filterExpr)
-          }
-          var splitList = taskIdMapping.get(i.toString)
-          noOfBlocks += 1
-          if (null == splitList) {
-            splitList = new util.ArrayList[CarbonInputSplit]()
-            taskIdMapping.put(i.toString, splitList)
+            splitList.add(split)
           }
-          splitList.add(split)
         }
       }
     }


[carbondata] 09/22: [CARBONDATA-3353 ][HOTFIX]Fixed MinMax Based Pruning for Measure column in case of Legacy store

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 9a9c79187a647f6f0acd16f01dd22bd8d6d2f368
Author: Indhumathi27 <in...@gmail.com>
AuthorDate: Wed Apr 24 20:52:03 2019 +0530

    [CARBONDATA-3353 ][HOTFIX]Fixed MinMax Based Pruning for Measure column in case of Legacy store
    
    This closes #3187
---
 .../carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java
index 33a337b..64dc3a1 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java
@@ -524,8 +524,8 @@ public class IncludeFilterExecuterImpl implements FilterExecuter {
           isMinMaxSet[chunkIndex]);
       }
     } else if (isMeasurePresentInCurrentBlock) {
+      chunkIndex = msrColumnEvaluatorInfo.getColumnIndexInMinMaxByteArray();
       if (isMinMaxSet[chunkIndex]) {
-        chunkIndex = msrColumnEvaluatorInfo.getColumnIndexInMinMaxByteArray();
         isScanRequired = isScanRequired(blkMaxVal[chunkIndex], blkMinVal[chunkIndex],
             msrColumnExecutorInfo.getFilterKeys(), msrColumnEvaluatorInfo.getType());
       } else {


[carbondata] 01/22: [CARBONDATA-3341] fixed invalid NULL result in filter query

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 9a54c88aa7e65ae08fd1fd36c53c8fa0a02db1a3
Author: kunal642 <ku...@gmail.com>
AuthorDate: Thu Apr 4 11:53:05 2019 +0530

    [CARBONDATA-3341] fixed invalid NULL result in filter query
    
    Problem: When vector filter push down is enabled and the table contains a null value,
    the getNullBitSet method returns an empty byte[] to represent null, but there is no
    check on the length of that bitset.

    Solution: If the null bit set length is 0, set it directly as the chunkData.
    
    This closes #3172
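
    A hedged Scala/Spark SQL sketch of the scenario this fixes, following the test added
    below (assumes a CarbonData-enabled SparkSession named `spark` is already available;
    the table name `test1` and the pushed-row-filter property come from the test):

        import org.apache.spark.sql.SparkSession
        import org.apache.carbondata.core.constants.CarbonCommonConstants
        import org.apache.carbondata.core.util.CarbonProperties

        def reproduce(spark: SparkSession): Unit = {
          import spark.sql
          // force row filter push down for vector reads, as the test does
          CarbonProperties.getInstance()
            .addProperty(CarbonCommonConstants.CARBON_PUSH_ROW_FILTERS_FOR_VECTOR, "true")
          sql("DROP TABLE IF EXISTS test1")
          sql("CREATE TABLE test1(a BIGINT) STORED BY 'carbondata' TBLPROPERTIES('sort_columns'='a')")
          sql("INSERT INTO test1 SELECT 'k'")   // non-numeric value is stored as NULL
          sql("INSERT INTO test1 SELECT '1'")
          // before the fix this filter could match the NULL row; the expected count is 1
          println(sql("SELECT * FROM test1 WHERE a = 1 OR a = 0").count())
        }
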
---
 .../core/datastore/chunk/store/ColumnPageWrapper.java      |  7 ++++++-
 .../spark/testsuite/sortcolumns/TestSortColumns.scala      | 14 ++++++++++++++
 2 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java
index a1c4aec..f4d3fe4 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/ColumnPageWrapper.java
@@ -261,7 +261,12 @@ public class ColumnPageWrapper implements DimensionColumnPage {
       // if the compare value is null and the data is also null we can directly return 0
       return 0;
     } else {
-      byte[] chunkData = this.getChunkDataInBytes(rowId);
+      byte[] chunkData;
+      if (nullBitSet != null && nullBitSet.length == 0) {
+        chunkData = nullBitSet;
+      } else {
+        chunkData = this.getChunkDataInBytes(rowId);
+      }
       return ByteUtil.UnsafeComparer.INSTANCE.compareTo(chunkData, compareValue);
     }
   }
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala
index df97d0f..bbd58c0 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/sortcolumns/TestSortColumns.scala
@@ -385,6 +385,17 @@ class TestSortColumns extends QueryTest with BeforeAndAfterAll {
         "sort_columns is unsupported for double datatype column: empno"))
   }
 
+  test("test if equal to 0 filter on sort column gives correct result") {
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_PUSH_ROW_FILTERS_FOR_VECTOR,
+      "true")
+    sql("create table test1(a bigint) stored by 'carbondata' TBLPROPERTIES('sort_columns'='a')")
+    sql("insert into test1 select 'k'")
+    sql("insert into test1 select '1'")
+    assert(sql("select * from test1 where a = 1 or a = 0").count() == 1)
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_PUSH_ROW_FILTERS_FOR_VECTOR,
+      CarbonCommonConstants.CARBON_PUSH_ROW_FILTERS_FOR_VECTOR_DEFAULT)
+  }
+
   override def afterAll = {
     dropTestTables
     CarbonProperties.getInstance().addProperty(
@@ -392,9 +403,12 @@ class TestSortColumns extends QueryTest with BeforeAndAfterAll {
     CarbonProperties.getInstance()
       .addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
         CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_PUSH_ROW_FILTERS_FOR_VECTOR,
+      CarbonCommonConstants.CARBON_PUSH_ROW_FILTERS_FOR_VECTOR_DEFAULT)
   }
 
   def dropTestTables = {
+    sql("drop table if exists test1")
     sql("drop table if exists sortint")
     sql("drop table if exists sortint1")
     sql("drop table if exists sortlong")


[carbondata] 11/22: [CARBONDATA-3359]Fix data mismatch issue for decimal column after delete operation

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit f7cdb47e7535c7543147bf96f42cfbc14b36b082
Author: akashrn5 <ak...@gmail.com>
AuthorDate: Thu Apr 25 15:16:35 2019 +0530

    [CARBONDATA-3359]Fix data mismatch issue for decimal column after delete operation
    
    Problem:
    After a delete operation, the decimal column data is wrong. While filling the vector
    for a decimal column, deleted rows (if any) were not taken into account, so all row
    data was filled for the decimal column.

    Solution:
    For decimal columns, get the vector from ColumnarVectorWrapperDirectFactory and then
    put the data; that wrapper takes care of the deleted rows.
    
    This closes #3189
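
    A hedged Scala/Spark SQL sketch of the repro, mirroring the test added below (assumes a
    CarbonData-enabled SparkSession named `spark`; `csvPath` points at a file shaped like
    the new decimalData.csv resource):

        import org.apache.spark.sql.SparkSession

        def deleteOnDecimalTable(spark: SparkSession, csvPath: String): Unit = {
          import spark.sql
          sql("DROP TABLE IF EXISTS decimal_table")
          sql(
            """CREATE TABLE decimal_table(
              |  smallIntField SMALLINT, intField INT, bigIntField BIGINT, floatField FLOAT,
              |  doubleField DOUBLE, decimalField DECIMAL(25, 4), timestampField TIMESTAMP,
              |  dateField DATE, stringField STRING, varcharField VARCHAR(10), charField CHAR(10))
              |STORED AS carbondata""".stripMargin)
          sql(s"LOAD DATA LOCAL INPATH '$csvPath' INTO TABLE decimal_table")
          sql("DELETE FROM decimal_table WHERE smallIntField = 2")
          // before the fix, the surviving decimal values could come back wrong
          sql("SELECT decimalField FROM decimal_table WHERE smallIntField = -1 OR smallIntField = 3").show()
        }
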
---
 .../metadata/datatype/DecimalConverterFactory.java | 55 +++++++++++++---------
 .../src/test/resources/decimalData.csv             |  4 ++
 .../testsuite/iud/DeleteCarbonTableTestCase.scala  | 17 +++++++
 3 files changed, 54 insertions(+), 22 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/datatype/DecimalConverterFactory.java b/core/src/main/java/org/apache/carbondata/core/metadata/datatype/DecimalConverterFactory.java
index 9793c38..2e155f4 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/datatype/DecimalConverterFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/datatype/DecimalConverterFactory.java
@@ -23,6 +23,7 @@ import java.util.BitSet;
 
 import org.apache.carbondata.core.scan.result.vector.CarbonColumnVector;
 import org.apache.carbondata.core.scan.result.vector.ColumnVectorInfo;
+import org.apache.carbondata.core.scan.result.vector.impl.directread.ColumnarVectorWrapperDirectFactory;
 import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.core.util.DataTypeUtil;
 
@@ -102,13 +103,13 @@ public final class DecimalConverterFactory {
       return BigDecimal.valueOf((Long) valueToBeConverted, scale);
     }
 
-    @Override public void fillVector(Object valuesToBeConverted, int size, ColumnVectorInfo info,
-        BitSet nullBitset, DataType pageType) {
+    @Override public void fillVector(Object valuesToBeConverted, int size,
+        ColumnVectorInfo vectorInfo, BitSet nullBitSet, DataType pageType) {
       // TODO we need to find way to directly set to vector with out conversion. This way is very
       // inefficient.
-      CarbonColumnVector vector = info.vector;
-      int precision = info.measure.getMeasure().getPrecision();
-      int newMeasureScale = info.measure.getMeasure().getScale();
+      CarbonColumnVector vector = getCarbonColumnVector(vectorInfo, nullBitSet);
+      int precision = vectorInfo.measure.getMeasure().getPrecision();
+      int newMeasureScale = vectorInfo.measure.getMeasure().getScale();
       if (!(valuesToBeConverted instanceof byte[])) {
         throw new UnsupportedOperationException("This object type " + valuesToBeConverted.getClass()
             + " is not supported in this method");
@@ -116,7 +117,7 @@ public final class DecimalConverterFactory {
       byte[] data = (byte[]) valuesToBeConverted;
       if (pageType == DataTypes.BYTE) {
         for (int i = 0; i < size; i++) {
-          if (nullBitset.get(i)) {
+          if (nullBitSet.get(i)) {
             vector.putNull(i);
           } else {
             BigDecimal value = BigDecimal.valueOf(data[i], scale);
@@ -128,7 +129,7 @@ public final class DecimalConverterFactory {
         }
       } else if (pageType == DataTypes.SHORT) {
         for (int i = 0; i < size; i++) {
-          if (nullBitset.get(i)) {
+          if (nullBitSet.get(i)) {
             vector.putNull(i);
           } else {
             BigDecimal value = BigDecimal
@@ -142,7 +143,7 @@ public final class DecimalConverterFactory {
         }
       } else if (pageType == DataTypes.SHORT_INT) {
         for (int i = 0; i < size; i++) {
-          if (nullBitset.get(i)) {
+          if (nullBitSet.get(i)) {
             vector.putNull(i);
           } else {
             BigDecimal value = BigDecimal
@@ -156,7 +157,7 @@ public final class DecimalConverterFactory {
         }
       } else if (pageType == DataTypes.INT) {
         for (int i = 0; i < size; i++) {
-          if (nullBitset.get(i)) {
+          if (nullBitSet.get(i)) {
             vector.putNull(i);
           } else {
             BigDecimal value = BigDecimal
@@ -170,7 +171,7 @@ public final class DecimalConverterFactory {
         }
       } else if (pageType == DataTypes.LONG) {
         for (int i = 0; i < size; i++) {
-          if (nullBitset.get(i)) {
+          if (nullBitSet.get(i)) {
             vector.putNull(i);
           } else {
             BigDecimal value = BigDecimal
@@ -261,18 +262,18 @@ public final class DecimalConverterFactory {
       return new BigDecimal(bigInteger, scale);
     }
 
-    @Override public void fillVector(Object valuesToBeConverted, int size, ColumnVectorInfo info,
-        BitSet nullBitset, DataType pageType) {
-      CarbonColumnVector vector = info.vector;
-      int precision = info.measure.getMeasure().getPrecision();
-      int newMeasureScale = info.measure.getMeasure().getScale();
+    @Override public void fillVector(Object valuesToBeConverted, int size,
+        ColumnVectorInfo vectorInfo, BitSet nullBitSet, DataType pageType) {
+      CarbonColumnVector vector = getCarbonColumnVector(vectorInfo, nullBitSet);
+      int precision = vectorInfo.measure.getMeasure().getPrecision();
+      int newMeasureScale = vectorInfo.measure.getMeasure().getScale();
       if (scale < newMeasureScale) {
         scale = newMeasureScale;
       }
       if (valuesToBeConverted instanceof byte[][]) {
         byte[][] data = (byte[][]) valuesToBeConverted;
         for (int i = 0; i < size; i++) {
-          if (nullBitset.get(i)) {
+          if (nullBitSet.get(i)) {
             vector.putNull(i);
           } else {
             BigInteger bigInteger = new BigInteger(data[i]);
@@ -307,15 +308,15 @@ public final class DecimalConverterFactory {
       return DataTypeUtil.byteToBigDecimal((byte[]) valueToBeConverted);
     }
 
-    @Override public void fillVector(Object valuesToBeConverted, int size, ColumnVectorInfo info,
-        BitSet nullBitset, DataType pageType) {
-      CarbonColumnVector vector = info.vector;
-      int precision = info.measure.getMeasure().getPrecision();
-      int newMeasureScale = info.measure.getMeasure().getScale();
+    @Override public void fillVector(Object valuesToBeConverted, int size,
+        ColumnVectorInfo vectorInfo, BitSet nullBitSet, DataType pageType) {
+      CarbonColumnVector vector = getCarbonColumnVector(vectorInfo, nullBitSet);
+      int precision = vectorInfo.measure.getMeasure().getPrecision();
+      int newMeasureScale = vectorInfo.measure.getMeasure().getScale();
       if (valuesToBeConverted instanceof byte[][]) {
         byte[][] data = (byte[][]) valuesToBeConverted;
         for (int i = 0; i < size; i++) {
-          if (nullBitset.get(i)) {
+          if (nullBitSet.get(i)) {
             vector.putNull(i);
           } else {
             BigDecimal value = DataTypeUtil.byteToBigDecimal(data[i]);
@@ -337,6 +338,16 @@ public final class DecimalConverterFactory {
     }
   }
 
+  private static CarbonColumnVector getCarbonColumnVector(ColumnVectorInfo vectorInfo,
+      BitSet nullBitSet) {
+    CarbonColumnVector vector = vectorInfo.vector;
+    BitSet deletedRows = vectorInfo.deletedRows;
+    vector = ColumnarVectorWrapperDirectFactory
+        .getDirectVectorWrapperFactory(vector, vectorInfo.invertedIndex, nullBitSet, deletedRows,
+            true, false);
+    return vector;
+  }
+
   public DecimalConverter getDecimalConverter(int precision, int scale) {
     if (precision < 0) {
       return new LVBytesDecimalConverter();
diff --git a/integration/spark-common-test/src/test/resources/decimalData.csv b/integration/spark-common-test/src/test/resources/decimalData.csv
new file mode 100644
index 0000000..8cbbd21
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/decimalData.csv
@@ -0,0 +1,4 @@
+smallIntField,intField,bigIntField,floatField,doubleField,decimalField,timestampField,dateField,stringField,varcharField,charField,arrayField,structField
+-1,-1,-1,-1.1,-1.1,-1.1234,2017-06-11 00:00:01,2017-06-11,abc1,abcd1,abcde1,a$b$c$1,a$b$1
+2,2,2,2.1,2.1,2.1234,2017-06-12 23:59:02,2017-06-12,abc2,abcd2,abcde2,a$b$c$2,a$b$2
+3,3,3,3.1,3.1,3.1234,2017-06-13 23:59:03,2017-06-13,abc3,abcd3,abcde3,a$b$c$3,a$b$3
\ No newline at end of file
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
index 2f95133..f26283b 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/iud/DeleteCarbonTableTestCase.scala
@@ -344,6 +344,23 @@ class DeleteCarbonTableTestCase extends QueryTest with BeforeAndAfterAll {
     sql("drop table if exists test_dm_index")
   }
 
+  test("test delete on table with decimal column") {
+    sql("drop table if exists decimal_table")
+    sql(
+      s"""create table decimal_table(smallIntField smallInt,intField int,bigIntField bigint,floatField float,
+          doubleField double,decimalField decimal(25, 4),timestampField timestamp,dateField date,stringField string,
+          varcharField varchar(10),charField char(10))stored as carbondata
+      """.stripMargin)
+    sql(s"load data local inpath '$resourcesPath/decimalData.csv' into table decimal_table")
+    val frame = sql("select decimalfield from decimal_table where smallIntField = -1 or smallIntField = 3")
+    sql(s"delete from decimal_table where smallIntField = 2")
+    checkAnswer(frame, Seq(
+      Row(-1.1234),
+      Row(3.1234)
+    ))
+    sql("drop table if exists decimal_table")
+  }
+
   override def afterAll {
     sql("use default")
     sql("drop database  if exists iud_db cascade")


[carbondata] 19/22: [CARBONDATA-3362] Document update for pagesize table property scenario

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 251cbdc20319fc71013b154e2439503dea072d94
Author: ajantha-bhat <aj...@gmail.com>
AuthorDate: Tue May 7 14:36:05 2019 +0530

    [CARBONDATA-3362] Document update for pagesize table property scenario
    
    Documentation update for the table page size (table_page_size_inmb) property scenario.
    
    This closes #3206
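
    A minimal, hedged usage sketch of the documented property (the table name is
    hypothetical; the property name and a small value such as 1 MB follow the doc text
    changed below):

        import org.apache.spark.sql.SparkSession

        def createSmallPageTable(spark: SparkSession): Unit = {
          // keep each page at ~1 MB so pages fit the CPU cache and large string/binary
          // columns do not blow up a 32000-row page
          spark.sql(
            """CREATE TABLE IF NOT EXISTS page_size_demo (id INT, payload STRING)
              |STORED BY 'carbondata'
              |TBLPROPERTIES ('TABLE_PAGE_SIZE_INMB'='1')""".stripMargin)
        }
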
---
 docs/carbon-as-spark-datasource-guide.md | 2 +-
 docs/ddl-of-carbondata.md                | 5 +++++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/docs/carbon-as-spark-datasource-guide.md b/docs/carbon-as-spark-datasource-guide.md
index 598acb0..fe46b09 100644
--- a/docs/carbon-as-spark-datasource-guide.md
+++ b/docs/carbon-as-spark-datasource-guide.md
@@ -44,7 +44,7 @@ Now you can create Carbon table using Spark's datasource DDL syntax.
 |-----------|--------------|------------|
 | table_blocksize | 1024 | Size of blocks to write onto hdfs. For  more details, see [Table Block Size Configuration](./ddl-of-carbondata.md#table-block-size-configuration). |
 | table_blocklet_size | 64 | Size of blocklet to write. |
-| table_page_size_inmb | 0 | Size of each page in carbon table, if page size crosses this value before 32000 rows, page will be cut to that may rows. Helps in keep page size to fit cache size |
+| table_page_size_inmb | 0 | Size of each page in carbon table, if page size crosses this value before 32000 rows, page will be cut to that many rows. Helps in keep page size to fit cache size |
 | local_dictionary_threshold | 10000 | Cardinality upto which the local dictionary can be generated. For  more details, see [Local Dictionary Configuration](./ddl-of-carbondata.md#local-dictionary-configuration). |
 | local_dictionary_enable | false | Enable local dictionary generation. For  more details, see [Local Dictionary Configuration](./ddl-of-carbondata.md#local-dictionary-configuration). |
 | sort_columns | all dimensions are sorted | Columns to include in sort and its order of sort. For  more details, see [Sort Columns Configuration](./ddl-of-carbondata.md#sort-columns-configuration). |
diff --git a/docs/ddl-of-carbondata.md b/docs/ddl-of-carbondata.md
index 5bc8f10..34eca8d 100644
--- a/docs/ddl-of-carbondata.md
+++ b/docs/ddl-of-carbondata.md
@@ -291,6 +291,11 @@ CarbonData DDL statements are documented here,which includes:
      If page size crosses this value before 32000 rows, page will be cut to that many rows. 
      Helps in keeping page size to fit cpu cache size.
 
+     This property can be configured if the table has string, varchar, binary or complex datatype columns.
+     Because for these columns 32000 rows in one page may exceed 1755 MB and snappy compression will fail in that scenario.
+     Also if page size is huge, page cannot be fit in CPU cache. 
+     So, configuring smaller values of this property (say 1 MB) can result in better use of CPU cache for pages.
+
      Example usage:
      ```
      TBLPROPERTIES ('TABLE_PAGE_SIZE_INMB'='5')


[carbondata] 05/22: [CARBONDATA-3353 ]Fixed MinMax Based Pruning for Measure column in case of Legacy store

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 743b843676e8736beaf55286df3369772224bb90
Author: Indhumathi27 <in...@gmail.com>
AuthorDate: Fri Apr 12 12:25:38 2019 +0530

    [CARBONDATA-3353 ]Fixed MinMax Based Pruning for Measure column in case of Legacy store
    
    Why is this PR needed?

    Problem:
    For a table created and loaded with a legacy store that has a measure column, the page
    min is written as the max and vice versa while building the page min/max, so the
    blocklet-level min/max is wrong. With the current version, a filter query on such a
    measure column skips some blocks during min/max pruning and returns wrong results.

    Solution:
    Skip min/max based pruning for measure columns in case of a legacy store.
    
    This closes #3176
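
    A standalone Scala sketch of the min/max flag handling this change introduces
    (hypothetical helper; the real code is FilterUtil.setMinMaxFlagForLegacyStore in the
    diff below, driven by the SegmentProperties column value sizes):

        object LegacyMinMaxFlags {
          // true  -> min/max can be trusted for pruning (dimensions, complex dimensions)
          // false -> skip min/max pruning (measure min/max is written swapped in old stores)
          def build(dimColumns: Int, complexDimColumns: Int, measureColumns: Int): Array[Boolean] = {
            val flags = new Array[Boolean](dimColumns + complexDimColumns + measureColumns)
            val dimEnd = dimColumns + complexDimColumns
            java.util.Arrays.fill(flags, 0, dimEnd, true)
            java.util.Arrays.fill(flags, dimEnd, flags.length, false)
            flags
          }
        }
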
---
 .../indexstore/blockletindex/BlockDataMap.java     | 15 +------
 .../scan/executor/impl/AbstractQueryExecutor.java  | 15 +++----
 .../carbondata/core/scan/filter/FilterUtil.java    |  8 ++++
 .../filter/executer/IncludeFilterExecuterImpl.java | 11 +++--
 .../RowLevelRangeGrtThanFiterExecuterImpl.java     | 10 +++--
 ...LevelRangeGrtrThanEquaToFilterExecuterImpl.java | 10 +++--
 ...wLevelRangeLessThanEqualFilterExecuterImpl.java | 10 +++--
 .../RowLevelRangeLessThanFilterExecuterImpl.java   | 10 +++--
 .../apache/carbondata/core/util/CarbonUtil.java    | 47 ----------------------
 .../carbondata/core/util/CarbonUtilTest.java       | 46 ---------------------
 10 files changed, 51 insertions(+), 131 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
index 5b2132c..1fc5831 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
@@ -67,7 +67,6 @@ import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.scan.model.QueryModel;
 import org.apache.carbondata.core.util.BlockletDataMapUtil;
 import org.apache.carbondata.core.util.ByteUtil;
-import org.apache.carbondata.core.util.CarbonUtil;
 import org.apache.carbondata.core.util.DataFileFooterConverter;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 
@@ -219,7 +218,7 @@ public class BlockDataMap extends CoarseGrainDataMap
     DataMapRowImpl summaryRow = null;
     CarbonRowSchema[] schema = getFileFooterEntrySchema();
     boolean[] minMaxFlag = new boolean[segmentProperties.getColumnsValueSize().length];
-    Arrays.fill(minMaxFlag, true);
+    FilterUtil.setMinMaxFlagForLegacyStore(minMaxFlag, segmentProperties);
     long totalRowCount = 0;
     for (DataFileFooter fileFooter : indexInfo) {
       TableBlockInfo blockInfo = fileFooter.getBlockInfo().getTableBlockInfo();
@@ -232,19 +231,9 @@ public class BlockDataMap extends CoarseGrainDataMap
       if (null != blockMetaInfo) {
         BlockletIndex blockletIndex = fileFooter.getBlockletIndex();
         BlockletMinMaxIndex minMaxIndex = blockletIndex.getMinMaxIndex();
-        byte[][] minValues =
-            BlockletDataMapUtil.updateMinValues(segmentProperties, minMaxIndex.getMinValues());
-        byte[][] maxValues =
-            BlockletDataMapUtil.updateMaxValues(segmentProperties, minMaxIndex.getMaxValues());
-        // update min max values in case of old store for measures as measure min/max in
-        // old stores in written opposite
-        byte[][] updatedMinValues =
-            CarbonUtil.updateMinMaxValues(fileFooter, maxValues, minValues, true);
-        byte[][] updatedMaxValues =
-            CarbonUtil.updateMinMaxValues(fileFooter, maxValues, minValues, false);
         summaryRow = loadToUnsafeBlock(schema, taskSummarySchema, fileFooter, segmentProperties,
             getMinMaxCacheColumns(), blockInfo.getFilePath(), summaryRow,
-            blockMetaInfo, updatedMinValues, updatedMaxValues, minMaxFlag);
+            blockMetaInfo, minMaxIndex.getMinValues(), minMaxIndex.getMaxValues(), minMaxFlag);
         totalRowCount += fileFooter.getNumberOfRows();
       }
     }
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
index f81a3dc..b15bdb5 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
@@ -238,6 +238,12 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
           LOGGER.warn("Skipping Direct Vector Filling as it is not Supported "
               + "for Legacy store prior to V3 store");
           queryModel.setDirectVectorFill(false);
+          // Skip minmax based pruning for measure column in case of legacy store
+          boolean[] minMaxFlag = new boolean[segmentProperties.getColumnsValueSize().length];
+          FilterUtil.setMinMaxFlagForLegacyStore(minMaxFlag, segmentProperties);
+          for (BlockletInfo blockletInfo : fileFooter.getBlockletList()) {
+            blockletInfo.getBlockletIndex().getMinMaxIndex().setIsMinMaxSet(minMaxFlag);
+          }
         }
         readAndFillBlockletInfo(tableBlockInfos, blockInfo,
             blockletDetailInfo, fileFooter, segmentProperties);
@@ -386,15 +392,6 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
     byte[][] maxValues = blockletInfo.getBlockletIndex().getMinMaxIndex().getMaxValues();
     byte[][] minValues = blockletInfo.getBlockletIndex().getMinMaxIndex().getMinValues();
     if (blockletDetailInfo.isLegacyStore()) {
-      minValues = BlockletDataMapUtil.updateMinValues(segmentProperties,
-          blockletInfo.getBlockletIndex().getMinMaxIndex().getMinValues());
-      maxValues = BlockletDataMapUtil.updateMaxValues(segmentProperties,
-          blockletInfo.getBlockletIndex().getMinMaxIndex().getMaxValues());
-      // update min and max values in case of old store for measures as min and max is written
-      // opposite for measures in old store ( store <= 1.1 version)
-      byte[][] tempMaxValues = maxValues;
-      maxValues = CarbonUtil.updateMinMaxValues(fileFooter, maxValues, minValues, false);
-      minValues = CarbonUtil.updateMinMaxValues(fileFooter, tempMaxValues, minValues, true);
       info.setDataBlockFromOldStore(true);
     }
     blockletInfo.getBlockletIndex().getMinMaxIndex().setMaxValues(maxValues);
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
index 6cc13e2..9d8fe8d 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
@@ -2326,4 +2326,12 @@ public final class FilterUtil {
     return defaultValue;
   }
 
+  public static void setMinMaxFlagForLegacyStore(boolean[] minMaxFlag,
+      SegmentProperties segmentProperties) {
+    int index = segmentProperties.getEachDimColumnValueSize().length + segmentProperties
+        .getEachComplexDimColumnValueSize().length;
+    Arrays.fill(minMaxFlag, 0, index, true);
+    Arrays.fill(minMaxFlag, index, minMaxFlag.length, false);
+  }
+
 }
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java
index 4668242..33a337b 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/IncludeFilterExecuterImpl.java
@@ -524,10 +524,13 @@ public class IncludeFilterExecuterImpl implements FilterExecuter {
           isMinMaxSet[chunkIndex]);
       }
     } else if (isMeasurePresentInCurrentBlock) {
-      chunkIndex = msrColumnEvaluatorInfo.getColumnIndexInMinMaxByteArray();
-      isScanRequired = isScanRequired(blkMaxVal[chunkIndex], blkMinVal[chunkIndex],
-          msrColumnExecutorInfo.getFilterKeys(),
-          msrColumnEvaluatorInfo.getType());
+      if (isMinMaxSet[chunkIndex]) {
+        chunkIndex = msrColumnEvaluatorInfo.getColumnIndexInMinMaxByteArray();
+        isScanRequired = isScanRequired(blkMaxVal[chunkIndex], blkMinVal[chunkIndex],
+            msrColumnExecutorInfo.getFilterKeys(), msrColumnEvaluatorInfo.getType());
+      } else {
+        isScanRequired = true;
+      }
     }
 
     if (isScanRequired) {
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
index d2c4b05..6b37e60 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtThanFiterExecuterImpl.java
@@ -122,9 +122,13 @@ public class RowLevelRangeGrtThanFiterExecuterImpl extends RowLevelFilterExecute
     byte[] maxValue = null;
     if (isMeasurePresentInCurrentBlock[0] || isDimensionPresentInCurrentBlock[0]) {
       if (isMeasurePresentInCurrentBlock[0]) {
-        maxValue = blockMaxValue[measureChunkIndex[0]];
-        isScanRequired =
-            isScanRequired(maxValue, msrFilterRangeValues, msrColEvalutorInfoList.get(0).getType());
+        if (isMinMaxSet[measureChunkIndex[0]]) {
+          maxValue = blockMaxValue[measureChunkIndex[0]];
+          isScanRequired = isScanRequired(maxValue, msrFilterRangeValues,
+              msrColEvalutorInfoList.get(0).getType());
+        } else {
+          isScanRequired = true;
+        }
       } else {
         maxValue = blockMaxValue[dimensionChunkIndex[0]];
         DataType dataType = dimColEvaluatorInfoList.get(0).getDimension().getDataType();
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
index cf31033..24c2e3c 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeGrtrThanEquaToFilterExecuterImpl.java
@@ -120,9 +120,13 @@ public class RowLevelRangeGrtrThanEquaToFilterExecuterImpl extends RowLevelFilte
     byte[] maxValue = null;
     if (isMeasurePresentInCurrentBlock[0] || isDimensionPresentInCurrentBlock[0]) {
       if (isMeasurePresentInCurrentBlock[0]) {
-        maxValue = blockMaxValue[measureChunkIndex[0]];
-        isScanRequired =
-            isScanRequired(maxValue, msrFilterRangeValues, msrColEvalutorInfoList.get(0).getType());
+        if (isMinMaxSet[measureChunkIndex[0]]) {
+          maxValue = blockMaxValue[measureChunkIndex[0]];
+          isScanRequired = isScanRequired(maxValue, msrFilterRangeValues,
+              msrColEvalutorInfoList.get(0).getType());
+        } else {
+          isScanRequired = true;
+        }
       } else {
         maxValue = blockMaxValue[dimensionChunkIndex[0]];
         DataType dataType = dimColEvaluatorInfoList.get(0).getDimension().getDataType();
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
index 8ea6e0d..0dbdf79 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanEqualFilterExecuterImpl.java
@@ -120,9 +120,13 @@ public class RowLevelRangeLessThanEqualFilterExecuterImpl extends RowLevelFilter
     boolean isScanRequired = false;
     if (isMeasurePresentInCurrentBlock[0] || isDimensionPresentInCurrentBlock[0]) {
       if (isMeasurePresentInCurrentBlock[0]) {
-        minValue = blockMinValue[measureChunkIndex[0]];
-        isScanRequired =
-            isScanRequired(minValue, msrFilterRangeValues, msrColEvalutorInfoList.get(0).getType());
+        if (isMinMaxSet[measureChunkIndex[0]]) {
+          minValue = blockMinValue[measureChunkIndex[0]];
+          isScanRequired = isScanRequired(minValue, msrFilterRangeValues,
+              msrColEvalutorInfoList.get(0).getType());
+        } else {
+          isScanRequired = true;
+        }
       } else {
         minValue = blockMinValue[dimensionChunkIndex[0]];
         DataType dataType = dimColEvaluatorInfoList.get(0).getDimension().getDataType();
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFilterExecuterImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFilterExecuterImpl.java
index df1afc4..acd918a 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFilterExecuterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/executer/RowLevelRangeLessThanFilterExecuterImpl.java
@@ -120,9 +120,13 @@ public class RowLevelRangeLessThanFilterExecuterImpl extends RowLevelFilterExecu
     boolean isScanRequired = false;
     if (isMeasurePresentInCurrentBlock[0] || isDimensionPresentInCurrentBlock[0]) {
       if (isMeasurePresentInCurrentBlock[0]) {
-        minValue = blockMinValue[measureChunkIndex[0]];
-        isScanRequired =
-            isScanRequired(minValue, msrFilterRangeValues, msrColEvalutorInfoList.get(0).getType());
+        if (isMinMaxSet[measureChunkIndex[0]]) {
+          minValue = blockMinValue[measureChunkIndex[0]];
+          isScanRequired = isScanRequired(minValue, msrFilterRangeValues,
+              msrColEvalutorInfoList.get(0).getType());
+        } else {
+          isScanRequired = true;
+        }
       } else {
         minValue = blockMinValue[dimensionChunkIndex[0]];
         DataType dataType = dimColEvaluatorInfoList.get(0).getDimension().getDataType();
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
index d9f69e3..a4af9cc 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -88,8 +88,6 @@ import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
 import org.apache.carbondata.core.statusmanager.SegmentStatus;
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
 import org.apache.carbondata.core.statusmanager.SegmentUpdateStatusManager;
-import org.apache.carbondata.core.util.comparator.Comparator;
-import org.apache.carbondata.core.util.comparator.SerializableComparator;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 import org.apache.carbondata.format.BlockletHeader;
 import org.apache.carbondata.format.DataChunk2;
@@ -2826,51 +2824,6 @@ public final class CarbonUtil {
   }
 
   /**
-   * This method will be used to update the min and max values and this will be used in case of
-   * old store where min and max values for measures are written opposite
-   * (i.e max values in place of min and min in place of max values)
-   *
-   * @param dataFileFooter
-   * @param maxValues
-   * @param minValues
-   * @param isMinValueComparison
-   * @return
-   */
-  public static byte[][] updateMinMaxValues(DataFileFooter dataFileFooter, byte[][] maxValues,
-      byte[][] minValues, boolean isMinValueComparison) {
-    byte[][] updatedMinMaxValues = new byte[maxValues.length][];
-    if (isMinValueComparison) {
-      System.arraycopy(minValues, 0, updatedMinMaxValues, 0, minValues.length);
-    } else {
-      System.arraycopy(maxValues, 0, updatedMinMaxValues, 0, maxValues.length);
-    }
-    for (int i = 0; i < maxValues.length; i++) {
-      // update min and max values only for measures
-      if (!dataFileFooter.getColumnInTable().get(i).isDimensionColumn()) {
-        DataType dataType = dataFileFooter.getColumnInTable().get(i).getDataType();
-        SerializableComparator comparator = Comparator.getComparator(dataType);
-        int compare;
-        if (isMinValueComparison) {
-          compare = comparator
-              .compare(DataTypeUtil.getMeasureObjectFromDataType(maxValues[i], dataType),
-                  DataTypeUtil.getMeasureObjectFromDataType(minValues[i], dataType));
-          if (compare < 0) {
-            updatedMinMaxValues[i] = maxValues[i];
-          }
-        } else {
-          compare = comparator
-              .compare(DataTypeUtil.getMeasureObjectFromDataType(minValues[i], dataType),
-                  DataTypeUtil.getMeasureObjectFromDataType(maxValues[i], dataType));
-          if (compare > 0) {
-            updatedMinMaxValues[i] = minValues[i];
-          }
-        }
-      }
-    }
-    return updatedMinMaxValues;
-  }
-
-  /**
    * Generate the blockid as per the block path
    *
    * @param identifier
diff --git a/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java b/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
index a82a8aa..a8d30b1 100644
--- a/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/util/CarbonUtilTest.java
@@ -1011,52 +1011,6 @@ public class CarbonUtilTest {
     Assert.assertTrue(schemaString.length() > schema.length());
   }
 
-  @Test
-  public void testUpdateMinMaxValues() {
-    // create dimension and measure column schema
-    ColumnSchema dimensionColumnSchema = createColumnSchema(DataTypes.STRING, true);
-    ColumnSchema measureColumnSchema = createColumnSchema(DataTypes.DOUBLE, false);
-    List<ColumnSchema> columnSchemas = new ArrayList<>(2);
-    columnSchemas.add(dimensionColumnSchema);
-    columnSchemas.add(measureColumnSchema);
-    // create data file footer object
-    DataFileFooter fileFooter = new DataFileFooter();
-    fileFooter.setColumnInTable(columnSchemas);
-    // initialise the expected values
-    int expectedMaxValue = 5;
-    int expectedMinValue = 2;
-    double expectedMeasureMaxValue = 28.74;
-    double expectedMeasureMinValue = -21.46;
-    // initialise the minValues
-    byte[][] minValues = new byte[2][];
-    minValues[0] = new byte[] { 2 };
-    ByteBuffer buffer = ByteBuffer.allocate(8);
-    minValues[1] = (byte[]) buffer.putDouble(28.74).flip().array();
-    buffer = ByteBuffer.allocate(8);
-    // initialise the maxValues
-    byte[][] maxValues = new byte[2][];
-    maxValues[0] = new byte[] { 5 };
-    maxValues[1] = (byte[]) buffer.putDouble(-21.46).flip().array();
-    byte[][] updateMaxValues =
-        CarbonUtil.updateMinMaxValues(fileFooter, maxValues, minValues, false);
-    byte[][] updateMinValues =
-        CarbonUtil.updateMinMaxValues(fileFooter, maxValues, minValues, true);
-    // compare max values
-    assert (expectedMaxValue == ByteBuffer.wrap(updateMaxValues[0]).get());
-    assert (expectedMeasureMaxValue == ByteBuffer.wrap(updateMaxValues[1]).getDouble());
-
-    // compare min values
-    assert (expectedMinValue == ByteBuffer.wrap(updateMinValues[0]).get());
-    assert (expectedMeasureMinValue == ByteBuffer.wrap(updateMinValues[1]).getDouble());
-  }
-
-  private ColumnSchema createColumnSchema(DataType dataType, boolean isDimensionColumn) {
-    ColumnSchema columnSchema = new ColumnSchema();
-    columnSchema.setDataType(dataType);
-    columnSchema.setDimensionColumn(isDimensionColumn);
-    return columnSchema;
-  }
-
   private String generateString(int length) {
     StringBuilder builder = new StringBuilder();
     for (int i = 0; i < length; i++) {


[carbondata] 14/22: [CARBONDATA-3360]fix NullPointerException in delete and clean files operation

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit bc80a22ddb40ed09692008114bbe561ba67955f7
Author: akashrn5 <ak...@gmail.com>
AuthorDate: Fri Apr 26 12:00:41 2019 +0530

    [CARBONDATA-3360]fix NullPointerException in delete and clean files operation
    
    Problem:
    When a delete fails because the HDFS quota is exceeded or the disk is full, a
    tableUpdateStatus.write file remains in the store. If a clean files operation runs
    after that, a null was assigned to a primitive long, which throws a runtime exception,
    and the .write file was never deleted because it is treated as an invalid file.

    Solution:
    If a .write file is present, clean files does not fail. Instead, the max query timeout
    is checked for the tableUpdateStatus.write file, and such .write files are deleted by
    any subsequent clean files operation.
    
    This closes #3191
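
    A hedged Scala sketch of the clean-files decision described above (hypothetical
    function; the real logic is in CarbonUpdateUtil.compareTimestampsAndDelete, shown in
    the diff below):

        def shouldDeleteInvalidFile(fileName: String,
                                    parsedTimestamp: Option[Long],
                                    forceDelete: Boolean,
                                    timestampFromName: String => Long,
                                    maxQueryTimeoutExceeded: Long => Boolean): Boolean =
          parsedTimestamp match {
            case Some(ts) =>
              // normal case: delete once the query timeout has passed, or when forced
              maxQueryTimeoutExceeded(ts) || forceDelete
            case None if fileName.endsWith(".write") =>
              // leftover tableUpdateStatus.write file: fall back to the timestamp in its name
              maxQueryTimeoutExceeded(timestampFromName(fileName))
            case None =>
              false  // unknown invalid file: keep it instead of failing clean files
          }
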
---
 .../carbondata/core/mutate/CarbonUpdateUtil.java   | 48 +++++++++++++++-------
 1 file changed, 34 insertions(+), 14 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java b/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
index a632f03..beaf1a0 100644
--- a/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/mutate/CarbonUpdateUtil.java
@@ -673,7 +673,8 @@ public class CarbonUpdateUtil {
   private static boolean compareTimestampsAndDelete(
       CarbonFile invalidFile,
       boolean forceDelete, boolean isUpdateStatusFile) {
-    long fileTimestamp = 0L;
+    boolean isDeleted = false;
+    Long fileTimestamp;
 
     if (isUpdateStatusFile) {
       fileTimestamp = CarbonUpdateUtil.getTimeStampAsLong(invalidFile.getName()
@@ -683,21 +684,40 @@ public class CarbonUpdateUtil {
               CarbonTablePath.DataFileUtil.getTimeStampFromFileName(invalidFile.getName()));
     }
 
-    // if the timestamp of the file is more than the current time by query execution timeout.
-    // then delete that file.
-    if (CarbonUpdateUtil.isMaxQueryTimeoutExceeded(fileTimestamp) || forceDelete) {
-      // delete the files.
-      try {
-        LOGGER.info("deleting the invalid file : " + invalidFile.getName());
-        CarbonUtil.deleteFoldersAndFiles(invalidFile);
-        return true;
-      } catch (IOException e) {
-        LOGGER.error("error in clean up of compacted files." + e.getMessage(), e);
-      } catch (InterruptedException e) {
-        LOGGER.error("error in clean up of compacted files." + e.getMessage(), e);
+    // This check is because, when there are some invalid files like tableStatusUpdate.write files
+    // present in store [[which can happen during delete or update if the disk is full or hdfs quota
+    // is finished]] then fileTimestamp will be null, in that case check for max query out and
+    // delete the .write file after timeout
+    if (fileTimestamp == null) {
+      String tableUpdateStatusFilename = invalidFile.getName();
+      if (tableUpdateStatusFilename.endsWith(".write")) {
+        long tableUpdateStatusFileTimeStamp = Long.parseLong(
+            CarbonTablePath.DataFileUtil.getTimeStampFromFileName(tableUpdateStatusFilename));
+        if (isMaxQueryTimeoutExceeded(tableUpdateStatusFileTimeStamp)) {
+          isDeleted = deleteInvalidFiles(invalidFile);
+        }
+      }
+    } else {
+      // if the timestamp of the file is more than the current time by query execution timeout.
+      // then delete that file.
+      if (CarbonUpdateUtil.isMaxQueryTimeoutExceeded(fileTimestamp) || forceDelete) {
+        isDeleted = deleteInvalidFiles(invalidFile);
       }
     }
-    return false;
+    return isDeleted;
+  }
+
+  private static boolean deleteInvalidFiles(CarbonFile invalidFile) {
+    boolean isDeleted;
+    try {
+      LOGGER.info("deleting the invalid file : " + invalidFile.getName());
+      CarbonUtil.deleteFoldersAndFiles(invalidFile);
+      isDeleted = true;
+    } catch (IOException | InterruptedException e) {
+      LOGGER.error("error in clean up of invalid files." + e.getMessage(), e);
+      isDeleted = false;
+    }
+    return isDeleted;
   }
 
   public static boolean isBlockInvalid(SegmentStatus blockStatus) {


[carbondata] 10/22: [HOTFIX] support compact segments with different sort_columns

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 9f23d2c1aeabbae0d9b53899e2f91d3ccd8888b9
Author: QiangCai <qi...@qq.com>
AuthorDate: Thu Apr 25 19:08:49 2019 +0800

    [HOTFIX] support compact segments with different sort_columns
    
    This closes #3190
---
 .../core/scan/executor/util/RestructureUtil.java   |  2 +-
 .../merger/CarbonCompactionExecutor.java           |  3 +-
 .../processing/merger/CarbonCompactionUtil.java    | 39 ++++++++++++++++++++--
 3 files changed, 38 insertions(+), 6 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
index 11b7372..0f93227 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
@@ -160,7 +160,7 @@ public class RestructureUtil {
    * @param tableColumn
    * @return
    */
-  private static boolean isColumnMatches(boolean isTransactionalTable,
+  public static boolean isColumnMatches(boolean isTransactionalTable,
       CarbonColumn queryColumn, CarbonColumn tableColumn) {
     // If it is non transactional table just check the column names, no need to validate
     // column id as multiple sdk's output placed in a single folder doesn't have same
diff --git a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
index 5961cd7..619b45a 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
@@ -136,8 +136,7 @@ public class CarbonCompactionExecutor {
       Set<String> taskBlockListMapping = taskBlockInfo.getTaskSet();
       // Check if block needs sorting or not
       boolean sortingRequired =
-          CarbonCompactionUtil.isRestructured(listMetadata, carbonTable.getTableLastUpdatedTime())
-              || !CarbonCompactionUtil.isSorted(listMetadata.get(0));
+          !CarbonCompactionUtil.isSortedByCurrentSortColumns(carbonTable, listMetadata.get(0));
       for (String task : taskBlockListMapping) {
         tableBlockInfos = taskBlockInfo.getTableBlockInfoList(task);
         // during update there may be a chance that the cardinality may change within the segment
diff --git a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionUtil.java b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionUtil.java
index efd2559..c4b6843 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionUtil.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionUtil.java
@@ -35,6 +35,7 @@ import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
+import org.apache.carbondata.core.scan.executor.util.RestructureUtil;
 import org.apache.carbondata.core.util.CarbonUtil;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
 
@@ -464,12 +465,44 @@ public class CarbonCompactionUtil {
    * Returns if the DataFileFooter containing carbondata file contains
    * sorted data or not.
    *
+   * @param table
    * @param footer
    * @return
-   * @throws IOException
    */
-  public static boolean isSorted(DataFileFooter footer) throws IOException {
-    return footer.isSorted();
+  public static boolean isSortedByCurrentSortColumns(CarbonTable table, DataFileFooter footer) {
+    if (footer.isSorted()) {
+      // When sort_columns has been modified, the segment is also considered as no_sort.
+      List<CarbonDimension> sortColumnsOfSegment = new ArrayList<>();
+      for (ColumnSchema column : footer.getColumnInTable()) {
+        if (column.isDimensionColumn() && column.isSortColumn()) {
+          sortColumnsOfSegment.add(new CarbonDimension(column, -1, -1, -1));
+        }
+      }
+      if (sortColumnsOfSegment.size() < table.getNumberOfSortColumns()) {
+        return false;
+      }
+      List<CarbonDimension> sortColumnsOfTable = new ArrayList<>();
+      for (CarbonDimension dimension : table.getDimensions()) {
+        if (dimension.isSortColumn()) {
+          sortColumnsOfTable.add(dimension);
+        }
+      }
+      int sortColumnNums = sortColumnsOfTable.size();
+      if (sortColumnsOfSegment.size() < sortColumnNums) {
+        return false;
+      }
+      // compare sort_columns
+      for (int i = 0; i < sortColumnNums; i++) {
+        if (!RestructureUtil
+            .isColumnMatches(table.isTransactionalTable(), sortColumnsOfTable.get(i),
+                sortColumnsOfSegment.get(i))) {
+          return false;
+        }
+      }
+      return true;
+    } else {
+      return false;
+    }
   }
 
 }


[carbondata] 21/22: [CARBONDATA-3377] Fix for Null pointer exception in Range Col compaction

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 4abed04bfefe7a24f18ab42fd96d63a617a26596
Author: manishnalla1994 <ma...@gmail.com>
AuthorDate: Fri May 10 15:43:10 2019 +0530

    [CARBONDATA-3377] Fix for Null pointer exception in Range Col compaction
    
    Problem: A string-type column with huge strings and null values fails with a NullPointerException when it is the range column and compaction is run.
    
    Solution: Added a null check in StringOrdering.
    
    This closes #3212
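
As a rough illustration of the ordering rule the patch below adds to StringOrdering (nulls sort before any non-null value), here is a self-contained Java sketch with made-up names; the actual fix is Scala code operating on UTF8String:

import java.util.Arrays;
import java.util.Comparator;

public class NullFirstOrderingSketch {
  // Null values are treated as smaller than any non-null value, mirroring the
  // null guard added to StringOrdering. Unlike the patch, the both-null case
  // returns 0, which keeps the Comparator contract; the JDK's
  // Comparator.nullsFirst(Comparator.naturalOrder()) expresses the same rule.
  static final Comparator<String> NULL_FIRST = (x, y) -> {
    if (x == null) {
      return (y == null) ? 0 : -1;
    } else if (y == null) {
      return 1;
    }
    return x.compareTo(y);
  };

  public static void main(String[] args) {
    String[] values = {"c2", null, "c10", null, "c1"};
    Arrays.sort(values, NULL_FIRST);
    System.out.println(Arrays.toString(values)); // [null, null, c1, c10, c2]
  }
}
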
---
 .../core/constants/CarbonCommonConstants.java      |  4 +++
 .../carbondata/core/util/CarbonProperties.java     |  6 ++++
 .../dataload/TestRangeColumnDataLoad.scala         | 42 +++++++++++++++++++++-
 .../spark/load/DataLoadProcessBuilderOnSpark.scala | 16 ++++++---
 .../carbondata/spark/rdd/CarbonMergerRDD.scala     |  7 ++--
 5 files changed, 67 insertions(+), 8 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index ba8e20a..43544cb 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -1193,6 +1193,10 @@ public final class CarbonCommonConstants {
 
   public static final String CARBON_RANGE_COLUMN_SCALE_FACTOR_DEFAULT = "3";
 
+  public static final String CARBON_ENABLE_RANGE_COMPACTION = "carbon.enable.range.compaction";
+
+  public static final String CARBON_ENABLE_RANGE_COMPACTION_DEFAULT = "false";
+
   //////////////////////////////////////////////////////////////////////////////////////////
   // Query parameter start here
   //////////////////////////////////////////////////////////////////////////////////////////
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
index 004a51e..e26f3d8 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java
@@ -1507,6 +1507,12 @@ public final class CarbonProperties {
     return Boolean.parseBoolean(pushFilters);
   }
 
+  public boolean isRangeCompactionAllowed() {
+    String isRangeCompact = getProperty(CarbonCommonConstants.CARBON_ENABLE_RANGE_COMPACTION,
+        CarbonCommonConstants.CARBON_ENABLE_RANGE_COMPACTION_DEFAULT);
+    return Boolean.parseBoolean(isRangeCompact);
+  }
+
   private void validateSortMemorySpillPercentage() {
     String spillPercentageStr = carbonProperties.getProperty(
         CARBON_LOAD_SORT_MEMORY_SPILL_PERCENTAGE,
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
index 5d6730f..165e4f8 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
@@ -610,6 +610,34 @@ class TestRangeColumnDataLoad extends QueryTest with BeforeAndAfterEach with Bef
     sql("DROP TABLE IF EXISTS carbon_range_column1")
   }
 
+  test("Test compaction for range_column - STRING Datatype null values") {
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    deleteFile(filePath2)
+    createFile(filePath2, 20, 14)
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age LONG)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='city',
+        | 'range_column'='city')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('BAD_RECORDS_ACTION'='FORCE','HEADER'='false')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('BAD_RECORDS_ACTION'='FORCE','HEADER'='false')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    deleteFile(filePath2)
+  }
+
   test("Test compaction for range_column - STRING Datatype min/max not stored") {
     deleteFile(filePath2)
     createFile(filePath2, 1000, 7)
@@ -930,12 +958,24 @@ class TestRangeColumnDataLoad extends QueryTest with BeforeAndAfterEach with Bef
             .println(
               100 + "," + "n" + i + "," + "c" + (i % 10000) + "," + (1990 + i))
         }
-      } else if (9 <= lastCol) {
+      } else if (9 <= lastCol && 13 >= lastCol) {
         for (i <- lastCol until (lastCol + line)) {
           write
             .println(
               i + "," + "n" + i + "," + "c" + (i % 10000) + "," + (1990 + i))
         }
+      } else if (14 == lastCol) {
+        // Null data generation for string col
+        for (i <- lastCol until (lastCol + line)) {
+          if (i % 3 != 0) {
+            write
+              .println(
+                i + "," + "n" + i + "," + "c" + (i % 10000) + "," + (1990 + i))
+          } else {
+            write
+              .println(i + ",")
+          }
+        }
       }
       write.close()
     } catch {
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
index a751887..81699b4 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
@@ -311,7 +311,7 @@ object DataLoadProcessBuilderOnSpark {
         // better to generate a CarbonData file for each partition
         val totalSize = model.getTotalSize.toDouble
         val table = model.getCarbonDataLoadSchema.getCarbonTable
-        numPartitions = getNumPatitionsBasedOnSize(totalSize, table, model)
+        numPartitions = getNumPatitionsBasedOnSize(totalSize, table, model, false)
       }
     }
     numPartitions
@@ -319,10 +319,13 @@ object DataLoadProcessBuilderOnSpark {
 
   def getNumPatitionsBasedOnSize(totalSize: Double,
       table: CarbonTable,
-      model: CarbonLoadModel): Int = {
+      model: CarbonLoadModel,
+      mergerFlag: Boolean): Int = {
     val blockSize = 1024L * 1024 * table.getBlockSizeInMB
     val blockletSize = 1024L * 1024 * table.getBlockletSizeInMB
-    val scaleFactor = if (model.getScaleFactor == 0) {
+    val scaleFactor = if (mergerFlag) {
+      1
+    } else if (model.getScaleFactor == 0) {
       // use system properties
       CarbonProperties.getInstance().getRangeColumnScaleFactor
     } else {
@@ -385,6 +388,11 @@ class ByteArrayOrdering() extends Ordering[Object] {
 
 class StringOrdering() extends Ordering[Object] {
   override def compare(x: Object, y: Object): Int = {
-    (x.asInstanceOf[UTF8String]).compare(y.asInstanceOf[UTF8String])
+    if (x == null) {
+      return -1
+    } else if (y == null) {
+      return 1
+    }
+    return (x.asInstanceOf[UTF8String]).compare(y.asInstanceOf[UTF8String])
   }
 }
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
index c143f93..4f4386b 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
@@ -53,7 +53,7 @@ import org.apache.carbondata.core.scan.expression
 import org.apache.carbondata.core.scan.expression.Expression
 import org.apache.carbondata.core.scan.result.iterator.RawResultIterator
 import org.apache.carbondata.core.statusmanager.{FileFormat, LoadMetadataDetails, SegmentStatusManager, SegmentUpdateStatusManager}
-import org.apache.carbondata.core.util.{CarbonUtil, DataTypeUtil}
+import org.apache.carbondata.core.util.{CarbonProperties, CarbonUtil, DataTypeUtil}
 import org.apache.carbondata.core.util.path.CarbonTablePath
 import org.apache.carbondata.hadoop.{CarbonInputSplit, CarbonMultiBlockSplit, CarbonProjection}
 import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
@@ -297,7 +297,8 @@ class CarbonMergerRDD[K, V](
     )
     val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
     var rangeColumn: CarbonColumn = null
-    if (!carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable.isHivePartitionTable) {
+    if (CarbonProperties.getInstance().isRangeCompactionAllowed &&
+        !carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable.isHivePartitionTable) {
       // If the table is not a partition table then only we go for range column compaction flow
       rangeColumn = carbonTable.getRangeColumn
     }
@@ -395,7 +396,7 @@ class CarbonMergerRDD[K, V](
       // To calculate the number of ranges to be made, min 2 ranges/tasks to be made in any case
       val numOfPartitions = Math
         .max(CarbonCommonConstants.NUM_CORES_DEFAULT_VAL.toInt, DataLoadProcessBuilderOnSpark
-          .getNumPatitionsBasedOnSize(totalSize, carbonTable, carbonLoadModel))
+          .getNumPatitionsBasedOnSize(totalSize, carbonTable, carbonLoadModel, true))
       val colName = rangeColumn.getColName
       LOGGER.info(s"Compacting on range column: $colName")
       allRanges = getRangesFromRDD(rangeColumn,


[carbondata] 22/22: [CARBONDATA-3391] Count star output is wrong when BLOCKLET CACHE is enabled

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit ea1e86cc71a49563c426264447a95085dce2d436
Author: BJangir <ba...@gmail.com>
AuthorDate: Thu May 16 14:53:21 2019 +0530

    [CARBONDATA-3391] Count star output is wrong when BLOCKLET CACHE is enabled
    
    Wrong count(*) value when the blocklet cache is enabled.
    Root cause: blockletToRowCountMap is keyed by segmentNo + carbondata file name, so when a carbondata file has multiple blocklets, blockletToRowCountMap overwrites the existing row count.
    
    Solution: add the new count to the existing row count, if any.
    
    This closes #3225
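
A minimal Java sketch of the accumulation rule described above: add to the existing row count for a (segmentNo, carbondata file) key instead of overwriting it. The key format follows the patch; the file name and counts are made up, and Map.merge is just a compact way to express the null-check-then-put in the actual fix:

import java.util.HashMap;
import java.util.Map;

public class RowCountMergeSketch {
  static void addRowCount(Map<String, Long> blockletToRowCountMap,
      String blockletMapKey, long rowCount) {
    // Sum with any existing count for the same key instead of replacing it.
    blockletToRowCountMap.merge(blockletMapKey, rowCount, Long::sum);
  }

  public static void main(String[] args) {
    // key: segmentNo + "," + carbondata file name; value: total row count
    Map<String, Long> blockletToRowCountMap = new HashMap<>();
    // Two blocklets of the same carbondata file in segment 0: counts must add up.
    addRowCount(blockletToRowCountMap, "0,part-0-0_batchno0-0-0.carbondata", 32000L);
    addRowCount(blockletToRowCountMap, "0,part-0-0_batchno0-0-0.carbondata", 8000L);
    System.out.println(blockletToRowCountMap); // {0,part-0-0_batchno0-0-0.carbondata=40000}
  }
}
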
---
 .../carbondata/core/indexstore/blockletindex/BlockDataMap.java    | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
index 1fc5831..13e612d 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockDataMap.java
@@ -689,7 +689,13 @@ public class BlockDataMap extends CoarseGrainDataMap
           CarbonCommonConstants.DEFAULT_CHARSET_CLASS) + CarbonTablePath.getCarbonDataExtension();
       int rowCount = dataMapRow.getInt(ROW_COUNT_INDEX);
       // prepend segment number with the blocklet file path
-      blockletToRowCountMap.put((segment.getSegmentNo() + "," + fileName), (long) rowCount);
+      String blockletMapKey = segment.getSegmentNo() + "," + fileName;
+      Long existingCount = blockletToRowCountMap.get(blockletMapKey);
+      if (null != existingCount) {
+        blockletToRowCountMap.put(blockletMapKey, (long) rowCount + existingCount);
+      } else {
+        blockletToRowCountMap.put(blockletMapKey, (long) rowCount);
+      }
     }
     return blockletToRowCountMap;
   }


[carbondata] 06/22: [CARBONDATA-3344] Fix Drop column not present in table

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 4f7f17d53e768554ec519a70394546d01e585c8b
Author: Indhumathi27 <in...@gmail.com>
AuthorDate: Sat Apr 6 16:53:56 2019 +0530

    [CARBONDATA-3344] Fix Drop column not present in table
    
    Why is this PR needed?
    When trying to drop a column that is not present in the main table, a NullPointerException is thrown instead of an exception stating that the column does not exist in the table.
    
    This closes #3174
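
The fix below moves the complex-type check inside the loop that matches each requested column against the table schema, so a missing column is reported explicitly instead of calling methods on a null column and hitting an NPE. A minimal Java sketch of that validation order, using a hypothetical schema map and messages:

import java.util.List;
import java.util.Map;

public class DropColumnValidationSketch {
  // Hypothetical schema: column name -> whether the column is a complex type.
  static void validateDrop(Map<String, Boolean> schema, List<String> columnsToDrop,
      String dbAndTable) {
    for (String column : columnsToDrop) {
      Boolean isComplex = schema.get(column.toLowerCase());
      if (isComplex == null) {
        // Column not found: say so, rather than dereferencing a null column object.
        throw new IllegalArgumentException(
            "Column " + column + " does not exist in the table " + dbAndTable);
      }
      if (isComplex) {
        throw new IllegalArgumentException("Complex column cannot be dropped");
      }
    }
  }

  public static void main(String[] args) {
    Map<String, Boolean> schema = Map.of("col1", false);
    validateDrop(schema, List.of("name"), "default.test1"); // throws: column does not exist
  }
}
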
---
 .../cluster/sdv/generated/AlterTableTestCase.scala         | 12 ++++++++++++
 .../command/schema/CarbonAlterTableDropColumnCommand.scala | 14 +++++---------
 2 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/AlterTableTestCase.scala b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/AlterTableTestCase.scala
index d15f70b..297ff04 100644
--- a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/AlterTableTestCase.scala
+++ b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/AlterTableTestCase.scala
@@ -29,6 +29,8 @@ import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.util.CarbonProperties
 import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
 
+import org.apache.carbondata.spark.exception.ProcessMetaDataException
+
 /**
  * Test Class for AlterTableTestCase to verify all scenerios
  */
@@ -1024,6 +1026,16 @@ class AlterTableTestCase extends QueryTest with BeforeAndAfterAll {
     }
   }
 
+  test("Test drop columns not present in the table") {
+    sql("drop table if exists test1")
+    sql("create table test1(col1 int) stored by 'carbondata'")
+    val exception = intercept[ProcessMetaDataException] {
+      sql("alter table test1 drop columns(name)")
+    }
+    assert(exception.getMessage.contains("Column name does not exists in the table default.test1"))
+    sql("drop table if exists test1")
+  }
+
   val prop = CarbonProperties.getInstance()
   val p1 = prop.getProperty("carbon.horizontal.compaction.enable", CarbonCommonConstants.CARBON_HORIZONTAL_COMPACTION_ENABLE_DEFAULT)
   val p2 = prop.getProperty("carbon.horizontal.update.compaction.threshold", CarbonCommonConstants.DEFAULT_UPDATE_DELTAFILE_COUNT_THRESHOLD_IUD_COMPACTION)
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableDropColumnCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableDropColumnCommand.scala
index 7d5cb41..31cfdaf 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableDropColumnCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/schema/CarbonAlterTableDropColumnCommand.scala
@@ -76,15 +76,6 @@ private[sql] case class CarbonAlterTableDropColumnCommand(
         }
       }
 
-      // Check if column to be dropped is of complex dataType
-      alterTableDropColumnModel.columns.foreach { column =>
-        if (carbonTable.getColumnByName(alterTableDropColumnModel.tableName, column).getDataType
-          .isComplexType) {
-          val errMsg = "Complex column cannot be dropped"
-          throw new MalformedCarbonCommandException(errMsg)
-        }
-      }
-
       val tableColumns = carbonTable.getCreateOrderColumn(tableName).asScala
       var dictionaryColumns = Seq[org.apache.carbondata.core.metadata.schema.table.column
       .ColumnSchema]()
@@ -99,6 +90,11 @@ private[sql] case class CarbonAlterTableDropColumnCommand(
                 dictionaryColumns ++= Seq(tableColumn.getColumnSchema)
               }
             }
+            // Check if column to be dropped is of complex dataType
+            if (tableColumn.getDataType.isComplexType) {
+              val errMsg = "Complex column cannot be dropped"
+              throw new MalformedCarbonCommandException(errMsg)
+            }
             columnExist = true
           }
         }


[carbondata] 13/22: [CARBONDATA-3343] Compaction for Range Sort

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 7449c346499cc3d454317e5b223c22bb034358a6
Author: manishnalla1994 <ma...@gmail.com>
AuthorDate: Mon Apr 22 18:52:45 2019 +0530

    [CARBONDATA-3343] Compaction for Range Sort
    
    Problem: Compaction for range sort needed to be supported properly; earlier it grouped the ranges/partitions based on taskId, which was incorrect.
    
    Solution: Combine all the data, create new ranges using Spark's RangePartitioner, give each
    range to one task, and apply the filter query to get the compacted segment.
    
    This closes #3182
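
The new flow described above ends up assigning rows to a range by comparing the range-column value against a sorted list of boundary values (the commit derives them with Spark's RangePartitioner), with one range handled per compaction task. A minimal, self-contained Java sketch of just that bucketing step, with made-up boundaries:

import java.util.Arrays;

public class RangeBucketSketch {
  // Returns the index of the range (and hence the task) a value belongs to,
  // given sorted upper boundaries; values beyond the last boundary fall into
  // the last range.
  static int rangeIndex(int[] sortedBoundaries, int value) {
    int pos = Arrays.binarySearch(sortedBoundaries, value);
    int idx = (pos >= 0) ? pos : -(pos + 1);
    return Math.min(idx, sortedBoundaries.length - 1);
  }

  public static void main(String[] args) {
    // Hypothetical boundaries produced by sampling the range column.
    int[] boundaries = {100, 200, 300};
    System.out.println(rangeIndex(boundaries, 42));   // 0
    System.out.println(rangeIndex(boundaries, 150));  // 1
    System.out.println(rangeIndex(boundaries, 999));  // 2 (last range)
  }
}
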
---
 .../core/constants/CarbonCommonConstants.java      |   1 +
 .../core/metadata/schema/table/CarbonTable.java    |  24 +-
 .../core/scan/expression/Expression.java           |  13 +
 .../scan/filter/FilterExpressionProcessor.java     |   5 +-
 .../carbondata/core/scan/filter/FilterUtil.java    |  52 +-
 .../resolver/ConditionalFilterResolverImpl.java    |   2 +-
 .../resolver/RowLevelRangeFilterResolverImpl.java  |  40 +-
 .../core/scan/model/QueryModelBuilder.java         |  18 +-
 .../core/scan/result/BlockletScannedResult.java    |  62 +-
 .../scan/result/impl/FilterQueryScannedResult.java |  20 +-
 .../result/impl/NonFilterQueryScannedResult.java   |  59 +-
 .../dataload/TestRangeColumnDataLoad.scala         | 669 ++++++++++++++++++++-
 .../spark/load/DataLoadProcessBuilderOnSpark.scala |  43 +-
 .../carbondata/spark/rdd/CarbonMergerRDD.scala     | 202 ++++++-
 .../carbondata/spark/rdd/CarbonScanRDD.scala       |   7 +-
 .../org/apache/spark/CarbonInputMetrics.scala      |   0
 .../apache/spark/DataSkewRangePartitioner.scala    |  26 +-
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala    |  12 +-
 .../spark/sql/CarbonDatasourceHadoopRelation.scala |   1 -
 .../merger/CarbonCompactionExecutor.java           |  20 +-
 .../processing/merger/CarbonCompactionUtil.java    | 140 +++++
 .../merger/RowResultMergerProcessor.java           |   6 +-
 22 files changed, 1274 insertions(+), 148 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index 608b5fb..ba8e20a 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -1759,6 +1759,7 @@ public final class CarbonCommonConstants {
   public static final String ARRAY = "array";
   public static final String STRUCT = "struct";
   public static final String MAP = "map";
+  public static final String DECIMAL = "decimal";
   public static final String FROM = "from";
 
   /**
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
index 54ea772..c66d1fc 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
@@ -1081,22 +1081,26 @@ public class CarbonTable implements Serializable {
     return dataSize + indexSize;
   }
 
-  public void processFilterExpression(Expression filterExpression,
-      boolean[] isFilterDimensions, boolean[] isFilterMeasures) {
-    QueryModel.FilterProcessVO processVO =
-        new QueryModel.FilterProcessVO(getDimensionByTableName(getTableName()),
-            getMeasureByTableName(getTableName()), getImplicitDimensionByTableName(getTableName()));
-    QueryModel.processFilterExpression(processVO, filterExpression, isFilterDimensions,
-        isFilterMeasures, this);
-
+  public void processFilterExpression(Expression filterExpression, boolean[] isFilterDimensions,
+      boolean[] isFilterMeasures) {
+    processFilterExpressionWithoutRange(filterExpression, isFilterDimensions, isFilterMeasures);
     if (null != filterExpression) {
+      // Optimize Filter Expression and fit RANGE filters if conditions apply.
-      FilterOptimizer rangeFilterOptimizer =
-          new RangeFilterOptmizer(filterExpression);
+      FilterOptimizer rangeFilterOptimizer = new RangeFilterOptmizer(filterExpression);
       rangeFilterOptimizer.optimizeFilter();
     }
   }
 
+  public void processFilterExpressionWithoutRange(Expression filterExpression,
+      boolean[] isFilterDimensions, boolean[] isFilterMeasures) {
+    QueryModel.FilterProcessVO processVO =
+        new QueryModel.FilterProcessVO(getDimensionByTableName(getTableName()),
+            getMeasureByTableName(getTableName()), getImplicitDimensionByTableName(getTableName()));
+    QueryModel
+        .processFilterExpression(processVO, filterExpression, isFilterDimensions, isFilterMeasures,
+            this);
+  }
+
   /**
    * Resolve the filter expression.
    */
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/expression/Expression.java b/core/src/main/java/org/apache/carbondata/core/scan/expression/Expression.java
index 13acc63..2513b0d 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/expression/Expression.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/expression/Expression.java
@@ -33,6 +33,11 @@ public abstract class Expression implements Serializable {
   protected List<Expression> children =
       new ArrayList<Expression>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
 
+  // When a filter expression already has the dictionary surrogate values in
+  // it, we set isAlreadyResolved to true so that we do not resolve the
+  // filter expression in further steps.
+  protected boolean isAlreadyResolved;
+
   public abstract ExpressionResult evaluate(RowIntf value)
       throws FilterUnsupportedException, FilterIllegalMemberException;
 
@@ -52,4 +57,12 @@ public abstract class Expression implements Serializable {
   public abstract String getString();
 
   public abstract String getStatement();
+
+  public boolean isAlreadyResolved() {
+    return isAlreadyResolved;
+  }
+
+  public void setAlreadyResolved(boolean alreadyResolved) {
+    isAlreadyResolved = alreadyResolved;
+  }
 }
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
index 7269304..fd75496 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterExpressionProcessor.java
@@ -377,7 +377,10 @@ public class FilterExpressionProcessor implements FilterProcessor {
           // getting new dim index.
           if (!currentCondExpression.getColumnList().get(0).getCarbonColumn()
               .hasEncoding(Encoding.DICTIONARY) || currentCondExpression.getColumnList().get(0)
-              .getCarbonColumn().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+              .getCarbonColumn().hasEncoding(Encoding.DIRECT_DICTIONARY) || currentCondExpression
+              .isAlreadyResolved()) {
+            // In case of Range Column Dictionary Include we do not need to resolve the range
+            // expression as it is already resolved and has the surrogates in the filter value
             if (FilterUtil.checkIfExpressionContainsColumn(currentCondExpression.getLeft())
                 && FilterUtil.checkIfExpressionContainsColumn(currentCondExpression.getRight()) || (
                 FilterUtil.checkIfRightExpressionRequireEvaluation(currentCondExpression.getRight())
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
index 9d8fe8d..cef3af1 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/FilterUtil.java
@@ -1028,6 +1028,37 @@ public final class FilterUtil {
     return filterValuesList.toArray(new byte[filterValuesList.size()][]);
   }
 
+  // This function is used for calculating filter values in case when Range Column
+  // is given as a Dictionary Include Column
+  private static byte[][] getFilterValueInBytesForDictRange(ColumnFilterInfo columnFilterInfo,
+      KeyGenerator blockLevelKeyGenerator, int[] dimColumnsCardinality, int[] keys,
+      List<byte[]> filterValuesList, int keyOrdinalOfDimensionFromCurrentBlock) {
+    if (null != columnFilterInfo) {
+      int[] rangesForMaskedByte =
+          getRangesForMaskedByte(keyOrdinalOfDimensionFromCurrentBlock, blockLevelKeyGenerator);
+      List<Integer> listOfsurrogates = columnFilterInfo.getFilterList();
+      if (listOfsurrogates == null || listOfsurrogates.size() > 1) {
+        throw new RuntimeException(
+            "Filter values cannot be null in case of range in dictionary include");
+      }
+      // Here we only get the first column as there can be only one range column.
+      try {
+        if (listOfsurrogates.get(0)
+            <= dimColumnsCardinality[keyOrdinalOfDimensionFromCurrentBlock]) {
+          keys[keyOrdinalOfDimensionFromCurrentBlock] = listOfsurrogates.get(0);
+        } else {
+          keys[keyOrdinalOfDimensionFromCurrentBlock] =
+              dimColumnsCardinality[keyOrdinalOfDimensionFromCurrentBlock];
+        }
+        filterValuesList
+            .add(getMaskedKey(rangesForMaskedByte, blockLevelKeyGenerator.generateKey(keys)));
+      } catch (KeyGenException e) {
+        LOGGER.error(e.getMessage(), e);
+      }
+    }
+    return filterValuesList.toArray(new byte[filterValuesList.size()][]);
+  }
+
   /**
    * This method will be used to get the Filter key array list for blocks which do not contain
    * filter column and the column Encoding is Direct Dictionary
@@ -1057,10 +1088,12 @@ public final class FilterUtil {
    * @param columnFilterInfo
    * @param carbonDimension
    * @param segmentProperties
+   * @param isDictRange
    * @return
    */
   public static byte[][] getKeyArray(ColumnFilterInfo columnFilterInfo,
-      CarbonDimension carbonDimension, SegmentProperties segmentProperties,  boolean isExclude) {
+      CarbonDimension carbonDimension, SegmentProperties segmentProperties, boolean isExclude,
+      boolean isDictRange) {
     if (!carbonDimension.hasEncoding(Encoding.DICTIONARY)) {
       return columnFilterInfo.getNoDictionaryFilterValuesList()
           .toArray((new byte[columnFilterInfo.getNoDictionaryFilterValuesList().size()][]));
@@ -1071,8 +1104,14 @@ public final class FilterUtil {
     List<byte[]> filterValuesList = new ArrayList<byte[]>(20);
     Arrays.fill(keys, 0);
     int keyOrdinalOfDimensionFromCurrentBlock = carbonDimension.getKeyOrdinal();
-    return getFilterValuesInBytes(columnFilterInfo, isExclude, blockLevelKeyGenerator,
-        dimColumnsCardinality, keys, filterValuesList, keyOrdinalOfDimensionFromCurrentBlock);
+    if (!isDictRange) {
+      return getFilterValuesInBytes(columnFilterInfo, isExclude, blockLevelKeyGenerator,
+          dimColumnsCardinality, keys, filterValuesList, keyOrdinalOfDimensionFromCurrentBlock);
+    } else {
+      // For Dictionary Include Range Column
+      return getFilterValueInBytesForDictRange(columnFilterInfo, blockLevelKeyGenerator,
+          dimColumnsCardinality, keys, filterValuesList, keyOrdinalOfDimensionFromCurrentBlock);
+    }
   }
 
   /**
@@ -1500,10 +1539,11 @@ public final class FilterUtil {
       if (filterValues == null) {
         dimColumnExecuterInfo.setFilterKeys(new byte[0][]);
       } else {
-        byte[][] keysBasedOnFilter = getKeyArray(filterValues, dimension, segmentProperties, false);
+        byte[][] keysBasedOnFilter =
+            getKeyArray(filterValues, dimension, segmentProperties, false, false);
         if (!filterValues.isIncludeFilter() || filterValues.isOptimized()) {
-          dimColumnExecuterInfo
-              .setExcludeFilterKeys(getKeyArray(filterValues, dimension, segmentProperties, true));
+          dimColumnExecuterInfo.setExcludeFilterKeys(
+              getKeyArray(filterValues, dimension, segmentProperties, true, false));
         }
         dimColumnExecuterInfo.setFilterKeys(keysBasedOnFilter);
       }
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java
index 8ad0c48..2fd1996 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/ConditionalFilterResolverImpl.java
@@ -300,7 +300,7 @@ public class ConditionalFilterResolverImpl implements FilterResolverIntf {
     } else if (null != dimColResolvedFilterInfo.getFilterValues() && dimColResolvedFilterInfo
         .getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
       return FilterUtil.getKeyArray(this.dimColResolvedFilterInfo.getFilterValues(),
-          this.dimColResolvedFilterInfo.getDimension(), segmentProperties, false);
+          this.dimColResolvedFilterInfo.getDimension(), segmentProperties, false, false);
     }
     return null;
   }
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
index 4a713d5..963b445 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/filter/resolver/RowLevelRangeFilterResolverImpl.java
@@ -89,7 +89,18 @@ public class RowLevelRangeFilterResolverImpl extends ConditionalFilterResolverIm
           .getDimensionFromCurrentBlock(this.dimColEvaluatorInfoList.get(0).getDimension());
       if (null != dimensionFromCurrentBlock) {
         return FilterUtil.getKeyArray(this.dimColEvaluatorInfoList.get(0).getFilterValues(),
-            dimensionFromCurrentBlock, segmentProperties, false);
+            dimensionFromCurrentBlock, segmentProperties, false, false);
+      } else {
+        return FilterUtil.getKeyArray(this.dimColEvaluatorInfoList.get(0).getFilterValues(), false);
+      }
+    } else if (dimColEvaluatorInfoList.size() > 0 && null != dimColEvaluatorInfoList.get(0)
+        .getFilterValues() && dimColEvaluatorInfoList.get(0).getDimension()
+        .hasEncoding(Encoding.DICTIONARY)) {
+      CarbonDimension dimensionFromCurrentBlock = segmentProperties
+          .getDimensionFromCurrentBlock(this.dimColEvaluatorInfoList.get(0).getDimension());
+      if (null != dimensionFromCurrentBlock) {
+        return FilterUtil.getKeyArray(this.dimColEvaluatorInfoList.get(0).getFilterValues(),
+            dimensionFromCurrentBlock, segmentProperties, false, true);
       } else {
         return FilterUtil.getKeyArray(this.dimColEvaluatorInfoList.get(0).getFilterValues(), false);
       }
@@ -249,6 +260,13 @@ public class RowLevelRangeFilterResolverImpl extends ConditionalFilterResolverIm
             } else {
               filterInfo.setFilterList(getDirectSurrogateValues(columnExpression));
             }
+          } else if (columnExpression.getDimension().hasEncoding(Encoding.DICTIONARY)
+              && !columnExpression.getDimension().hasEncoding(Encoding.DIRECT_DICTIONARY)) {
+            if (!isIncludeFilter) {
+              filterInfo.setExcludeFilterList(getSurrogateValues());
+            } else {
+              filterInfo.setFilterList(getSurrogateValues());
+            }
           } else {
             filterInfo.setFilterListForNoDictionaryCols(getNoDictionaryRangeValues());
           }
@@ -303,6 +321,26 @@ public class RowLevelRangeFilterResolverImpl extends ConditionalFilterResolverIm
     return filterValuesList;
   }
 
+  private List<Integer> getSurrogateValues() throws FilterUnsupportedException {
+    List<ExpressionResult> listOfExpressionResults = new ArrayList<ExpressionResult>(20);
+
+    if (this.getFilterExpression() instanceof BinaryConditionalExpression) {
+      listOfExpressionResults =
+          ((BinaryConditionalExpression) this.getFilterExpression()).getLiterals();
+    }
+    List<Integer> filterValuesList = new ArrayList<Integer>(20);
+    try {
+      // If any filter member provided by user is invalid throw error else
+      // system can display inconsistent result.
+      for (ExpressionResult result : listOfExpressionResults) {
+        filterValuesList.add(result.getInt());
+      }
+    } catch (FilterIllegalMemberException e) {
+      throw new FilterUnsupportedException(e);
+    }
+    return filterValuesList;
+  }
+
   /**
    * Method will return the DimColumnResolvedFilterInfo instance which consists
    * the mapping of the respective dimension and its surrogates involved in
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java b/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
index d736805..e91d14d 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
@@ -43,6 +43,7 @@ public class QueryModelBuilder {
   private DataTypeConverter dataTypeConverter;
   private boolean forcedDetailRawQuery;
   private boolean readPageByPage;
+  private boolean convertToRangeFilter = true;
   /**
    * log information
    */
@@ -301,6 +302,15 @@ public class QueryModelBuilder {
     return this;
   }
 
+  public QueryModelBuilder convertToRangeFilter(boolean convertToRangeFilter) {
+    this.convertToRangeFilter = convertToRangeFilter;
+    return this;
+  }
+
+  public boolean isConvertToRangeFilter() {
+    return this.convertToRangeFilter;
+  }
+
   public void enableReadPageByPage() {
     this.readPageByPage = true;
   }
@@ -316,7 +326,13 @@ public class QueryModelBuilder {
       // set the filter to the query model in order to filter blocklet before scan
       boolean[] isFilterDimensions = new boolean[table.getDimensionOrdinalMax()];
       boolean[] isFilterMeasures = new boolean[table.getAllMeasures().size()];
-      table.processFilterExpression(filterExpression, isFilterDimensions, isFilterMeasures);
+      // In case of a Dictionary Include Range Column we do not optimize the range expression
+      if (isConvertToRangeFilter()) {
+        table.processFilterExpression(filterExpression, isFilterDimensions, isFilterMeasures);
+      } else {
+        table.processFilterExpressionWithoutRange(filterExpression, isFilterDimensions,
+            isFilterMeasures);
+      }
       queryModel.setIsFilterDimensions(isFilterDimensions);
       queryModel.setIsFilterMeasures(isFilterMeasures);
       FilterResolverIntf filterIntf =
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java b/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java
index ad4d2b3..ee8a254 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/BlockletScannedResult.java
@@ -518,19 +518,11 @@ public abstract class BlockletScannedResult {
    * @param batchSize
    * @return
    */
-  protected void fillValidRowIdsBatchFilling(int rowId, int batchSize) {
-    // row id will be different for every batch so clear it before filling
-    clearValidRowIdList();
-    int startPosition = rowId;
-    for (int i = 0; i < batchSize; i++) {
-      if (!containsDeletedRow(startPosition)) {
-        validRowIds.add(startPosition);
-      }
-      startPosition++;
-    }
-  }
 
-  private void clearValidRowIdList() {
+
+  public abstract void fillValidRowIdsBatchFilling(int rowId, int batchSize);
+
+  protected void clearValidRowIdList() {
     if (null != validRowIds && !validRowIds.isEmpty()) {
       validRowIds.clear();
     }
@@ -773,7 +765,30 @@ public abstract class BlockletScannedResult {
    * @param batchSize
    * @return
    */
-  public abstract List<byte[]> getDictionaryKeyArrayBatch(int batchSize);
+  public List<byte[]> getDictionaryKeyArrayBatch(int batchSize) {
+    // rowId from where computing need to start
+    int startRowId = currentRow + 1;
+    fillValidRowIdsBatchFilling(startRowId, batchSize);
+    List<byte[]> dictionaryKeyArrayList = new ArrayList<>(validRowIds.size());
+    int[] columnDataOffsets = null;
+    byte[] completeKey = null;
+    // everyTime it is initialized new as in case of prefetch it can modify the data
+    for (int i = 0; i < validRowIds.size(); i++) {
+      completeKey = new byte[fixedLengthKeySize];
+      dictionaryKeyArrayList.add(completeKey);
+    }
+    // initialize the offset array only if data is present
+    if (this.dictionaryColumnChunkIndexes.length > 0) {
+      columnDataOffsets = new int[validRowIds.size()];
+    }
+    for (int i = 0; i < this.dictionaryColumnChunkIndexes.length; i++) {
+      for (int j = 0; j < validRowIds.size(); j++) {
+        columnDataOffsets[j] += dimensionColumnPages[dictionaryColumnChunkIndexes[i]][pageCounter]
+            .fillRawData(validRowIds.get(j), columnDataOffsets[j], dictionaryKeyArrayList.get(j));
+      }
+    }
+    return dictionaryKeyArrayList;
+  }
 
   /**
    * Below method will be used to get the complex type key array
@@ -806,7 +821,26 @@ public abstract class BlockletScannedResult {
    *
    * @return no dictionary keys for all no dictionary dimension
    */
-  public abstract List<byte[][]> getNoDictionaryKeyArrayBatch(int batchSize);
+  public List<byte[][]> getNoDictionaryKeyArrayBatch(int batchSize) {
+    List<byte[][]> noDictionaryKeyArrayList = new ArrayList<>(validRowIds.size());
+    byte[][] noDictionaryColumnsKeys = null;
+    // everyTime it is initialized new as in case of prefetch it can modify the data
+    for (int i = 0; i < validRowIds.size(); i++) {
+      noDictionaryColumnsKeys = new byte[noDictionaryColumnChunkIndexes.length][];
+      noDictionaryKeyArrayList.add(noDictionaryColumnsKeys);
+    }
+    int columnPosition = 0;
+    for (int i = 0; i < this.noDictionaryColumnChunkIndexes.length; i++) {
+      for (int j = 0; j < validRowIds.size(); j++) {
+        byte[][] noDictionaryArray = noDictionaryKeyArrayList.get(j);
+        noDictionaryArray[columnPosition] =
+            dimensionColumnPages[noDictionaryColumnChunkIndexes[i]][pageCounter]
+                .getChunkData(validRowIds.get(j));
+      }
+      columnPosition++;
+    }
+    return noDictionaryKeyArrayList;
+  }
 
   /**
    * Mark the filtered rows in columnar batch. These rows will not be added to vector batches later.
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/impl/FilterQueryScannedResult.java b/core/src/main/java/org/apache/carbondata/core/scan/result/impl/FilterQueryScannedResult.java
index 1b83110..0a7338f 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/impl/FilterQueryScannedResult.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/impl/FilterQueryScannedResult.java
@@ -53,8 +53,18 @@ public class FilterQueryScannedResult extends BlockletScannedResult {
     return getDictionaryKeyIntegerArray(pageFilteredRowId[pageCounter][currentRow]);
   }
 
-  @Override public List<byte[]> getDictionaryKeyArrayBatch(int batchSize) {
-    throw new UnsupportedOperationException("Operation not supported");
+  @Override public void fillValidRowIdsBatchFilling(int rowId, int batchSize) {
+    // row id will be different for every batch so clear it before filling
+    clearValidRowIdList();
+    int startPosition = rowId;
+    int minSize = Math.min(batchSize, pageFilteredRowId[pageCounter].length);
+    for (int j = startPosition; j < startPosition + minSize; ) {
+      int pos = pageFilteredRowId[pageCounter][j];
+      if (!containsDeletedRow(pos)) {
+        validRowIds.add(pos);
+      }
+      j++;
+    }
   }
 
   /**
@@ -67,7 +77,7 @@ public class FilterQueryScannedResult extends BlockletScannedResult {
   }
 
   @Override public List<byte[][]> getComplexTypeKeyArrayBatch(int batchSize) {
-    throw new UnsupportedOperationException("Operation not supported");
+    return getComplexTypeKeyArrayBatch();
   }
 
   /**
@@ -80,10 +90,6 @@ public class FilterQueryScannedResult extends BlockletScannedResult {
     return getNoDictionaryKeyArray(pageFilteredRowId[pageCounter][currentRow]);
   }
 
-  @Override public List<byte[][]> getNoDictionaryKeyArrayBatch(int batchSize) {
-    throw new UnsupportedOperationException("Operation not supported");
-  }
-
   /**
    * will return the current valid row id
    *
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/impl/NonFilterQueryScannedResult.java b/core/src/main/java/org/apache/carbondata/core/scan/result/impl/NonFilterQueryScannedResult.java
index b5f9d66..36a1017 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/impl/NonFilterQueryScannedResult.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/impl/NonFilterQueryScannedResult.java
@@ -16,7 +16,6 @@
  */
 package org.apache.carbondata.core.scan.result.impl;
 
-import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.carbondata.core.scan.executor.infos.BlockExecutionInfo;
@@ -53,31 +52,6 @@ public class NonFilterQueryScannedResult extends BlockletScannedResult {
     return getDictionaryKeyIntegerArray(currentRow);
   }
 
-  @Override public List<byte[]> getDictionaryKeyArrayBatch(int batchSize) {
-    // rowId from where computing need to start
-    int startRowId = currentRow + 1;
-    fillValidRowIdsBatchFilling(startRowId, batchSize);
-    List<byte[]> dictionaryKeyArrayList = new ArrayList<>(validRowIds.size());
-    int[] columnDataOffsets = null;
-    byte[] completeKey = null;
-    // everyTime it is initialized new as in case of prefetch it can modify the data
-    for (int i = 0; i < validRowIds.size(); i++) {
-      completeKey = new byte[fixedLengthKeySize];
-      dictionaryKeyArrayList.add(completeKey);
-    }
-    // initialize offset array onli if data is present
-    if (this.dictionaryColumnChunkIndexes.length > 0) {
-      columnDataOffsets = new int[validRowIds.size()];
-    }
-    for (int i = 0; i < this.dictionaryColumnChunkIndexes.length; i++) {
-      for (int j = 0; j < validRowIds.size(); j++) {
-        columnDataOffsets[j] += dimensionColumnPages[dictionaryColumnChunkIndexes[i]][pageCounter]
-            .fillRawData(validRowIds.get(j), columnDataOffsets[j], dictionaryKeyArrayList.get(j));
-      }
-    }
-    return dictionaryKeyArrayList;
-  }
-
   /**
    * Below method will be used to get the complex type key array
    *
@@ -101,35 +75,18 @@ public class NonFilterQueryScannedResult extends BlockletScannedResult {
     return getNoDictionaryKeyArray(currentRow);
   }
 
-  /**
-   * Below method will be used to get the dimension key array
-   * for all the no dictionary dimension present in the query
-   * This method will fill the data column wise for the given batch size
-   *
-   * @return no dictionary keys for all no dictionary dimension
-   */
-  @Override public List<byte[][]> getNoDictionaryKeyArrayBatch(int batchSize) {
-    List<byte[][]> noDictionaryKeyArrayList = new ArrayList<>(validRowIds.size());
-    byte[][] noDictionaryColumnsKeys = null;
-    // everyTime it is initialized new as in case of prefetch it can modify the data
-    for (int i = 0; i < validRowIds.size(); i++) {
-      noDictionaryColumnsKeys = new byte[noDictionaryColumnChunkIndexes.length][];
-      noDictionaryKeyArrayList.add(noDictionaryColumnsKeys);
-    }
-    int columnPosition = 0;
-    for (int i = 0; i < this.noDictionaryColumnChunkIndexes.length; i++) {
-      for (int j = 0; j < validRowIds.size(); j++) {
-        byte[][] noDictionaryArray = noDictionaryKeyArrayList.get(j);
-        noDictionaryArray[columnPosition] =
-            dimensionColumnPages[noDictionaryColumnChunkIndexes[i]][pageCounter]
-                .getChunkData(validRowIds.get(j));
+  @Override public void fillValidRowIdsBatchFilling(int rowId, int batchSize) {
+    // row id will be different for every batch so clear it before filling
+    clearValidRowIdList();
+    int startPosition = rowId;
+    for (int i = 0; i < batchSize; i++) {
+      if (!containsDeletedRow(startPosition)) {
+        validRowIds.add(startPosition);
       }
-      columnPosition++;
+      startPosition++;
     }
-    return noDictionaryKeyArrayList;
   }
 
-
   /**
    * will return the current valid row id
    *
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
index 2caf46c..ff383f9 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestRangeColumnDataLoad.scala
@@ -17,6 +17,8 @@
 
 package org.apache.carbondata.spark.testsuite.dataload
 
+import java.io.{File, PrintWriter}
+
 import scala.collection.mutable.ArrayBuffer
 import scala.reflect.classTag
 
@@ -38,6 +40,9 @@ import org.apache.carbondata.spark.load.PrimtiveOrdering
 
 class TestRangeColumnDataLoad extends QueryTest with BeforeAndAfterEach with BeforeAndAfterAll {
   var filePath: String = s"$resourcesPath/globalsort"
+  var filePath2: String = s"$resourcesPath/range_compact_test"
+  var filePath3: String = s"$resourcesPath/range_compact_test1"
+  var filePath4: String = s"$resourcesPath/range_compact_test2"
 
   override def beforeAll(): Unit = {
     dropTable
@@ -137,9 +142,586 @@ class TestRangeColumnDataLoad extends QueryTest with BeforeAndAfterEach with Bef
     checkAnswer(sql("SELECT COUNT(*) FROM carbon_range_column4"), Seq(Row(20)))
   }
 
+  test("Test compaction for range_column - SHORT Datatype") {
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age SHORT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='age, city', 'range_column'='age')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+  }
+
+  test("Test compaction for range_column - INT Datatype") {
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='age, city', 'range_column'='age')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+  }
+
+  test("Test compaction for range_column - 2 levels") {
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='GLOBAL_SORT', 'SORT_COLUMNS'='age, city',
+        | 'range_column'='age')
+      """.stripMargin)
+
+    for (i <- 0 until 12) {
+      sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+          "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+    }
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MINOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+  }
+
+  test("Test compaction for range_column - CUSTOM Compaction") {
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='GLOBAL_SORT', 'SORT_COLUMNS'='age, city',
+        | 'range_column'='age')
+      """.stripMargin)
+
+    for (i <- 0 until 12) {
+      sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+          "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+    }
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'CUSTOM' WHERE SEGMENT.ID IN(3,4,5)")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+  }
+
+  test("Test compaction for range_column - INT Datatype with null values") {
+    deleteFile(filePath3)
+    createFile(filePath3, 2000, 3)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='name, city',
+        | 'range_column'='name')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath3' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath3' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath3' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath3' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+
+    deleteFile(filePath3)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+  }
+
+  test("Test compaction for range_column - BOOLEAN Datatype") {
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    val exception = intercept[MalformedCarbonCommandException](
+      sql(
+        """
+          | CREATE TABLE carbon_range_column1(id Boolean, name STRING, city STRING, age INT)
+          | STORED BY 'org.apache.carbondata.format'
+          | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='id, city',
+          | 'range_column'='id')
+        """.stripMargin)
+    )
+
+    assertResult("RANGE_COLUMN doesn't support boolean data type: id")(exception.getMessage)
+  }
+
+  test("Test compaction for range_column - DECIMAL Datatype") {
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    val exception = intercept[MalformedCarbonCommandException](
+      sql(
+        """
+          | CREATE TABLE carbon_range_column1(id decimal, name STRING, city STRING, age INT)
+          | STORED BY 'org.apache.carbondata.format'
+          | TBLPROPERTIES('range_column'='id')
+        """.stripMargin)
+    )
+
+    assertResult("RANGE_COLUMN doesn't support decimal data type: id")(exception.getMessage)
+  }
+
+  test("Test compaction for range_column - INT Datatype with no overlapping") {
+    deleteFile(filePath2)
+    createFile(filePath2, 1000, 4)
+    deleteFile(filePath3)
+    createFile(filePath3, 1000, 5)
+    deleteFile(filePath4)
+    createFile(filePath4, 1000, 6)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='id, city',
+        | 'range_column'='id')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='1')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath3' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='2')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath4' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+
+    deleteFile(filePath2)
+    deleteFile(filePath3)
+    deleteFile(filePath4)
+
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+  }
+
+  test("Test compaction for range_column - INT Datatype with overlapping") {
+    deleteFile(filePath2)
+    createFile(filePath2, 10, 9)
+    deleteFile(filePath3)
+    createFile(filePath3, 10, 10)
+    deleteFile(filePath4)
+    createFile(filePath4, 10, 11)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='id, city',
+        | 'range_column'='id')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath3' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath4' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+
+    deleteFile(filePath2)
+    deleteFile(filePath3)
+    deleteFile(filePath4)
+
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+  }
+
+  test("Test compaction for range_column - INT Datatype with Global Dict") {
+    deleteFile(filePath2)
+    createFile(filePath2, 10, 9)
+    deleteFile(filePath3)
+    createFile(filePath3, 10, 10)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='name, city',
+        | 'range_column'='name', 'DICTIONARY_INCLUDE'='name')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath3' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+
+    deleteFile(filePath2)
+    deleteFile(filePath3)
+
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+  }
+
+  test("Test compaction for range_column - STRING Datatype with Global Dict") {
+    deleteFile(filePath2)
+    createFile(filePath2, 1000, 9)
+    deleteFile(filePath3)
+    createFile(filePath3, 10, 10)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='name, city',
+        | 'range_column'='name', 'DICTIONARY_INCLUDE'='name')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath3' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+
+    deleteFile(filePath2)
+    deleteFile(filePath3)
+
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+  }
+
+  test("Compact range_column with data skew") {
+    sql("DROP TABLE IF EXISTS carbon_range_column4")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column4(c1 int, c2 string)
+        | STORED AS carbondata
+        | TBLPROPERTIES('sort_columns'='c1,c2', 'sort_scope'='local_sort', 'range_column'='c2')
+      """.stripMargin)
+
+    val dataSkewPath = s"$resourcesPath/range_column"
+
+    sql(
+      s"""LOAD DATA LOCAL INPATH '$dataSkewPath'
+         | INTO TABLE carbon_range_column4
+         | OPTIONS('FILEHEADER'='c1,c2', 'global_sort_partitions'='10')
+        """.stripMargin)
+
+    sql(
+      s"""LOAD DATA LOCAL INPATH '$dataSkewPath'
+         | INTO TABLE carbon_range_column4
+         | OPTIONS('FILEHEADER'='c1,c2', 'global_sort_partitions'='10')
+        """.stripMargin)
+
+    val res = sql("SELECT * FROM carbon_range_column4").collect()
+
+    sql("ALTER TABLE carbon_range_column4 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("SELECT * FROM carbon_range_column4"), res)
+
+    sql("DROP TABLE IF EXISTS carbon_range_column4")
+  }
+
+  test("Test compaction for range_column - INT Datatype without SORT Column") {
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('range_column'='age')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+  }
+
+  test("Test compaction for range_column - INT Datatype with single value in range column") {
+    deleteFile(filePath2)
+    createFile(filePath2, 10, 8)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age INT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('range_column'='id')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+    deleteFile(filePath2)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+  }
+
+  test("Test compaction for range_column - LONG Datatype") {
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age LONG)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='age, city', 'range_column'='age')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+  }
+
+  test("Test compaction for range_column - LONG Datatype without SORT Column") {
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age LONG)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('range_column'='age')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+  }
+
+  test("Test compaction for range_column - STRING Datatype") {
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age LONG)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='name, city',
+        | 'range_column'='name')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('SORT_SCOPE'='GLOBAL_SORT','GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('SORT_SCOPE'='NO_SORT','GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+  }
+
+  test("Test compaction for range_column - STRING Datatype min/max not stored") {
+    deleteFile(filePath2)
+    createFile(filePath2, 1000, 7)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_MINMAX_ALLOWED_BYTE_COUNT, "10")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age LONG)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='name, city',
+        | 'range_column'='name')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        "OPTIONS('HEADER'='false','GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select name from carbon_range_column1 order by name").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select name from carbon_range_column1 order by name"), res)
+
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_MINMAX_ALLOWED_BYTE_COUNT,
+        CarbonCommonConstants.CARBON_MINMAX_ALLOWED_BYTE_COUNT_DEFAULT)
+    deleteFile(filePath2)
+  }
+
+  test("Test compaction for range_column - DATE Datatype") {
+    deleteFile(filePath2)
+    createFile(filePath2, 12, 0)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age DATE)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='age, city' ,
+        | 'range_column'='age')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        s"OPTIONS('HEADER'='false', 'GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        s"OPTIONS('HEADER'='false', 'GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+
+    deleteFile(filePath2)
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
+        CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
+  }
+
+  test("Test compaction for range_column - TIMESTAMP Datatype skewed data") {
+    deleteFile(filePath2)
+    createFile(filePath2, 12, 1)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy-MM-dd HH:mm:SS")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, age TIMESTAMP)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('SORT_SCOPE'='LOCAL_SORT', 'SORT_COLUMNS'='city' ,
+        | 'range_column'='age')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        s"OPTIONS('HEADER'='false', 'GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        s"OPTIONS('HEADER'='false', 'GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+
+    deleteFile(filePath2)
+
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+        CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
+  }
+
+  test("Test compaction for range_column - Float Datatype") {
+    deleteFile(filePath2)
+    createFile(filePath2, 12, 2)
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+    sql(
+      """
+        | CREATE TABLE carbon_range_column1(id INT, name STRING, city STRING, floatval FLOAT)
+        | STORED BY 'org.apache.carbondata.format'
+        | TBLPROPERTIES('range_column'='floatval')
+      """.stripMargin)
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        s"OPTIONS('HEADER'='false', 'GLOBAL_SORT_PARTITIONS'='3')")
+
+    sql(s"LOAD DATA LOCAL INPATH '$filePath2' INTO TABLE carbon_range_column1 " +
+        s"OPTIONS('HEADER'='false', 'GLOBAL_SORT_PARTITIONS'='3')")
+
+    var res = sql("select * from carbon_range_column1").collect()
+
+    sql("ALTER TABLE carbon_range_column1 COMPACT 'MAJOR'")
+
+    checkAnswer(sql("select * from carbon_range_column1"), res)
+
+    sql("DROP TABLE IF EXISTS carbon_range_column1")
+
+    deleteFile(filePath2)
+
+  }
+
   test("DataSkewRangePartitioner.combineDataSkew") {
     val partitioner =
-      new DataSkewRangePartitioner(1, null)(new PrimtiveOrdering(DataTypes.STRING),
+      new DataSkewRangePartitioner(1, null,
+        false)(new PrimtiveOrdering(DataTypes.STRING),
         classTag[Object])
 
     testCombineDataSkew(
@@ -263,4 +845,89 @@ class TestRangeColumnDataLoad extends QueryTest with BeforeAndAfterEach with Bef
         .size()
     }
   }
+
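+  // Generates a CSV test file (id, name, city, value); `lastCol` selects the data pattern:
+  // 0 = date values, 1 = timestamp only on the first row, 2 = float values, 3 = null names,
+  // 4-6 = non-overlapping id ranges, 7 = long names (min/max not stored in footer),
+  // 8 = a single repeated id value, >= 9 = id range starting at lastCol.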
+  def createFile(fileName: String, line: Int = 10000, lastCol: Int = 0): Boolean = {
+    try {
+      val write = new PrintWriter(new File(fileName))
+      val start = 0
+      if (0 == lastCol) {
+        // Date data generation
+        for (i <- start until (start + line)) {
+          write.println(i + "," + "n" + i + "," + "c" + (i % 10000) + "," + (1990 + i) + "-10-10")
+        }
+      } else if (1 == lastCol) {
+        // Timestamp data generation
+        for (i <- start until (start + line)) {
+          if (i == start) {
+            write
+              .println(i + "," + "n" + i + "," + "c" + (i % 10000) + "," + (1990 + i) + "-10-10 " +
+                       "00:00:00")
+          } else {
+            write.println(i + "," + "n" + i + "," + "c" + (i % 10000) + ",")
+          }
+        }
+      } else if (2 == lastCol) {
+        // Float data generation
+        for (i <- start until (start + line)) {
+          write
+            .println(i + "," + "n" + i + "," + "c" + (i % 10000) + "," + (1990 + i) + (i % 3.14))
+        }
+      } else if (3 == lastCol) {
+        // Null data generation
+        for (i <- start until (start + line)) {
+          if (i % 3 != 0) {
+            write
+              .println(i + "," + "," + "c" + (i % 10000) + "," + (1990 + i))
+          } else {
+            write
+              .println(i + "," + "n" + i + "," + "c" + (i % 10000) + "," + (1990 + i))
+          }
+        }
+      } else if (4 <= lastCol && 6 >= lastCol) {
+        // No overlap data generation 1
+        for (i <- start until (start + line)) {
+          write
+            .println(
+              (line * lastCol + i) + "," + "n" + i + "," + "c" + (i % 10000) + "," + (1990 + i))
+        }
+      } else if (7 == lastCol) {
+        // Min/max not stored data generation
+        for (i <- start until (start + line)) {
+          write
+            .println(
+              (100 * lastCol + i) + "," + "nnnnnnnnnnnn" + i + "," + "c" + (i % 10000) + "," +
+              (1990 + i))
+        }
+      } else if (8 == lastCol) {
+        // Range values less than default parallelism (Single value)
+        for (i <- start until (start + line)) {
+          write
+            .println(
+              100 + "," + "n" + i + "," + "c" + (i % 10000) + "," + (1990 + i))
+        }
+      } else if (9 <= lastCol) {
+        for (i <- lastCol until (lastCol + line)) {
+          write
+            .println(
+              i + "," + "n" + i + "," + "c" + (i % 10000) + "," + (1990 + i))
+        }
+      }
+      write.close()
+    } catch {
+      case _: Exception => false
+    }
+    true
+  }
+
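+  // Best-effort cleanup of a generated test file; any exception is swallowed.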
+  def deleteFile(fileName: String): Boolean = {
+    try {
+      val file = new File(fileName)
+      if (file.exists()) {
+        file.delete()
+      }
+    } catch {
+      case _: Exception => false
+    }
+    true
+  }
 }
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
index 77d0d84..a751887 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/load/DataLoadProcessBuilderOnSpark.scala
@@ -27,11 +27,13 @@ import org.apache.spark.sql.{DataFrame, SparkSession}
 import org.apache.spark.sql.execution.command.ExecutionErrors
 import org.apache.spark.sql.util.SparkSQLUtil
 import org.apache.spark.storage.StorageLevel
+import org.apache.spark.unsafe.types.UTF8String
 
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.datastore.row.CarbonRow
 import org.apache.carbondata.core.metadata.datatype.{DataType, DataTypes}
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.metadata.schema.table.column.{CarbonColumn, CarbonDimension}
 import org.apache.carbondata.core.statusmanager.{LoadMetadataDetails, SegmentStatus}
 import org.apache.carbondata.core.util._
@@ -220,7 +222,10 @@ object DataLoadProcessBuilderOnSpark {
     val sampleRDD = getSampleRDD(sparkSession, model, hadoopConf, configuration, modelBroadcast)
     val rangeRDD = keyRDD
       .partitionBy(
-        new DataSkewRangePartitioner(numPartitions, sampleRDD)(objectOrdering, classTag[Object]))
+        new DataSkewRangePartitioner(
+          numPartitions,
+          sampleRDD,
+          false)(objectOrdering, classTag[Object]))
       .map(_._2)
 
     // 4. Sort and Write data
@@ -306,23 +311,29 @@ object DataLoadProcessBuilderOnSpark {
         // better to generate a CarbonData file for each partition
         val totalSize = model.getTotalSize.toDouble
         val table = model.getCarbonDataLoadSchema.getCarbonTable
-        val blockSize = 1024L * 1024 * table.getBlockSizeInMB
-        val blockletSize = 1024L * 1024 * table.getBlockletSizeInMB
-        val scaleFactor = if (model.getScaleFactor == 0) {
-          // use system properties
-          CarbonProperties.getInstance().getRangeColumnScaleFactor
-        } else {
-          model.getScaleFactor
-        }
-        // For Range_Column, it will try to generate one big file for each partition.
-        // And the size of the big file is about TABLE_BLOCKSIZE of this table.
-        val splitSize = Math.max(blockletSize, (blockSize - blockletSize)) * scaleFactor
-        numPartitions = Math.ceil(totalSize / splitSize).toInt
+        numPartitions = getNumPatitionsBasedOnSize(totalSize, table, model)
       }
     }
     numPartitions
   }
 
+  def getNumPatitionsBasedOnSize(totalSize: Double,
+      table: CarbonTable,
+      model: CarbonLoadModel): Int = {
+    val blockSize = 1024L * 1024 * table.getBlockSizeInMB
+    val blockletSize = 1024L * 1024 * table.getBlockletSizeInMB
+    val scaleFactor = if (model.getScaleFactor == 0) {
+      // use system properties
+      CarbonProperties.getInstance().getRangeColumnScaleFactor
+    } else {
+      model.getScaleFactor
+    }
+    // For RANGE_COLUMN, try to generate one big file for each partition;
+    // the size of that file is roughly the TABLE_BLOCKSIZE of this table.
+    val splitSize = Math.max(blockletSize, (blockSize - blockletSize)) * scaleFactor
+    Math.ceil(totalSize / splitSize).toInt
+  }
+
   private def indexOfColumn(column: CarbonColumn, fields: Array[DataField]): Int = {
     (0 until fields.length)
       .find(index => fields(index).getColumn.getColName.equals(column.getColName))
@@ -371,3 +382,9 @@ class ByteArrayOrdering() extends Ordering[Object] {
     UnsafeComparer.INSTANCE.compareTo(x.asInstanceOf[Array[Byte]], y.asInstanceOf[Array[Byte]])
   }
 }
+
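+// Ordering on UTF8String values, used when the range column is a non-primitive (string-like) dimension.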
+class StringOrdering() extends Ordering[Object] {
+  override def compare(x: Object, y: Object): Int = {
+    (x.asInstanceOf[UTF8String]).compare(y.asInstanceOf[UTF8String])
+  }
+}
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
index 0e44f6d..e361c14 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonMergerRDD.scala
@@ -19,20 +19,22 @@ package org.apache.carbondata.spark.rdd
 
 import java.io.IOException
 import java.util
-import java.util.{Collections, List}
+import java.util.{Collections, List, Map}
 import java.util.concurrent.atomic.AtomicInteger
 
 import scala.collection.mutable
 import scala.collection.JavaConverters._
+import scala.reflect.classTag
 
 import org.apache.hadoop.mapred.JobConf
-import org.apache.hadoop.mapreduce.Job
+import org.apache.hadoop.mapreduce.{InputSplit, Job}
 import org.apache.spark._
 import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.execution.command.{CarbonMergerMapping, NodeInfo}
 import org.apache.spark.sql.hive.DistributionUtil
-import org.apache.spark.sql.util.CarbonException
+import org.apache.spark.sql.util.{CarbonException, SparkTypeConverter}
 
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.converter.SparkDataTypeConverterImpl
@@ -43,12 +45,17 @@ import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.indexstore.PartitionSpec
 import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTableIdentifier}
 import org.apache.carbondata.core.metadata.blocklet.DataFileFooter
-import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema
+import org.apache.carbondata.core.metadata.datatype.{DataType, DataTypes}
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable
+import org.apache.carbondata.core.metadata.schema.table.column.{CarbonColumn, CarbonDimension, ColumnSchema}
 import org.apache.carbondata.core.mutate.UpdateVO
+import org.apache.carbondata.core.scan.expression
+import org.apache.carbondata.core.scan.expression.Expression
 import org.apache.carbondata.core.scan.result.iterator.RawResultIterator
-import org.apache.carbondata.core.statusmanager.{FileFormat, SegmentUpdateStatusManager}
+import org.apache.carbondata.core.statusmanager.{FileFormat, LoadMetadataDetails, SegmentStatusManager, SegmentUpdateStatusManager}
 import org.apache.carbondata.core.util.{CarbonUtil, DataTypeUtil}
-import org.apache.carbondata.hadoop.{CarbonInputSplit, CarbonMultiBlockSplit}
+import org.apache.carbondata.core.util.path.CarbonTablePath
+import org.apache.carbondata.hadoop.{CarbonInputSplit, CarbonMultiBlockSplit, CarbonProjection}
 import org.apache.carbondata.hadoop.api.{CarbonInputFormat, CarbonTableInputFormat}
 import org.apache.carbondata.hadoop.util.{CarbonInputFormatUtil, CarbonInputSplitTaskInfo}
 import org.apache.carbondata.processing.loading.TableProcessingOperations
@@ -56,7 +63,8 @@ import org.apache.carbondata.processing.loading.model.CarbonLoadModel
 import org.apache.carbondata.processing.merger._
 import org.apache.carbondata.processing.util.{CarbonDataProcessorUtil, CarbonLoaderUtil}
 import org.apache.carbondata.spark.MergeResult
-import org.apache.carbondata.spark.util.{CarbonScalaUtil, CommonUtil}
+import org.apache.carbondata.spark.load.{ByteArrayOrdering, DataLoadProcessBuilderOnSpark, PrimtiveOrdering, StringOrdering}
+import org.apache.carbondata.spark.util.{CarbonScalaUtil, CommonUtil, Util}
 
 class CarbonMergerRDD[K, V](
     @transient private val ss: SparkSession,
@@ -77,12 +85,14 @@ class CarbonMergerRDD[K, V](
   val databaseName = carbonMergerMapping.databaseName
   val factTableName = carbonMergerMapping.factTableName
   val tableId = carbonMergerMapping.tableId
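+  // Maps each compaction task index to the range filter expression that restricts its scan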
+  var expressionMapForRangeCol: util.Map[Integer, Expression] = null
 
   override def internalCompute(theSplit: Partition, context: TaskContext): Iterator[(K, V)] = {
     val queryStartTime = System.currentTimeMillis()
     val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
     val iter = new Iterator[(K, V)] {
       val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
+      val rangeColumn = carbonTable.getRangeColumn
       val carbonSparkPartition = theSplit.asInstanceOf[CarbonSparkPartition]
       if (carbonTable.isPartitionTable) {
         carbonLoadModel.setTaskNo(String.valueOf(carbonSparkPartition.partitionId))
@@ -181,7 +191,12 @@ class CarbonMergerRDD[K, V](
         }
         try {
           // fire a query and get the results.
-          rawResultIteratorMap = exec.processTableBlocks(FileFactory.getConfiguration)
+          var expr: expression.Expression = null
+          if (null != expressionMapForRangeCol) {
+            expr = expressionMapForRangeCol
+              .get(theSplit.asInstanceOf[CarbonSparkPartition].idx)
+          }
+          rawResultIteratorMap = exec.processTableBlocks(FileFactory.getConfiguration, expr)
         } catch {
           case e: Throwable =>
             LOGGER.error(e)
@@ -281,6 +296,14 @@ class CarbonMergerRDD[K, V](
       tablePath, new CarbonTableIdentifier(databaseName, factTableName, tableId)
     )
     val carbonTable = carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable
+    val rangeColumn = carbonTable.getRangeColumn
+    val dataType: DataType = if (null != rangeColumn) {
+      rangeColumn.getDataType
+    } else {
+      null
+    }
+    val isRangeColSortCol = rangeColumn != null && rangeColumn.isDimension &&
+                            rangeColumn.asInstanceOf[CarbonDimension].isSortColumn
     val updateStatusManager: SegmentUpdateStatusManager = new SegmentUpdateStatusManager(
       carbonTable)
     val jobConf: JobConf = new JobConf(getConf)
@@ -303,14 +326,30 @@ class CarbonMergerRDD[K, V](
 
     val taskInfoList = new java.util.ArrayList[Distributable]
     var carbonInputSplits = mutable.Seq[CarbonInputSplit]()
+    var allSplits = new java.util.ArrayList[InputSplit]
 
     var splitsOfLastSegment: List[CarbonInputSplit] = null
     // map for keeping the relation of a task and its blocks.
     val taskIdMapping: java.util.Map[String, java.util.List[CarbonInputSplit]] = new
         java.util.HashMap[String, java.util.List[CarbonInputSplit]]
 
+    var totalSize: Double = 0
+    var loadMetadataDetails: Array[LoadMetadataDetails] = null
+    // Only when a range column is configured, read the load metadata to get segment sizes
+    if (null != rangeColumn) {
+      loadMetadataDetails = SegmentStatusManager
+        .readLoadMetadata(CarbonTablePath.getMetadataPath(tablePath))
+    }
     // for each valid segment.
     for (eachSeg <- carbonMergerMapping.validSegments) {
+      // For a range column, accumulate the segment sizes used to calculate the number of ranges
+      if (null != rangeColumn) {
+        for (details <- loadMetadataDetails) {
+          if (details.getLoadName == eachSeg.getSegmentNo) {
+            totalSize = totalSize + (details.getDataSize.toDouble)
+          }
+        }
+      }
 
       // map for keeping the relation of a task and its blocks.
       job.getConfiguration.set(CarbonTableInputFormat.INPUT_SEGMENT_NUMBERS, eachSeg.getSegmentNo)
@@ -329,7 +368,7 @@ class CarbonMergerRDD[K, V](
           .map(_.asInstanceOf[CarbonInputSplit])
           .filter { split => FileFormat.COLUMNAR_V3.equals(split.getFileFormat) }.toList.asJava
       }
-      carbonInputSplits ++:= splits.asScala.map(_.asInstanceOf[CarbonInputSplit]).filter{ entry =>
+      val filteredSplits = splits.asScala.map(_.asInstanceOf[CarbonInputSplit]).filter{ entry =>
         val blockInfo = new TableBlockInfo(entry.getFilePath,
           entry.getStart, entry.getSegmentId,
           entry.getLocations, entry.getLength, entry.getVersion,
@@ -342,6 +381,31 @@ class CarbonMergerRDD[K, V](
             updateDetails, updateStatusManager)))) &&
         FileFormat.COLUMNAR_V3.equals(entry.getFileFormat)
       }
+      carbonInputSplits ++:= filteredSplits
+      allSplits.addAll(filteredSplits.asJava)
+    }
+    val LOGGER = LogServiceFactory.getLogService(this.getClass.getName)
+    var allRanges: Array[Object] = new Array[Object](0)
+    if (rangeColumn != null) {
+      // Calculate the number of ranges to be made; a minimum of 2 ranges/tasks is created in any case
+      val numOfPartitions = Math
+        .max(CarbonCommonConstants.NUM_CORES_DEFAULT_VAL.toInt, DataLoadProcessBuilderOnSpark
+          .getNumPatitionsBasedOnSize(totalSize, carbonTable, carbonLoadModel))
+      val colName = rangeColumn.getColName
+      LOGGER.info(s"Compacting on range column: $colName")
+      allRanges = getRangesFromRDD(rangeColumn,
+        carbonTable,
+        numOfPartitions,
+        allSplits,
+        dataType)
+      // If RangePartitioner does not give usable ranges (e.g. when the data is skewed with
+      // a lot of null records), take the min/max from the footers and set them for the tasks
+      if (null == allRanges || (allRanges.size == 1 && allRanges(0) == null)) {
+        allRanges = CarbonCompactionUtil.getOverallMinMax(carbonInputSplits.toList.toArray,
+          rangeColumn,
+          isRangeColSortCol)
+      }
+      LOGGER.info(s"Number of ranges:" + allRanges.size)
     }
 
     // prepare the details required to extract the segment properties using last segment.
@@ -362,20 +426,28 @@ class CarbonMergerRDD[K, V](
     val columnToCardinalityMap = new util.HashMap[java.lang.String, Integer]()
     val partitionTaskMap = new util.HashMap[PartitionSpec, String]()
     val counter = new AtomicInteger()
+    var indexOfRangeColumn = -1
+    var taskIdCount = 0
+    // Since null values are already handled separately in the filter expression, remove nulls
+    // from the ranges we get; otherwise it may lead to duplicate data
+    val newRanges = allRanges.filter { range =>
+      range != null
+    }
     carbonInputSplits.foreach { split =>
-      val taskNo = getTaskNo(split, partitionTaskMap, counter)
       var dataFileFooter: DataFileFooter = null
-
-      val splitList = taskIdMapping.get(taskNo)
-      noOfBlocks += 1
-      if (null == splitList) {
-        val splitTempList = new util.ArrayList[CarbonInputSplit]()
-        splitTempList.add(split)
-        taskIdMapping.put(taskNo, splitTempList)
-      } else {
-        splitList.add(split)
+      if (null == rangeColumn) {
+        val taskNo = getTaskNo(split, partitionTaskMap, counter)
+        var sizeOfSplit = split.getDetailInfo.getBlockSize
+        val splitList = taskIdMapping.get(taskNo)
+        noOfBlocks += 1
+        if (null == splitList) {
+          val splitTempList = new util.ArrayList[CarbonInputSplit]()
+          splitTempList.add(split)
+          taskIdMapping.put(taskNo, splitTempList)
+        } else {
+          splitList.add(split)
+        }
       }
-
       // Check the cardinality of each columns and set the highest.
       try {
         dataFileFooter = CarbonUtil.readMetadataFile(
@@ -390,6 +462,48 @@ class CarbonMergerRDD[K, V](
         .addColumnCardinalityToMap(columnToCardinalityMap,
           dataFileFooter.getColumnInTable,
           dataFileFooter.getSegmentInfo.getColumnCardinality)
+
+      // Create taskIdMapping here for range column by reading min/max values.
+      if (null != rangeColumn) {
+        if (null == expressionMapForRangeCol) {
+          expressionMapForRangeCol = new util.HashMap[Integer, Expression]()
+        }
+        if (-1 == indexOfRangeColumn) {
+          val allColumns = dataFileFooter.getColumnInTable
+          for (i <- 0 until allColumns.size()) {
+            if (allColumns.get(i).getColumnName.equalsIgnoreCase(rangeColumn.getColName)) {
+              indexOfRangeColumn = i
+            }
+          }
+        }
+        // Create ranges and add splits to the tasks
+        for (i <- 0 until (newRanges.size + 1)) {
+          if (null == expressionMapForRangeCol.get(i)) {
+            // Creating FilterExpression for the range column
+            var minVal: Object = null
+            var maxVal: Object = null
+            // For the first task we create an OR filter that also accommodates null values;
+            // for the last task we use a GreaterThan expression on the last range value
+            if (i != 0) {
+              minVal = newRanges(i - 1)
+            }
+            if (i != newRanges.size) {
+              maxVal = newRanges(i)
+            }
+            val filterExpr = CarbonCompactionUtil
+              .getFilterExpressionForRange(rangeColumn,
+                minVal, maxVal, dataType)
+            expressionMapForRangeCol.put(i, filterExpr)
+          }
+          var splitList = taskIdMapping.get(i.toString)
+          noOfBlocks += 1
+          if (null == splitList) {
+            splitList = new util.ArrayList[CarbonInputSplit]()
+            taskIdMapping.put(i.toString, splitList)
+          }
+          splitList.add(split)
+        }
+      }
     }
     val updatedMaxSegmentColumnList = new util.ArrayList[ColumnSchema]()
     // update cardinality and column schema list according to master schema
@@ -472,6 +586,52 @@ class CarbonMergerRDD[K, V](
     result.toArray(new Array[Partition](result.size))
   }
 
+  private def getRangesFromRDD(rangeColumn: CarbonColumn,
+      carbonTable: CarbonTable,
+      defaultParallelism: Int,
+      allSplits: java.util.ArrayList[InputSplit],
+      dataType: DataType): Array[Object] = {
+    val inputMetricsStats: CarbonInputMetrics = new CarbonInputMetrics
+    val projection = new CarbonProjection
+    projection.addColumn(rangeColumn.getColName)
+    val scanRdd = new CarbonScanRDD[InternalRow](
+      ss,
+      projection,
+      null,
+      carbonTable.getAbsoluteTableIdentifier,
+      carbonTable.getTableInfo.serialize(),
+      carbonTable.getTableInfo,
+      inputMetricsStats,
+      partitionNames = null,
+      splits = allSplits)
+    val objectOrdering: Ordering[Object] = createOrderingForColumn(rangeColumn)
+    val sparkDataType = Util.convertCarbonToSparkDataType(dataType)
+    // Read the value with its actual Spark data type (not only string) so that all types are supported
+    val sampleRdd = scanRdd
+      .map(row => (row.get(0, sparkDataType), null))
+    val value = new DataSkewRangePartitioner(
+      defaultParallelism, sampleRdd, true)(objectOrdering, classTag[Object])
+    value.rangeBounds
+  }
+
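+  // Chooses an ordering that matches how the range column values are read during the sampling scan:
+  // dictionary/direct-dictionary dimensions (except TIMESTAMP) compare as INT surrogates,
+  // primitive columns use their native ordering, other dimensions compare as UTF8String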
+  private def createOrderingForColumn(column: CarbonColumn): Ordering[Object] = {
+    if (column.isDimension) {
+      val dimension = column.asInstanceOf[CarbonDimension]
+      if ((dimension.isGlobalDictionaryEncoding || dimension.isDirectDictionaryEncoding) &&
+          dimension.getDataType != DataTypes.TIMESTAMP) {
+        new PrimtiveOrdering(DataTypes.INT)
+      } else {
+        if (DataTypeUtil.isPrimitiveColumn(column.getDataType)) {
+          new PrimtiveOrdering(column.getDataType)
+        } else {
+          new StringOrdering()
+        }
+      }
+    } else {
+      new PrimtiveOrdering(column.getDataType)
+    }
+  }
+
   private def getTaskNo(
       split: CarbonInputSplit,
       partitionTaskMap: util.Map[PartitionSpec, String],
@@ -495,8 +655,6 @@ class CarbonMergerRDD[K, V](
     }
   }
 
-
-
   private def getPartitionNamesFromTask(taskId: String,
       partitionTaskMap: util.Map[PartitionSpec, String]): Option[PartitionSpec] = {
     if (carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable.isHivePartitionTable) {
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
index d0ed815..b62a7e2 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
@@ -82,7 +82,8 @@ class CarbonScanRDD[T: ClassTag](
     inputMetricsStats: InitInputMetrics,
     @transient val partitionNames: Seq[PartitionSpec],
     val dataTypeConverterClz: Class[_ <: DataTypeConverter] = classOf[SparkDataTypeConverterImpl],
-    val readSupportClz: Class[_ <: CarbonReadSupport[_]] = SparkReadSupport.readSupportClass)
+    val readSupportClz: Class[_ <: CarbonReadSupport[_]] = SparkReadSupport.readSupportClass,
+    @transient var splits: java.util.List[InputSplit] = null)
   extends CarbonRDDWithTableInfo[T](spark, Nil, serializedTableInfo) {
 
   private val queryId = sparkContext.getConf.get("queryId", System.nanoTime() + "")
@@ -126,7 +127,9 @@ class CarbonScanRDD[T: ClassTag](
 
       // get splits
       getSplitsStartTime = System.currentTimeMillis()
-      val splits = format.getSplits(job)
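+      // Use pre-computed splits when provided (e.g. by compaction); otherwise compute them here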
+      if (null == splits) {
+        splits = format.getSplits(job)
+      }
       getSplitsEndTime = System.currentTimeMillis()
       if ((splits == null) && format.isInstanceOf[CarbonFileInputFormat[Object]]) {
         throw new SparkException(
diff --git a/integration/spark2/src/main/scala/org/apache/spark/CarbonInputMetrics.scala b/integration/spark-common/src/main/scala/org/apache/spark/CarbonInputMetrics.scala
similarity index 100%
rename from integration/spark2/src/main/scala/org/apache/spark/CarbonInputMetrics.scala
rename to integration/spark-common/src/main/scala/org/apache/spark/CarbonInputMetrics.scala
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/DataSkewRangePartitioner.scala b/integration/spark-common/src/main/scala/org/apache/spark/DataSkewRangePartitioner.scala
index 12285d3..d434108 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/DataSkewRangePartitioner.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/DataSkewRangePartitioner.scala
@@ -80,7 +80,8 @@ import org.apache.spark.util.{CollectionsUtils, Utils}
  */
 class DataSkewRangePartitioner[K: Ordering : ClassTag, V](
     partitions: Int,
-    rdd: RDD[_ <: Product2[K, V]])
+    rdd: RDD[_ <: Product2[K, V]],
+    withoutSkew: Boolean)
   extends Partitioner {
 
   // We allow partitions = 0, which happens when sorting an empty RDD under the default settings.
@@ -92,7 +93,8 @@ class DataSkewRangePartitioner[K: Ordering : ClassTag, V](
   // dataSkewCount: how many bounds happened data skew
   // dataSkewIndex: the index of data skew bounds
   // dataSkewNum: how many partition of each data skew bound
-  private var (rangeBounds: Array[K], skewCount: Int, skewIndexes: Array[Int],
+  // Min and Max values of complete range
+  var (rangeBounds: Array[K], skewCount: Int, skewIndexes: Array[Int],
   skewWeights: Array[Int]) = {
     if (partitions <= 1) {
       (Array.empty[K], 0, Array.empty[Int], Array.empty[Int])
@@ -103,7 +105,7 @@ class DataSkewRangePartitioner[K: Ordering : ClassTag, V](
       val sampleSizePerPartition = math.ceil(3.0 * sampleSize / rdd.partitions.length).toInt
       val (numItems, sketched) = RangePartitioner.sketch(rdd.map(_._1), sampleSizePerPartition)
       if (numItems == 0L) {
         (Array.empty[K], 0, Array.empty[Int], Array.empty[Int])
       } else {
         // If a partition contains much more than the average number of items, we re-sample from it
         // to ensure that enough items are collected from that partition.
@@ -129,14 +131,25 @@ class DataSkewRangePartitioner[K: Ordering : ClassTag, V](
           val weight = (1.0 / fraction).toFloat
           candidates ++= reSampled.map(x => (x, weight))
         }
-        determineBounds(candidates, partitions)
+        // In case of compaction we do not need the skew-handled ranges, so we use RangePartitioner,
+        // but we still require the overall min/max for creating the separate ranges.
+        // withoutSkew = true is used for compaction only
+        if (withoutSkew == false) {
+          determineBounds(candidates, partitions, false)
+        } else {
+          var ranges = RangePartitioner.determineBounds(candidates, partitions)
+          var otherRangeParams = determineBounds(candidates, partitions, true)
+          (ranges, otherRangeParams._2, otherRangeParams._3,
+            otherRangeParams._4)
+        }
       }
     }
   }
 
   def determineBounds(
       candidates: ArrayBuffer[(K, Float)],
-      partitions: Int): (Array[K], Int, Array[Int], Array[Int]) = {
+      partitions: Int,
+      withoutSkew: Boolean): (Array[K], Int, Array[Int], Array[Int]) = {
     val ordered = candidates.sortBy(_._1)
     val numCandidates = ordered.size
     val sumWeights = ordered.map(_._2.toDouble).sum
@@ -196,7 +209,8 @@ class DataSkewRangePartitioner[K: Ordering : ClassTag, V](
       dataSkewNumTmp += dataSkewCountTmp
     }
     if (dataSkewIndexTmp.size > 0) {
-      (finalBounds.toArray, dataSkewIndexTmp.size, dataSkewIndexTmp.toArray, dataSkewNumTmp.toArray)
+      (finalBounds.toArray, dataSkewIndexTmp.size, dataSkewIndexTmp.toArray, dataSkewNumTmp
+        .toArray)
     } else {
       (finalBounds.toArray, 0, Array.empty[Int], Array.empty[Int])
     }
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index d978128..a2b2af6 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -36,7 +36,7 @@ import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandExcepti
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
 import org.apache.carbondata.core.exception.InvalidConfigurationException
-import org.apache.carbondata.core.metadata.datatype.DataTypes
+import org.apache.carbondata.core.metadata.datatype.{DataType, DataTypes}
 import org.apache.carbondata.core.metadata.schema.PartitionInfo
 import org.apache.carbondata.core.metadata.schema.partition.PartitionType
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema
@@ -784,13 +784,19 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
         throw new MalformedCarbonCommandException(errorMsg)
       }
       val rangeField = fields.find(_.column.equalsIgnoreCase(rangeColumn))
+      // lazy: rangeField may be empty; its presence is validated just below
+      lazy val dataType = rangeField.get.dataType.get
       if (rangeField.isEmpty) {
         val errorMsg = "range_column: " + rangeColumn +
                        " does not exist in table. Please check the create table statement."
         throw new MalformedCarbonCommandException(errorMsg)
-      } else if (DataTypes.BINARY.getName.equalsIgnoreCase(rangeField.get.dataType.get)) {
+      } else if (DataTypes.BINARY.getName.equalsIgnoreCase(dataType) ||
+                 DataTypes.BOOLEAN.getName.equalsIgnoreCase(dataType) ||
+                 CarbonCommonConstants.ARRAY.equalsIgnoreCase(dataType) ||
+                 CarbonCommonConstants.STRUCT.equalsIgnoreCase(dataType) ||
+                 CarbonCommonConstants.MAP.equalsIgnoreCase(dataType) ||
+                 CarbonCommonConstants.DECIMAL.equalsIgnoreCase(dataType)) {
         throw new MalformedCarbonCommandException(
-          "RANGE_COLUMN doesn't support binary data type:" + rangeColumn)
+          s"RANGE_COLUMN doesn't support $dataType data type: " + rangeColumn)
       } else {
         tableProperties.put(CarbonCommonConstants.RANGE_COLUMN, rangeField.get.column)
       }
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
index 57dd356..09763fd 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDatasourceHadoopRelation.scala
@@ -40,7 +40,6 @@ import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension
 import org.apache.carbondata.core.scan.expression.Expression
 import org.apache.carbondata.core.scan.expression.logical.AndExpression
 import org.apache.carbondata.hadoop.CarbonProjection
-import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil
 import org.apache.carbondata.spark.rdd.{CarbonScanRDD, SparkReadSupport}
 
 case class CarbonDatasourceHadoopRelation(
diff --git a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
index 619b45a..d9c7be7 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionExecutor.java
@@ -37,6 +37,7 @@ import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.scan.executor.QueryExecutor;
 import org.apache.carbondata.core.scan.executor.QueryExecutorFactory;
 import org.apache.carbondata.core.scan.executor.exception.QueryExecutionException;
+import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.model.QueryModel;
 import org.apache.carbondata.core.scan.model.QueryModelBuilder;
 import org.apache.carbondata.core.scan.result.RowBatch;
@@ -108,9 +109,10 @@ public class CarbonCompactionExecutor {
    * Map has 2 elements: UNSORTED and SORTED
    * Map(UNSORTED) = List of Iterators which yield sorted data
    * Map(Sorted) = List of Iterators which yield sorted data
+   * For range column compaction, a filter expression is passed in and applied while processing
    */
-  public Map<String, List<RawResultIterator>> processTableBlocks(Configuration configuration)
-      throws QueryExecutionException, IOException {
+  public Map<String, List<RawResultIterator>> processTableBlocks(Configuration configuration,
+      Expression filterExpr) throws QueryExecutionException, IOException {
 
     Map<String, List<RawResultIterator>> resultList = new HashMap<>(2);
     resultList.put(CarbonCompactionUtil.UNSORTED_IDX,
@@ -119,10 +121,16 @@ public class CarbonCompactionExecutor {
         new ArrayList<RawResultIterator>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE));
 
     List<TableBlockInfo> tableBlockInfos = null;
-    QueryModelBuilder builder = new QueryModelBuilder(carbonTable)
-        .projectAllColumns()
-        .dataConverter(dataTypeConverter)
-        .enableForcedDetailRawQuery();
+    QueryModelBuilder builder = null;
+    if (null == filterExpr) {
+      builder =
+          new QueryModelBuilder(carbonTable).projectAllColumns().dataConverter(dataTypeConverter)
+              .enableForcedDetailRawQuery();
+    } else {
+      builder = new QueryModelBuilder(carbonTable).projectAllColumns().filterExpression(filterExpr)
+          .dataConverter(dataTypeConverter).enableForcedDetailRawQuery()
+          .convertToRangeFilter(false);
+    }
     if (enablePageLevelReaderForCompaction()) {
       builder.enableReadPageByPage();
     }
diff --git a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionUtil.java b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionUtil.java
index c4b6843..f4a15bb 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionUtil.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/merger/CarbonCompactionUtil.java
@@ -30,14 +30,28 @@ import org.apache.carbondata.core.datastore.impl.FileFactory;
 import org.apache.carbondata.core.metadata.CarbonTableIdentifier;
 import org.apache.carbondata.core.metadata.blocklet.BlockletInfo;
 import org.apache.carbondata.core.metadata.blocklet.DataFileFooter;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.metadata.encoder.Encoding;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema;
 import org.apache.carbondata.core.scan.executor.util.RestructureUtil;
+import org.apache.carbondata.core.scan.expression.ColumnExpression;
+import org.apache.carbondata.core.scan.expression.Expression;
+import org.apache.carbondata.core.scan.expression.LiteralExpression;
+import org.apache.carbondata.core.scan.expression.conditional.EqualToExpression;
+import org.apache.carbondata.core.scan.expression.conditional.GreaterThanExpression;
+import org.apache.carbondata.core.scan.expression.conditional.LessThanEqualToExpression;
+import org.apache.carbondata.core.scan.expression.logical.AndExpression;
+import org.apache.carbondata.core.scan.expression.logical.OrExpression;
+import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.core.util.CarbonUtil;
+import org.apache.carbondata.core.util.DataTypeUtil;
 import org.apache.carbondata.core.util.path.CarbonTablePath;
+import org.apache.carbondata.hadoop.CarbonInputSplit;
 
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.log4j.Logger;
@@ -461,6 +475,132 @@ public class CarbonCompactionUtil {
     return false;
   }
 
+  // Returns an Expression (And/Or) for each range, built according to the data type.
+  // The expression is passed to each compaction task as a filter query to read only that range's data.
+  public static Expression getFilterExpressionForRange(CarbonColumn rangeColumn, Object minVal,
+      Object maxVal, DataType dataType) {
+    Expression finalExpr;
+    Expression exp1, exp2;
+    String colName = rangeColumn.getColName();
+
+    // In case of null values create an OrFilter expression and
+    // for the other cases create an AndFilter expression
+    if (null == minVal) {
+      // First task
+      exp1 = new EqualToExpression(new ColumnExpression(colName, dataType),
+          new LiteralExpression(null, dataType), true);
+      if (null == maxVal) {
+        // If both the min and max values are null, the data contains only null values,
+        // so pass just the single null-check expression as the filter
+        finalExpr = exp1;
+      } else {
+        exp2 = new LessThanEqualToExpression(new ColumnExpression(colName, dataType),
+            new LiteralExpression(maxVal, dataType));
+        if (rangeColumn.hasEncoding(Encoding.DICTIONARY)) {
+          exp2.setAlreadyResolved(true);
+        }
+        finalExpr = new OrExpression(exp1, exp2);
+      }
+    } else if (null == maxVal) {
+      // Last task
+      finalExpr = new GreaterThanExpression(new ColumnExpression(colName, dataType),
+          new LiteralExpression(minVal, dataType));
+      if (rangeColumn.hasEncoding(Encoding.DICTIONARY)) {
+        finalExpr.setAlreadyResolved(true);
+      }
+    } else {
+      // All remaining intermediate ranges
+      exp1 = new GreaterThanExpression(new ColumnExpression(colName, dataType),
+          new LiteralExpression(minVal, dataType));
+      exp2 = new LessThanEqualToExpression(new ColumnExpression(colName, dataType),
+          new LiteralExpression(maxVal, dataType));
+      if (rangeColumn.hasEncoding(Encoding.DICTIONARY)) {
+        exp2.setAlreadyResolved(true);
+        exp1.setAlreadyResolved(true);
+      }
+      finalExpr = new AndExpression(exp1, exp2);
+    }
+    return finalExpr;
+  }
+
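+  // Reads the range column's blocklet min/max from every split's footer and returns the overall
+  // [min, max] pair, decoded according to the column's encoding, sort order and data type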
+  public static Object[] getOverallMinMax(CarbonInputSplit[] carbonInputSplits,
+      CarbonColumn rangeCol, boolean isSortCol) {
+    byte[] minVal = null;
+    byte[] maxVal = null;
+    int dictMinVal = Integer.MAX_VALUE;
+    int dictMaxVal = Integer.MIN_VALUE;
+    int idx = -1;
+    DataType dataType = rangeCol.getDataType();
+    Object[] minMaxVals = new Object[2];
+    boolean isDictEncode = rangeCol.hasEncoding(Encoding.DICTIONARY);
+    try {
+      for (CarbonInputSplit split : carbonInputSplits) {
+        DataFileFooter dataFileFooter = null;
+        dataFileFooter =
+            CarbonUtil.readMetadataFile(CarbonInputSplit.getTableBlockInfo(split), true);
+
+        if (-1 == idx) {
+          List<ColumnSchema> allColumns = dataFileFooter.getColumnInTable();
+          for (int i = 0; i < allColumns.size(); i++) {
+            if (allColumns.get(i).getColumnName().equalsIgnoreCase(rangeCol.getColName())) {
+              idx = i;
+              break;
+            }
+          }
+        }
+        if (isDictEncode) {
+          byte[] tempMin = dataFileFooter.getBlockletIndex().getMinMaxIndex().getMinValues()[idx];
+          int tempMinVal = CarbonUtil.getSurrogateInternal(tempMin, 0, tempMin.length);
+          byte[] tempMax = dataFileFooter.getBlockletIndex().getMinMaxIndex().getMaxValues()[idx];
+          int tempMaxVal = CarbonUtil.getSurrogateInternal(tempMax, 0, tempMax.length);
+          if (dictMinVal > tempMinVal) {
+            dictMinVal = tempMinVal;
+          }
+          if (dictMaxVal < tempMaxVal) {
+            dictMaxVal = tempMaxVal;
+          }
+        } else {
+          if (null == minVal) {
+            minVal = dataFileFooter.getBlockletIndex().getMinMaxIndex().getMinValues()[idx];
+            maxVal = dataFileFooter.getBlockletIndex().getMinMaxIndex().getMaxValues()[idx];
+          } else {
+            byte[] tempMin = dataFileFooter.getBlockletIndex().getMinMaxIndex().getMinValues()[idx];
+            byte[] tempMax = dataFileFooter.getBlockletIndex().getMinMaxIndex().getMaxValues()[idx];
+            if (ByteUtil.compare(tempMin, minVal) <= 0) {
+              minVal = tempMin;
+            }
+            if (ByteUtil.compare(tempMax, maxVal) >= 0) {
+              maxVal = tempMax;
+            }
+          }
+        }
+      }
+
+      // Based on how min/max value is stored in the footer we change the data
+      if (isDictEncode) {
+        minMaxVals[0] = dictMinVal;
+        minMaxVals[1] = dictMaxVal;
+      } else {
+        if (!isSortCol && (dataType == DataTypes.INT || dataType == DataTypes.LONG)) {
+          minMaxVals[0] = ByteUtil.toLong(minVal, 0, minVal.length);
+          minMaxVals[1] = ByteUtil.toLong(maxVal, 0, maxVal.length);
+        } else if (dataType == DataTypes.DOUBLE) {
+          minMaxVals[0] = ByteUtil.toDouble(minVal, 0, minVal.length);
+          minMaxVals[1] = ByteUtil.toDouble(maxVal, 0, maxVal.length);
+        } else {
+          minMaxVals[0] =
+              DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(minVal, dataType, true);
+          minMaxVals[1] =
+              DataTypeUtil.getDataBasedOnDataTypeForNoDictionaryColumn(maxVal, dataType, true);
+        }
+      }
+
+    } catch (IOException e) {
+      LOGGER.error(e.getMessage());
+    }
+    return minMaxVals;
+  }
+
   /**
    * Returns if the DataFileFooter containing carbondata file contains
    * sorted data or not.
diff --git a/processing/src/main/java/org/apache/carbondata/processing/merger/RowResultMergerProcessor.java b/processing/src/main/java/org/apache/carbondata/processing/merger/RowResultMergerProcessor.java
index 7234c33..bec51e6 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/merger/RowResultMergerProcessor.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/merger/RowResultMergerProcessor.java
@@ -117,8 +117,10 @@ public class RowResultMergerProcessor extends AbstractResultProcessor {
 
       // add all iterators to the queue
       for (RawResultIterator leaftTupleIterator : finalIteratorList) {
-        this.recordHolderHeap.add(leaftTupleIterator);
-        index++;
+        if (leaftTupleIterator.hasNext()) {
+          this.recordHolderHeap.add(leaftTupleIterator);
+          index++;
+        }
       }
       RawResultIterator iterator = null;
       while (index > 1) {


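Before moving on to the next commit: the range-to-filter mapping added above in CarbonCompactionUtil.getFilterExpressionForRange can be summarized as follows. The first range (null min) tolerates null values via an OR, the last range (null max) is an open-ended greater-than, and every intermediate range is the half-open interval (min, max]. Below is a minimal, self-contained Java sketch of that mapping; it renders SQL-like strings instead of building CarbonData Expression objects, and the class and method names are illustrative only, not part of the CarbonData API.

// Illustrative sketch only: prints the per-range predicates that the
// compaction filter logic above builds, as SQL-like strings rather than
// CarbonData's Expression tree.
public final class RangeFilterSketch {

  // Builds the predicate for one range of the range column.
  // minVal == null                  -> first task:  col IS NULL [OR col <= max]
  // maxVal == null                  -> last task:   col > min
  // both non-null                   -> middle task: col > min AND col <= max
  static String filterFor(String col, Object minVal, Object maxVal) {
    if (minVal == null && maxVal == null) {
      // data contains only null values: a single IS NULL predicate
      return col + " IS NULL";
    } else if (minVal == null) {
      return "(" + col + " IS NULL OR " + col + " <= " + maxVal + ")";
    } else if (maxVal == null) {
      return col + " > " + minVal;
    } else {
      return "(" + col + " > " + minVal + " AND " + col + " <= " + maxVal + ")";
    }
  }

  public static void main(String[] args) {
    // Three ranges over an int column with boundaries 100 and 200.
    System.out.println(filterFor("rangeCol", null, 100));  // first range
    System.out.println(filterFor("rangeCol", 100, 200));   // intermediate range
    System.out.println(filterFor("rangeCol", 200, null));  // last range
  }
}
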
[carbondata] 15/22: [CARBONDATA-3369] Fix issues during concurrent execution of Create table If not exists

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit f46ad430e1f97ee1dc659a2676048671374cf394
Author: KanakaKumar <ka...@huawei.com>
AuthorDate: Fri May 3 22:05:16 2019 +0530

    [CARBONDATA-3369] Fix issues during concurrent execution of Create table If not exists
    
    Create table if not exists has the following problems when run concurrently from different drivers:
    - Sometimes it fails with the error "Table <db.table> already exists".
    - When create table fails, the driver still holds the table with the wrong path or schema, and subsequent operations refer to the wrong path.
    - The stale path created during create table is never deleted. [After version 1.5.0 the table is created in a new folder using a UUID if a folder with the table name already exists.]
    This PR fixes the above 3 issues.
    
    This closes #3198
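
In short, the fix below works like this: when a concurrent driver loses the "already exists" race, it clears its stale metadata, deletes the folder it created if the winning driver registered a different path, and treats the statement as a no-op when IF NOT EXISTS was given. A generic Java sketch of that pattern follows; the Catalog interface, TableExistsException, and their methods are hypothetical stand-ins for the metastore calls shown in the diff, not CarbonData or Spark APIs.

// Generic sketch of tolerating a concurrent "table already exists" race.
// Catalog and TableExistsException are hypothetical stand-ins.
interface Catalog {
  void createTable(String db, String table, String path) throws TableExistsException;
  String registeredPath(String db, String table);   // path registered by the winning driver
  void evictFromCache(String db, String table);     // drop any stale local metadata
  void deleteDir(String path);                      // remove a half-created folder
}

class TableExistsException extends Exception {}

final class CreateIfNotExists {
  static void run(Catalog catalog, String db, String table, String attemptedPath,
      boolean ifNotExists) throws TableExistsException {
    try {
      catalog.createTable(db, table, attemptedPath);
    } catch (TableExistsException e) {
      // Another driver won the race: discard our stale view of the table ...
      catalog.evictFromCache(db, table);
      // ... and delete the folder we created if the winner registered a different path.
      String actualPath = catalog.registeredPath(db, table);
      if (!actualPath.equalsIgnoreCase(attemptedPath)) {
        catalog.deleteDir(attemptedPath);
      }
      // IF NOT EXISTS makes the statement a no-op; otherwise surface the error.
      if (!ifNotExists) {
        throw e;
      }
    }
  }
}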
---
 .../createTable/TestCreateTableIfNotExists.scala   | 36 ++++++++++++++++++++++
 .../command/table/CarbonCreateTableCommand.scala   | 33 +++++++++++++++++++-
 2 files changed, 68 insertions(+), 1 deletion(-)

diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableIfNotExists.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableIfNotExists.scala
index 8f7afe4..dc54127 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableIfNotExists.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableIfNotExists.scala
@@ -17,6 +17,8 @@
 
 package org.apache.carbondata.spark.testsuite.createTable
 
+import java.util.concurrent.{Callable, ExecutorService, Executors, Future, TimeUnit}
+
 import org.apache.spark.sql.test.util.QueryTest
 import org.scalatest.BeforeAndAfterAll
 
@@ -51,11 +53,45 @@ class TestCreateTableIfNotExists extends QueryTest with BeforeAndAfterAll {
     assert(exception.getMessage.contains("Operation not allowed, when source table is carbon table"))
   }
 
+  test("test create table if not exist concurrently") {
+
+    val executorService: ExecutorService = Executors.newFixedThreadPool(10)
+    var futures: List[Future[_]] = List()
+    for (i <- 0 until (3)) {
+      futures = futures :+ runAsync()
+    }
+
+    executorService.shutdown();
+    executorService.awaitTermination(30L, TimeUnit.SECONDS)
+
+    futures.foreach { future =>
+      assertResult("PASS")(future.get.toString)
+    }
+
+    def runAsync(): Future[String] = {
+      executorService.submit(new Callable[String] {
+        override def call() = {
+          // Create table
+          var result = "PASS"
+          try {
+            sql("create table IF NOT EXISTS TestIfExists(name string) stored by 'carbondata'")
+          } catch {
+            case exception: Exception =>
+              result = exception.getMessage
+          }
+          result
+        }
+      })
+    }
+  }
+
+
   override def afterAll {
     sql("use default")
     sql("drop table if exists test")
     sql("drop table if exists sourceTable")
     sql("drop table if exists targetTable")
+    sql("drop table if exists TestIfExists")
   }
 
 }
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
index 1e17ffe..debb283 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonCreateTableCommand.scala
@@ -21,6 +21,7 @@ import scala.collection.JavaConverters._
 
 import org.apache.spark.sql.{CarbonEnv, Row, SparkSession, _}
 import org.apache.spark.sql.catalyst.analysis.TableAlreadyExistsException
+import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.execution.SQLExecution.EXECUTION_ID_KEY
 import org.apache.spark.sql.execution.command.MetadataCommand
 
@@ -166,7 +167,37 @@ case class CarbonCreateTableCommand(
              """.stripMargin)
           }
         } catch {
-          case e: AnalysisException => throw e
+          case e: AnalysisException =>
+            // AnalysisException is thrown with a "table already exists" message in case of concurrent drivers
+            if (e.getMessage().contains("already exists")) {
+
+              // Clear the cache first
+              CarbonEnv.getInstance(sparkSession).carbonMetaStore
+                .removeTableFromMetadata(dbName, tableName)
+
+              // Delete the folders created by this call if the actual path is different
+              val actualPath = CarbonEnv
+                .getCarbonTable(TableIdentifier(tableName, Option(dbName)))(sparkSession)
+                .getTablePath
+
+              if (!actualPath.equalsIgnoreCase(tablePath)) {
+                LOGGER
+                  .error(
+                    "TableAlreadyExists with path : " + actualPath + " So, deleting " + tablePath)
+                FileFactory.deleteAllCarbonFilesOfDir(FileFactory.getCarbonFile(tablePath))
+              }
+
+              // No need to throw for create if not exists
+              if (ifNotExistsSet) {
+                LOGGER.error(e, e)
+              } else {
+                throw e
+              }
+            }
+            else {
+              throw e
+            }
+
           case e: Exception =>
             // call the drop table to delete the created table.
             try {


[carbondata] 07/22: [CARBONDATA-3351] Support Binary Data Type

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 69b88732323500fae6ba97a91dcd4ada6a084bc9
Author: xubo245 <xu...@huawei.com>
AuthorDate: Tue Apr 16 18:28:42 2019 +0800

    [CARBONDATA-3351] Support Binary Data Type
    
    CarbonData supports the binary data type.
    
    Background:
    Binary is a basic data type that is widely used in various scenarios, so it is better to support it in CarbonData. Downloading data from S3 can be slow when a dataset contains many small binary objects. The majority of application scenarios involve storing small binary values in CarbonData, which avoids the small-files problem, speeds up S3 access, and reduces the cost of accessing OBS by decreasing the number of S3 API calls. It also will easier to m [...]
    
    Goals:
    
    - Support writing the binary data type through the Carbon Java SDK.
    - Support reading the binary data type through the Spark Carbon file format (carbon datasource) and CarbonSession.
    - Support reading the binary data type through the Carbon SDK.
    - Support writing binary data through Spark.
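
For orientation before the diff, here is a condensed Java sketch of writing a binary column with the SDK. It follows the CarbonWriter builder calls used in the BinaryUtil test helper added by this commit; the output path, file name, and field names are chosen for illustration only.

// Write one row containing a binary column with the Carbon SDK
// (condensed from the BinaryUtil test helper added in this commit).
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.carbondata.core.metadata.datatype.DataTypes;
import org.apache.carbondata.sdk.file.CarbonWriter;
import org.apache.carbondata.sdk.file.Field;
import org.apache.carbondata.sdk.file.Schema;

public class BinaryWriteSketch {
  public static void main(String[] args) throws Exception {
    // Illustrative paths; replace with real locations.
    String outputPath = "./carbon_binary_output";
    byte[] image = Files.readAllBytes(Paths.get("./sample.png"));

    // Schema with a string column and a binary column.
    Field[] fields = new Field[2];
    fields[0] = new Field("name", DataTypes.STRING);
    fields[1] = new Field("image", DataTypes.BINARY);

    CarbonWriter writer = CarbonWriter.builder()
        .outputPath(outputPath)
        .withCsvInput(new Schema(fields))
        .writtenBy("binarySketch")
        .build();

    // The binary column is passed as a byte[] in the row array.
    writer.write(new Object[]{"sample.png", image});
    writer.close();
  }
}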
---
 .../core/constants/CarbonCommonConstants.java      |    1 +
 .../safe/AbstractNonDictionaryVectorFiller.java    |    2 +-
 .../SafeVariableLengthDimensionDataChunkStore.java |    2 +-
 .../carbondata/core/datastore/page/ColumnPage.java |   11 +-
 .../core/datastore/page/LazyColumnPage.java        |    2 +
 .../datastore/page/UnsafeVarLengthColumnPage.java  |    7 +-
 .../datastore/page/VarLengthColumnPageBase.java    |    1 +
 .../page/encoding/ColumnPageEncoderMeta.java       |    4 +-
 .../page/encoding/DefaultEncodingFactory.java      |   14 +-
 .../carbondata/core/datastore/row/CarbonRow.java   |    6 +-
 .../ThriftWrapperSchemaConverterImpl.java          |    4 +
 .../core/metadata/datatype/BinaryType.java         |   29 +
 .../core/metadata/datatype/DataType.java           |    2 +-
 .../core/metadata/datatype/DataTypes.java          |    4 +
 .../metadata/schema/table/TableSchemaBuilder.java  |    1 +
 .../result/vector/impl/CarbonColumnVectorImpl.java |    6 +-
 .../apache/carbondata/core/util/CarbonUtil.java    |    9 +-
 .../apache/carbondata/core/util/DataTypeUtil.java  |   11 +-
 docs/sdk-guide.md                                  |    1 +
 docs/supported-data-types-in-carbondata.md         |    2 +-
 format/src/main/thrift/schema.thrift               |    1 +
 .../hadoop/util/CarbonVectorizedRecordReader.java  |    3 +-
 integration/spark-common-test/pom.xml              |    1 -
 .../org/apache/carbondata/sdk/util/BinaryUtil.java |   88 ++
 .../src/test/resources/binaryStringNullData.csv    |    4 +
 .../src/test/resources/binarydata.csv              |    3 +
 .../src/test/resources/binarystringdata.csv        |    3 +
 .../src/test/resources/binarystringdata2.csv       |    3 +
 .../resources/jsonFiles/data/allPrimitiveType.json |    3 +-
 .../testsuite/binary/TestBinaryDataType.scala      | 1153 ++++++++++++++++++++
 ...ryWithColumnMetCacheAndCacheLevelProperty.scala |    2 -
 .../TestNonTransactionalCarbonTableForBinary.scala |  162 +++
 ...TestNonTransactionalCarbonTableJsonWriter.scala |   37 +-
 .../StandardPartitionBadRecordLoggerTest.scala     |    2 -
 .../carbondata/spark/util/CarbonScalaUtil.scala    |   21 +-
 .../spark/util/DataTypeConverterUtil.scala         |    3 +
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala    |   39 +-
 integration/spark-datasource/pom.xml               |    1 -
 .../converter/SparkDataTypeConverterImpl.java      |    5 +-
 .../vectorreader/VectorizedCarbonRecordReader.java |    2 +-
 .../datasources/CarbonSparkDataSourceUtil.scala    |   10 +-
 .../datasources/SparkCarbonFileFormat.scala        |    2 +
 .../apache/spark/sql/util/SparkTypeConverter.scala |    1 +
 .../org/apache/carbondata/sdk/util/BinaryUtil.java |   89 ++
 .../SparkCarbonDataSourceBinaryTest.scala          |  544 +++++++++
 .../datasource/SparkCarbonDataSourceTest.scala     |   26 +
 .../apache/spark/sql/optimizer/CarbonFilters.scala |    8 +-
 .../converter/impl/BinaryFieldConverterImpl.java   |   78 ++
 .../converter/impl/FieldEncoderFactory.java        |    2 +
 .../loading/sort/SortStepRowHandler.java           |    9 +
 .../store/CarbonFactDataHandlerColumnar.java       |    3 +-
 .../carbondata/processing/store/TablePage.java     |   10 +-
 store/sdk/pom.xml                                  |    3 +-
 .../carbondata/sdk/file/CSVCarbonWriter.java       |    2 +-
 .../apache/carbondata/sdk/file/CarbonReader.java   |    1 -
 .../carbondata/sdk/file/CarbonWriterBuilder.java   |   68 +-
 .../java/org/apache/carbondata/sdk/file/Field.java |    4 +
 .../carbondata/sdk/file/JsonCarbonWriter.java      |    3 +-
 .../org/apache/carbondata/sdk/file/RowUtil.java    |   11 +
 .../apache/carbondata/sdk/file/utils/SDKUtil.java  |   79 ++
 .../carbondata/sdk/file/CSVCarbonWriterTest.java   |   16 +-
 .../carbondata/sdk/file/CarbonReaderTest.java      |  186 +++-
 .../org/apache/carbondata/sdk/file/ImageTest.java  |  818 ++++++++++++++
 .../org/apache/carbondata/util/BinaryUtil.java     |  126 +++
 .../src/test/resources/image/carbondatalogo.jpg    |  Bin 0 -> 59099 bytes
 .../image/flowers/10686568196_b1915544a8.jpg       |  Bin 0 -> 97920 bytes
 .../image/flowers/10686568196_b1915544a8.txt       |    1 +
 .../image/flowers/10712722853_5632165b04.jpg       |  Bin 0 -> 63389 bytes
 .../image/flowers/10712722853_5632165b04.txt       |    1 +
 .../flowers/subfolder/10841136265_af473efc60.jpg   |  Bin 0 -> 62144 bytes
 .../flowers/subfolder/10841136265_af473efc60.txt   |    1 +
 .../src/test/resources/image/voc/2007_000027.jpg   |  Bin 0 -> 145493 bytes
 .../src/test/resources/image/voc/2007_000027.xml   |   63 ++
 .../src/test/resources/image/voc/2007_000032.jpg   |  Bin 0 -> 54757 bytes
 .../src/test/resources/image/voc/2007_000032.xml   |   63 ++
 .../src/test/resources/image/voc/2007_000033.jpg   |  Bin 0 -> 71205 bytes
 .../src/test/resources/image/voc/2007_000033.xml   |   51 +
 .../src/test/resources/image/voc/2007_000039.jpg   |  Bin 0 -> 64668 bytes
 .../src/test/resources/image/voc/2007_000039.xml   |   27 +
 .../src/test/resources/image/voc/2009_001444.jpg   |  Bin 0 -> 677151 bytes
 .../src/test/resources/image/voc/2009_001444.xml   |   28 +
 .../image/vocForSegmentationClass/2007_000032.jpg  |  Bin 0 -> 54757 bytes
 .../image/vocForSegmentationClass/2007_000032.png  |  Bin 0 -> 2334 bytes
 .../image/vocForSegmentationClass/2007_000033.jpg  |  Bin 0 -> 71205 bytes
 .../image/vocForSegmentationClass/2007_000033.png  |  Bin 0 -> 2814 bytes
 .../image/vocForSegmentationClass/2007_000042.jpg  |  Bin 0 -> 82847 bytes
 .../image/vocForSegmentationClass/2007_000042.png  |  Bin 0 -> 3620 bytes
 .../org/apache/carbondata/tool/CarbonCliTest.java  |   61 +-
 88 files changed, 3900 insertions(+), 150 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index e02241e..c9efc34 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -1749,6 +1749,7 @@ public final class CarbonCommonConstants {
   public static final String ARRAY_SEPARATOR = "\001";
   public static final String STRING = "String";
   public static final String SHORT = "Short";
+  public static final String BINARY = "Binary";
   public static final String TIMESTAMP = "Timestamp";
   public static final String ARRAY = "array";
   public static final String STRUCT = "struct";
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/AbstractNonDictionaryVectorFiller.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/AbstractNonDictionaryVectorFiller.java
index 38e28ae..298d165 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/AbstractNonDictionaryVectorFiller.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/AbstractNonDictionaryVectorFiller.java
@@ -52,7 +52,7 @@ class NonDictionaryVectorFillerFactory {
       } else {
         return new StringVectorFiller(numberOfRows, actualDataLength);
       }
-    } else if (type == DataTypes.VARCHAR) {
+    } else if (type == DataTypes.VARCHAR || type == DataTypes.BINARY) {
       return new LongStringVectorFiller(numberOfRows, actualDataLength);
     } else if (type == DataTypes.TIMESTAMP) {
       return new TimeStampVectorFiller(numberOfRows);
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableLengthDimensionDataChunkStore.java b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableLengthDimensionDataChunkStore.java
index 219d8c9..b740b28 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableLengthDimensionDataChunkStore.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/chunk/store/impl/safe/SafeVariableLengthDimensionDataChunkStore.java
@@ -177,7 +177,7 @@ public abstract class SafeVariableLengthDimensionDataChunkStore
             length)) {
       vector.putNull(vectorRow);
     } else {
-      if (dt == DataTypes.STRING || dt == DataTypes.VARCHAR) {
+      if (dt == DataTypes.STRING || dt == DataTypes.VARCHAR || dt == DataTypes.BINARY) {
         vector.putByteArray(vectorRow, currentDataOffset, length, data);
       } else if (dt == DataTypes.BOOLEAN) {
         vector.putBoolean(vectorRow, ByteUtil.toBoolean(data[currentDataOffset]));
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java
index 22c5536..41d93ef 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/ColumnPage.java
@@ -204,7 +204,8 @@ public abstract class ColumnPage {
             new ColumnPageEncoderMeta(columnSpec, dataType, compressorName), pageSize);
       } else if (dataType == DataTypes.STRING
           || dataType == DataTypes.BYTE_ARRAY
-          || dataType == DataTypes.VARCHAR) {
+          || dataType == DataTypes.VARCHAR
+          || dataType == DataTypes.BINARY) {
         instance = new UnsafeVarLengthColumnPage(
             new ColumnPageEncoderMeta(columnSpec, dataType, compressorName), pageSize);
       } else {
@@ -231,7 +232,8 @@ public abstract class ColumnPage {
         instance = newDecimalPage(columnPageEncoderMeta, new byte[pageSize][]);
       } else if (dataType == DataTypes.STRING
           || dataType == DataTypes.BYTE_ARRAY
-          || dataType == DataTypes.VARCHAR) {
+          || dataType == DataTypes.VARCHAR
+          || dataType == DataTypes.BINARY) {
         instance = new SafeVarLengthColumnPage(columnPageEncoderMeta, pageSize);
       } else {
         throw new RuntimeException("Unsupported data dataType: " + dataType);
@@ -426,6 +428,9 @@ public abstract class ColumnPage {
     } else if (dataType == DataTypes.FLOAT) {
       putFloat(rowId, (float) value);
       statsCollector.update((float) value);
+    } else if (dataType == DataTypes.BINARY) {
+      putBytes(rowId, (byte[]) value);
+      statsCollector.update((byte[]) value);
     } else {
       throw new RuntimeException("unsupported data type: " + dataType);
     }
@@ -782,6 +787,8 @@ public abstract class ColumnPage {
         || columnPageEncoderMeta.getColumnSpec().getColumnType() == ColumnType.PLAIN_LONG_VALUE
         || columnPageEncoderMeta.getColumnSpec().getColumnType() == ColumnType.PLAIN_VALUE)) {
       return compressor.compressByte(getComplexParentFlattenedBytePage());
+    } else if (dataType == DataTypes.BINARY) {
+      return getLVFlattenedBytePage();
     } else if (dataType == DataTypes.BYTE_ARRAY) {
       return compressor.compressByte(getLVFlattenedBytePage());
     } else {
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/LazyColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/LazyColumnPage.java
index 772916d..d0389d3 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/LazyColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/LazyColumnPage.java
@@ -86,6 +86,8 @@ public class LazyColumnPage extends ColumnPage {
       return converter.decodeDouble(columnPage.getFloat(rowId));
     } else if (dataType == DataTypes.DOUBLE) {
       return columnPage.getDouble(rowId);
+    } else if (dataType == DataTypes.BINARY) {
+      return converter.decodeDouble(columnPage.getByte(rowId));
     } else {
       throw new RuntimeException("internal error: " + this.toString());
     }
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeVarLengthColumnPage.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeVarLengthColumnPage.java
index 4693dba..c23c147 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeVarLengthColumnPage.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/UnsafeVarLengthColumnPage.java
@@ -23,6 +23,7 @@ import org.apache.carbondata.core.datastore.page.encoding.ColumnPageEncoderMeta;
 import org.apache.carbondata.core.memory.CarbonUnsafe;
 import org.apache.carbondata.core.memory.MemoryException;
 import org.apache.carbondata.core.memory.UnsafeMemoryManager;
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
 
 /**
  * This extension uses unsafe memory to store page data, for variable length data type (string)
@@ -35,7 +36,11 @@ public class UnsafeVarLengthColumnPage extends VarLengthColumnPageBase {
   UnsafeVarLengthColumnPage(ColumnPageEncoderMeta columnPageEncoderMeta, int pageSize)
       throws MemoryException {
     super(columnPageEncoderMeta, pageSize);
-    capacity = (int) (pageSize * DEFAULT_ROW_SIZE * FACTOR);
+    if (columnPageEncoderMeta.getStoreDataType() == DataTypes.BINARY) {
+      capacity = (int) (pageSize * DEFAULT_BINARY_SIZE * FACTOR);
+    } else {
+      capacity = (int) (pageSize * DEFAULT_ROW_SIZE * FACTOR);
+    }
     memoryBlock = UnsafeMemoryManager.allocateMemoryWithRetry(taskId, (long) (capacity));
     baseAddress = memoryBlock.getBaseObject();
     baseOffset = memoryBlock.getBaseOffset();
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/VarLengthColumnPageBase.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/VarLengthColumnPageBase.java
index 0f409f6..a941880 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/VarLengthColumnPageBase.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/VarLengthColumnPageBase.java
@@ -42,6 +42,7 @@ public abstract class VarLengthColumnPageBase extends ColumnPage {
   static final int longBits = DataTypes.LONG.getSizeBits();
   // default size for each row, grows as needed
   static final int DEFAULT_ROW_SIZE = 8;
+  static final int DEFAULT_BINARY_SIZE = 512;
 
   static final double FACTOR = 1.25;
 
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoderMeta.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoderMeta.java
index 03a43f8..f04d38a 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoderMeta.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/ColumnPageEncoderMeta.java
@@ -158,7 +158,7 @@ public class ColumnPageEncoderMeta extends ValueEncoderMeta implements Writable
         out.writeInt(-1);
         out.writeInt(-1);
       }
-    } else if (dataType == DataTypes.BYTE_ARRAY) {
+    } else if (dataType == DataTypes.BYTE_ARRAY || dataType == DataTypes.BINARY) {
       // for complex type, it will come here, ignoring stats for complex type
       // TODO: support stats for complex type
     } else {
@@ -206,7 +206,7 @@ public class ColumnPageEncoderMeta extends ValueEncoderMeta implements Writable
       in.readInt();
       // precision field is obsoleted. It is stored in the schema data type in columnSpec
       in.readInt();
-    } else if (dataType == DataTypes.BYTE_ARRAY) {
+    } else if (dataType == DataTypes.BYTE_ARRAY || dataType == DataTypes.BINARY) {
       // for complex type, it will come here, ignoring stats for complex type
       // TODO: support stats for complex type
     } else {
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/DefaultEncodingFactory.java b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/DefaultEncodingFactory.java
index 506e1c7..f2eb92f 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/DefaultEncodingFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/page/encoding/DefaultEncodingFactory.java
@@ -101,11 +101,10 @@ public class DefaultEncodingFactory extends EncodingFactory {
             dimensionSpec.isInSortColumns() && dimensionSpec.isDoInvertedIndex())
             .createEncoder(null);
       case PLAIN_VALUE:
-        return new HighCardDictDimensionIndexCodec(
-            dimensionSpec.isInSortColumns(),
+        return new HighCardDictDimensionIndexCodec(dimensionSpec.isInSortColumns(),
             dimensionSpec.isInSortColumns() && dimensionSpec.isDoInvertedIndex(),
-            dimensionSpec.getSchemaDataType() == DataTypes.VARCHAR)
-            .createEncoder(null);
+            dimensionSpec.getSchemaDataType() == DataTypes.VARCHAR
+                || dimensionSpec.getSchemaDataType() == DataTypes.BINARY).createEncoder(null);
       default:
         throw new RuntimeException("unsupported dimension type: " +
             dimensionSpec.getColumnType());
@@ -114,9 +113,12 @@ public class DefaultEncodingFactory extends EncodingFactory {
 
   private ColumnPageEncoder createEncoderForMeasureOrNoDictionaryPrimitive(ColumnPage columnPage,
       TableSpec.ColumnSpec columnSpec) {
+
     SimpleStatsResult stats = columnPage.getStatistics();
     DataType dataType = stats.getDataType();
-    if (dataType == DataTypes.BOOLEAN) {
+    if (dataType == DataTypes.BOOLEAN
+        || dataType == DataTypes.BYTE_ARRAY
+        || columnPage.getDataType() == DataTypes.BINARY) {
       return new DirectCompressCodec(columnPage.getDataType()).createEncoder(null);
     } else if (dataType == DataTypes.BYTE ||
         dataType == DataTypes.SHORT ||
@@ -128,8 +130,6 @@ public class DefaultEncodingFactory extends EncodingFactory {
       return createEncoderForDecimalDataTypeMeasure(columnPage, columnSpec);
     } else if (dataType == DataTypes.FLOAT || dataType == DataTypes.DOUBLE) {
       return selectCodecByAlgorithmForFloating(stats, false, columnSpec).createEncoder(null);
-    } else if (dataType == DataTypes.BYTE_ARRAY) {
-      return new DirectCompressCodec(columnPage.getDataType()).createEncoder(null);
     } else {
       throw new RuntimeException("unsupported data type: " + stats.getDataType());
     }
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/row/CarbonRow.java b/core/src/main/java/org/apache/carbondata/core/datastore/row/CarbonRow.java
index 1141707..c140017 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/row/CarbonRow.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/row/CarbonRow.java
@@ -54,7 +54,11 @@ public class CarbonRow implements Serializable {
   }
 
   public String getString(int ordinal) {
-    return (String) data[ordinal];
+    if (null == data[ordinal]) {
+      return null;
+    } else {
+      return String.valueOf(data[ordinal]);
+    }
   }
 
   public Object getObject(int ordinal) {
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java b/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
index dca7fa2..d9fa936 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/converter/ThriftWrapperSchemaConverterImpl.java
@@ -152,6 +152,8 @@ public class ThriftWrapperSchemaConverterImpl implements SchemaConverter {
       return org.apache.carbondata.format.DataType.DATE;
     } else if (dataType.getId() == DataTypes.TIMESTAMP.getId()) {
       return org.apache.carbondata.format.DataType.TIMESTAMP;
+    } else if (dataType.getId() == DataTypes.BINARY.getId()) {
+      return org.apache.carbondata.format.DataType.BINARY;
     } else if (DataTypes.isArrayType(dataType)) {
       return org.apache.carbondata.format.DataType.ARRAY;
     } else if (DataTypes.isStructType(dataType)) {
@@ -498,6 +500,8 @@ public class ThriftWrapperSchemaConverterImpl implements SchemaConverter {
         return DataTypes.TIMESTAMP;
       case DATE:
         return DataTypes.DATE;
+      case BINARY:
+        return DataTypes.BINARY;
       case ARRAY:
         return DataTypes.createDefaultArrayType();
       case STRUCT:
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/datatype/BinaryType.java b/core/src/main/java/org/apache/carbondata/core/metadata/datatype/BinaryType.java
new file mode 100644
index 0000000..6ecd9db
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/datatype/BinaryType.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.core.metadata.datatype;
+
+public class BinaryType extends DataType {
+  static final DataType BINARY =
+      new BinaryType(DataTypes.BINARY_TYPE_ID, 26, "BINARY", -1);
+  private BinaryType(int id, int precedenceOrder, String name, int sizeInBytes) {
+    super(id, precedenceOrder, name, sizeInBytes);
+  }
+  // this function is needed to ensure singleton pattern while supporting java serialization
+  private Object readResolve() {
+    return DataTypes.BINARY;
+  }
+}
\ No newline at end of file
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/datatype/DataType.java b/core/src/main/java/org/apache/carbondata/core/metadata/datatype/DataType.java
index 8514ccb..4f282e3 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/datatype/DataType.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/datatype/DataType.java
@@ -97,7 +97,7 @@ public class DataType implements Serializable {
       return TIMESTAMP_CHAR;
     } else if (dataType == DataTypes.DATE) {
       return DATE_CHAR;
-    } else if (dataType == DataTypes.BYTE_ARRAY) {
+    } else if (dataType == DataTypes.BYTE_ARRAY || dataType == DataTypes.BINARY) {
       return BYTE_ARRAY_CHAR;
     } else {
       throw new RuntimeException("Unexpected type: " + dataType);
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/datatype/DataTypes.java b/core/src/main/java/org/apache/carbondata/core/metadata/datatype/DataTypes.java
index d71eea4..c073fa0 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/datatype/DataTypes.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/datatype/DataTypes.java
@@ -37,6 +37,7 @@ public class DataTypes {
   public static final DataType DOUBLE = DoubleType.DOUBLE;
   public static final DataType NULL = NullType.NULL;
   public static final DataType BYTE = ByteType.BYTE;
+  public static final DataType BINARY = BinaryType.BINARY;
 
   // internal use only, for variable length data type
   public static final DataType BYTE_ARRAY = ByteArrayType.BYTE_ARRAY;
@@ -69,6 +70,7 @@ public class DataTypes {
   public static final int STRUCT_TYPE_ID = 12;
   public static final int MAP_TYPE_ID = 13;
   public static final int VARCHAR_TYPE_ID = 18;
+  public static final int BINARY_TYPE_ID = 19;
 
   /**
    * create a DataType instance from uniqueId of the DataType
@@ -102,6 +104,8 @@ public class DataTypes {
       return NULL;
     } else if (id == DECIMAL_TYPE_ID) {
       return createDefaultDecimalType();
+    } else if (id == BINARY.getId()) {
+      return BINARY;
     } else if (id == ARRAY_TYPE_ID) {
       return createDefaultArrayType();
     } else if (id == STRUCT_TYPE_ID) {
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
index 53542d5..1d64293 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
@@ -186,6 +186,7 @@ public class TableSchemaBuilder {
         field.getDataType() == DataTypes.VARCHAR ||
         field.getDataType() == DataTypes.DATE ||
         field.getDataType() == DataTypes.TIMESTAMP ||
+        field.getDataType() == DataTypes.BINARY ||
         field.getDataType().isComplexType() ||
         (isComplexChild))  {
       newColumn.setDimensionColumn(true);
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/CarbonColumnVectorImpl.java b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/CarbonColumnVectorImpl.java
index 30d2317..18f440a 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/CarbonColumnVectorImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/result/vector/impl/CarbonColumnVectorImpl.java
@@ -92,7 +92,7 @@ public class CarbonColumnVectorImpl implements CarbonColumnVector {
     } else if (dataType instanceof DecimalType) {
       decimals = new BigDecimal[batchSize];
     } else if (dataType == DataTypes.STRING || dataType == DataTypes.BYTE_ARRAY
-        || dataType == DataTypes.VARCHAR) {
+        || dataType == DataTypes.VARCHAR || dataType == DataTypes.BINARY) {
       dictionaryVector = new CarbonColumnVectorImpl(batchSize, DataTypes.INT);
       bytes = new byte[batchSize][];
     } else {
@@ -233,7 +233,7 @@ public class CarbonColumnVectorImpl implements CarbonColumnVector {
     } else if (dataType instanceof DecimalType) {
       return decimals[rowId];
     } else if (dataType == DataTypes.STRING || dataType == DataTypes.BYTE_ARRAY
-        || dataType == DataTypes.VARCHAR) {
+        || dataType == DataTypes.VARCHAR || dataType == DataTypes.BINARY) {
       if (null != carbonDictionary) {
         int dictKey = (Integer) dictionaryVector.getData(rowId);
         return carbonDictionary.getDictionaryValue(dictKey);
@@ -295,7 +295,7 @@ public class CarbonColumnVectorImpl implements CarbonColumnVector {
     } else if (dataType instanceof DecimalType) {
       Arrays.fill(decimals, null);
     } else if (dataType == DataTypes.STRING || dataType == DataTypes.BYTE_ARRAY
-        || dataType == DataTypes.VARCHAR) {
+        || dataType == DataTypes.VARCHAR || dataType == DataTypes.BINARY) {
       Arrays.fill(bytes, null);
       this.dictionaryVector.reset();
     } else {
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
index a4af9cc..d8e4499 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java
@@ -2177,7 +2177,11 @@ public final class CarbonUtil {
         return DataTypes.FLOAT;
       case BYTE:
         return DataTypes.BYTE;
+      case BINARY:
+        return DataTypes.BINARY;
       default:
+        LOGGER.warn(String.format("Cannot match the data type, using default String data type: %s",
+            DataTypes.STRING.getName()));
         return DataTypes.STRING;
     }
   }
@@ -2382,9 +2386,8 @@ public final class CarbonUtil {
       return b.array();
     } else if (DataTypes.isDecimal(dataType)) {
       return DataTypeUtil.bigDecimalToByte((BigDecimal) value);
-    } else if (dataType == DataTypes.BYTE_ARRAY) {
-      return (byte[]) value;
-    } else if (dataType == DataTypes.STRING
+    } else if (dataType == DataTypes.BYTE_ARRAY || dataType == DataTypes.BINARY
+        || dataType == DataTypes.STRING
         || dataType == DataTypes.DATE
         || dataType == DataTypes.VARCHAR) {
       return (byte[]) value;
diff --git a/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java b/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
index 303cc80..7129f34 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/DataTypeUtil.java
@@ -476,7 +476,9 @@ public final class DataTypeUtil {
     } else if (actualDataType == DataTypes.LONG) {
       return ByteUtil.toXorBytes((Long) dimensionValue);
     } else if (actualDataType == DataTypes.TIMESTAMP) {
-      return ByteUtil.toXorBytes((Long)dimensionValue);
+      return ByteUtil.toXorBytes((Long) dimensionValue);
+    } else if (actualDataType == DataTypes.BINARY) {
+      return (byte[]) dimensionValue;
     } else {
       // Default action for String/Varchar
       return ByteUtil.toBytes(dimensionValue.toString());
@@ -603,6 +605,11 @@ public final class DataTypeUtil {
           return null;
         }
         return getDataTypeConverter().convertFromBigDecimalToDecimal(byteToBigDecimal(dataInBytes));
+      } else if (actualDataType == DataTypes.BINARY) {
+        if (isEmptyByteArray(dataInBytes)) {
+          return null;
+        }
+        return dataInBytes;
       } else {
         // Default action for String/Varchar
         return getDataTypeConverter().convertFromByteToUTF8String(dataInBytes);
@@ -1057,6 +1064,8 @@ public final class DataTypeUtil {
       return DataTypes.BYTE_ARRAY;
     } else if (DataTypes.BYTE_ARRAY.getName().equalsIgnoreCase(dataType.getName())) {
       return DataTypes.BYTE_ARRAY;
+    } else if (DataTypes.BINARY.getName().equalsIgnoreCase(dataType.getName())) {
+      return DataTypes.BINARY;
     } else if (dataType.getName().equalsIgnoreCase("decimal")) {
       return DataTypes.createDecimalType(precision, scale);
     } else if (dataType.getName().equalsIgnoreCase("array")) {
diff --git a/docs/sdk-guide.md b/docs/sdk-guide.md
index e040e64..002a06b 100644
--- a/docs/sdk-guide.md
+++ b/docs/sdk-guide.md
@@ -195,6 +195,7 @@ Each of SQL data types and Avro Data Types are mapped into data types of SDK. Fo
 | BIGINT | LONG | DataTypes.LONG |
 | DOUBLE | DOUBLE | DataTypes.DOUBLE |
 | VARCHAR |  -  | DataTypes.STRING |
+| BINARY |  -  | DataTypes.BINARY |
 | FLOAT | FLOAT | DataTypes.FLOAT |
 | BYTE |  -  | DataTypes.BYTE |
 | DATE | DATE | DataTypes.DATE |
diff --git a/docs/supported-data-types-in-carbondata.md b/docs/supported-data-types-in-carbondata.md
index daf1acf..4960453 100644
--- a/docs/supported-data-types-in-carbondata.md
+++ b/docs/supported-data-types-in-carbondata.md
@@ -51,4 +51,4 @@
 
   * Other Types
     * BOOLEAN
-
+    * BINARY
diff --git a/format/src/main/thrift/schema.thrift b/format/src/main/thrift/schema.thrift
index d39e548..5daf767 100644
--- a/format/src/main/thrift/schema.thrift
+++ b/format/src/main/thrift/schema.thrift
@@ -39,6 +39,7 @@ enum DataType {
 	MAP = 23,
 	FLOAT = 24,
 	BYTE = 25
+	BINARY = 26,
 }
 
 /**
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/util/CarbonVectorizedRecordReader.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/util/CarbonVectorizedRecordReader.java
index 1c11275..99db9d3 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/util/CarbonVectorizedRecordReader.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/util/CarbonVectorizedRecordReader.java
@@ -147,7 +147,8 @@ public class CarbonVectorizedRecordReader extends AbstractRecordReader<Object> {
         DataType dataType = msr.getMeasure().getDataType();
         if (dataType == DataTypes.BOOLEAN || dataType == DataTypes.SHORT
             || dataType == DataTypes.INT || dataType == DataTypes.LONG
-            || dataType == DataTypes.FLOAT || dataType == DataTypes.BYTE) {
+            || dataType == DataTypes.FLOAT || dataType == DataTypes.BYTE
+            || dataType == DataTypes.BINARY) {
           fields[msr.getOrdinal()] =
               new StructField(msr.getColumnName(), msr.getMeasure().getDataType());
         } else if (DataTypes.isDecimal(dataType)) {
diff --git a/integration/spark-common-test/pom.xml b/integration/spark-common-test/pom.xml
index 39996fc..de3eb44 100644
--- a/integration/spark-common-test/pom.xml
+++ b/integration/spark-common-test/pom.xml
@@ -163,7 +163,6 @@
   </dependencies>
 
   <build>
-    <testSourceDirectory>src/test/scala</testSourceDirectory>
     <resources>
       <resource>
         <directory>src/resources</directory>
diff --git a/integration/spark-common-test/src/test/java/org/apache/carbondata/sdk/util/BinaryUtil.java b/integration/spark-common-test/src/test/java/org/apache/carbondata/sdk/util/BinaryUtil.java
new file mode 100644
index 0000000..9144e4c
--- /dev/null
+++ b/integration/spark-common-test/src/test/java/org/apache/carbondata/sdk/util/BinaryUtil.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.sdk.util;
+
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.sdk.file.CarbonWriter;
+import org.apache.carbondata.sdk.file.Field;
+import org.apache.carbondata.sdk.file.Schema;
+
+import java.io.*;
+
+import static org.apache.carbondata.sdk.file.utils.SDKUtil.listFiles;
+
+public class BinaryUtil {
+  public static void binaryToCarbon(String sourceImageFolder, String outputPath,
+                                    String sufAnnotation, final String sufImage) throws Exception {
+    Field[] fields = new Field[5];
+    fields[0] = new Field("binaryId", DataTypes.INT);
+    fields[1] = new Field("binaryName", DataTypes.STRING);
+    fields[2] = new Field("binary", DataTypes.BINARY);
+    fields[3] = new Field("labelName", DataTypes.STRING);
+    fields[4] = new Field("labelContent", DataTypes.STRING);
+    CarbonWriter writer = CarbonWriter
+        .builder()
+        .outputPath(outputPath)
+        .withCsvInput(new Schema(fields))
+        .withBlockSize(256)
+        .writtenBy("binaryExample")
+        .withPageSizeInMb(1)
+        .build();
+    binaryToCarbon(sourceImageFolder, writer, sufAnnotation, sufImage);
+  }
+
+  public static boolean binaryToCarbon(String sourceImageFolder, CarbonWriter writer,
+      String sufAnnotation, final String sufImage) throws Exception {
+    int num = 1;
+
+    byte[] originBinary = null;
+
+    // read and write image data
+    for (int j = 0; j < num; j++) {
+
+      Object[] files = listFiles(sourceImageFolder, sufImage).toArray();
+
+      if (null != files) {
+        for (int i = 0; i < files.length; i++) {
+          // read image and encode to Hex
+          BufferedInputStream bis = new BufferedInputStream(
+              new FileInputStream(new File((String) files[i])));
+          originBinary = new byte[bis.available()];
+          while ((bis.read(originBinary)) != -1) {
+          }
+
+          String labelFileName = ((String) files[i]).split(sufImage)[0] + sufAnnotation;
+          BufferedInputStream txtBis = new BufferedInputStream(new FileInputStream(labelFileName));
+          String labelValue = null;
+          byte[] labelBinary = null;
+          labelBinary = new byte[txtBis.available()];
+          while ((txtBis.read(labelBinary)) != -1) {
+            labelValue = new String(labelBinary, "UTF-8");
+          }
+          // write data
+          writer.write(new Object[]{i, (String) files[i], originBinary,
+              labelFileName, labelValue});
+          bis.close();
+          txtBis.close();
+        }
+      }
+      writer.close();
+    }
+    return true;
+  }
+}
diff --git a/integration/spark-common-test/src/test/resources/binaryStringNullData.csv b/integration/spark-common-test/src/test/resources/binaryStringNullData.csv
new file mode 100644
index 0000000..8a7b595
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/binaryStringNullData.csv
@@ -0,0 +1,4 @@
+2|false|2.png|history|true
+3|false|3.png|biology|false
+3|false|3.png||false
+1|true|1.png|education|true
\ No newline at end of file
diff --git a/integration/spark-common-test/src/test/resources/binarydata.csv b/integration/spark-common-test/src/test/resources/binarydata.csv
new file mode 100644
index 0000000..ed642c2
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/binarydata.csv
@@ -0,0 +1,3 @@
+2,false,2.png,89504e470d0a1a0a0000000d49484452000002f5000000cf0806000000753c2e6f0000000473424954080808087c0864880000001974455874536f66747761726500676e6f6d652d73637265656e73686f74ef03bf3e0000200049444154789cecbdf99324b9b1e7f7012222efccbafb9ae9e9991e72488ad2d3aecc642bd3ff6f32993dadd993f62d8fe190ddd357dd5995779c807e001081888caaae6ecee370a4f4b6e8cc8a8cc0e170b87f01381c426badd9d18e76b4a31ded68473bdad18e76f48b25f9731760473bdad18e76b4a31ded68473bdad1df473b50bfa31ded68473bdad18e76b4a31dfdc26907ea [...]
+3,false,3.png,89504e470d0a1a0a0000000d4948445200000136000000a108060000004da893e60000000473424954080808087c0864880000001974455874536f66747761726500676e6f6d652d73637265656e73686f74ef03bf3e0000200049444154789cecbd77741dd77deffbd97b4ec54107480004c02a92a24452222552dda6e4a83ab665d9b22dc5762c3bb16f927b7373535ecacabd6bbd756f9297b7d6bd491c3b2f4e6c15ab595631d53b295194a8c2de3b09a277e000a7ceccdeef8f293800d109928875be5ecba0ce99d9b3f79cd9dff9f59fd05a6bf2c8238f3c7e8d202ff604f2c8238f3c661a7962cb238fff88 [...]
+1,true,1.png,89504e470d0a1a0a0000000d494844520000014a0000005008060000007f133c4c0000000473424954080808087c0864880000001974455874536f66747761726500676e6f6d652d73637265656e73686f74ef03bf3e0000200049444154789cedbd596c1cf79deffbadaaaeded8dd642fdcf755a24449a62c538b25d9892d8f6cc571ec646e723088279933987b33b81860705fe6615e2fce794b70e6006770700607671e06492e66e2716225926ccb32b5d35a487197c47de97defaebdea3e7457a99b5d4db229ca9273ea031816bb96aeaaaefad5ffff5bbe3f626d6d4d818181818141590896650d4369606060b [...]
\ No newline at end of file
diff --git a/integration/spark-common-test/src/test/resources/binarystringdata.csv b/integration/spark-common-test/src/test/resources/binarystringdata.csv
new file mode 100644
index 0000000..02121ca
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/binarystringdata.csv
@@ -0,0 +1,3 @@
+2|false|2.png|history|true
+3|false|3.png|biology|false
+1|true|1.png|education|true
\ No newline at end of file
diff --git a/integration/spark-common-test/src/test/resources/binarystringdata2.csv b/integration/spark-common-test/src/test/resources/binarystringdata2.csv
new file mode 100644
index 0000000..f3ea934
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/binarystringdata2.csv
@@ -0,0 +1,3 @@
+2|false|2.png|abc|true
+3|false|3.png|binary|false
+1|true|1.png|^Ayard duty^B|true
\ No newline at end of file
diff --git a/integration/spark-common-test/src/test/resources/jsonFiles/data/allPrimitiveType.json b/integration/spark-common-test/src/test/resources/jsonFiles/data/allPrimitiveType.json
index 6d81ec7..d61ff14 100644
--- a/integration/spark-common-test/src/test/resources/jsonFiles/data/allPrimitiveType.json
+++ b/integration/spark-common-test/src/test/resources/jsonFiles/data/allPrimitiveType.json
@@ -7,5 +7,6 @@
 	"boolField": false,
 	"dateField": "2019-03-02",
 	"timeField": "2019-02-12 03:03:34",
-	"decimalField" : 55.35
+	"decimalField" : 55.35,
+	"binaryField" : "abc"
 }
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/binary/TestBinaryDataType.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/binary/TestBinaryDataType.scala
new file mode 100644
index 0000000..b2bda24
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/binary/TestBinaryDataType.scala
@@ -0,0 +1,1153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.integration.spark.testsuite.binary
+
+import java.util.Arrays
+
+import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.metadata.CarbonMetadata
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable
+import org.apache.carbondata.core.util.CarbonProperties
+
+import org.apache.commons.codec.binary.Hex
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.test.util.QueryTest
+import org.apache.spark.util.SparkUtil
+import org.scalatest.BeforeAndAfterAll
+
+/**
+  * Test cases for testing binary
+  */
+class TestBinaryDataType extends QueryTest with BeforeAndAfterAll {
+    override def beforeAll {
+    }
+
+    test("Create table and load data with binary column") {
+        sql("DROP TABLE IF EXISTS binaryTable")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS binaryTable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+               | TBLPROPERTIES('SORT_COLUMNS'='')
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarydata.csv'
+               | INTO TABLE binaryTable
+               | OPTIONS('header'='false')
+             """.stripMargin)
+
+        val result = sql("desc formatted binaryTable").collect()
+        var flag = false
+        result.foreach { each =>
+            if ("binary".equals(each.get(1))) {
+                flag = true
+            }
+        }
+        assert(flag)
+
+        checkAnswer(sql("SELECT COUNT(*) FROM binaryTable"), Seq(Row(3)))
+        try {
+            val df = sql("SELECT * FROM binaryTable").collect()
+            assert(3 == df.length)
+            df.foreach { each =>
+                assert(5 == each.length)
+
+                assert(Integer.valueOf(each(0).toString) > 0)
+                assert(each(1).toString.equalsIgnoreCase("false") || (each(1).toString.equalsIgnoreCase("true")))
+                assert(each(2).toString.contains(".png"))
+
+                val bytes40 = each.getAs[Array[Byte]](3).slice(0, 40)
+                val binaryName = each(2).toString
+                val expectedBytes = Hex.encodeHex(firstBytes20.get(binaryName).get)
+                assert(Arrays.equals(String.valueOf(expectedBytes).getBytes(), bytes40), "incorrect numeric value for flattened binaryField")
+
+                assert(each(4).toString.equalsIgnoreCase("false") || (each(4).toString.equalsIgnoreCase("true")))
+
+                val df = sql("SELECT name,binaryField FROM binaryTable").collect()
+                assert(3 == df.length)
+                df.foreach { each =>
+                    assert(2 == each.length)
+                    val binaryName = each(0).toString
+                    val bytes40 = each.getAs[Array[Byte]](1).slice(0, 40)
+                    val expectedBytes = Hex.encodeHex(firstBytes20.get(binaryName).get)
+                    assert(Arrays.equals(String.valueOf(expectedBytes).getBytes(), bytes40), "incorrect numeric value for flattened binaryField")
+                }
+            }
+        } catch {
+            case e: Exception =>
+                e.printStackTrace()
+                assert(false)
+        }
+    }
+
+    private val firstBytes20 = Map("1.png" -> Array[Byte](-119, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68, 82, 0, 0, 1, 74),
+        "2.png" -> Array[Byte](-119, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68, 82, 0, 0, 2, -11),
+        "3.png" -> Array[Byte](-119, 80, 78, 71, 13, 10, 26, 10, 0, 0, 0, 13, 73, 72, 68, 82, 0, 0, 1, 54)
+    )
+
+    test("Don't support sort_columns") {
+        sql("DROP TABLE IF EXISTS binaryTable")
+        val exception = intercept[Exception] {
+            sql(
+                s"""
+                   | CREATE TABLE IF NOT EXISTS binaryTable (
+                   |    id double,
+                   |    label boolean,
+                   |    name STRING,
+                   |    binaryField BINARY,
+                   |    autoLabel boolean)
+                   | STORED BY 'carbondata'
+                   | TBLPROPERTIES('SORT_COLUMNS'='binaryField')
+             """.stripMargin)
+        }
+        assert(exception.getMessage.contains("sort_columns is unsupported for binary datatype column"))
+    }
+
+    test("Unsupport LOCAL_DICTIONARY_INCLUDE for binary") {
+
+        sql("DROP TABLE IF EXISTS binaryTable")
+        val exception = intercept[MalformedCarbonCommandException] {
+            sql(
+                """
+                  | CREATE TABLE binaryTable(
+                  |     id int,
+                  |     name string,
+                  |     city string,
+                  |     age int,
+                  |     binaryField binary)
+                  | STORED BY 'org.apache.carbondata.format'
+                  | tblproperties('local_dictionary_enable'='true','local_dictionary_include'='binaryField')
+                """.stripMargin)
+        }
+        assert(exception.getMessage.contains(
+            "LOCAL_DICTIONARY_INCLUDE/LOCAL_DICTIONARY_EXCLUDE column: binaryfield is not a string/complex/varchar datatype column. " +
+                    "LOCAL_DICTIONARY_COLUMN should be no dictionary string/complex/varchar datatype column"))
+    }
+
+    test("Supports LOCAL_DICTIONARY_EXCLUDE for binary") {
+        sql("DROP TABLE IF EXISTS binaryTable")
+        sql(
+            """
+              | CREATE TABLE binaryTable(
+              |     id int,
+              |     name string,
+              |     city string,
+              |     age int,
+              |     binaryField binary)
+              | STORED BY 'org.apache.carbondata.format'
+              | tblproperties('local_dictionary_enable'='true','LOCAL_DICTIONARY_EXCLUDE'='binaryField')
+            """.stripMargin)
+        assert(true)
+    }
+
+    test("Unsupport inverted_index for binary") {
+        sql("DROP TABLE IF EXISTS binaryTable")
+        val exception = intercept[MalformedCarbonCommandException] {
+            sql(
+                """
+                  | CREATE TABLE binaryTable(
+                  |     id int,
+                  |     name string,
+                  |     city string,
+                  |     age int,
+                  |     binaryField binary)
+                  | STORED BY 'org.apache.carbondata.format'
+                  | tblproperties('inverted_index'='binaryField')
+                """.stripMargin)
+        }
+        assert(exception.getMessage.contains("INVERTED_INDEX column: binaryfield should be present in SORT_COLUMNS"))
+    }
+
+    test("Unsupport inverted_index and sort_columns for binary") {
+        sql("DROP TABLE IF EXISTS binaryTable")
+        val exception = intercept[MalformedCarbonCommandException] {
+            sql(
+                """
+                  | CREATE TABLE binaryTable(
+                  |     id int,
+                  |     name string,
+                  |     city string,
+                  |     age int,
+                  |     binaryField binary)
+                  | STORED BY 'org.apache.carbondata.format'
+                  | tblproperties('inverted_index'='binaryField','SORT_COLUMNS'='binaryField')
+                """.stripMargin)
+        }
+        assert(exception.getMessage.contains("sort_columns is unsupported for binary datatype column: binaryfield"))
+    }
+
+    test("COLUMN_META_CACHE doesn't support binary") {
+        sql("DROP TABLE IF EXISTS binaryTable")
+        val exception = intercept[Exception] {
+            sql(
+                s"""
+                   | CREATE TABLE IF NOT EXISTS binaryTable (
+                   |    id INT,
+                   |    label boolean,
+                   |    name STRING,
+                   |    binaryField BINARY,
+                   |    autoLabel boolean)
+                   | STORED BY 'carbondata'
+                   | TBLPROPERTIES('COLUMN_META_CACHE'='binaryField')
+             """.stripMargin)
+        }
+        assert(exception.getMessage.contains("binaryfield is a binary data type column and binary data type is not allowed for the option"))
+    }
+
+    test("RANGE_COLUMN doesn't support binary") {
+        sql("DROP TABLE IF EXISTS binaryTable")
+        val exception = intercept[Exception] {
+            sql(
+                s"""
+                   | CREATE TABLE IF NOT EXISTS binaryTable (
+                   |    id INT,
+                   |    label boolean,
+                   |    name STRING,
+                   |    binaryField BINARY,
+                   |    autoLabel boolean)
+                   | STORED BY 'carbondata'
+                   | TBLPROPERTIES('RANGE_COLUMN'='binaryField')
+             """.stripMargin)
+        }
+        assert(exception.getMessage.contains("RANGE_COLUMN doesn't support binary data type"))
+    }
+
+    test("Test carbon.column.compressor=zstd") {
+        sql("DROP TABLE IF EXISTS binaryTable")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS binaryTable (
+               |    id INT,
+               |    label boolean,
+               |    name STRING,
+               |    binaryField BINARY,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+               | TBLPROPERTIES('carbon.column.compressor'='zstd')
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarydata.csv'
+               | INTO TABLE binaryTable
+               | OPTIONS('header'='false')
+             """.stripMargin)
+        checkAnswer(sql("SELECT COUNT(*) FROM binaryTable"), Seq(Row(3)))
+        val value = sql("SELECT * FROM binaryTable").collect()
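+        // each stored binaryField begins with the hex text of the PNG signature ("89504e47...")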
+        value.foreach { each =>
+            assert(5 == each.length)
+            assert(1 == each.getAs[Int](0) || 2 == each.getAs[Int](0) || 3 == each.getAs[Int](0))
+            assert(".png".equals(each.getAs(2).toString.substring(1, 5)))
+            assert("89504e470d0a1a0a0000000d4948445200000".equals(new String(each.getAs[Array[Byte]](3).slice(0, 37))))
+        }
+    }
+
+    test("Test carbon.column.compressor=gzip") {
+        sql("DROP TABLE IF EXISTS binaryTable")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS binaryTable (
+               |    id INT,
+               |    label boolean,
+               |    name STRING,
+               |    binaryField BINARY,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+               | TBLPROPERTIES('carbon.column.compressor'='gzip')
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarydata.csv'
+               | INTO TABLE binaryTable
+               | OPTIONS('header'='false')
+             """.stripMargin)
+        checkAnswer(sql("SELECT COUNT(*) FROM binaryTable"), Seq(Row(3)))
+        val value = sql("SELECT * FROM binaryTable").collect()
+        value.foreach { each =>
+            assert(5 == each.length)
+            assert(1 == each.getAs[Int](0) || 2 == each.getAs[Int](0) || 3 == each.getAs[Int](0))
+            assert(".png".equals(each.getAs(2).toString.substring(1, 5)))
+            assert("89504e470d0a1a0a0000000d4948445200000".equals(new String(each.getAs[Array[Byte]](3).slice(0, 37))))
+        }
+    }
+
+    test("Test carbon.column.compressor=snappy") {
+        sql("DROP TABLE IF EXISTS binaryTable")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS binaryTable (
+               |    id INT,
+               |    label boolean,
+               |    name STRING,
+               |    binaryField BINARY,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+               | TBLPROPERTIES('carbon.column.compressor'='snappy')
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarydata.csv'
+               | INTO TABLE binaryTable
+               | OPTIONS('header'='false')
+             """.stripMargin)
+        checkAnswer(sql("SELECT COUNT(*) FROM binaryTable"), Seq(Row(3)))
+        val value = sql("SELECT * FROM binaryTable").collect()
+        value.foreach { each =>
+            assert(5 == each.length)
+            assert(1 == each.getAs[Int](0) || 2 == each.getAs[Int](0) || 3 == each.getAs[Int](0))
+            assert(".png".equals(each.getAs(2).toString.substring(1, 5)))
+            assert("89504e470d0a1a0a0000000d4948445200000".equals(new String(each.getAs[Array[Byte]](3).slice(0, 37))))
+        }
+    }
+
+    test("Support filter other column in binary table") {
+        sql("DROP TABLE IF EXISTS binaryTable")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS binaryTable (
+               |    id INT,
+               |    label boolean,
+               |    name STRING,
+               |    binaryField BINARY,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+               | TBLPROPERTIES('carbon.column.compressor'='zstd')
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarydata.csv'
+               | INTO TABLE binaryTable
+               | OPTIONS('header'='false')
+             """.stripMargin)
+        checkAnswer(sql("SELECT COUNT(*) FROM binaryTable where id =1"), Seq(Row(1)))
+
+
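+        // filtering on the binary column itself requires casting the literal to binary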
+        sql("insert into binaryTable values(1,true,'Bob','hello',false)")
+        checkAnswer(sql("SELECT COUNT(*) FROM binaryTable where binaryField =cast('hello' as binary)"), Seq(Row(1)))
+    }
+
+    test("Test create table with buckets unsafe") {
+        CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_UNSAFE_SORT, "true")
+        sql("DROP TABLE IF EXISTS binaryTable")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS binaryTable (
+               |    id INT,
+               |    label boolean,
+               |    name STRING,
+               |    binaryField BINARY,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+               | TBLPROPERTIES('BUCKETNUMBER'='4', 'BUCKETCOLUMNS'='binaryField')
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarydata.csv'
+               | INTO TABLE binaryTable
+               | OPTIONS('header'='false')
+             """.stripMargin)
+
+        CarbonProperties.getInstance().addProperty(CarbonCommonConstants.ENABLE_UNSAFE_SORT, "false")
+        val table: CarbonTable = CarbonMetadata.getInstance().getCarbonTable("default", "binaryTable")
+        if (table != null && table.getBucketingInfo("binarytable") != null) {
+            assert(true)
+        } else {
+            assert(false, "Bucketing info does not exist")
+        }
+    }
+
+    test("insert into for hive and carbon") {
+        sql("DROP TABLE IF EXISTS hiveTable")
+        sql("DROP TABLE IF EXISTS carbontable")
+        sql("DROP TABLE IF EXISTS hiveTable2")
+        sql("DROP TABLE IF EXISTS carbontable2")
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS hivetable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | row format delimited fields terminated by ','
+             """.stripMargin)
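+        // both a plain string literal and an explicit cast to binary are accepted for the binary column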
+        sql("insert into hivetable values(1,true,'Bob','binary',false)")
+        sql("insert into hivetable values(2,false,'Xu','test',true)")
+        sql("insert into hivetable select 2,false,'Xu',cast('carbon' as binary),true")
+        val hiveResult = sql("SELECT * FROM hivetable")
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+             """.stripMargin)
+        sql("insert into carbontable values(1,true,'Bob','binary',false)")
+        sql("insert into carbontable values(2,false,'Xu','test',true)")
+        sql("insert into carbontable select 2,false,'Xu',cast('carbon' as binary),true")
+        val carbonResult = sql("SELECT * FROM carbontable")
+        checkAnswer(hiveResult, carbonResult)
+
+        sql("CREATE TABLE hivetable2 AS SELECT * FROM carbontable")
+        sql("CREATE TABLE carbontable2 AS SELECT * FROM hivetable")
+        val carbonResult2 = sql("SELECT * FROM carbontable2")
+        val hiveResult2 = sql("SELECT * FROM hivetable2")
+        checkAnswer(hiveResult2, carbonResult2)
+        checkAnswer(carbonResult, carbonResult2)
+        checkAnswer(hiveResult, hiveResult2)
+        assert(3 == carbonResult2.collect().length)
+        assert(3 == hiveResult2.collect().length)
+
+        sql("INSERT INTO hivetable2 SELECT * FROM carbontable")
+        sql("INSERT INTO carbontable2 SELECT * FROM hivetable")
+        val carbonResult3 = sql("SELECT * FROM carbontable2")
+        val hiveResult3 = sql("SELECT * FROM hivetable2")
+        checkAnswer(carbonResult3, hiveResult3)
+        assert(6 == carbonResult3.collect().length)
+        assert(6 == hiveResult3.collect().length)
+    }
+
+    test("Support filter for hive and carbon") {
+        sql("DROP TABLE IF EXISTS hiveTable")
+        sql("DROP TABLE IF EXISTS carbontable")
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS hivetable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | row format delimited fields terminated by ','
+             """.stripMargin)
+        sql("insert into hivetable values(1,true,'Bob','binary',false)")
+        sql("insert into hivetable values(2,false,'Xu','test',true)")
+        val hiveResult = sql("SELECT * FROM hivetable where binaryField=cast('binary' as binary)")
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+             """.stripMargin)
+        sql("insert into carbontable values(1,true,'Bob','binary',false)")
+        sql("insert into carbontable values(2,false,'Xu','test',true)")
+        val carbonResult = sql("SELECT * FROM carbontable where binaryField=cast('binary' as binary)")
+        checkAnswer(hiveResult, carbonResult)
+        assert(1 == carbonResult.collect().length)
+        carbonResult.collect().foreach { each =>
+            if (1 == each.get(0)) {
+                assert("binary".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (2 == each.get(0)) {
+                assert("test".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        // filter with non string
+        val exception = intercept[Exception] {
+            sql("SELECT * FROM carbontable where binaryField=binary").collect()
+        }
+        assert(exception.getMessage.contains("cannot resolve '`binary`' given input columns"))
+
+        // filter with not equal
+        val hiveResult3 = sql("SELECT * FROM hivetable where binaryField!=cast('binary' as binary)")
+        val carbonResult3 = sql("SELECT * FROM carbontable where binaryField!=cast('binary' as binary)")
+        checkAnswer(hiveResult3, carbonResult3)
+        assert(1 == carbonResult3.collect().length)
+        carbonResult3.collect().foreach { each =>
+            assert(2 == each.get(0))
+            assert("test".equals(new String(each.getAs[Array[Byte]](3))))
+        }
+
+        // filter with in
+        val hiveResult4 = sql("SELECT * FROM hivetable where binaryField in (cast('binary' as binary))")
+        val carbonResult4 = sql("SELECT * FROM carbontable where binaryField in (cast('binary' as binary))")
+        checkAnswer(hiveResult4, carbonResult4)
+        assert(1 == carbonResult4.collect().length)
+        carbonResult4.collect().foreach { each =>
+            assert(1 == each.get(0))
+            assert("binary".equals(new String(each.getAs[Array[Byte]](3))))
+        }
+
+        // filter with not in
+        val hiveResult5 = sql("SELECT * FROM hivetable where binaryField not in (cast('binary' as binary))")
+        val carbonResult5 = sql("SELECT * FROM carbontable where binaryField not in (cast('binary' as binary))")
+        checkAnswer(hiveResult5, carbonResult5)
+        assert(1 == carbonResult5.collect().length)
+        carbonResult5.collect().foreach { each =>
+            assert(2 == each.get(0))
+            assert("test".equals(new String(each.getAs[Array[Byte]](3))))
+        }
+    }
+
+    test("Support update and delete ") {
+        sql("DROP TABLE IF EXISTS carbontable")
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+             """.stripMargin)
+        sql("insert into carbontable values(1,true,'Bob','binary',false)")
+        sql("insert into carbontable values(2,false,'Xu','test',true)")
+        var carbonResult = sql("SELECT * FROM carbontable where binaryField=cast('binary' as binary)")
+        assert(1 == carbonResult.collect().length)
+        carbonResult.collect().foreach { each =>
+            if (1 == each.get(0)) {
+                assert("binary".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (2 == each.get(0)) {
+                assert("test".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        // Update for binary in carbon
+        sql("UPDATE carbontable SET (name) = ('David') WHERE id = 1").show()
+        sql("UPDATE carbontable SET (binaryField) = ('carbon2') WHERE id = 1").show()
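+        // the updated binary value round-trips as the bytes of the supplied string literal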
+
+        carbonResult = sql("SELECT * FROM carbontable where binaryField=cast('binary' as binary)")
+        carbonResult.collect().foreach { each =>
+            if (1 == each.get(0)) {
+                assert("carbon2".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (2 == each.get(0)) {
+                assert("test".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        // test cast string to binary, binary to string
+        val stringValue = sql("SELECT cast(binaryField as string) FROM carbontable WHERE id = 1").collect()
+        stringValue.foreach { each =>
+            assert("carbon2".equals(each.getAs(0)))
+        }
+        val binaryValue = sql("SELECT cast(name as binary) FROM carbontable WHERE id = 1").collect()
+        binaryValue.foreach { each =>
+            assert("David".equals(new String(each.getAs[Array[Byte]](0))))
+        }
+
+        // Test delete
+        sql("DELETE FROM carbontable WHERE id = 2").show()
+
+        carbonResult = sql("SELECT * FROM carbontable where binaryField=cast('binary' as binary)")
+        carbonResult.collect().foreach { each =>
+            if (1 == each.get(0)) {
+                assert("carbon2".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+    }
+
+    test("Create table and load data with binary column for hive and carbon, CTAS and insert int hive table select from carbon table") {
+        sql("DROP TABLE IF EXISTS hivetable")
+        sql("DROP TABLE IF EXISTS hivetable2")
+        sql("DROP TABLE IF EXISTS hivetable3")
+        sql("DROP TABLE IF EXISTS carbontable")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS hivetable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | row format delimited fields terminated by '|'
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarystringdata.csv'
+               | INTO TABLE hivetable
+             """.stripMargin)
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarystringdata.csv'
+               | INTO TABLE carbontable
+               | OPTIONS('header'='false','DELIMITER'='|','bad_records_action'='fail')
+             """.stripMargin)
+
+        val hiveResult = sql("SELECT * FROM hivetable")
+        val carbonResult = sql("SELECT * FROM carbontable")
+        checkAnswer(hiveResult, carbonResult)
+        checkAnswer(sql("SELECT COUNT(*) FROM hivetable"), Seq(Row(3)))
+        try {
+            val carbonDF = carbonResult.collect()
+            assert(3 == carbonDF.length)
+            carbonDF.foreach { each =>
+                assert(5 == each.length)
+
+                assert(Integer.valueOf(each(0).toString) > 0)
+                assert(each(1).toString.equalsIgnoreCase("false") || (each(1).toString.equalsIgnoreCase("true")))
+                assert(each(2).toString.contains(".png"))
+
+                val value = new String(each.getAs[Array[Byte]](3))
+                assert("\u0001history\u0002".equals(value) || "\u0001biology\u0002".equals(value)
+                        || "\u0001education\u0002".equals(value) || "".equals(value))
+                assert(each(4).toString.equalsIgnoreCase("false") || (each(4).toString.equalsIgnoreCase("true")))
+            }
+
+            val df = hiveResult.collect()
+            assert(3 == df.length)
+            df.foreach { each =>
+                assert(5 == each.length)
+
+                assert(Integer.valueOf(each(0).toString) > 0)
+                assert(each(1).toString.equalsIgnoreCase("false") || (each(1).toString.equalsIgnoreCase("true")))
+                assert(each(2).toString.contains(".png"))
+
+
+                val value = new String(each.getAs[Array[Byte]](3))
+                assert("\u0001history\u0002".equals(value) || "\u0001biology\u0002".equals(value)
+                        || "\u0001education\u0002".equals(value) || "".equals(value))
+                assert(each(4).toString.equalsIgnoreCase("false") || (each(4).toString.equalsIgnoreCase("true")))
+            }
+
+            sql(
+                s"""
+                   | CREATE TABLE IF NOT EXISTS hivetable2 (
+                   |    id int,
+                   |    label boolean,
+                   |    name string,
+                   |    binaryField binary,
+                   |    autoLabel boolean)
+                   | row format delimited fields terminated by '|'
+             """.stripMargin)
+            sql("insert into hivetable2 select * from carbontable")
+            sql("create table hivetable3 as select * from carbontable")
+            val hiveResult2 = sql("SELECT * FROM hivetable2")
+            val hiveResult3 = sql("SELECT * FROM hivetable3")
+            checkAnswer(hiveResult, hiveResult2)
+            checkAnswer(hiveResult2, hiveResult3)
+        } catch {
+            case e: Exception =>
+                e.printStackTrace()
+                assert(false)
+        }
+    }
+
+    // TODO
+    ignore("Create table and load data with binary column for hive and carbon, CTAS and insert int hive table select from carbon table, for null value") {
+        sql("DROP TABLE IF EXISTS hivetable")
+        sql("DROP TABLE IF EXISTS hivetable2")
+        sql("DROP TABLE IF EXISTS hivetable3")
+        sql("DROP TABLE IF EXISTS carbontable")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS hivetable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | row format delimited fields terminated by '|'
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binaryStringNullData.csv'
+               | INTO TABLE hivetable
+             """.stripMargin)
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binaryStringNullData.csv'
+               | INTO TABLE carbontable
+               | OPTIONS('header'='false','DELIMITER'='|','bad_records_action'='fail')
+             """.stripMargin)
+
+        val hiveResult = sql("SELECT * FROM hivetable")
+        val carbonResult = sql("SELECT * FROM carbontable")
+        checkAnswer(hiveResult, carbonResult)
+        checkAnswer(sql("SELECT COUNT(*) FROM hivetable"), Seq(Row(4)))
+        try {
+            val carbonDF = carbonResult.collect()
+            assert(4 == carbonDF.length)
+            carbonDF.foreach { each =>
+                assert(5 == each.length)
+
+                assert(Integer.valueOf(each(0).toString) > 0)
+                assert(each(1).toString.equalsIgnoreCase("false") || (each(1).toString.equalsIgnoreCase("true")))
+                assert(each(2).toString.contains(".png"))
+
+                val value = new String(each.getAs[Array[Byte]](3))
+                assert("\u0001history\u0002".equals(value) || "\u0001biology\u0002".equals(value)
+                        || "\u0001education\u0002".equals(value) || "".equals(value))
+                assert(each(4).toString.equalsIgnoreCase("false") || (each(4).toString.equalsIgnoreCase("true")))
+            }
+
+            val df = hiveResult.collect()
+            assert(4 == df.length)
+            df.foreach { each =>
+                assert(5 == each.length)
+
+                assert(Integer.valueOf(each(0).toString) > 0)
+                assert(each(1).toString.equalsIgnoreCase("false") || (each(1).toString.equalsIgnoreCase("true")))
+                assert(each(2).toString.contains(".png"))
+
+
+                val value = new String(each.getAs[Array[Byte]](3))
+                assert("\u0001history\u0002".equals(value) || "\u0001biology\u0002".equals(value)
+                        || "\u0001education\u0002".equals(value) || "".equals(value))
+                assert(each(4).toString.equalsIgnoreCase("false") || (each(4).toString.equalsIgnoreCase("true")))
+            }
+
+            sql(
+                s"""
+                   | CREATE TABLE IF NOT EXISTS hivetable2 (
+                   |    id int,
+                   |    label boolean,
+                   |    name string,
+                   |    binaryField binary,
+                   |    autoLabel boolean)
+                   | row format delimited fields terminated by '|'
+             """.stripMargin)
+            sql("insert into hivetable2 select * from carbontable")
+            sql("create table hivetable3 as select * from carbontable")
+            val hiveResult2 = sql("SELECT * FROM hivetable2")
+            val hiveResult3 = sql("SELECT * FROM hivetable3")
+            checkAnswer(hiveResult, hiveResult2)
+            checkAnswer(hiveResult2, hiveResult3)
+        } catch {
+            case e: Exception =>
+                e.printStackTrace()
+                assert(false)
+        }
+    }
+
+    test("insert into carbon as select from hive after hive load data") {
+        sql("DROP TABLE IF EXISTS hiveTable")
+        sql("DROP TABLE IF EXISTS carbontable")
+        sql("DROP TABLE IF EXISTS hiveTable2")
+        sql("DROP TABLE IF EXISTS carbontable2")
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS hivetable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | row format delimited fields terminated by '|'
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarystringdata.csv'
+               | INTO TABLE hivetable
+             """.stripMargin)
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+             """.stripMargin)
+        sql("insert into carbontable select * from hivetable")
+        val carbonResult = sql("SELECT * FROM carbontable")
+        val hiveResult = sql("SELECT * FROM hivetable")
+
+        assert(3 == carbonResult.collect().length)
+        assert(3 == hiveResult.collect().length)
+        checkAnswer(hiveResult, carbonResult)
+        carbonResult.collect().foreach { each =>
+            if (2 == each.get(0)) {
+                assert("\u0001history\u0002".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (1 == each.get(0)) {
+                assert("\u0001education\u0002".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (3 == each.get(0)) {
+                assert("\u0001biology\u0002".equals(new String(each.getAs[Array[Byte]](3)))
+                        || "".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        sql("CREATE TABLE hivetable2 AS SELECT * FROM carbontable")
+        sql("CREATE TABLE carbontable2 STORED BY 'carbondata' AS SELECT * FROM hivetable")
+        val carbonResult2 = sql("SELECT * FROM carbontable2")
+        val hiveResult2 = sql("SELECT * FROM hivetable2")
+        checkAnswer(hiveResult2, carbonResult2)
+        checkAnswer(carbonResult, carbonResult2)
+        checkAnswer(hiveResult, hiveResult2)
+        assert(3 == carbonResult2.collect().length)
+        assert(3 == hiveResult2.collect().length)
+
+        sql("INSERT INTO hivetable2 SELECT * FROM carbontable")
+        sql("INSERT INTO carbontable2 SELECT * FROM hivetable")
+        val carbonResult3 = sql("SELECT * FROM carbontable2")
+        val hiveResult3 = sql("SELECT * FROM hivetable2")
+        checkAnswer(carbonResult3, hiveResult3)
+        assert(6 == carbonResult3.collect().length)
+        assert(6 == hiveResult3.collect().length)
+    }
+
+    test("compaction for binary") {
+        CarbonProperties.getInstance()
+                .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "false")
+                .addProperty(CarbonCommonConstants.COMPACTION_SEGMENT_LEVEL_THRESHOLD,
+                    CarbonCommonConstants.DEFAULT_SEGMENT_LEVEL_THRESHOLD)
+        sql("DROP TABLE IF EXISTS carbontable")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+             """.stripMargin)
+        for (i <- 0 until (3)) {
+            sql(
+                s"""
+                   | LOAD DATA LOCAL INPATH '$resourcesPath/binarystringdata.csv'
+                   | INTO TABLE carbontable
+                   | OPTIONS('header'='false','DELIMITER'='|')
+             """.stripMargin)
+        }
+        // 3 segments, no compaction
+        var segments = sql("SHOW SEGMENTS FOR TABLE carbontable")
+        var SegmentSequenceIds = segments.collect().map { each => (each.toSeq) (0) }
+        assert(!SegmentSequenceIds.contains("0.1"))
+        assert(SegmentSequenceIds.length == 3)
+        for (i <- 0 until (3)) {
+            sql("insert into carbontable values(1,true,'Bob','binary',false)")
+        }
+
+        // with auto load merge disabled, the new loads are not compacted
+        segments = sql("SHOW SEGMENTS FOR TABLE carbontable")
+        SegmentSequenceIds = segments.collect().map { each => (each.toSeq) (0) }
+        assert(!SegmentSequenceIds.contains("0.1"))
+        assert(SegmentSequenceIds.length == 6)
+
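+        // compacted segments carry a suffixed id: MINOR compaction produces 0.1,
+        // MAJOR compaction produces 0.2, and CLEAN FILES removes the merged source segments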
+        // minor compaction
+        sql("alter table carbontable compact 'MINOR'")
+        segments = sql("SHOW SEGMENTS FOR TABLE carbontable")
+        SegmentSequenceIds = segments.collect().map { each => (each.toSeq) (0) }
+        assert(SegmentSequenceIds.contains("0.1"))
+        assert(!SegmentSequenceIds.contains("0.2"))
+        assert(SegmentSequenceIds.length == 7)
+
+        // major compaction
+        sql("alter table carbontable compact 'major'")
+        segments = sql("SHOW SEGMENTS FOR TABLE carbontable")
+        SegmentSequenceIds = segments.collect().map { each => (each.toSeq) (0) }
+        assert(SegmentSequenceIds.contains("0.2"))
+        assert(SegmentSequenceIds.contains("0.1"))
+        assert(SegmentSequenceIds.length == 8)
+
+        // clean files
+        sql("CLEAN FILES FOR TABLE carbontable")
+        segments = sql("SHOW SEGMENTS FOR TABLE carbontable")
+        SegmentSequenceIds = segments.collect().map { each => (each.toSeq) (0) }
+        assert(SegmentSequenceIds.contains("0.2"))
+        assert(!SegmentSequenceIds.contains("0.1"))
+        assert(SegmentSequenceIds.length == 1)
+
+        CarbonProperties.getInstance()
+                .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE, "true")
+        for (i <- 0 until (4)) {
+            sql("insert into carbontable values(1,true,'Bob','binary',false)")
+        }
+        // auto compaction
+        segments = sql("SHOW SEGMENTS FOR TABLE carbontable")
+        SegmentSequenceIds = segments.collect().map { each => (each.toSeq) (0) }
+        assert(SegmentSequenceIds.contains("6.1"))
+        assert(!SegmentSequenceIds.contains("0.1"))
+        assert(SegmentSequenceIds.contains("0.2"))
+        assert(SegmentSequenceIds.length == 6)
+
+        // check the data
+        val carbonResult = sql("SELECT * FROM carbontable")
+        carbonResult.collect().foreach { each =>
+            if (2 == each.get(0)) {
+                assert("\u0001history\u0002".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (1 == each.get(0)) {
+                assert("\u0001education\u0002".equals(new String(each.getAs[Array[Byte]](3)))
+                        || "binary".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (3 == each.get(0)) {
+                assert("\u0001biology\u0002".equals(new String(each.getAs[Array[Byte]](3)))
+                        || "".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        CarbonProperties.getInstance()
+                .addProperty(CarbonCommonConstants.ENABLE_AUTO_LOAD_MERGE,
+                    CarbonCommonConstants.DEFAULT_ENABLE_AUTO_LOAD_MERGE)
+    }
+
+    test("alter table for binary") {
+        sql("DROP TABLE IF EXISTS carbontable")
+        sql("DROP TABLE IF EXISTS binarytable")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string)
+               | STORED BY 'carbondata'
+             """.stripMargin)
+
+
+        sql("insert into carbontable values(1,true,'Bob')")
+
+        sql(
+            s"""
+               | alter table carbontable add columns (
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | TBLPROPERTIES('DEFAULT.VALUE.binaryField'='binary','DEFAULT.VALUE.autoLabel'='true')
+            """.stripMargin)
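+        // DEFAULT.VALUE.binaryField back-fills existing rows with the bytes of "binary"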
+
+        var carbonResult = sql("SELECT * FROM carbontable")
+        carbonResult.collect().foreach { each =>
+            if (1 == each.get(0)) {
+                assert("binary".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarystringdata.csv'
+               | INTO TABLE carbontable
+               | OPTIONS('header'='false','DELIMITER'='|')
+             """.stripMargin)
+
+
+        sql("insert into carbontable values(1,true,'Bob','binary',false)")
+
+        carbonResult = sql("SELECT * FROM carbontable")
+        carbonResult.collect().foreach { each =>
+            if (2 == each.get(0)) {
+                assert("\u0001history\u0002".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (1 == each.get(0)) {
+                assert("\u0001education\u0002".equals(new String(each.getAs[Array[Byte]](3)))
+                        || "binary".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (3 == each.get(0)) {
+                assert("\u0001biology\u0002".equals(new String(each.getAs[Array[Byte]](3)))
+                        || "".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        var result = sql("show tables")
+        result.collect().foreach { each =>
+            assert(!"binarytable".equalsIgnoreCase(each.getAs[String](1)))
+        }
+
+        // rename
+        sql(
+            s"""
+               | alter table carbontable RENAME TO binarytable
+            """.stripMargin)
+        result = sql("show tables")
+        assert(result.collect().exists { each =>
+            "binarytable".equalsIgnoreCase(each.getAs[String](1))
+        })
+
+        // add columns after rename
+        sql(
+            s"""
+               | alter table binarytable add columns (
+               |    binaryField2 binary,
+               |    autoLabel2 boolean)
+               | TBLPROPERTIES('DEFAULT.VALUE.binaryField2'='binary','DEFAULT.VALUE.autoLabel2'='true')
+            """.stripMargin)
+        sql("insert into binarytable values(1,true,'Bob','binary',false,'binary',false)")
+
+        carbonResult = sql("SELECT * FROM binarytable")
+        carbonResult.collect().foreach { each =>
+            if (2 == each.get(0)) {
+                assert("\u0001history\u0002".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (1 == each.get(0)) {
+                assert("\u0001education\u0002".equals(new String(each.getAs[Array[Byte]](3)))
+                        || "binary".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (3 == each.get(0)) {
+                assert(null == each.getAs[Array[Byte]](3)
+                        || "\u0001biology\u0002".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        // drop columns after rename
+        sql(
+            s"""
+               | alter table binarytable drop columns (
+               |    binaryField2,
+               |    autoLabel2)
+            """.stripMargin)
+        sql("insert into binarytable values(1,true,'Bob','binary',false)")
+
+        carbonResult = sql("SELECT * FROM binarytable")
+        carbonResult.collect().foreach { each =>
+            if (2 == each.get(0)) {
+                assert("\u0001history\u0002".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (1 == each.get(0)) {
+                assert("\u0001education\u0002".equals(new String(each.getAs[Array[Byte]](3)))
+                        || "binary".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (3 == each.get(0)) {
+                assert("\u0001biology\u0002".equals(new String(each.getAs[Array[Byte]](3)))
+                        || "".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        // change data type
+        val e = intercept[Exception] {
+            sql(s"alter table binarytable CHANGE binaryField binaryField3 STRING ")
+        }
+        assert(e.getMessage.contains("operation failed for default.binarytable: Alter table data type change operation failed: Given column binaryfield with data type BINARY cannot be modified. Only Int and Decimal data types are allowed for modification"))
+    }
+
+    ignore("Create table and load data with binary column for hive: test encode without \u0001") {
+        sql("DROP TABLE IF EXISTS hivetable")
+        sql("DROP TABLE IF EXISTS carbontable")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS hivetable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | row format delimited fields terminated by '|'
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarystringdata2.csv'
+               | INTO TABLE hivetable
+             """.stripMargin)
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | STORED BY 'carbondata'
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarystringdata2.csv'
+               | INTO TABLE carbontable
+               | OPTIONS('header'='false','DELIMITER'='|')
+             """.stripMargin)
+
+        val hiveResult = sql("SELECT * FROM hivetable")
+        val carbonResult = sql("SELECT * FROM carbontable")
+        // TODO
+        checkAnswer(hiveResult, carbonResult)
+
+        checkAnswer(sql("SELECT COUNT(*) FROM hivetable"), Seq(Row(3)))
+        try {
+            val carbonDF = carbonResult.collect()
+            assert(3 == carbonDF.length)
+            carbonDF.foreach { each =>
+                assert(5 == each.length)
+
+                assert(Integer.valueOf(each(0).toString) > 0)
+                assert(each(1).toString.equalsIgnoreCase("false") || (each(1).toString.equalsIgnoreCase("true")))
+                assert(each(2).toString.contains(".png"))
+
+                val value = new String(each.getAs[Array[Byte]](3))
+                // assert("\u0001history\u0002".equals(value) || "\u0001biology\u0002".equals(value) || "\u0001education\u0002".equals(value))
+                assert(each(4).toString.equalsIgnoreCase("false") || (each(4).toString.equalsIgnoreCase("true")))
+            }
+
+            val df = hiveResult.collect()
+            assert(3 == df.length)
+            df.foreach { each =>
+                assert(5 == each.length)
+
+                assert(Integer.valueOf(each(0).toString) > 0)
+                assert(each(1).toString.equalsIgnoreCase("false") || (each(1).toString.equalsIgnoreCase("true")))
+                assert(each(2).toString.contains(".png"))
+
+
+                val value = new String(each.getAs[Array[Byte]](3))
+                // assert("\u0001history\u0002".equals(value) || "\u0001biology\u0002".equals(value) || "\u0001education\u0002".equals(value))
+                assert(each(4).toString.equalsIgnoreCase("false") || (each(4).toString.equalsIgnoreCase("true")))
+            }
+        } catch {
+            case e: Exception =>
+                e.printStackTrace()
+                assert(false)
+        }
+    }
+
+    override def afterAll(): Unit = {
+        sql("DROP TABLE IF EXISTS binaryTable")
+        sql("DROP TABLE IF EXISTS hiveTable")
+    }
+}
\ No newline at end of file
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/TestQueryWithColumnMetCacheAndCacheLevelProperty.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/TestQueryWithColumnMetCacheAndCacheLevelProperty.scala
index 001964a..92a49dd 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/TestQueryWithColumnMetCacheAndCacheLevelProperty.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/allqueries/TestQueryWithColumnMetCacheAndCacheLevelProperty.scala
@@ -17,8 +17,6 @@
 package org.apache.carbondata.spark.testsuite.allqueries
 
 
-import java.util
-
 import scala.collection.JavaConverters._
 
 import org.apache.hadoop.conf.Configuration
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableForBinary.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableForBinary.scala
new file mode 100644
index 0000000..28e3f32
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableForBinary.scala
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.createTable
+
+import java.io._
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.sdk.util.BinaryUtil
+
+import org.apache.commons.io.FileUtils
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.test.util.QueryTest
+import org.apache.spark.util.SparkUtil
+import org.scalatest.BeforeAndAfterAll
+
+
+class TestNonTransactionalCarbonTableForBinary extends QueryTest with BeforeAndAfterAll {
+
+    var writerPath = new File(this.getClass.getResource("/").getPath
+            + "../../target/SparkCarbonFileFormat/WriterOutput/")
+            .getCanonicalPath
+    // getCanonicalPath gives a path with \, but the code expects /.
+    writerPath = writerPath.replace("\\", "/")
+    val outputPath = writerPath + "2"
+
+    var sdkPath = new File(this.getClass.getResource("/").getPath + "../../../../store/sdk/")
+            .getCanonicalPath
+
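+    // Converts the sample flower images (and their .txt annotations) from the SDK test
+    // resources into carbon files at writerPath, so they can be queried as an external table.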
+    def buildTestBinaryData(): Any = {
+        FileUtils.deleteDirectory(new File(writerPath))
+
+        val sourceImageFolder = sdkPath + "/src/test/resources/image/flowers"
+        val sufAnnotation = ".txt"
+        BinaryUtil.binaryToCarbon(sourceImageFolder, writerPath, sufAnnotation, ".jpg")
+    }
+
+    def cleanTestData() = {
+        FileUtils.deleteDirectory(new File(writerPath))
+        FileUtils.deleteDirectory(new File(outputPath))
+    }
+
+    override def beforeAll(): Unit = {
+        CarbonProperties.getInstance()
+                .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+                    CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
+        buildTestBinaryData()
+
+        FileUtils.deleteDirectory(new File(outputPath))
+        sql("DROP TABLE IF EXISTS sdkOutputTable")
+    }
+
+    override def afterAll(): Unit = {
+        cleanTestData()
+        sql("DROP TABLE IF EXISTS sdkOutputTable")
+    }
+
+    test("test read image carbon with external table, generate by sdk, CTAS") {
+        sql("DROP TABLE IF EXISTS binaryCarbon")
+        sql("DROP TABLE IF EXISTS binaryCarbon3")
+        if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+            sql(s"CREATE EXTERNAL TABLE binaryCarbon STORED BY 'carbondata' LOCATION '$writerPath'")
+            sql(s"CREATE TABLE binaryCarbon3 STORED BY 'carbondata' LOCATION '$outputPath'" + " AS SELECT * FROM binaryCarbon")
+
+            checkAnswer(sql("SELECT COUNT(*) FROM binaryCarbon"),
+                Seq(Row(3)))
+            checkAnswer(sql("SELECT COUNT(*) FROM binaryCarbon3"),
+                Seq(Row(3)))
+
+            val result = sql("desc formatted binaryCarbon").collect()
+            var flag = false
+            result.foreach { each =>
+                if ("binary".equals(each.get(1))) {
+                    flag = true
+                }
+            }
+            assert(flag)
+            val value = sql("SELECT * FROM binaryCarbon").collect()
+            assert(3 == value.length)
+            value.foreach { each =>
+                val byteArray = each.getAs[Array[Byte]](2)
+                assert(new String(byteArray).startsWith("����\u0000\u0010JFIF"))
+            }
+
+            val value3 = sql("SELECT * FROM binaryCarbon3").collect()
+            assert(3 == value3.length)
+            value3.foreach { each =>
+                val byteArray = each.getAs[Array[Byte]](2)
+                assert(new String(byteArray).startsWith("����\u0000\u0010JFIF"))
+            }
+            sql("DROP TABLE IF EXISTS binaryCarbon")
+            sql("DROP TABLE IF EXISTS binaryCarbon3")
+        }
+    }
+
+    test("Don't support insert into partition table") {
+        sql("DROP TABLE IF EXISTS binaryCarbon")
+        sql("DROP TABLE IF EXISTS binaryCarbon2")
+        sql("DROP TABLE IF EXISTS binaryCarbon3")
+        sql("DROP TABLE IF EXISTS binaryCarbon4")
+        if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+            sql(s"CREATE TABLE binaryCarbon USING CARBON LOCATION '$writerPath'")
+            sql(
+                s"""
+                   | CREATE TABLE binaryCarbon2(
+                   |    binaryId INT,
+                   |    binaryName STRING,
+                   |    binary BINARY,
+                   |    labelName STRING,
+                   |    labelContent STRING
+                   |) STORED BY 'carbondata'""".stripMargin)
+            sql(
+                s"""
+                   | CREATE TABLE binaryCarbon3(
+                   |    binaryId INT,
+                   |    binaryName STRING,
+                   |    labelName STRING,
+                   |    labelContent STRING
+                   |)  partitioned by ( binary BINARY) STORED BY 'carbondata'""".stripMargin)
+
+            sql("insert into binaryCarbon2 select binaryId,binaryName,binary,labelName,labelContent from binaryCarbon where binaryId=0 ")
+            val carbonResult2 = sql("SELECT * FROM binaryCarbon2")
+
+            sql("create table binaryCarbon4 STORED BY 'carbondata' select binaryId,binaryName,binary,labelName,labelContent from binaryCarbon where binaryId=0 ")
+            val carbonResult4 = sql("SELECT * FROM binaryCarbon4")
+            val carbonResult = sql("SELECT * FROM binaryCarbon")
+
+            assert(3 == carbonResult.collect().length)
+            assert(1 == carbonResult4.collect().length)
+            assert(1 == carbonResult2.collect().length)
+            checkAnswer(carbonResult4, carbonResult2)
+
+            // inserting into a table partitioned on a binary column is expected to fail
+            intercept[Exception] {
+                sql("insert into binaryCarbon3 select binaryId,binaryName,binary,labelName,labelContent from binaryCarbon where binaryId=0 ")
+            }
+            sql("DROP TABLE IF EXISTS binaryCarbon")
+            sql("DROP TABLE IF EXISTS binaryCarbon2")
+            sql("DROP TABLE IF EXISTS binaryCarbon3")
+            sql("DROP TABLE IF EXISTS binaryCarbon4")
+        }
+    }
+}
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableJsonWriter.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableJsonWriter.scala
index 862c72a..d485235 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableJsonWriter.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableJsonWriter.scala
@@ -93,12 +93,14 @@ class TestNonTransactionalCarbonTableJsonWriter extends QueryTest with BeforeAnd
   private def writeCarbonFileFromJsonRowInput(jsonRow: String,
       carbonSchema: Schema) = {
     try {
-      var options: util.Map[String, String] = Map("bAd_RECords_action" -> "FAIL", "quotechar" -> "\"").asJava
+      val options: util.Map[String, String] = Map("bAd_RECords_action" -> "FAIL", "quotechar" -> "\"").asJava
       val writer = CarbonWriter.builder
-        .outputPath(writerPath)
-        .uniqueIdentifier(System.currentTimeMillis())
-        .withLoadOptions(options)
-        .withJsonInput(carbonSchema).writtenBy("TestNonTransactionalCarbonTableJsonWriter").build()
+              .outputPath(writerPath)
+              .uniqueIdentifier(System.currentTimeMillis())
+              .withLoadOptions(options)
+              .withJsonInput(carbonSchema)
+              .writtenBy("TestNonTransactionalCarbonTableJsonWriter")
+              .build()
       writer.write(jsonRow)
       writer.close()
     }
@@ -347,4 +349,29 @@ class TestNonTransactionalCarbonTableJsonWriter extends QueryTest with BeforeAnd
     assert(new File(writerPath).listFiles().length > 0)
     FileUtils.deleteDirectory(new File(writerPath))
   }
+
+  // test: read a JSON row that includes a binary field (the schema declares fewer fields than the record)
+  test("Read Json for binary") {
+    FileUtils.deleteDirectory(new File(writerPath))
+    var dataPath: String = null
+    dataPath = resourcesPath + "/jsonFiles/data/allPrimitiveType.json"
+    val fields = new Array[Field](3)
+    fields(0) = new Field("stringField", DataTypes.STRING)
+    fields(1) = new Field("intField", DataTypes.INT)
+    fields(2) = new Field("binaryField", DataTypes.BINARY)
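+    // allPrimitiveType.json is expected to contain a binaryField whose value decodes to the bytes of "abc"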
+    val jsonRow = readFromFile(dataPath)
+    writeCarbonFileFromJsonRowInput(jsonRow, new Schema(fields))
+    assert(new File(writerPath).exists())
+    sql("DROP TABLE IF EXISTS sdkOutputTable")
+    sql(
+      s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
+         |'$writerPath' """.stripMargin)
+    sql("select * from sdkOutputTable").show()
+    checkAnswer(sql("select * from sdkOutputTable"),
+      Seq(Row("ajantha\"bhat\"", 26, "abc".getBytes())))
+    sql("DROP TABLE sdkOutputTable")
+    // drop table should not delete the files
+    assert(new File(writerPath).listFiles().length > 0)
+    FileUtils.deleteDirectory(new File(writerPath))
+  }
 }
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionBadRecordLoggerTest.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionBadRecordLoggerTest.scala
index 9689f3d..60952e4 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionBadRecordLoggerTest.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionBadRecordLoggerTest.scala
@@ -17,8 +17,6 @@
 
 package org.apache.carbondata.spark.testsuite.standardpartition
 
-import java.io.File
-
 import org.apache.spark.sql.Row
 import org.apache.spark.sql.hive.HiveContext
 import org.apache.spark.sql.test.util.QueryTest
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
index 4ec66a7..4b29e77 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CarbonScalaUtil.scala
@@ -648,12 +648,21 @@ object CarbonScalaUtil {
                      !x.dataType.get.equalsIgnoreCase("STRUCT") &&
                      !x.dataType.get.equalsIgnoreCase("MAP") &&
                      !x.dataType.get.equalsIgnoreCase("ARRAY"))) {
-        val errorMsg = "LOCAL_DICTIONARY_INCLUDE/LOCAL_DICTIONARY_EXCLUDE column: " +
-                       dictColm.trim +
-                       " is not a string/complex/varchar datatype column. LOCAL_DICTIONARY_COLUMN" +
-                       " should be no dictionary string/complex/varchar datatype column." +
-                       "Please check the DDL."
-        throw new MalformedCarbonCommandException(errorMsg)
+        if (fields.exists(x => x.column.equalsIgnoreCase(dictColm)
+                && x.dataType.get.equalsIgnoreCase("BINARY"))
+                && tableProperties.get("local_dictionary_exclude").nonEmpty
+                && tableProperties.get("local_dictionary_exclude").get.contains(dictColm)
+                && (tableProperties.get("local_dictionary_include").isEmpty
+                || (!tableProperties.get("local_dictionary_include").get.contains(dictColm)))) {
+          LOGGER.info("Local_dictionary_exclude supports binary")
+        } else {
+          val errorMsg = "LOCAL_DICTIONARY_INCLUDE/LOCAL_DICTIONARY_EXCLUDE column: " +
+                  dictColm.trim +
+                  " is not a string/complex/varchar datatype column. LOCAL_DICTIONARY_COLUMN" +
+                  " should be no dictionary string/complex/varchar datatype column." +
+                  "Please check the DDL."
+          throw new MalformedCarbonCommandException(errorMsg)
+        }
       }
     }
 
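
The validation above now lets a BINARY column pass only when it is listed in LOCAL_DICTIONARY_EXCLUDE and not in LOCAL_DICTIONARY_INCLUDE; every other combination still raises MalformedCarbonCommandException. A minimal sketch of the resulting DDL behaviour, assuming a SparkSession named spark with the Carbon extensions enabled (table and column names are hypothetical):

  // Accepted: binary column listed only in LOCAL_DICTIONARY_EXCLUDE.
  spark.sql(
    """CREATE TABLE binary_ldict (id INT, image BINARY)
      |STORED BY 'carbondata'
      |TBLPROPERTIES('LOCAL_DICTIONARY_ENABLE'='true',
      |              'LOCAL_DICTIONARY_EXCLUDE'='image')""".stripMargin)

  // Rejected: binary is still not a valid LOCAL_DICTIONARY_INCLUDE column,
  // so this DDL throws MalformedCarbonCommandException.
  spark.sql(
    """CREATE TABLE binary_ldict_bad (id INT, image BINARY)
      |STORED BY 'carbondata'
      |TBLPROPERTIES('LOCAL_DICTIONARY_INCLUDE'='image')""".stripMargin)
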
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
index b0a236f..8050e5f 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/DataTypeConverterUtil.scala
@@ -44,6 +44,7 @@ object DataTypeConverterUtil {
       case FIXED_DECIMAL(_, _) => DataTypes.createDefaultDecimalType
       case "timestamp" => DataTypes.TIMESTAMP
       case "date" => DataTypes.DATE
+      case "binary" => DataTypes.BINARY
       case "array" => DataTypes.createDefaultArrayType
       case "struct" => DataTypes.createDefaultStructType
       case "map" => DataTypes.createDefaultMapType
@@ -68,6 +69,7 @@ object DataTypeConverterUtil {
       case FIXED_DECIMALTYPE(_, _) => DataTypes.createDefaultDecimalType
       case "timestamptype" => DataTypes.TIMESTAMP
       case "datetype" => DataTypes.DATE
+      case "binarytype" => DataTypes.BINARY
       case others =>
         if (others != null && others.startsWith("arraytype")) {
           DataTypes.createDefaultArrayType()
@@ -105,6 +107,7 @@ object DataTypeConverterUtil {
       case "decimal" => ThriftDataType.DECIMAL
       case "date" => ThriftDataType.DATE
       case "timestamp" => ThriftDataType.TIMESTAMP
+      case "binary" => ThriftDataType.BINARY
       case "array" => ThriftDataType.ARRAY
       case "struct" => ThriftDataType.STRUCT
       case "map" => ThriftDataType.MAP
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index 3cb068f..3e80ea6 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -172,6 +172,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   protected val BOOLEAN = carbonKeyWord("BOOLEAN")
   protected val LONG = carbonKeyWord("LONG")
   protected val BIGINT = carbonKeyWord("BIGINT")
+  protected val BINARY = carbonKeyWord("BINARY")
   protected val ARRAY = carbonKeyWord("ARRAY")
   protected val STRUCT = carbonKeyWord("STRUCT")
   protected val MAP = carbonKeyWord("MAP")
@@ -421,6 +422,12 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
               s"$column is a complex type column and complex type is not allowed for " +
               s"the option(s): ${ CarbonCommonConstants.COLUMN_META_CACHE }"
             throw new MalformedCarbonCommandException(errorMessage)
+          } else if (dimFieldToBeCached.nonEmpty && DataTypes.BINARY.getName
+                  .equalsIgnoreCase(dimFieldToBeCached(0).dataType.get)) {
+            val errorMessage =
+              s"$column is a binary data type column and binary data type is not allowed for " +
+                      s"the option(s): ${CarbonCommonConstants.COLUMN_META_CACHE}"
+            throw new MalformedCarbonCommandException(errorMessage)
           }
         }
       }
@@ -802,6 +809,9 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
         val errorMsg = "range_column: " + rangeColumn +
                        " does not exist in table. Please check the create table statement."
         throw new MalformedCarbonCommandException(errorMsg)
+      } else if (DataTypes.BINARY.getName.equalsIgnoreCase(rangeField.get.dataType.get)) {
+        throw new MalformedCarbonCommandException(
+          "RANGE_COLUMN doesn't support binary data type:" + rangeColumn)
       } else {
         tableProperties.put(CarbonCommonConstants.RANGE_COLUMN, rangeField.get.column)
       }
@@ -877,8 +887,9 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
         dimFields += field
       } else if (isDetectAsDimentionDataType(field.dataType.get)) {
         dimFields += field
-        // consider all String cols as noDicitonaryDims by default
-        if (DataTypes.STRING.getName.equalsIgnoreCase(field.dataType.get)) {
+        // consider all String and binary cols as noDictionaryDims by default
+        if ((DataTypes.STRING.getName.equalsIgnoreCase(field.dataType.get)) ||
+            (DataTypes.BINARY.getName.equalsIgnoreCase(field.dataType.get))) {
           noDictionaryDims :+= field.column
         }
       } else if (sortKeyDimsTmp.exists(x => x.equalsIgnoreCase(field.column)) &&
@@ -943,7 +954,14 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
    * @param dimensionDatatype
    */
   def isDetectAsDimentionDataType(dimensionDatatype: String): Boolean = {
-    val dimensionType = Array("string", "array", "struct", "map", "timestamp", "date", "char")
+    val dimensionType = Array("string",
+      "array",
+      "struct",
+      "map",
+      "timestamp",
+      "date",
+      "char",
+      "binary")
     dimensionType.exists(x => dimensionDatatype.toLowerCase.contains(x))
   }
 
@@ -959,7 +977,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
    * detects whether datatype is part of sort_column
    */
   private def isDataTypeSupportedForSortColumn(columnDataType: String): Boolean = {
-    val dataTypes = Array("array", "struct", "map", "double", "float", "decimal")
+    val dataTypes = Array("array", "struct", "map", "double", "float", "decimal", "binary")
     dataTypes.exists(x => x.equalsIgnoreCase(columnDataType))
   }
 
@@ -967,7 +985,8 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
    * detects whether datatype is part of dictionary_exclude
    */
   def isDataTypeSupportedForDictionary_Exclude(columnDataType: String): Boolean = {
-    val dataTypes = Array("string", "timestamp", "int", "long", "bigint", "struct", "array", "map")
+    val dataTypes =
+      Array("string", "timestamp", "int", "long", "bigint", "struct", "array", "map", "binary")
     dataTypes.exists(x => x.equalsIgnoreCase(columnDataType))
   }
 
@@ -1296,6 +1315,8 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
     INT ^^^ "int" | DOUBLE ^^^ "double" | FLOAT ^^^ "double" | decimalType |
     DATE ^^^ "date" | charType
 
+  protected lazy val miscType = BINARY ^^^ "binary"
+
   /**
    * Matching the char data type and returning the same.
    */
@@ -1318,7 +1339,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   }
 
   protected lazy val nestedType: Parser[Field] = structFieldType | arrayFieldType | mapFieldType |
-                                                 primitiveFieldType
+                                                 primitiveFieldType | miscFieldType
 
   lazy val anyFieldDef: Parser[Field] =
     (ident | stringLit) ~ (":".? ~> nestedType) ~ (IN ~> (ident | stringLit)).? ^^ {
@@ -1344,6 +1365,12 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
         Field("unknown", Some(e1), Some("unknown"), Some(null))
     }
 
+  protected lazy val miscFieldType: Parser[Field] =
+    miscType ^^ {
+      case e1 =>
+        Field("unknown", Some(e1), Some("unknown"), Some(null))
+    }
+
   protected lazy val arrayFieldType: Parser[Field] =
     ((ARRAY ^^^ "array") ~> "<" ~> nestedType <~ ">") ^^ {
       case e1 =>
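
As the parser changes above show, BINARY is now parsed as its own field type and treated as a no-dictionary dimension, while SORT_COLUMNS, RANGE_COLUMN and COLUMN_META_CACHE reject it and DICTIONARY_EXCLUDE accepts it. A small sketch of the expected behaviour, assuming a SparkSession named spark (table names are hypothetical):

  // Binary column is accepted as a plain no-dictionary dimension.
  spark.sql(
    """CREATE TABLE binary_ok (id INT, image BINARY)
      |STORED BY 'carbondata'""".stripMargin)

  // RANGE_COLUMN on a binary column is rejected while parsing the DDL.
  try {
    spark.sql(
      """CREATE TABLE binary_range (id INT, image BINARY)
        |STORED BY 'carbondata'
        |TBLPROPERTIES('RANGE_COLUMN'='image')""".stripMargin)
  } catch {
    case e: Exception =>
      // "RANGE_COLUMN doesn't support binary data type:image"
      println(e.getMessage)
  }
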
diff --git a/integration/spark-datasource/pom.xml b/integration/spark-datasource/pom.xml
index d9d20cb..11a4df2 100644
--- a/integration/spark-datasource/pom.xml
+++ b/integration/spark-datasource/pom.xml
@@ -85,7 +85,6 @@
   </dependencies>
 
   <build>
-    <testSourceDirectory>src/test/scala</testSourceDirectory>
     <resources>
       <resource>
         <directory>src/resources</directory>
diff --git a/integration/spark-datasource/src/main/scala/org/apache/carbondata/converter/SparkDataTypeConverterImpl.java b/integration/spark-datasource/src/main/scala/org/apache/carbondata/converter/SparkDataTypeConverterImpl.java
index 99fac45..41b378d 100644
--- a/integration/spark-datasource/src/main/scala/org/apache/carbondata/converter/SparkDataTypeConverterImpl.java
+++ b/integration/spark-datasource/src/main/scala/org/apache/carbondata/converter/SparkDataTypeConverterImpl.java
@@ -132,6 +132,8 @@ public final class SparkDataTypeConverterImpl implements DataTypeConverter, Seri
       return DataTypes.TimestampType;
     } else if (carbonDataType == org.apache.carbondata.core.metadata.datatype.DataTypes.DATE) {
       return DataTypes.DateType;
+    } else if (carbonDataType == org.apache.carbondata.core.metadata.datatype.DataTypes.BINARY) {
+      return DataTypes.BinaryType;
     } else {
       return null;
     }
@@ -167,7 +169,8 @@ public final class SparkDataTypeConverterImpl implements DataTypeConverter, Seri
         if (dataType == org.apache.carbondata.core.metadata.datatype.DataTypes.BOOLEAN
             || dataType == org.apache.carbondata.core.metadata.datatype.DataTypes.SHORT
             || dataType == org.apache.carbondata.core.metadata.datatype.DataTypes.INT
-            || dataType == org.apache.carbondata.core.metadata.datatype.DataTypes.LONG) {
+            || dataType == org.apache.carbondata.core.metadata.datatype.DataTypes.LONG
+            || dataType == org.apache.carbondata.core.metadata.datatype.DataTypes.BINARY) {
           fields[i] = new StructField(carbonColumn.getColName(),
               convertCarbonToSparkDataType(dataType), true, null);
         } else if (org.apache.carbondata.core.metadata.datatype.DataTypes.isDecimal(dataType)) {
diff --git a/integration/spark-datasource/src/main/scala/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java b/integration/spark-datasource/src/main/scala/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
index 34e7c23..0fb8b4b 100644
--- a/integration/spark-datasource/src/main/scala/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
+++ b/integration/spark-datasource/src/main/scala/org/apache/carbondata/spark/vectorreader/VectorizedCarbonRecordReader.java
@@ -265,7 +265,7 @@ public class VectorizedCarbonRecordReader extends AbstractRecordReader<Object> {
       DataType dataType = msr.getMeasure().getDataType();
       if (dataType == DataTypes.BOOLEAN || dataType == DataTypes.SHORT || dataType == DataTypes.INT
           || dataType == DataTypes.LONG || dataType == DataTypes.FLOAT
-          || dataType == DataTypes.BYTE) {
+          || dataType == DataTypes.BYTE || dataType == DataTypes.BINARY) {
         fields[msr.getOrdinal()] = new StructField(msr.getColumnName(),
             CarbonSparkDataSourceUtil.convertCarbonToSparkDataType(msr.getMeasure().getDataType()), true,
             null);
diff --git a/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonSparkDataSourceUtil.scala b/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonSparkDataSourceUtil.scala
index 71dba3d..8bdb512 100644
--- a/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonSparkDataSourceUtil.scala
+++ b/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonSparkDataSourceUtil.scala
@@ -63,6 +63,7 @@ object CarbonSparkDataSourceUtil {
         case CarbonDataTypes.FLOAT => FloatType
         case CarbonDataTypes.BOOLEAN => BooleanType
         case CarbonDataTypes.TIMESTAMP => TimestampType
+        case CarbonDataTypes.BINARY => BinaryType
         case CarbonDataTypes.DATE => DateType
         case CarbonDataTypes.VARCHAR => StringType
       }
@@ -84,6 +85,7 @@ object CarbonSparkDataSourceUtil {
       case DateType => CarbonDataTypes.DATE
       case BooleanType => CarbonDataTypes.BOOLEAN
       case TimestampType => CarbonDataTypes.TIMESTAMP
+      case BinaryType => CarbonDataTypes.BINARY
       case ArrayType(elementType, _) =>
         CarbonDataTypes.createArrayType(convertSparkToCarbonDataType(elementType))
       case StructType(fields) =>
@@ -195,7 +197,13 @@ object CarbonSparkDataSourceUtil {
       } else {
         dataTypeOfAttribute
       }
-      new CarbonLiteralExpression(value, dataType)
+      val dataValue = if (dataTypeOfAttribute.equals(CarbonDataTypes.BINARY)
+              && Option(value).isDefined) {
+        new String(value.asInstanceOf[Array[Byte]])
+      } else {
+        value
+      }
+      new CarbonLiteralExpression(dataValue, dataType)
     }
 
     createFilter(predicate)
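
The pushdown change above converts a byte[] filter literal into a String before wrapping it in a CarbonLiteralExpression, so binary equality filters issued from Spark are pushed down correctly. A usage sketch, assuming a carbon table named binary_tbl with a BINARY column named image (both names are hypothetical):

  import org.apache.spark.sql.functions.{col, lit}

  // SQL form: the cast produces a BinaryType literal that is pushed down.
  spark.sql("SELECT * FROM binary_tbl WHERE image = cast('binary' AS binary)").show()

  // DataFrame form: lit() on an Array[Byte] also yields a BinaryType literal.
  spark.table("binary_tbl")
    .filter(col("image") === lit("binary".getBytes("UTF-8")))
    .show()
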
diff --git a/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala b/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala
index 6819a4c..5f62362 100644
--- a/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala
+++ b/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala
@@ -219,6 +219,8 @@ class SparkCarbonFileFormat extends FileFormat
           fieldTypes(i).dataType match {
             case StringType =>
               data(i) = row.getString(i)
+            case BinaryType =>
+              data(i) = row.getBinary(i)
             case d: DecimalType =>
               data(i) = row.getDecimal(i, d.precision, d.scale).toJavaBigDecimal
             case s: StructType =>
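
With the BinaryType case added to the write path above, a DataFrame containing a binary column can be written through the file-format datasource directly. A minimal sketch, assuming the datasource short name is "carbon" (as used elsewhere in these tests) and a hypothetical scratch path:

  import spark.implicits._

  val df = Seq(
    (1, "Bob", "binary".getBytes("UTF-8")),
    (2, "Xu", "test".getBytes("UTF-8"))
  ).toDF("id", "name", "image")   // image is inferred as BinaryType

  df.write.format("carbon").mode("overwrite").save("/tmp/binary_out")
  spark.read.format("carbon").load("/tmp/binary_out").show()
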
diff --git a/integration/spark-datasource/src/main/scala/org/apache/spark/sql/util/SparkTypeConverter.scala b/integration/spark-datasource/src/main/scala/org/apache/spark/sql/util/SparkTypeConverter.scala
index cb07e04..2ea3d43 100644
--- a/integration/spark-datasource/src/main/scala/org/apache/spark/sql/util/SparkTypeConverter.scala
+++ b/integration/spark-datasource/src/main/scala/org/apache/spark/sql/util/SparkTypeConverter.scala
@@ -82,6 +82,7 @@ private[spark] object SparkTypeConverter {
         case CarbonDataTypes.DOUBLE => DoubleType
         case CarbonDataTypes.FLOAT => FloatType
         case CarbonDataTypes.BYTE => ByteType
+        case CarbonDataTypes.BINARY => BinaryType
         case CarbonDataTypes.BOOLEAN => BooleanType
         case CarbonDataTypes.TIMESTAMP => TimestampType
         case CarbonDataTypes.DATE => DateType
diff --git a/integration/spark-datasource/src/test/java/org/apache/carbondata/sdk/util/BinaryUtil.java b/integration/spark-datasource/src/test/java/org/apache/carbondata/sdk/util/BinaryUtil.java
new file mode 100644
index 0000000..2de5df3
--- /dev/null
+++ b/integration/spark-datasource/src/test/java/org/apache/carbondata/sdk/util/BinaryUtil.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.sdk.util;
+
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.sdk.file.CarbonWriter;
+import org.apache.carbondata.sdk.file.Field;
+import org.apache.carbondata.sdk.file.Schema;
+
+import java.io.*;
+
+import static org.apache.carbondata.sdk.file.utils.SDKUtil.listFiles;
+
+public class BinaryUtil {
+  public static void binaryToCarbon(String sourceImageFolder, String outputPath,
+                                    String sufAnnotation, final String sufImage) throws Exception {
+    Field[] fields = new Field[5];
+    fields[0] = new Field("binaryId", DataTypes.INT);
+    fields[1] = new Field("binaryName", DataTypes.STRING);
+    fields[2] = new Field("binary", DataTypes.BINARY);
+    fields[3] = new Field("labelName", DataTypes.STRING);
+    fields[4] = new Field("labelContent", DataTypes.STRING);
+    CarbonWriter writer = CarbonWriter
+        .builder()
+        .outputPath(outputPath)
+        .withCsvInput(new Schema(fields))
+        .withBlockSize(256)
+        .writtenBy("binaryExample")
+        .withPageSizeInMb(1)
+        .build();
+    binaryToCarbon(sourceImageFolder, writer, sufAnnotation, sufImage);
+  }
+
+  public static boolean binaryToCarbon(String sourceImageFolder, CarbonWriter writer,
+      String sufAnnotation, final String sufImage) throws Exception {
+    int num = 1;
+
+    byte[] originBinary = null;
+
+    // read and write image data
+    for (int j = 0; j < num; j++) {
+
+      Object[] files = listFiles(sourceImageFolder, sufImage).toArray();
+
+      if (null != files) {
+        for (int i = 0; i < files.length; i++) {
+          // read image and encode to Hex
+          BufferedInputStream bis = new BufferedInputStream(
+              new FileInputStream(new File((String) files[i])));
+          originBinary = new byte[bis.available()];
+          while ((bis.read(originBinary)) != -1) {
+          }
+
+          String labelFileName = ((String) files[i]).split(sufImage)[0] + sufAnnotation;
+          BufferedInputStream txtBis = new BufferedInputStream(new FileInputStream(labelFileName));
+          String labelValue = null;
+          byte[] labelBinary = null;
+          labelBinary = new byte[txtBis.available()];
+          while ((txtBis.read(labelBinary)) != -1) {
+            labelValue = new String(labelBinary, "UTF-8");
+          }
+          // write data
+          writer.write(new Object[]{i, (String) files[i], originBinary,
+              labelFileName, labelValue});
+          bis.close();
+          txtBis.close();
+        }
+      }
+      writer.close();
+    }
+    return true;
+  }
+
+}
diff --git a/integration/spark-datasource/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceBinaryTest.scala b/integration/spark-datasource/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceBinaryTest.scala
new file mode 100644
index 0000000..064efc2
--- /dev/null
+++ b/integration/spark-datasource/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceBinaryTest.scala
@@ -0,0 +1,544 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.spark.sql.carbondata.datasource
+
+import java.io.File
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+import org.apache.carbondata.sdk.util.BinaryUtil
+import org.apache.commons.io.FileUtils
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.carbondata.datasource.TestUtil._
+import org.apache.spark.util.SparkUtil
+
+import org.scalatest.{BeforeAndAfterAll, FunSuite}
+
+class SparkCarbonDataSourceBinaryTest extends FunSuite with BeforeAndAfterAll {
+
+    var writerPath = new File(this.getClass.getResource("/").getPath
+            + "../../target/SparkCarbonFileFormat/WriterOutput/")
+            .getCanonicalPath
+    var resourcesPath = new File(this.getClass.getResource("/").getPath
+            + "../../../spark-common-test/src/test/resources/")
+            .getCanonicalPath
+    var outputPath = writerPath + 2
+    //getCanonicalPath gives path with \, but the code expects /.
+    writerPath = writerPath.replace("\\", "/")
+
+    var sdkPath = new File(this.getClass.getResource("/").getPath + "../../../../store/sdk/")
+            .getCanonicalPath
+
+    def buildTestBinaryData(): Any = {
+        FileUtils.deleteDirectory(new File(writerPath))
+        FileUtils.deleteDirectory(new File(outputPath))
+
+        val sourceImageFolder = sdkPath + "/src/test/resources/image/flowers"
+        val sufAnnotation = ".txt"
+        BinaryUtil.binaryToCarbon(sourceImageFolder, writerPath, sufAnnotation, ".jpg")
+    }
+
+    def cleanTestData() = {
+        FileUtils.deleteDirectory(new File(writerPath))
+        FileUtils.deleteDirectory(new File(outputPath))
+    }
+
+    import spark._
+
+    override def beforeAll(): Unit = {
+        CarbonProperties.getInstance()
+                .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+                    CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
+        buildTestBinaryData()
+
+        FileUtils.deleteDirectory(new File(outputPath))
+        sql("DROP TABLE IF EXISTS sdkOutputTable")
+    }
+
+    override def afterAll(): Unit = {
+        cleanTestData()
+        sql("DROP TABLE IF EXISTS sdkOutputTable")
+    }
+
+    test("Test direct sql read carbon") {
+        assert(new File(writerPath).exists())
+        checkAnswer(
+            sql(s"SELECT COUNT(*) FROM carbon.`$writerPath`"),
+            Seq(Row(3)))
+    }
+
+    test("Test read image carbon with spark carbon file format, generate by sdk, CTAS") {
+        sql("DROP TABLE IF EXISTS binaryCarbon")
+        sql("DROP TABLE IF EXISTS binaryCarbon3")
+        FileUtils.deleteDirectory(new File(outputPath))
+        if (SparkUtil.isSparkVersionEqualTo("2.1")) {
+            sql(s"CREATE TABLE binaryCarbon USING CARBON OPTIONS(PATH '$writerPath')")
+            sql(s"CREATE TABLE binaryCarbon3 USING CARBON OPTIONS(PATH '$outputPath')" + " AS SELECT * FROM binaryCarbon")
+        } else {
+            sql(s"CREATE TABLE binaryCarbon USING CARBON LOCATION '$writerPath'")
+            sql(s"CREATE TABLE binaryCarbon3 USING CARBON LOCATION '$outputPath'" + " AS SELECT * FROM binaryCarbon")
+        }
+        checkAnswer(sql("SELECT COUNT(*) FROM binaryCarbon"),
+            Seq(Row(3)))
+        checkAnswer(sql("SELECT COUNT(*) FROM binaryCarbon3"),
+            Seq(Row(3)))
+        sql("DROP TABLE IF EXISTS binaryCarbon")
+        sql("DROP TABLE IF EXISTS binaryCarbon3")
+        FileUtils.deleteDirectory(new File(outputPath))
+    }
+
+    test("Don't support sort_columns") {
+        import spark._
+        sql("DROP TABLE IF EXISTS binaryTable")
+        val exception = intercept[Exception] {
+            sql(
+                s"""
+                   | CREATE TABLE binaryTable (
+                   |    id DOUBLE,
+                   |    label BOOLEAN,
+                   |    name STRING,
+                   |    image BINARY,
+                   |    autoLabel BOOLEAN)
+                   | using carbon
+                   | options('SORT_COLUMNS'='image')
+       """.stripMargin)
+            sql("SELECT COUNT(*) FROM binaryTable").show()
+        }
+        assert(exception.getCause.getMessage.contains("sort columns not supported for array, struct, map, double, float, decimal, varchar, binary"))
+    }
+
+    test("Don't support long_string_columns for binary") {
+        import spark._
+        sql("DROP TABLE IF EXISTS binaryTable")
+        val exception = intercept[Exception] {
+            sql(
+                s"""
+                   | CREATE TABLE binaryTable (
+                   |    id DOUBLE,
+                   |    label BOOLEAN,
+                   |    name STRING,
+                   |    image BINARY,
+                   |    autoLabel BOOLEAN)
+                   | using carbon
+                   | options('long_string_columns'='image')
+       """.stripMargin)
+            sql("SELECT COUNT(*) FROM binaryTable").show()
+        }
+        assert(exception.getCause.getMessage.contains("long string column : image is not supported for data type: BINARY"))
+    }
+
+    test("Don't support insert into partition table") {
+        if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+            sql("DROP TABLE IF EXISTS binaryCarbon")
+            sql("DROP TABLE IF EXISTS binaryCarbon2")
+            sql("DROP TABLE IF EXISTS binaryCarbon3")
+            sql("DROP TABLE IF EXISTS binaryCarbon4")
+            sql(s"CREATE TABLE binaryCarbon USING CARBON LOCATION '$writerPath'")
+            sql(
+                s"""
+                   | CREATE TABLE binaryCarbon2(
+                   |    binaryId INT,
+                   |    binaryName STRING,
+                   |    binary BINARY,
+                   |    labelName STRING,
+                   |    labelContent STRING
+                   |) USING CARBON""".stripMargin)
+            sql(
+                s"""
+                   | CREATE TABLE binaryCarbon3(
+                   |    binaryId INT,
+                   |    binaryName STRING,
+                   |    binary BINARY,
+                   |    labelName STRING,
+                   |    labelContent STRING
+                   |) USING CARBON partitioned by (binary) """.stripMargin)
+            sql("select binaryId,binaryName,binary,labelName,labelContent from binaryCarbon where binaryId=0").show()
+
+            sql("insert into binaryCarbon2 select binaryId,binaryName,binary,labelName,labelContent from binaryCarbon where binaryId=0 ")
+            val carbonResult2 = sql("SELECT * FROM binaryCarbon2")
+
+            sql("create table binaryCarbon4 using carbon select binaryId,binaryName,binary,labelName,labelContent from binaryCarbon where binaryId=0 ")
+            val carbonResult4 = sql("SELECT * FROM binaryCarbon4")
+            val carbonResult = sql("SELECT * FROM binaryCarbon")
+
+            assert(3 == carbonResult.collect().length)
+            assert(1 == carbonResult4.collect().length)
+            assert(1 == carbonResult2.collect().length)
+            checkAnswer(carbonResult4, carbonResult2)
+
+            try {
+                sql("insert into binaryCarbon3 select binaryId,binaryName,binary,labelName,labelContent from binaryCarbon where binaryId=0 ")
+                assert(false)
+            } catch {
+                case e: Exception =>
+                    e.printStackTrace()
+                    assert(true)
+            }
+            sql("DROP TABLE IF EXISTS binaryCarbon")
+            sql("DROP TABLE IF EXISTS binaryCarbon2")
+            sql("DROP TABLE IF EXISTS binaryCarbon3")
+            sql("DROP TABLE IF EXISTS binaryCarbon4")
+        }
+    }
+
+    test("Test unsafe as false") {
+        CarbonProperties.getInstance()
+                .addProperty(CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE, "false")
+        FileUtils.deleteDirectory(new File(outputPath))
+        sql("DROP TABLE IF EXISTS binaryCarbon")
+        sql("DROP TABLE IF EXISTS binaryCarbon3")
+        if (SparkUtil.isSparkVersionEqualTo("2.1")) {
+            sql(s"CREATE TABLE binaryCarbon USING CARBON OPTIONS(PATH '$writerPath')")
+            sql(s"CREATE TABLE binaryCarbon3 USING CARBON OPTIONS(PATH '$outputPath')" + " AS SELECT * FROM binaryCarbon")
+        } else {
+            sql(s"CREATE TABLE binaryCarbon USING CARBON LOCATION '$writerPath'")
+            sql(s"CREATE TABLE binaryCarbon3 USING CARBON LOCATION '$outputPath'" + " AS SELECT * FROM binaryCarbon")
+        }
+        checkAnswer(sql("SELECT COUNT(*) FROM binaryCarbon"),
+            Seq(Row(3)))
+        checkAnswer(sql("SELECT COUNT(*) FROM binaryCarbon3"),
+            Seq(Row(3)))
+        sql("DROP TABLE IF EXISTS binaryCarbon")
+        sql("DROP TABLE IF EXISTS binaryCarbon3")
+
+        FileUtils.deleteDirectory(new File(outputPath))
+        CarbonProperties.getInstance()
+                .addProperty(CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE,
+                    CarbonCommonConstants.ENABLE_UNSAFE_COLUMN_PAGE_DEFAULT)
+    }
+
+    test("insert into for hive and carbon, CTAS") {
+        sql("DROP TABLE IF EXISTS hiveTable")
+        sql("DROP TABLE IF EXISTS carbontable")
+        sql("DROP TABLE IF EXISTS hiveTable2")
+        sql("DROP TABLE IF EXISTS carbontable2")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS hivetable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    image binary,
+               |    autoLabel boolean)
+               | row format delimited fields terminated by ','
+             """.stripMargin)
+        sql("insert into hivetable values(1,true,'Bob','binary',false)")
+        sql("insert into hivetable values(2,false,'Xu','test',true)")
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    image binary,
+               |    autoLabel boolean)
+               | using carbon
+             """.stripMargin)
+        sql("insert into carbontable values(1,true,'Bob','binary',false)")
+        sql("insert into carbontable values(2,false,'Xu','test',true)")
+        val carbonResult = sql("SELECT * FROM carbontable")
+        val hiveResult = sql("SELECT * FROM hivetable")
+
+        assert(2 == carbonResult.collect().length)
+        assert(2 == hiveResult.collect().length)
+        checkAnswer(hiveResult, carbonResult)
+        carbonResult.collect().foreach { each =>
+            if (1 == each.get(0)) {
+                assert("binary".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (2 == each.get(0)) {
+                assert("test".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        sql("CREATE TABLE hivetable2 AS SELECT * FROM carbontable")
+        sql("CREATE TABLE carbontable2  USING CARBON AS SELECT * FROM hivetable")
+        val carbonResult2 = sql("SELECT * FROM carbontable2")
+        val hiveResult2 = sql("SELECT * FROM hivetable2")
+        checkAnswer(hiveResult2, carbonResult2)
+        checkAnswer(carbonResult, carbonResult2)
+        checkAnswer(hiveResult, hiveResult2)
+        assert(2 == carbonResult2.collect().length)
+        assert(2 == hiveResult2.collect().length)
+
+        sql("INSERT INTO hivetable2 SELECT * FROM carbontable")
+        sql("INSERT INTO carbontable2 SELECT * FROM hivetable")
+        val carbonResult3 = sql("SELECT * FROM carbontable2")
+        val hiveResult3 = sql("SELECT * FROM hivetable2")
+        checkAnswer(carbonResult3, hiveResult3)
+        assert(4 == carbonResult3.collect().length)
+        assert(4 == hiveResult3.collect().length)
+    }
+
+    test("insert into for parquet and carbon, CTAS") {
+        sql("DROP TABLE IF EXISTS parquetTable")
+        sql("DROP TABLE IF EXISTS carbontable")
+        sql("DROP TABLE IF EXISTS parquetTable2")
+        sql("DROP TABLE IF EXISTS carbontable2")
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS parquettable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    image binary,
+               |    autoLabel boolean)
+               | using parquet
+             """.stripMargin)
+        sql("insert into parquettable values(1,true,'Bob','binary',false)")
+        sql("insert into parquettable values(2,false,'Xu','test',true)")
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    image binary,
+               |    autoLabel boolean)
+               | using carbon
+             """.stripMargin)
+        sql("insert into carbontable values(1,true,'Bob','binary',false)")
+        sql("insert into carbontable values(2,false,'Xu','test',true)")
+        val carbonResult = sql("SELECT * FROM carbontable")
+        val parquetResult = sql("SELECT * FROM parquettable")
+
+        assert(2 == carbonResult.collect().length)
+        assert(2 == parquetResult.collect().length)
+        checkAnswer(parquetResult, carbonResult)
+        carbonResult.collect().foreach { each =>
+            if (1 == each.get(0)) {
+                assert("binary".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (2 == each.get(0)) {
+                assert("test".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        sql("CREATE TABLE parquettable2 AS SELECT * FROM carbontable")
+        sql("CREATE TABLE carbontable2  USING CARBON AS SELECT * FROM parquettable")
+        val carbonResult2 = sql("SELECT * FROM carbontable2")
+        val parquetResult2 = sql("SELECT * FROM parquettable2")
+        checkAnswer(parquetResult2, carbonResult2)
+        checkAnswer(carbonResult, carbonResult2)
+        checkAnswer(parquetResult, parquetResult2)
+        assert(2 == carbonResult2.collect().length)
+        assert(2 == parquetResult2.collect().length)
+
+        sql("INSERT INTO parquettable2 SELECT * FROM carbontable")
+        sql("INSERT INTO carbontable2 SELECT * FROM parquettable")
+        val carbonResult3 = sql("SELECT * FROM carbontable2")
+        val parquetResult3 = sql("SELECT * FROM parquettable2")
+        checkAnswer(carbonResult3, parquetResult3)
+        assert(4 == carbonResult3.collect().length)
+        assert(4 == parquetResult3.collect().length)
+    }
+
+    test("insert into carbon as select from hive after hive load data") {
+        sql("DROP TABLE IF EXISTS hiveTable")
+        sql("DROP TABLE IF EXISTS carbontable")
+        sql("DROP TABLE IF EXISTS hiveTable2")
+        sql("DROP TABLE IF EXISTS carbontable2")
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS hivetable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    image binary,
+               |    autoLabel boolean)
+               | row format delimited fields terminated by '|'
+             """.stripMargin)
+        sql(
+            s"""
+               | LOAD DATA LOCAL INPATH '$resourcesPath/binarystringdata.csv'
+               | INTO TABLE hivetable
+             """.stripMargin)
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    image binary,
+               |    autoLabel boolean)
+               | using carbon
+             """.stripMargin)
+        sql("insert into carbontable select * from hivetable")
+        val carbonResult = sql("SELECT * FROM carbontable")
+        val hiveResult = sql("SELECT * FROM hivetable")
+
+        assert(3 == carbonResult.collect().length)
+        assert(3 == hiveResult.collect().length)
+        checkAnswer(hiveResult, carbonResult)
+        carbonResult.collect().foreach { each =>
+            if (2 == each.get(0)) {
+                assert("\u0001history\u0002".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (1 == each.get(0)) {
+                assert("\u0001education\u0002".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (3 == each.get(0)) {
+                assert("".equals(new String(each.getAs[Array[Byte]](3)))
+                        || "\u0001biology\u0002".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        sql("CREATE TABLE hivetable2 AS SELECT * FROM carbontable")
+        sql("CREATE TABLE carbontable2  USING CARBON AS SELECT * FROM hivetable")
+        val carbonResult2 = sql("SELECT * FROM carbontable2")
+        val hiveResult2 = sql("SELECT * FROM hivetable2")
+        checkAnswer(hiveResult2, carbonResult2)
+        checkAnswer(carbonResult, carbonResult2)
+        checkAnswer(hiveResult, hiveResult2)
+        assert(3 == carbonResult2.collect().length)
+        assert(3 == hiveResult2.collect().length)
+
+        sql("INSERT INTO hivetable2 SELECT * FROM carbontable")
+        sql("INSERT INTO carbontable2 SELECT * FROM hivetable")
+        val carbonResult3 = sql("SELECT * FROM carbontable2")
+        val hiveResult3 = sql("SELECT * FROM hivetable2")
+        checkAnswer(carbonResult3, hiveResult3)
+        assert(6 == carbonResult3.collect().length)
+        assert(6 == hiveResult3.collect().length)
+    }
+
+    test("filter for hive and carbon") {
+        sql("DROP TABLE IF EXISTS hiveTable")
+        sql("DROP TABLE IF EXISTS carbontable")
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS hivetable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    image binary,
+               |    autoLabel boolean)
+               | row format delimited fields terminated by ','
+             """.stripMargin)
+        sql("insert into hivetable values(1,true,'Bob','binary',false)")
+        sql("insert into hivetable values(2,false,'Xu','test',true)")
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    image binary,
+               |    autoLabel boolean)
+               | using carbon
+             """.stripMargin)
+        sql("insert into carbontable values(1,true,'Bob','binary',false)")
+        sql("insert into carbontable values(2,false,'Xu','test',true)")
+
+        // filter with equal
+        val hiveResult = sql("SELECT * FROM hivetable where image=cast('binary' as binary)")
+        val carbonResult = sql("SELECT * FROM carbontable where image=cast('binary' as binary)")
+
+        checkAnswer(hiveResult, carbonResult)
+        assert(1 == carbonResult.collect().length)
+        carbonResult.collect().foreach { each =>
+            assert(1 == each.get(0))
+            assert("binary".equals(new String(each.getAs[Array[Byte]](3))))
+        }
+
+        // filter with non string
+        val exception = intercept[Exception] {
+            sql("SELECT * FROM carbontable where image=binary").collect()
+        }
+        assert(exception.getMessage.contains("cannot resolve '`binary`' given input columns"))
+
+        // filter with not equal
+        val hiveResult3 = sql("SELECT * FROM hivetable where image!=cast('binary' as binary)")
+        val carbonResult3 = sql("SELECT * FROM carbontable where image!=cast('binary' as binary)")
+        checkAnswer(hiveResult3, carbonResult3)
+        assert(1 == carbonResult3.collect().length)
+        carbonResult3.collect().foreach { each =>
+            assert(2 == each.get(0))
+            assert("test".equals(new String(each.getAs[Array[Byte]](3))))
+        }
+
+        // filter with in
+        val hiveResult4 = sql("SELECT * FROM hivetable where image in (cast('binary' as binary))")
+        val carbonResult4 = sql("SELECT * FROM carbontable where image in (cast('binary' as binary))")
+        checkAnswer(hiveResult4, carbonResult4)
+        assert(1 == carbonResult4.collect().length)
+        carbonResult4.collect().foreach { each =>
+            assert(1 == each.get(0))
+            assert("binary".equals(new String(each.getAs[Array[Byte]](3))))
+        }
+
+        // filter with not in
+        val hiveResult5 = sql("SELECT * FROM hivetable where image not in (cast('binary' as binary))")
+        val carbonResult5 = sql("SELECT * FROM carbontable where image not in (cast('binary' as binary))")
+        checkAnswer(hiveResult5, carbonResult5)
+        assert(1 == carbonResult5.collect().length)
+        carbonResult5.collect().foreach { each =>
+            assert(2 == each.get(0))
+            assert("test".equals(new String(each.getAs[Array[Byte]](3))))
+        }
+    }
+
+    test("Spark DataSource don't support update, delete") {
+        sql("DROP TABLE IF EXISTS carbontable")
+        sql("DROP TABLE IF EXISTS carbontable2")
+
+        sql(
+            s"""
+               | CREATE TABLE IF NOT EXISTS carbontable (
+               |    id int,
+               |    label boolean,
+               |    name string,
+               |    binaryField binary,
+               |    autoLabel boolean)
+               | using carbon
+             """.stripMargin)
+        sql("insert into carbontable values(1,true,'Bob','binary',false)")
+        sql("insert into carbontable values(2,false,'Xu','test',true)")
+
+        val carbonResult = sql("SELECT * FROM carbontable")
+
+        carbonResult.collect().foreach { each =>
+            if (1 == each.get(0)) {
+                assert("binary".equals(new String(each.getAs[Array[Byte]](3))))
+            } else if (2 == each.get(0)) {
+                assert("test".equals(new String(each.getAs[Array[Byte]](3))))
+            } else {
+                assert(false)
+            }
+        }
+
+        var exception = intercept[Exception] {
+            sql("UPDATE carbontable SET binaryField = 'binary2' WHERE id = 1").show()
+        }
+        assert(exception.getMessage.contains("mismatched input 'UPDATE' expecting"))
+
+        exception = intercept[Exception] {
+            sql("DELETE FROM carbontable WHERE id = 1").show()
+        }
+        assert(exception.getMessage.contains("Operation not allowed: DELETE FROM"))
+    }
+
+}
diff --git a/integration/spark-datasource/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceTest.scala b/integration/spark-datasource/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceTest.scala
index d25e675..19cf99f 100644
--- a/integration/spark-datasource/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceTest.scala
+++ b/integration/spark-datasource/src/test/scala/org/apache/spark/sql/carbondata/datasource/SparkCarbonDataSourceTest.scala
@@ -1779,6 +1779,32 @@ class SparkCarbonDataSourceTest extends FunSuite with BeforeAndAfterAll {
     assert(ex.getMessage.contains("column: abc specified in inverted index columns does not exist in schema"))
   }
 
+  var writerPath = new File(this.getClass.getResource("/").getPath
+          + "../../target/SparkCarbonFileFormat/WriterOutput/")
+          .getCanonicalPath
+
+  test("Don't support load for datasource") {
+    import spark._
+    sql("DROP TABLE IF EXISTS binaryCarbon")
+    if (SparkUtil.isSparkVersionXandAbove("2.2")) {
+      sql(
+        s"""
+           | CREATE TABLE binaryCarbon(
+           |    binaryId INT,
+           |    binaryName STRING,
+           |    binary BINARY,
+           |    labelName STRING,
+           |    labelContent STRING
+           |) USING CARBON  """.stripMargin)
+
+      val exception = intercept[Exception] {
+        sql(s"load data local inpath '$writerPath' into table binaryCarbon")
+      }
+      assert(exception.getMessage.contains("LOAD DATA is not supported for datasource tables"))
+    }
+    sql("DROP TABLE IF EXISTS binaryCarbon")
+  }
+
   override protected def beforeAll(): Unit = {
     drop
     createParquetTable
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
index 9835938..b84a7b0 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
@@ -167,7 +167,13 @@ object CarbonFilters {
       } else {
         dataTypeOfAttribute
       }
-      new CarbonLiteralExpression(value, dataType)
+      val dataValue = if (dataTypeOfAttribute.equals(CarbonDataTypes.BINARY)
+              && Option(value).isDefined) {
+        new String(value.asInstanceOf[Array[Byte]])
+      } else {
+        value
+      }
+      new CarbonLiteralExpression(dataValue, dataType)
     }
 
     createFilter(predicate)
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/BinaryFieldConverterImpl.java b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/BinaryFieldConverterImpl.java
new file mode 100644
index 0000000..766cfeb
--- /dev/null
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/BinaryFieldConverterImpl.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.carbondata.processing.loading.converter.impl;
+
+import java.nio.charset.Charset;
+
+import org.apache.carbondata.common.logging.LogServiceFactory;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastore.row.CarbonRow;
+import org.apache.carbondata.core.metadata.datatype.DataType;
+import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
+import org.apache.carbondata.processing.loading.DataField;
+import org.apache.carbondata.processing.loading.converter.BadRecordLogHolder;
+import org.apache.carbondata.processing.loading.converter.FieldConverter;
+import org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException;
+
+import org.apache.log4j.Logger;
+
+/**
+ * Converter for binary
+ */
+public class BinaryFieldConverterImpl implements FieldConverter {
+  private static final Logger LOGGER =
+      LogServiceFactory.getLogService(BinaryFieldConverterImpl.class.getName());
+
+  private int index;
+  private DataType dataType;
+  private CarbonDimension dimension;
+  private String nullformat;
+  private boolean isEmptyBadRecord;
+  private DataField dataField;
+  public BinaryFieldConverterImpl(DataField dataField, String nullformat, int index,
+      boolean isEmptyBadRecord) {
+    this.dataType = dataField.getColumn().getDataType();
+    this.dimension = (CarbonDimension) dataField.getColumn();
+    this.nullformat = nullformat;
+    this.index = index;
+    this.isEmptyBadRecord = isEmptyBadRecord;
+    this.dataField = dataField;
+  }
+
+  @Override
+  public void convert(CarbonRow row, BadRecordLogHolder logHolder)
+      throws CarbonDataLoadingException {
+    if (row.getObject(index) instanceof String) {
+      row.update((((String) row.getObject(index)))
+          .getBytes(Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET)), index);
+    } else if (row.getObject(index) instanceof byte[]) {
+      row.update(row.getObject(index), index);
+    } else {
+      throw new CarbonDataLoadingException("Binary only support String and byte[] data type");
+    }
+  }
+
+  @Override
+  public Object convert(Object value, BadRecordLogHolder logHolder)
+      throws RuntimeException {
+    return null;
+  }
+
+  @Override
+  public void clear() {
+  }
+}
\ No newline at end of file
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java
index 3b4df75..a6c61b4 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/converter/impl/FieldEncoderFactory.java
@@ -119,6 +119,8 @@ public class FieldEncoderFactory {
         return new ComplexFieldConverterImpl(
             createComplexDataType(dataField, absoluteTableIdentifier,
                 client, useOnePass, localCache, index, nullFormat, isEmptyBadRecord), index);
+      } else if (dataField.getColumn().getDataType() == DataTypes.BINARY) {
+        return new BinaryFieldConverterImpl(dataField, nullFormat, index, isEmptyBadRecord);
       } else {
         // if the no dictionary column is a numeric column and no need to convert to binary
         // then treat it is as measure col
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java
index 8a0f8ea..2157c60 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/sort/SortStepRowHandler.java
@@ -400,6 +400,11 @@ public class SortStepRowHandler implements Serializable {
       byte[] decimalBytes = new byte[len];
       rowBuffer.get(decimalBytes);
       tmpContent = DataTypeUtil.byteToBigDecimal(decimalBytes);
+    } else if (DataTypes.BINARY == tmpDataType) {
+      int len = rowBuffer.getInt();
+      byte[] bytes = new byte[len];
+      rowBuffer.get(bytes);
+      tmpContent = bytes;
     } else {
       throw new IllegalArgumentException("Unsupported data type: " + tmpDataType);
     }
@@ -847,6 +852,10 @@ public class SortStepRowHandler implements Serializable {
       byte[] decimalBytes = DataTypeUtil.bigDecimalToByte((BigDecimal) tmpValue);
       reUsableByteArrayDataOutputStream.writeShort((short) decimalBytes.length);
       reUsableByteArrayDataOutputStream.write(decimalBytes);
+    } else if (DataTypes.BINARY == tmpDataType) {
+      byte[] bytes = (byte[]) tmpValue;
+      reUsableByteArrayDataOutputStream.writeInt(bytes.length);
+      reUsableByteArrayDataOutputStream.write(bytes);
     } else {
       throw new IllegalArgumentException("Unsupported data type: " + tmpDataType);
     }
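
The two hunks above give BINARY its own layout in the intermediate sort row: an int length prefix followed by the raw bytes (decimal, by contrast, keeps a short length prefix). A standalone sketch of that framing, independent of the Carbon classes:

  import java.nio.ByteBuffer

  // Write: int length prefix followed by the raw bytes.
  def writeBinary(value: Array[Byte]): Array[Byte] = {
    val buffer = ByteBuffer.allocate(4 + value.length)
    buffer.putInt(value.length)
    buffer.put(value)
    buffer.array()
  }

  // Read: consume the length, then copy exactly that many bytes back out.
  def readBinary(rowBuffer: ByteBuffer): Array[Byte] = {
    val len = rowBuffer.getInt()
    val bytes = new Array[Byte](len)
    rowBuffer.get(bytes)
    bytes
  }

  val roundTrip = readBinary(ByteBuffer.wrap(writeBinary("image-bytes".getBytes("UTF-8"))))
  assert(new String(roundTrip, "UTF-8") == "image-bytes")
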
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java b/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
index 76c5613..a6e4b34 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
@@ -292,7 +292,8 @@ public class CarbonFactDataHandlerColumnar implements CarbonFactHandler {
       Object[] nonDictArray = WriteStepRowUtil.getNoDictAndComplexDimension(row);
       for (int i = 0; i < noDictDataTypesList.size(); i++) {
         DataType columnType = noDictDataTypesList.get(i);
-        if ((columnType == DataTypes.STRING) || (columnType == DataTypes.VARCHAR)) {
+        if ((columnType == DataTypes.STRING) || (columnType == DataTypes.VARCHAR) || (columnType
+            == DataTypes.BINARY)) {
           currentElementLength = ((byte[]) nonDictArray[i]).length;
           noDictColumnPageSize[bucketCounter] += currentElementLength;
           canSnappyHandleThisRow(noDictColumnPageSize[bucketCounter]);
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java b/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
index a201679..ebd21e8 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
@@ -121,6 +121,8 @@ public class TablePage {
         DataType dataType = DataTypes.STRING;
         if (DataTypes.VARCHAR == spec.getSchemaDataType()) {
           dataType = DataTypes.VARCHAR;
+        } else if (DataTypes.BINARY == spec.getSchemaDataType()) {
+          dataType = DataTypes.BINARY;
         }
         ColumnPageEncoderMeta columnPageEncoderMeta =
             new ColumnPageEncoderMeta(spec, dataType, columnCompressor);
@@ -147,7 +149,7 @@ public class TablePage {
           }
         }
         // set the stats collector according to the data type of the columns
-        if (DataTypes.VARCHAR == dataType) {
+        if (DataTypes.VARCHAR == dataType || DataTypes.BINARY == dataType) {
           page.setStatsCollector(LVLongStringStatsCollector.newInstance());
         } else if (DataTypeUtil.isPrimitiveColumn(spec.getSchemaDataType())) {
           if (spec.getSchemaDataType() == DataTypes.TIMESTAMP) {
@@ -216,7 +218,7 @@ public class TablePage {
       dictDimensionPages[i].putData(rowId, keys[i]);
     }
 
-    // 2. convert noDictionary columns and complex columns and varchar columns.
+    // 2. convert noDictionary columns and complex columns and varchar, binary columns.
     int noDictionaryCount = noDictDimensionPages.length;
     int complexColumnCount = complexDimensionPages.length;
     if (noDictionaryCount > 0 || complexColumnCount > 0) {
@@ -225,8 +227,8 @@ public class TablePage {
           tableSpec.getNoDictionaryDimensionSpec();
       Object[] noDictAndComplex = WriteStepRowUtil.getNoDictAndComplexDimension(row);
       for (int i = 0; i < noDictAndComplex.length; i++) {
-        if (noDictionaryDimensionSpec.get(i).getSchemaDataType()
-            == DataTypes.VARCHAR) {
+        if (noDictionaryDimensionSpec.get(i).getSchemaDataType() == DataTypes.VARCHAR
+            || noDictionaryDimensionSpec.get(i).getSchemaDataType() == DataTypes.BINARY) {
           byte[] valueWithLength = addIntLengthToByteArray((byte[]) noDictAndComplex[i]);
           noDictDimensionPages[i].putData(rowId, valueWithLength);
         } else if (i < noDictionaryCount) {
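
For orientation: no-dictionary BINARY values reuse the LV (length-value) page layout already used for VARCHAR, i.e. addIntLengthToByteArray prefixes each value with its length before it is put into the page. Below is a minimal standalone sketch of that layout, assuming a 4-byte length prefix (mirroring the writeInt/write pattern seen at the top of this diff); it is illustrative only and not CarbonData code.

    import java.nio.ByteBuffer;

    public class LvLayoutSketch {
      // Hypothetical stand-in for the addIntLengthToByteArray call above:
      // prefix the payload with its length encoded as a 4-byte int.
      static byte[] addIntLengthToByteArray(byte[] value) {
        ByteBuffer buffer = ByteBuffer.allocate(4 + value.length);
        buffer.putInt(value.length);
        buffer.put(value);
        return buffer.array();
      }

      public static void main(String[] args) {
        byte[] lv = addIntLengthToByteArray("Binary".getBytes());
        // prints 10 = 4-byte length prefix + 6 payload bytes
        System.out.println(lv.length);
      }
    }
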
diff --git a/store/sdk/pom.xml b/store/sdk/pom.xml
index 3d25426..4ea828c 100644
--- a/store/sdk/pom.xml
+++ b/store/sdk/pom.xml
@@ -14,6 +14,7 @@
 
   <properties>
     <dev.path>${basedir}/../../dev</dev.path>
+    <dep.jackson.version>2.6.5</dep.jackson.version>
   </properties>
 
   <dependencies>
@@ -74,7 +75,7 @@
                 <exclude>META-INF/*.DSA</exclude>
                 <exclude>META-INF/*.RSA</exclude>
                 <exclude>META-INF/vfs-providers.xml</exclude>
-                <exclude>io/netty/**</exclude>
+                <!--<exclude>io/netty/**</exclude>-->
               </excludes>
             </filter>
           </filters>
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CSVCarbonWriter.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CSVCarbonWriter.java
index a8899a7..aa5c671 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CSVCarbonWriter.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CSVCarbonWriter.java
@@ -65,7 +65,7 @@ class CSVCarbonWriter extends CarbonWriter {
   @Override
   public void write(Object object) throws IOException {
     try {
-      writable.set((String[]) object);
+      writable.set((Object[]) object);
       recordWriter.write(NullWritable.get(), writable);
     } catch (Exception e) {
       throw new IOException(e);
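
With the cast relaxed from String[] to Object[], a CSV-input writer can accept rows that mix strings, boxed primitives and byte[] values. A minimal, self-contained sketch follows; the output path and schema are placeholders chosen for illustration.

    import org.apache.carbondata.core.metadata.datatype.DataTypes;
    import org.apache.carbondata.sdk.file.CarbonWriter;
    import org.apache.carbondata.sdk.file.Field;
    import org.apache.carbondata.sdk.file.Schema;

    public class ObjectRowWriteSketch {
      public static void main(String[] args) throws Exception {
        Field[] fields = new Field[]{
            new Field("name", DataTypes.STRING),
            new Field("age", DataTypes.INT),
            new Field("image", DataTypes.BINARY)};
        CarbonWriter writer = CarbonWriter.builder()
            .outputPath("./target/objectRowSketch")   // placeholder output path
            .withCsvInput(new Schema(fields))
            .writtenBy("ObjectRowWriteSketch")
            .build();
        // the relaxed cast lets a row mix String, boxed primitives and byte[]
        writer.write(new Object[]{"robot0", 25, "Binary".getBytes()});
        writer.close();
      }
    }
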
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonReader.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonReader.java
index 9666cfa..e5c0680 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonReader.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonReader.java
@@ -144,7 +144,6 @@ public class CarbonReader<T> {
 
   /**
    * Return a new {@link CarbonReaderBuilder} instance
-   * Default value of table name is table + tablePath + time
    *
    * @param tablePath table path
    * @return CarbonReaderBuilder object
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
index cfae2ae..7569926 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
@@ -113,6 +113,7 @@ public class CarbonWriterBuilder {
 
   /**
    * sets the list of columns for which inverted index needs to be generated
+   *
    * @param invertedIndexColumns is a string array of columns for which inverted index needs to
    * be generated.
    * If it is null or an empty array, inverted index will be generated for none of the columns
@@ -156,30 +157,30 @@ public class CarbonWriterBuilder {
 
   /**
    * To support the load options for sdk writer
-   * @param options key,value pair of load options.
-   * supported keys values are
-   * a. bad_records_logger_enable -- true (write into separate logs), false
-   * b. bad_records_action -- FAIL, FORCE, IGNORE, REDIRECT
-   * c. bad_record_path -- path
-   * d. dateformat -- same as JAVA SimpleDateFormat
-   * e. timestampformat -- same as JAVA SimpleDateFormat
-   * f. complex_delimiter_level_1 -- value to Split the complexTypeData
-   * g. complex_delimiter_level_2 -- value to Split the nested complexTypeData
-   * h. quotechar
-   * i. escapechar
-   *
-   * Default values are as follows.
-   *
-   * a. bad_records_logger_enable -- "false"
-   * b. bad_records_action -- "FAIL"
-   * c. bad_record_path -- ""
-   * d. dateformat -- "" , uses from carbon.properties file
-   * e. timestampformat -- "", uses from carbon.properties file
-   * f. complex_delimiter_level_1 -- "\001"
-   * g. complex_delimiter_level_2 -- "\002"
-   * h. quotechar -- "\""
-   * i. escapechar -- "\\"
    *
+   * @param options key-value pairs of load options.
+   *                supported keys and their values are
+   *                a. bad_records_logger_enable -- true (write into separate logs), false
+   *                b. bad_records_action -- FAIL, FORCE, IGNORE, REDIRECT
+   *                c. bad_record_path -- path
+   *                d. dateformat -- same as JAVA SimpleDateFormat
+   *                e. timestampformat -- same as JAVA SimpleDateFormat
+   *                f. complex_delimiter_level_1 -- value to Split the complexTypeData
+   *                g. complex_delimiter_level_2 -- value to Split the nested complexTypeData
+   *                h. quotechar
+   *                i. escapechar
+   *                <p>
+   *                Default values are as follows.
+   *                <p>
+   *                a. bad_records_logger_enable -- "false"
+   *                b. bad_records_action -- "FAIL"
+   *                c. bad_record_path -- ""
+   *                d. dateformat -- "", uses the value from carbon.properties file
+   *                e. timestampformat -- "", uses the value from carbon.properties file
+   *                f. complex_delimiter_level_1 -- "\001"
+   *                g. complex_delimiter_level_2 -- "\002"
+   *                h. quotechar -- "\""
+   *                i. escapechar -- "\\"
    * @return updated CarbonWriterBuilder
    */
   public CarbonWriterBuilder withLoadOptions(Map<String, String> options) {
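
As a usage illustration of the options documented above, here is a minimal sketch that passes two load options before building the writer; the output path, schema and option values are illustrative only.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.carbondata.core.metadata.datatype.DataTypes;
    import org.apache.carbondata.sdk.file.CarbonWriter;
    import org.apache.carbondata.sdk.file.Field;
    import org.apache.carbondata.sdk.file.Schema;

    public class LoadOptionsSketch {
      public static void main(String[] args) throws Exception {
        Field[] fields = new Field[]{
            new Field("name", DataTypes.STRING),
            new Field("dob", DataTypes.DATE)};
        Map<String, String> loadOptions = new HashMap<>();
        loadOptions.put("bad_records_action", "FORCE");
        loadOptions.put("dateformat", "yyyy-MM-dd");
        CarbonWriter writer = CarbonWriter.builder()
            .outputPath("./target/loadOptionsSketch")  // placeholder output path
            .withLoadOptions(loadOptions)
            .withCsvInput(new Schema(fields))
            .writtenBy("LoadOptionsSketch")
            .build();
        writer.write(new String[]{"robot0", "2019-03-02"});
        writer.close();
      }
    }
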
@@ -279,7 +280,7 @@ public class CarbonWriterBuilder {
     Set<String> supportedOptions = new HashSet<>(Arrays
         .asList("table_blocksize", "table_blocklet_size", "local_dictionary_threshold",
             "local_dictionary_enable", "sort_columns", "sort_scope", "long_string_columns",
-            "inverted_index","table_page_size_inmb"));
+            "inverted_index", "table_page_size_inmb"));
 
     for (String key : options.keySet()) {
       if (!supportedOptions.contains(key.toLowerCase())) {
@@ -613,11 +614,11 @@ public class CarbonWriterBuilder {
 
   private void validateLongStringColumns(Schema carbonSchema, Set<String> longStringColumns) {
     // long string columns must be string or varchar type
-    for (Field field :carbonSchema.getFields()) {
+    for (Field field : carbonSchema.getFields()) {
       if (longStringColumns.contains(field.getFieldName().toLowerCase()) && (
           (field.getDataType() != DataTypes.STRING) && field.getDataType() != DataTypes.VARCHAR)) {
         throw new RuntimeException(
-            "long string column : " + field.getFieldName() + "is not supported for data type: "
+            "long string column : " + field.getFieldName() + " is not supported for data type: "
                 + field.getDataType());
       }
     }
@@ -662,7 +663,7 @@ public class CarbonWriterBuilder {
       for (Field field : schema.getFields()) {
         if (null != field) {
           if (field.getDataType() == DataTypes.STRING ||
-              field.getDataType() == DataTypes.DATE  ||
+              field.getDataType() == DataTypes.DATE ||
               field.getDataType() == DataTypes.TIMESTAMP) {
             sortColumnsList.add(field.getFieldName());
           }
@@ -704,7 +705,7 @@ public class CarbonWriterBuilder {
     // differentiated to any level
     AtomicInteger valIndex = new AtomicInteger(0);
     // Check if any of the columns specified in sort columns are missing from schema.
-    for (String sortColumn: sortColumnsList) {
+    for (String sortColumn : sortColumnsList) {
       boolean exists = false;
       for (Field field : fields) {
         if (field.getFieldName().equalsIgnoreCase(sortColumn)) {
@@ -718,7 +719,7 @@ public class CarbonWriterBuilder {
       }
     }
     // Check if any of the columns specified in inverted index are missing from schema.
-    for (String invertedIdxColumn: invertedIdxColumnsList) {
+    for (String invertedIdxColumn : invertedIdxColumnsList) {
       boolean exists = false;
       for (Field field : fields) {
         if (field.getFieldName().equalsIgnoreCase(invertedIdxColumn)) {
@@ -744,10 +745,11 @@ public class CarbonWriterBuilder {
           // unsupported types for ("array", "struct", "double", "float", "decimal")
           if (field.getDataType() == DataTypes.DOUBLE || field.getDataType() == DataTypes.FLOAT
               || DataTypes.isDecimal(field.getDataType()) || field.getDataType().isComplexType()
-              || field.getDataType() == DataTypes.VARCHAR) {
+              || field.getDataType() == DataTypes.VARCHAR
+              || field.getDataType() == DataTypes.BINARY) {
             String errorMsg =
-                "sort columns not supported for array, struct, map, double, float, decimal,"
-                    + "varchar";
+                "sort columns not supported for array, struct, map, double, float, decimal, "
+                    + "varchar, binary";
             throw new RuntimeException(errorMsg);
           }
         }
@@ -814,7 +816,7 @@ public class CarbonWriterBuilder {
     if (schema == null) {
       return null;
     }
-    Field[] fields =  schema.getFields();
+    Field[] fields = schema.getFields();
     for (int i = 0; i < fields.length; i++) {
       if (fields[i] != null) {
         if (longStringColumns != null) {
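
To make the new restriction concrete, a builder configured like the following sketch is expected to fail at build() time with the "sort columns not supported ... binary" message produced by the validation added above; the path and schema are placeholders.

    import org.apache.carbondata.core.metadata.datatype.DataTypes;
    import org.apache.carbondata.sdk.file.CarbonWriter;
    import org.apache.carbondata.sdk.file.Field;
    import org.apache.carbondata.sdk.file.Schema;

    public class BinarySortColumnsSketch {
      public static void main(String[] args) throws Exception {
        Field[] fields = new Field[]{
            new Field("name", DataTypes.STRING),
            new Field("image", DataTypes.BINARY)};
        try {
          CarbonWriter.builder()
              .outputPath("./target/binarySortColumnsSketch")  // placeholder output path
              .withTableProperty("sort_columns", "image")      // binary column in sort_columns
              .withCsvInput(new Schema(fields))
              .writtenBy("BinarySortColumnsSketch")
              .build();
        } catch (RuntimeException e) {
          // expected: "sort columns not supported for array, struct, map, double, float,
          // decimal, varchar, binary"
          System.out.println(e.getMessage());
        }
      }
    }
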
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/Field.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/Field.java
index f7fceda..ab375f8 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/Field.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/Field.java
@@ -78,6 +78,8 @@ public class Field {
       this.type = DataTypes.FLOAT;
     } else if (type.equalsIgnoreCase("double")) {
       this.type = DataTypes.DOUBLE;
+    } else if (type.equalsIgnoreCase("binary")) {
+      this.type = DataTypes.BINARY;
     } else if (type.equalsIgnoreCase("array")) {
       this.type = DataTypes.createDefaultArrayType();
     } else if (type.equalsIgnoreCase("struct")) {
@@ -114,6 +116,8 @@ public class Field {
       this.type = DataTypes.FLOAT;
     } else if (type.equalsIgnoreCase("double")) {
       this.type = DataTypes.DOUBLE;
+    } else if (type.equalsIgnoreCase("binary")) {
+      this.type = DataTypes.BINARY;
     } else if (type.equalsIgnoreCase("array")) {
       this.type = DataTypes.createArrayType(fields.get(0).getDataType());
     } else if (type.equalsIgnoreCase("struct")) {
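
Illustrative sketch of the two equivalent ways to declare a binary column through the SDK after this change, using only constructors and accessors that appear elsewhere in this patch.

    import org.apache.carbondata.core.metadata.datatype.DataTypes;
    import org.apache.carbondata.sdk.file.Field;
    import org.apache.carbondata.sdk.file.Schema;

    public class BinaryFieldSketch {
      public static void main(String[] args) {
        // both forms resolve to DataTypes.BINARY after the change above
        Field byTypeName = new Field("image1", "binary");
        Field byConstant = new Field("image2", DataTypes.BINARY);
        Schema schema = new Schema(
            new Field[]{new Field("name", DataTypes.STRING), byTypeName, byConstant});
        System.out.println(schema.getFields().length);
      }
    }
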
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/JsonCarbonWriter.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/JsonCarbonWriter.java
index 5f65539..d40e05d 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/JsonCarbonWriter.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/JsonCarbonWriter.java
@@ -41,7 +41,8 @@ import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
  * Writer Implementation to write Json Record to carbondata file.
  * json writer requires the path of json file and carbon schema.
  */
-@InterfaceAudience.User public class JsonCarbonWriter extends CarbonWriter {
+@InterfaceAudience.User
+public class JsonCarbonWriter extends CarbonWriter {
   private RecordWriter<NullWritable, ObjectArrayWritable> recordWriter;
   private TaskAttemptContext context;
   private ObjectArrayWritable writable;
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/RowUtil.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/RowUtil.java
index fdf3cfc..acbf525 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/RowUtil.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/RowUtil.java
@@ -148,4 +148,15 @@ public class RowUtil implements Serializable {
     return ((BigDecimal) data[ordinal]).toString();
   }
 
+  /**
+   * get binary data type data by ordinal
+   *
+   * @param data carbon row data
+   * @param ordinal the data index of Row
+   * @return byte data type data
+   */
+  public static byte[] getBinary(Object[] data, int ordinal) {
+    return (byte[]) data[ordinal];
+  }
+
 }
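
A minimal read-side sketch showing the new getBinary helper together with the row reader. The store path and the ordinal used here are assumptions for illustration; the actual ordinal depends on the internal column layout (dimensions first), as the tests further below show.

    import org.apache.carbondata.sdk.file.CarbonReader;
    import org.apache.carbondata.sdk.file.RowUtil;

    public class ReadBinarySketch {
      public static void main(String[] args) throws Exception {
        // assumes a store written with a binary column, e.g. by one of the writer sketches above
        CarbonReader reader = CarbonReader.builder("./target/binary", "_temp")
            .withRowRecordReader()
            .build();
        while (reader.hasNext()) {
          Object[] row = (Object[]) reader.readNextRow();
          byte[] image = RowUtil.getBinary(row, 1);  // ordinal chosen for illustration
          System.out.println("binary length: " + image.length);
        }
        reader.close();
      }
    }
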
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/utils/SDKUtil.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/utils/SDKUtil.java
new file mode 100644
index 0000000..9fec185
--- /dev/null
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/utils/SDKUtil.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.sdk.file.utils;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.carbondata.core.datastore.filesystem.CarbonFile;
+import org.apache.carbondata.core.datastore.impl.FileFactory;
+
+import org.apache.hadoop.conf.Configuration;
+
+public class SDKUtil {
+  public static ArrayList listFiles(String sourceImageFolder, final String suf) throws Exception {
+    return listFiles(sourceImageFolder, suf, new Configuration(true));
+  }
+
+  public static ArrayList listFiles(String sourceImageFolder,
+                                    final String suf, Configuration conf) throws Exception {
+    final String sufImageFinal = suf;
+    ArrayList result = new ArrayList();
+    CarbonFile[] fileList = FileFactory.getCarbonFile(sourceImageFolder).listFiles();
+    for (int i = 0; i < fileList.length; i++) {
+      if (fileList[i].isDirectory()) {
+        result.addAll(listFiles(fileList[i].getCanonicalPath(), sufImageFinal, conf));
+      } else if (fileList[i].getCanonicalPath().endsWith(sufImageFinal)) {
+        result.add(fileList[i].getCanonicalPath());
+      }
+    }
+    return result;
+  }
+
+
+  public static Object[] getSplitList(String path, String suf,
+                                      int numOfSplit, Configuration conf) throws Exception {
+    List fileList = listFiles(path, suf, conf);
+    List splitList = new ArrayList<List>();
+    if (numOfSplit < fileList.size()) {
+      // If numOfSplit is less than the no. of files,
+      // partition the file list into numOfSplit splits,
+      // each containing one or more file paths
+      float filesPerSplit = (float) fileList.size() / numOfSplit;
+      for (int i = 0; i < numOfSplit; ++i) {
+        splitList.add(fileList.subList(
+            (int) Math.ceil(i * filesPerSplit),
+            (int) Math.ceil(((i + 1) * filesPerSplit))));
+      }
+    } else {
+      // If numOfSplit is greater than or equal to the no. of files,
+      // create one split per file, each containing exactly one file path
+      for (int i = 0; i < fileList.size(); ++i) {
+        splitList.add((fileList.subList(i, i + 1)));
+      }
+    }
+    return splitList.toArray();
+  }
+
+  public static Object[] getSplitList(String path, String suf,
+                                      int numOfSplit) throws Exception {
+    return getSplitList(path, suf, numOfSplit, new Configuration());
+  }
+
+}
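
For illustration, a small sketch of how the new utility can partition discovered files into splits; the folder and suffix are placeholders, and the raw List type matches the utility's own signature.

    import java.util.List;

    import org.apache.carbondata.sdk.file.utils.SDKUtil;

    public class SplitListSketch {
      public static void main(String[] args) throws Exception {
        // recursively list .jpg files under a folder and partition them into 4 buckets
        Object[] splits = SDKUtil.getSplitList("./src/test/resources/image/flowers", ".jpg", 4);
        for (Object split : splits) {
          List filePaths = (List) split;  // each element is a sub-list of file paths
          System.out.println("split of " + filePaths.size() + " file(s): " + filePaths);
        }
      }
    }
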
diff --git a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVCarbonWriterTest.java b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVCarbonWriterTest.java
index 156ca5f..27b4e3a 100644
--- a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVCarbonWriterTest.java
+++ b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CSVCarbonWriterTest.java
@@ -139,7 +139,6 @@ public class CSVCarbonWriterTest {
 
   @Test
   public void testAllPrimitiveDataType() throws IOException {
-    // TODO: write all data type and read by CarbonRecordReader to verify the content
     String path = "./testWriteFiles";
     FileUtils.deleteDirectory(new File(path));
 
@@ -159,15 +158,16 @@ public class CSVCarbonWriterTest {
       CarbonWriter writer = builder.withCsvInput(new Schema(fields)).writtenBy("CSVCarbonWriterTest").build();
 
       for (int i = 0; i < 100; i++) {
-        String[] row = new String[]{
+        Object[] row = new Object[]{
             "robot" + (i % 10),
-            String.valueOf(i),
-            String.valueOf(i),
-            String.valueOf(Long.MAX_VALUE - i),
-            String.valueOf((double) i / 2),
-            String.valueOf(true),
+            i,
+            i,
+            (Long.MAX_VALUE - i),
+            ((double) i / 2),
+            true,
             "2019-03-02",
-            "2019-02-12 03:03:34"
+            "2019-02-12 03:03:34",
+            "1.234567"
         };
         writer.write(row);
       }
diff --git a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CarbonReaderTest.java b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CarbonReaderTest.java
index f09581a..6a3578c 100644
--- a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CarbonReaderTest.java
+++ b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/CarbonReaderTest.java
@@ -23,8 +23,9 @@ import java.sql.Timestamp;
 import java.util.*;
 
 import org.apache.avro.generic.GenericData;
-import org.apache.carbondata.common.exceptions.sql.InvalidLoadOptionException;
 import org.apache.log4j.Logger;
+
+import org.apache.carbondata.common.exceptions.sql.InvalidLoadOptionException;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.datamap.DataMapStoreManager;
@@ -553,27 +554,27 @@ public class CarbonReaderTest extends TestCase {
     CarbonWriter carbonWriter = null;
     try {
       carbonWriter = builder.outputPath(path1).uniqueIdentifier(12345)
-  .withCsvInput(schema).writtenBy("CarbonReaderTest").build();
+          .withCsvInput(schema).writtenBy("CarbonReaderTest").build();
     } catch (InvalidLoadOptionException e) {
       e.printStackTrace();
       Assert.fail(e.getMessage());
     }
-    carbonWriter.write(new String[] { "MNO", "100" });
+    carbonWriter.write(new String[]{"MNO", "100"});
     carbonWriter.close();
 
-    Field[] fields1 = new Field[] { new Field("p1", "string"),
-         new Field("p2", "int") };
+    Field[] fields1 = new Field[]{new Field("p1", "string"),
+        new Field("p2", "int")};
     Schema schema1 = new Schema(fields1);
     CarbonWriterBuilder builder1 = CarbonWriter.builder();
     CarbonWriter carbonWriter1 = null;
     try {
       carbonWriter1 = builder1.outputPath(path2).uniqueIdentifier(12345)
-   .withCsvInput(schema1).writtenBy("CarbonReaderTest").build();
+          .withCsvInput(schema1).writtenBy("CarbonReaderTest").build();
     } catch (InvalidLoadOptionException e) {
       e.printStackTrace();
       Assert.fail(e.getMessage());
     }
-    carbonWriter1.write(new String[] { "PQR", "200" });
+    carbonWriter1.write(new String[]{"PQR", "200"});
     carbonWriter1.close();
 
     try {
@@ -592,9 +593,9 @@ public class CarbonReaderTest extends TestCase {
             .build();
 
     while (reader1.hasNext()) {
-       Object[] row1 = (Object[]) reader1.readNextRow();
-       System.out.println(row1[0]);
-       System.out.println(row1[1]);
+      Object[] row1 = (Object[]) reader1.readNextRow();
+      System.out.println(row1[0]);
+      System.out.println(row1[1]);
     }
     reader1.close();
 
@@ -785,7 +786,8 @@ public class CarbonReaderTest extends TestCase {
     TestUtil.writeFilesAndVerify(100, new Schema(fields), path);
 
     File[] dataFiles = new File(path).listFiles(new FilenameFilter() {
-      @Override public boolean accept(File dir, String name) {
+      @Override
+      public boolean accept(File dir, String name) {
         return name.endsWith("carbondata");
       }
     });
@@ -1020,7 +1022,8 @@ public class CarbonReaderTest extends TestCase {
     }
 
     File[] dataFiles2 = new File(path).listFiles(new FilenameFilter() {
-      @Override public boolean accept(File dir, String name) {
+      @Override
+      public boolean accept(File dir, String name) {
         return name.endsWith("carbondata");
       }
     });
@@ -1131,7 +1134,8 @@ public class CarbonReaderTest extends TestCase {
     }
 
     File[] dataFiles1 = new File(path).listFiles(new FilenameFilter() {
-      @Override public boolean accept(File dir, String name) {
+      @Override
+      public boolean accept(File dir, String name) {
         return name.endsWith("carbondata");
       }
     });
@@ -1139,7 +1143,8 @@ public class CarbonReaderTest extends TestCase {
     assertTrue(versionDetails.contains("SDK_1.0.0 in version: "));
 
     File[] dataFiles2 = new File(path).listFiles(new FilenameFilter() {
-      @Override public boolean accept(File dir, String name) {
+      @Override
+      public boolean accept(File dir, String name) {
         return name.endsWith("carbonindex");
       }
     });
@@ -1357,26 +1362,26 @@ public class CarbonReaderTest extends TestCase {
     FileUtils.deleteDirectory(new File(path));
 
     String mySchema =
-        "{ "+
-            "  \"name\": \"address\", "+
-            "  \"type\": \"record\", "+
-            "  \"fields\": [ "+
-            "    { "+
-            "      \"name\": \"name\", "+
-            "      \"type\": \"string\" "+
-            "    }, "+
-            "    { "+
-            "      \"name\": \"age\", "+
-            "      \"type\": \"int\" "+
-            "    }, "+
-            "    { "+
-            "      \"name\": \"mapRecord\", "+
-            "      \"type\": { "+
-            "        \"type\": \"map\", "+
-            "        \"values\": \"string\" "+
-            "      } "+
-            "    } "+
-            "  ] "+
+        "{ " +
+            "  \"name\": \"address\", " +
+            "  \"type\": \"record\", " +
+            "  \"fields\": [ " +
+            "    { " +
+            "      \"name\": \"name\", " +
+            "      \"type\": \"string\" " +
+            "    }, " +
+            "    { " +
+            "      \"name\": \"age\", " +
+            "      \"type\": \"int\" " +
+            "    }, " +
+            "    { " +
+            "      \"name\": \"mapRecord\", " +
+            "      \"type\": { " +
+            "        \"type\": \"map\", " +
+            "        \"values\": \"string\" " +
+            "      } " +
+            "    } " +
+            "  ] " +
             "} ";
 
     String json =
@@ -1400,8 +1405,8 @@ public class CarbonReaderTest extends TestCase {
     String name = "bob";
     int age = 10;
     Object[] mapKeValue = new Object[2];
-    mapKeValue[0] = new Object[] { "city", "street" };
-    mapKeValue[1] = new Object[] { "bangalore", "k-lane" };
+    mapKeValue[0] = new Object[]{"city", "street"};
+    mapKeValue[1] = new Object[]{"bangalore", "k-lane"};
     int i = 0;
     while (reader.hasNext()) {
       Object[] row = (Object[]) reader.readNextRow();
@@ -1416,9 +1421,9 @@ public class CarbonReaderTest extends TestCase {
 
   @Test
   public void testReadWithFilterOfnonTransactionalwithsubfolders() throws IOException, InterruptedException {
-    String path1 = "./testWriteFiles/1/"+System.nanoTime();
-    String path2 = "./testWriteFiles/2/"+System.nanoTime();
-    String path3 = "./testWriteFiles/3/"+System.nanoTime();
+    String path1 = "./testWriteFiles/1/" + System.nanoTime();
+    String path2 = "./testWriteFiles/2/" + System.nanoTime();
+    String path3 = "./testWriteFiles/3/" + System.nanoTime();
     FileUtils.deleteDirectory(new File("./testWriteFiles"));
 
     Field[] fields = new Field[2];
@@ -1543,7 +1548,7 @@ public class CarbonReaderTest extends TestCase {
     }
   }
 
-   @Test
+  @Test
   public void testReadNextRowWithRowUtil() {
     String path = "./carbondata";
     try {
@@ -1661,7 +1666,7 @@ public class CarbonReaderTest extends TestCase {
         Assert.fail(e.getMessage());
       }
     }
-   }
+  }
 
   @Test
   public void testReadNextRowWithProjectionAndRowUtil() {
@@ -1737,7 +1742,7 @@ public class CarbonReaderTest extends TestCase {
         assertEquals(RowUtil.getFloat(data, 11), (float) 1.23);
         i++;
       }
-      assert  (i == 10);
+      assert (i == 10);
       reader.close();
     } catch (Throwable e) {
       e.printStackTrace();
@@ -1821,7 +1826,7 @@ public class CarbonReaderTest extends TestCase {
         assertEquals(RowUtil.getFloat(data, 11), new Float("1.23"));
         i++;
       }
-      assert(i==10);
+      assert (i == 10);
       reader.close();
     } catch (Throwable e) {
       e.printStackTrace();
@@ -2076,7 +2081,7 @@ public class CarbonReaderTest extends TestCase {
         .addProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION, "FAIL");
 
     String path = "./testSdkWriteWhenArrayOfStringIsEmpty";
-    String[] rec = { "aaa", "bbb", "aaa@cdf.com", "", "", "mmm", "" };
+    String[] rec = {"aaa", "bbb", "aaa@cdf.com", "", "", "mmm", ""};
     Field[] fields = new Field[7];
     fields[0] = new Field("stringField", DataTypes.STRING);
     fields[1] = new Field("varcharField", DataTypes.VARCHAR);
@@ -2320,4 +2325,97 @@ public class CarbonReaderTest extends TestCase {
       FileUtils.deleteDirectory(new File(path));
     }
   }
+
+  @Test
+  public void testWriteWithDifferentDataType() {
+    String path = "./carbondata";
+    try {
+      FileUtils.deleteDirectory(new File(path));
+
+      Field[] fields = new Field[13];
+      fields[0] = new Field("stringField", DataTypes.STRING);
+      fields[1] = new Field("shortField", DataTypes.SHORT);
+      fields[2] = new Field("intField", DataTypes.INT);
+      fields[3] = new Field("longField", DataTypes.LONG);
+      fields[4] = new Field("doubleField", DataTypes.DOUBLE);
+      fields[5] = new Field("boolField", DataTypes.BOOLEAN);
+      fields[6] = new Field("dateField", DataTypes.DATE);
+      fields[7] = new Field("timeField", DataTypes.TIMESTAMP);
+      fields[8] = new Field("decimalField", DataTypes.createDecimalType(8, 2));
+      fields[9] = new Field("varcharField", DataTypes.VARCHAR);
+      fields[10] = new Field("arrayField", DataTypes.createArrayType(DataTypes.STRING));
+      fields[11] = new Field("floatField", DataTypes.FLOAT);
+      fields[12] = new Field("binaryField", DataTypes.BINARY);
+      Map<String, String> map = new HashMap<>();
+      map.put("complex_delimiter_level_1", "#");
+      CarbonWriter writer = CarbonWriter.builder()
+          .outputPath(path)
+          .withLoadOptions(map)
+          .withCsvInput(new Schema(fields))
+          .writtenBy("CarbonReaderTest")
+          .build();
+      byte[] value = "Binary".getBytes();
+      for (int i = 0; i < 10; i++) {
+        Object[] row2 = new Object[]{
+            "robot" + (i % 10),
+            i % 10000,
+            i,
+            (Long.MAX_VALUE - i),
+            ((double) i / 2),
+            (true),
+            "2019-03-02",
+            "2019-02-12 03:03:34",
+            12.345,
+            "varchar",
+            "Hello#World#From#Carbon",
+            1.23,
+            value
+        };
+        writer.write(row2);
+      }
+      writer.close();
+
+      // Read data
+      CarbonReader reader = CarbonReader
+          .builder(path, "_temp")
+          .withRowRecordReader()
+          .build();
+
+      int i = 0;
+      while (reader.hasNext()) {
+        Object[] data = (Object[]) reader.readNextRow();
+
+        assert (RowUtil.getString(data, 0).equals("robot" + i));
+        assertEquals(RowUtil.getInt(data, 1), 17957);
+        Assert.assertEquals(new String(value), new String(RowUtil.getBinary(data, 3)));
+        assert (RowUtil.getVarchar(data, 4).equals("varchar"));
+        Object[] arr = RowUtil.getArray(data, 5);
+        assert (arr[0].equals("Hello"));
+        assert (arr[1].equals("World"));
+        assert (arr[2].equals("From"));
+        assert (arr[3].equals("Carbon"));
+        assertEquals(RowUtil.getShort(data, 6), i);
+        assertEquals(RowUtil.getInt(data, 7), i);
+        assertEquals(RowUtil.getLong(data, 8), Long.MAX_VALUE - i);
+        assertEquals(RowUtil.getDouble(data, 9), ((double) i) / 2);
+        assert (RowUtil.getBoolean(data, 10));
+        assert (RowUtil.getDecimal(data, 11).equals("12.35"));
+        assertEquals(RowUtil.getFloat(data, 12), (float) 1.23);
+
+        i++;
+      }
+      assert (i == 10);
+      reader.close();
+    } catch (Throwable e) {
+      e.printStackTrace();
+      Assert.fail(e.getMessage());
+    } finally {
+      try {
+        FileUtils.deleteDirectory(new File(path));
+      } catch (IOException e) {
+        e.printStackTrace();
+        Assert.fail(e.getMessage());
+      }
+    }
+  }
 }
diff --git a/store/sdk/src/test/java/org/apache/carbondata/sdk/file/ImageTest.java b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/ImageTest.java
new file mode 100644
index 0000000..e69a981
--- /dev/null
+++ b/store/sdk/src/test/java/org/apache/carbondata/sdk/file/ImageTest.java
@@ -0,0 +1,818 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.sdk.file;
+
+import junit.framework.TestCase;
+
+import org.apache.carbondata.common.exceptions.sql.InvalidLoadOptionException;
+import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.core.scan.expression.ColumnExpression;
+import org.apache.carbondata.core.scan.expression.LiteralExpression;
+import org.apache.carbondata.core.scan.expression.conditional.EqualToExpression;
+import org.apache.carbondata.core.util.CarbonProperties;
+import org.apache.carbondata.util.BinaryUtil;
+
+import org.apache.commons.codec.DecoderException;
+import org.apache.commons.codec.binary.Hex;
+import org.apache.commons.io.FileUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+import javax.imageio.ImageIO;
+import javax.imageio.ImageReadParam;
+import javax.imageio.ImageReader;
+import javax.imageio.ImageTypeSpecifier;
+import javax.imageio.stream.FileImageInputStream;
+import javax.imageio.stream.ImageInputStream;
+import java.awt.color.ColorSpace;
+import java.awt.image.BufferedImage;
+import java.io.*;
+import java.util.Iterator;
+import java.util.List;
+
+import static org.apache.carbondata.sdk.file.utils.SDKUtil.listFiles;
+
+public class ImageTest extends TestCase {
+
+  @Test
+  public void testWriteWithByteArrayDataType() throws IOException, InvalidLoadOptionException, InterruptedException {
+    String imagePath = "./src/test/resources/image/carbondatalogo.jpg";
+    int num = 1;
+    int rows = 10;
+    String path = "./target/binary";
+    try {
+      FileUtils.deleteDirectory(new File(path));
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    Field[] fields = new Field[5];
+    fields[0] = new Field("name", DataTypes.STRING);
+    fields[1] = new Field("age", DataTypes.INT);
+    fields[2] = new Field("image1", DataTypes.BINARY);
+    fields[3] = new Field("image2", DataTypes.BINARY);
+    fields[4] = new Field("image3", DataTypes.BINARY);
+
+    byte[] originBinary = null;
+
+    // read and write image data
+    for (int j = 0; j < num; j++) {
+      CarbonWriter writer = CarbonWriter
+          .builder()
+          .outputPath(path)
+          .withCsvInput(new Schema(fields))
+          .writtenBy("SDKS3Example")
+          .withPageSizeInMb(1)
+          .build();
+
+      for (int i = 0; i < rows; i++) {
+        // read image file into a byte array
+        BufferedInputStream bis = new BufferedInputStream(new FileInputStream(imagePath));
+        originBinary = new byte[bis.available()];
+        while ((bis.read(originBinary)) != -1) {
+        }
+        // write data
+        writer.write(new Object[]{"robot" + (i % 10), i, originBinary, originBinary, originBinary});
+        bis.close();
+      }
+      writer.close();
+    }
+
+    CarbonReader reader = CarbonReader
+        .builder(path, "_temp")
+        .build();
+
+    System.out.println("\nData:");
+    int i = 0;
+    while (i < 20 && reader.hasNext()) {
+      Object[] row = (Object[]) reader.readNextRow();
+
+      byte[] outputBinary = (byte[]) row[1];
+      byte[] outputBinary2 = (byte[]) row[2];
+      byte[] outputBinary3 = (byte[]) row[3];
+      System.out.println(row[0] + " " + row[1] + " image1 size:" + outputBinary.length
+          + " image2 size:" + outputBinary2.length + " image3 size:" + outputBinary3.length);
+
+      for (int k = 0; k < 3; k++) {
+
+        byte[] originBinaryTemp = (byte[]) row[1 + k];
+        // validate output binary data and origin binary data
+        assert (originBinaryTemp.length == outputBinary.length);
+        for (int j = 0; j < originBinaryTemp.length; j++) {
+          assert (originBinaryTemp[j] == outputBinary[j]);
+        }
+
+        // save image, user can compare the save image and original image
+        String destString = "./target/binary/image" + k + "_" + i + ".jpg";
+        BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(destString));
+        bos.write(originBinaryTemp);
+        bos.close();
+      }
+      i++;
+    }
+    System.out.println("\nFinished");
+    reader.close();
+  }
+
+  @Test
+  public void testWriteBinaryWithSort() {
+    int num = 1;
+    String path = "./target/binary";
+    try {
+      FileUtils.deleteDirectory(new File(path));
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    Field[] fields = new Field[5];
+    fields[0] = new Field("name", DataTypes.STRING);
+    fields[1] = new Field("age", DataTypes.INT);
+    fields[2] = new Field("image1", DataTypes.BINARY);
+    fields[3] = new Field("image2", DataTypes.BINARY);
+    fields[4] = new Field("image3", DataTypes.BINARY);
+
+    // read and write image data
+    for (int j = 0; j < num; j++) {
+      try {
+        CarbonWriter
+            .builder()
+            .outputPath(path)
+            .withCsvInput(new Schema(fields))
+            .writtenBy("SDKS3Example")
+            .withPageSizeInMb(1)
+            .withTableProperty("sort_columns", "image1")
+            .build();
+        assert (false);
+      } catch (Exception e) {
+        assert (e.getMessage().contains("sort columns not supported for array, struct, map, double, float, decimal, varchar, binary"));
+      }
+    }
+  }
+
+  @Test
+  public void testWriteBinaryWithLong_string_columns() {
+    int num = 1;
+    String path = "./target/binary";
+    try {
+      FileUtils.deleteDirectory(new File(path));
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    Field[] fields = new Field[5];
+    fields[0] = new Field("name", DataTypes.STRING);
+    fields[1] = new Field("age", DataTypes.INT);
+    fields[2] = new Field("image1", DataTypes.BINARY);
+    fields[3] = new Field("image2", DataTypes.BINARY);
+    fields[4] = new Field("image3", DataTypes.BINARY);
+
+    // read and write image data
+    for (int j = 0; j < num; j++) {
+      try {
+        CarbonWriter
+            .builder()
+            .outputPath(path)
+            .withCsvInput(new Schema(fields))
+            .writtenBy("SDKS3Example")
+            .withPageSizeInMb(1)
+            .withTableProperty("long_string_columns", "image1")
+            .build();
+        assert (false);
+      } catch (Exception e) {
+        assert (e.getMessage().contains("long string column : image1 is not supported for data type: BINARY"));
+      }
+    }
+  }
+
+  @Test
+  public void testWriteBinaryWithInverted_index() {
+    int num = 1;
+    String path = "./target/binary";
+    try {
+      FileUtils.deleteDirectory(new File(path));
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    Field[] fields = new Field[5];
+    fields[0] = new Field("name", DataTypes.STRING);
+    fields[1] = new Field("age", DataTypes.INT);
+    fields[2] = new Field("image1", DataTypes.BINARY);
+    fields[3] = new Field("image2", DataTypes.BINARY);
+    fields[4] = new Field("image3", DataTypes.BINARY);
+
+    // read and write image data
+    for (int j = 0; j < num; j++) {
+      try {
+        CarbonWriter
+            .builder()
+            .outputPath(path)
+            .withCsvInput(new Schema(fields))
+            .writtenBy("SDKS3Example")
+            .withPageSizeInMb(1)
+            .withTableProperty("inverted_index", "image1")
+            .build();
+        // TODO: should throw exception
+        //        assert(false);
+      } catch (Exception e) {
+        System.out.println(e.getMessage());
+        assert (e.getMessage().contains("INVERTED_INDEX column: image1 should be present in SORT_COLUMNS"));
+      }
+    }
+  }
+
+  @Test
+  public void testWriteWithNull() throws IOException, InvalidLoadOptionException {
+    String imagePath = "./src/test/resources/image/carbondatalogo.jpg";
+    int num = 1;
+    int rows = 10;
+    String path = "./target/binary";
+    try {
+      FileUtils.deleteDirectory(new File(path));
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    Field[] fields = new Field[5];
+    fields[0] = new Field("name", DataTypes.STRING);
+    fields[1] = new Field("age", DataTypes.INT);
+    fields[2] = new Field("image1", DataTypes.BINARY);
+    fields[3] = new Field("image2", DataTypes.BINARY);
+    fields[4] = new Field("image3", DataTypes.BINARY);
+
+    byte[] originBinary = null;
+
+    // read and write image data
+    for (int j = 0; j < num; j++) {
+      CarbonWriter writer = CarbonWriter
+          .builder()
+          .outputPath(path)
+          .withCsvInput(new Schema(fields))
+          .writtenBy("SDKS3Example")
+          .withPageSizeInMb(1)
+          .withLoadOption("bad_records_action", "force")
+          .build();
+
+      for (int i = 0; i < rows; i++) {
+        // read image file into a byte array
+        BufferedInputStream bis = new BufferedInputStream(new FileInputStream(imagePath));
+        originBinary = new byte[bis.available()];
+        while ((bis.read(originBinary)) != -1) {
+        }
+        // write data
+        writer.write(new Object[]{"robot" + (i % 10), i, originBinary, originBinary, 1});
+        bis.close();
+      }
+      try {
+        writer.close();
+      } catch (Exception e) {
+        assert (e.getMessage().contains("Binary only support String and byte[] data type"));
+      }
+    }
+
+  }
+
+  @Test
+  public void testBinaryWithOrWithoutFilter() throws IOException, InvalidLoadOptionException, InterruptedException, DecoderException {
+    String imagePath = "./src/test/resources/image/carbondatalogo.jpg";
+    int num = 1;
+    int rows = 1;
+    String path = "./target/binary";
+    try {
+      FileUtils.deleteDirectory(new File(path));
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    Field[] fields = new Field[3];
+    fields[0] = new Field("name", DataTypes.STRING);
+    fields[1] = new Field("age", DataTypes.INT);
+    fields[2] = new Field("image", DataTypes.BINARY);
+
+    byte[] originBinary = null;
+
+    // read and write image data
+    for (int j = 0; j < num; j++) {
+      CarbonWriter writer = CarbonWriter
+          .builder()
+          .outputPath(path)
+          .withCsvInput(new Schema(fields))
+          .writtenBy("SDKS3Example")
+          .withPageSizeInMb(1)
+          .build();
+
+      for (int i = 0; i < rows; i++) {
+        // read image and encode to Hex
+        BufferedInputStream bis = new BufferedInputStream(new FileInputStream(imagePath));
+        char[] hexValue = null;
+        originBinary = new byte[bis.available()];
+        while ((bis.read(originBinary)) != -1) {
+          hexValue = Hex.encodeHex(originBinary);
+        }
+        // write data
+        writer.write(new String[]{"robot" + (i % 10), String.valueOf(i), String.valueOf(hexValue)});
+        bis.close();
+      }
+      writer.close();
+    }
+
+    // Read data with filter
+    EqualToExpression equalToExpression = new EqualToExpression(
+        new ColumnExpression("name", DataTypes.STRING),
+        new LiteralExpression("robot0", DataTypes.STRING));
+
+    CarbonReader reader = CarbonReader
+        .builder(path, "_temp")
+        .filter(equalToExpression)
+        .build();
+
+    System.out.println("\nData:");
+    int i = 0;
+    while (i < 20 && reader.hasNext()) {
+      Object[] row = (Object[]) reader.readNextRow();
+
+      byte[] outputBinary = Hex.decodeHex(new String((byte[]) row[1]).toCharArray());
+      System.out.println(row[0] + " " + row[2] + " image size:" + outputBinary.length);
+
+      // validate output binary data and origin binary data
+      assert (originBinary.length == outputBinary.length);
+      for (int j = 0; j < originBinary.length; j++) {
+        assert (originBinary[j] == outputBinary[j]);
+      }
+      String value = new String(outputBinary);
+      Assert.assertTrue(value.startsWith("�PNG"));
+      // save image, user can compare the save image and original image
+      String destString = "./target/binary/image" + i + ".jpg";
+      BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(destString));
+      bos.write(outputBinary);
+      bos.close();
+      i++;
+    }
+    System.out.println("\nFinished");
+    reader.close();
+
+    CarbonReader reader2 = CarbonReader
+        .builder(path, "_temp")
+        .build();
+
+    System.out.println("\nData:");
+    i = 0;
+    while (i < 20 && reader2.hasNext()) {
+      Object[] row = (Object[]) reader2.readNextRow();
+
+      byte[] outputBinary = Hex.decodeHex(new String((byte[]) row[1]).toCharArray());
+      System.out.println(row[0] + " " + row[2] + " image size:" + outputBinary.length);
+
+      // validate output binary data and origin binary data
+      assert (originBinary.length == outputBinary.length);
+      for (int j = 0; j < originBinary.length; j++) {
+        assert (originBinary[j] == outputBinary[j]);
+      }
+
+      // save image, user can compare the save image and original image
+      String destString = "./target/binary/image" + i + ".jpg";
+      BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(destString));
+      bos.write(outputBinary);
+      bos.close();
+      i++;
+    }
+    reader2.close();
+    try {
+      FileUtils.deleteDirectory(new File(path));
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    System.out.println("\nFinished");
+  }
+
+  @Test
+  public void testBinaryWithManyImages() throws IOException, InvalidLoadOptionException, InterruptedException {
+    int num = 1;
+    String path = "./target/flowers";
+    Field[] fields = new Field[5];
+    fields[0] = new Field("binaryId", DataTypes.INT);
+    fields[1] = new Field("binaryName", DataTypes.STRING);
+    fields[2] = new Field("binary", "Binary");
+    fields[3] = new Field("labelName", DataTypes.STRING);
+    fields[4] = new Field("labelContent", DataTypes.STRING);
+
+    String imageFolder = "./src/test/resources/image/flowers";
+
+    byte[] originBinary = null;
+
+    // read and write image data
+    for (int j = 0; j < num; j++) {
+      CarbonWriter writer = CarbonWriter
+          .builder()
+          .outputPath(path)
+          .withCsvInput(new Schema(fields))
+          .writtenBy("SDKS3Example")
+          .withPageSizeInMb(1)
+          .build();
+      File file = new File(imageFolder);
+      File[] files = file.listFiles(new FilenameFilter() {
+        @Override
+        public boolean accept(File dir, String name) {
+          if (name == null) {
+            return false;
+          }
+          return name.endsWith(".jpg");
+        }
+      });
+
+      if (null != files) {
+        for (int i = 0; i < files.length; i++) {
+          // read image and encode to Hex
+          BufferedInputStream bis = new BufferedInputStream(new FileInputStream(files[i]));
+          char[] hexValue = null;
+          originBinary = new byte[bis.available()];
+          while ((bis.read(originBinary)) != -1) {
+            hexValue = Hex.encodeHex(originBinary);
+          }
+
+          String txtFileName = files[i].getCanonicalPath().split(".jpg")[0] + ".txt";
+          BufferedInputStream txtBis = new BufferedInputStream(new FileInputStream(txtFileName));
+          String txtValue = null;
+          byte[] txtBinary = null;
+          txtBinary = new byte[txtBis.available()];
+          while ((txtBis.read(txtBinary)) != -1) {
+            txtValue = new String(txtBinary, "UTF-8");
+          }
+          // write data
+          System.out.println(files[i].getCanonicalPath());
+          writer.write(new String[]{String.valueOf(i), files[i].getCanonicalPath(), String.valueOf(hexValue),
+              txtFileName, txtValue});
+          bis.close();
+        }
+      }
+      writer.close();
+    }
+
+    CarbonReader reader = CarbonReader
+        .builder(path)
+        .build();
+
+    System.out.println("\nData:");
+    int i = 0;
+    while (i < 20 && reader.hasNext()) {
+      Object[] row = (Object[]) reader.readNextRow();
+
+      byte[] outputBinary = (byte[]) row[1];
+      System.out.println(row[0] + " " + row[2] + " image size:" + outputBinary.length);
+
+      // save image, user can compare the save image and original image
+      String destString = "./target/flowers/image" + i + ".jpg";
+      BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(destString));
+      bos.write(outputBinary);
+      bos.close();
+      i++;
+    }
+    System.out.println("\nFinished");
+    reader.close();
+  }
+
+  public void testWriteTwoImageColumn() throws Exception {
+    String imagePath = "./src/test/resources/image/vocForSegmentationClass";
+    String path = "./target/vocForSegmentationClass";
+    int num = 1;
+    Field[] fields = new Field[4];
+    fields[0] = new Field("name", DataTypes.STRING);
+    fields[1] = new Field("age", DataTypes.INT);
+    fields[2] = new Field("rawImage", DataTypes.BINARY);
+    fields[3] = new Field("segmentationClass", DataTypes.BINARY);
+
+    byte[] originBinary = null;
+    byte[] originBinary2 = null;
+
+    Object[] files = listFiles(imagePath, ".jpg").toArray();
+    // read and write image data
+    for (int j = 0; j < num; j++) {
+      CarbonWriter writer = CarbonWriter
+          .builder()
+          .outputPath(path)
+          .withCsvInput(new Schema(fields))
+          .writtenBy("SDKS3Example")
+          .withPageSizeInMb(1)
+          .build();
+
+      for (int i = 0; i < files.length; i++) {
+        // read image file into a byte array
+        String filePath = (String) files[i];
+        BufferedInputStream bis = new BufferedInputStream(new FileInputStream(filePath));
+        originBinary = new byte[bis.available()];
+        while ((bis.read(originBinary)) != -1) {
+        }
+
+        BufferedInputStream bis2 = new BufferedInputStream(new FileInputStream(filePath.replace(".jpg", ".png")));
+        originBinary2 = new byte[bis2.available()];
+        while ((bis2.read(originBinary2)) != -1) {
+        }
+
+        // write data
+        writer.write(new Object[]{"robot" + (i % 10), i, originBinary, originBinary2});
+        bis.close();
+        bis2.close();
+      }
+      writer.close();
+    }
+
+    CarbonReader reader = CarbonReader
+        .builder(path, "_temp")
+        .build();
+
+    System.out.println("\nData:");
+    int i = 0;
+    while (i < 20 && reader.hasNext()) {
+      Object[] row = (Object[]) reader.readNextRow();
+
+      byte[] outputBinary = (byte[]) row[1];
+      byte[] outputBinary2 = (byte[]) row[2];
+      System.out.println(row[0] + " " + row[3] + " image1 size:" + outputBinary.length
+          + " image2 size:" + outputBinary2.length);
+
+      for (int k = 0; k < 2; k++) {
+
+        byte[] originBinaryTemp = (byte[]) row[1 + k];
+
+        // save image, user can compare the save image and original image
+        String destString = null;
+        if (k == 0) {
+          destString = "./target/vocForSegmentationClass/image" + k + "_" + i + ".jpg";
+        } else {
+          destString = "./target/vocForSegmentationClass/image" + k + "_" + i + ".png";
+        }
+        BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(destString));
+        bos.write(originBinaryTemp);
+        bos.close();
+      }
+      i++;
+    }
+    System.out.println("\nFinished");
+    reader.close();
+  }
+
+  @Test
+  public void testWriteWithByteArrayDataTypeAndManyImagesTxt()
+      throws Exception {
+    long startWrite = System.nanoTime();
+    String sourceImageFolder = "./src/test/resources/image/flowers";
+    String outputPath = "./target/flowers";
+    String preDestPath = "./target/flowers/image";
+    String sufAnnotation = ".txt";
+    BinaryUtil.binaryToCarbon(sourceImageFolder, outputPath, sufAnnotation, ".jpg");
+    BinaryUtil.carbonToBinary(outputPath, preDestPath);
+    long endWrite = System.nanoTime();
+    System.out.println("write time is " + (endWrite - startWrite) / 1000000000.0 + "s");
+  }
+
+  @Test
+  public void testWriteWithByteArrayDataTypeAndManyImagesXml()
+      throws Exception {
+    long startWrite = System.nanoTime();
+    String sourceImageFolder = "./src/test/resources/image/voc";
+
+    String outputPath = "./target/voc";
+    String preDestPath = "./target/voc/image";
+    String sufAnnotation = ".xml";
+    BinaryUtil.binaryToCarbon(sourceImageFolder, outputPath, sufAnnotation, ".jpg");
+    BinaryUtil.carbonToBinary(outputPath, preDestPath);
+    long endWrite = System.nanoTime();
+    System.out.println("write time is " + (endWrite - startWrite) / 1000000000.0 + "s");
+    ReadPerformance();
+  }
+
+  public void ReadPerformance() throws Exception {
+    CarbonProperties.getInstance()
+        .addProperty(CarbonCommonConstants.UNSAFE_WORKING_MEMORY_IN_MB, "2048");
+
+    long start = System.nanoTime();
+    int i = 0;
+    String path = "./target/voc";
+    CarbonReader reader2 = CarbonReader
+        .builder(path)
+        .withBatch(1000)
+        .build();
+
+    System.out.println("\nData2:");
+    i = 0;
+    while (reader2.hasNext()) {
+      Object[] rows = reader2.readNextBatchRow();
+
+      for (int j = 0; j < rows.length; j++) {
+        Object[] row = (Object[]) rows[j];
+        i++;
+        if (0 == i % 1000) {
+          System.out.println(i);
+        }
+        for (int k = 0; k < row.length; k++) {
+          Object column = row[k];
+        }
+      }
+    }
+
+    System.out.println(i);
+    reader2.close();
+    long end = System.nanoTime();
+    System.out.println("all time is " + (end - start) / 1000000000.0);
+    System.out.println("\nFinished");
+  }
+
+  @Test
+  public void testWriteWithByteArrayDataTypeAndManyImagesTxt3()
+      throws Exception {
+    String sourceImageFolder = "./src/test/resources/image/flowers";
+    String outputPath = "./target/flowers2";
+    String preDestPath = "./target/flowers2/image";
+    String sufAnnotation = ".txt";
+    try {
+      FileUtils.deleteDirectory(new File(outputPath));
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+    binaryToCarbonWithHWD(sourceImageFolder, outputPath, preDestPath, sufAnnotation, ".jpg", 2000);
+    try {
+      FileUtils.deleteDirectory(new File(outputPath));
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+  }
+
+  @Test
+  public void testNumberOfFiles() throws Exception {
+    String sourceImageFolder = "./src/test/resources/image/flowers";
+    List result = listFiles(sourceImageFolder, ".jpg");
+    assertEquals(3, result.size());
+  }
+
+  public void binaryToCarbonWithHWD(String sourceImageFolder, String outputPath, String preDestPath,
+                                    String sufAnnotation, final String sufImage, int numToWrite)
+      throws Exception {
+    int num = 1;
+    Field[] fields = new Field[7];
+    fields[0] = new Field("height", DataTypes.INT);
+    fields[1] = new Field("width", DataTypes.INT);
+    fields[2] = new Field("depth", DataTypes.INT);
+    fields[3] = new Field("binaryName", DataTypes.STRING);
+    fields[4] = new Field("binary", DataTypes.BINARY);
+    fields[5] = new Field("labelName", DataTypes.STRING);
+    fields[6] = new Field("labelContent", DataTypes.STRING);
+
+    byte[] originBinary = null;
+
+    // read and write image data
+    for (int j = 0; j < num; j++) {
+
+      Object[] files = listFiles(sourceImageFolder, sufImage).toArray();
+
+      int index = 0;
+
+      if (null != files) {
+        CarbonWriter writer = CarbonWriter
+            .builder()
+            .outputPath(outputPath)
+            .withCsvInput(new Schema(fields))
+            .withBlockSize(256)
+            .writtenBy("SDKS3Example")
+            .withPageSizeInMb(1)
+            .build();
+
+        for (int i = 0; i < files.length; i++) {
+          if (0 == index % numToWrite) {
+            writer.close();
+            writer = CarbonWriter
+                .builder()
+                .outputPath(outputPath)
+                .withCsvInput(new Schema(fields))
+                .withBlockSize(256)
+                .writtenBy("SDKS3Example")
+                .withPageSizeInMb(1)
+                .build();
+          }
+          index++;
+
+          // read image file into a byte array
+          File file = new File((String) files[i]);
+          System.out.println(file.getCanonicalPath());
+          BufferedInputStream bis = new BufferedInputStream(new FileInputStream(file));
+          int depth = 0;
+          boolean isGray;
+          boolean hasAlpha;
+          BufferedImage bufferedImage = null;
+          try {
+            bufferedImage = ImageIO.read(file);
+            isGray = bufferedImage.getColorModel().getColorSpace().getType() == ColorSpace.TYPE_GRAY;
+            hasAlpha = bufferedImage.getColorModel().hasAlpha();
+
+            if (isGray) {
+              depth = 1;
+            } else if (hasAlpha) {
+              depth = 4;
+            } else {
+              depth = 3;
+            }
+
+          } catch (Exception e) {
+            e.printStackTrace();
+            System.out.println(i);
+            ImageInputStream stream = new FileImageInputStream(new File(file.getCanonicalPath()));
+            Iterator<ImageReader> iter = ImageIO.getImageReaders(stream);
+
+            Exception lastException = null;
+            while (iter.hasNext()) {
+              ImageReader reader = null;
+              try {
+                reader = (ImageReader) iter.next();
+                ImageReadParam param = reader.getDefaultReadParam();
+                reader.setInput(stream, true, true);
+                Iterator<ImageTypeSpecifier> imageTypes = reader.getImageTypes(0);
+
+                while (imageTypes.hasNext()) {
+                  ImageTypeSpecifier imageTypeSpecifier = imageTypes.next();
+                  System.out.println(imageTypeSpecifier.getColorModel().getColorSpace().getType());
+                  int bufferedImageType = imageTypeSpecifier.getBufferedImageType();
+                  if (bufferedImageType == BufferedImage.TYPE_BYTE_GRAY) {
+                    param.setDestinationType(imageTypeSpecifier);
+                    break;
+                  }
+                }
+                bufferedImage = reader.read(0, param);
+                isGray = bufferedImage.getColorModel().getColorSpace().getType() == ColorSpace.TYPE_GRAY;
+                hasAlpha = bufferedImage.getColorModel().hasAlpha();
+
+                if (isGray) {
+                  depth = 1;
+                } else if (hasAlpha) {
+                  depth = 4;
+                } else {
+                  depth = 3;
+                }
+                if (null != bufferedImage) break;
+              } catch (Exception e2) {
+                lastException = e2;
+              } finally {
+                if (null != reader) reader.dispose();
+              }
+            }
+            // if none of the readers could decode the image, rethrow the last exception
+            if (null == bufferedImage) {
+              if (null != lastException) {
+                throw lastException;
+              }
+            }
+          } finally {
+            originBinary = new byte[bis.available()];
+            while ((bis.read(originBinary)) != -1) {
+            }
+
+            String txtFileName = file.getCanonicalPath().split(sufImage)[0] + sufAnnotation;
+            BufferedInputStream txtBis = new BufferedInputStream(new FileInputStream(txtFileName));
+            String txtValue = null;
+            byte[] txtBinary = null;
+            txtBinary = new byte[txtBis.available()];
+            while ((txtBis.read(txtBinary)) != -1) {
+              txtValue = new String(txtBinary, "UTF-8");
+            }
+            // write data
+            writer.write(new Object[]{bufferedImage.getHeight(), bufferedImage.getWidth(), depth, file.getCanonicalPath(), originBinary,
+                txtFileName, txtValue.replace("\n", "")});
+            bis.close();
+          }
+        }
+        writer.close();
+      }
+    }
+
+    CarbonReader reader = CarbonReader
+        .builder(outputPath)
+        .build();
+
+    System.out.println("\nData:");
+    int i = 0;
+    while (i < 20 && reader.hasNext()) {
+      Object[] row = (Object[]) reader.readNextRow();
+
+      byte[] outputBinary = (byte[]) row[1];
+      System.out.println(row[2] + " " + row[3] + " " + row[4] + " " + row[5] + " image size:" + outputBinary.length + " " + row[0]);
+
+      // save the image so the user can compare the saved image with the original
+      String destString = preDestPath + i + sufImage;
+      BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(destString));
+      bos.write(outputBinary);
+      bos.close();
+      i++;
+    }
+    System.out.println("\nFinished");
+    reader.close();
+  }
+
+}
diff --git a/store/sdk/src/test/java/org/apache/carbondata/util/BinaryUtil.java b/store/sdk/src/test/java/org/apache/carbondata/util/BinaryUtil.java
new file mode 100644
index 0000000..073f704
--- /dev/null
+++ b/store/sdk/src/test/java/org/apache/carbondata/util/BinaryUtil.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.util;
+
+import java.io.*;
+
+import org.apache.carbondata.core.metadata.datatype.DataTypes;
+import org.apache.carbondata.sdk.file.CarbonReader;
+import org.apache.carbondata.sdk.file.CarbonWriter;
+import org.apache.carbondata.sdk.file.Field;
+import org.apache.carbondata.sdk.file.Schema;
+
+import static org.apache.carbondata.sdk.file.utils.SDKUtil.listFiles;
+
+public class BinaryUtil {
+  public static void binaryToCarbon(String sourceImageFolder, String outputPath,
+                                    String sufAnnotation, final String sufImage) throws Exception {
+    Field[] fields = new Field[5];
+    fields[0] = new Field("binaryId", DataTypes.INT);
+    fields[1] = new Field("binaryName", DataTypes.STRING);
+    fields[2] = new Field("binary", DataTypes.BINARY);
+    fields[3] = new Field("labelName", DataTypes.STRING);
+    fields[4] = new Field("labelContent", DataTypes.STRING);
+    CarbonWriter writer = CarbonWriter
+        .builder()
+        .outputPath(outputPath)
+        .withCsvInput(new Schema(fields))
+        .withBlockSize(256)
+        .writtenBy("binaryExample")
+        .withPageSizeInMb(1)
+        .build();
+    binaryToCarbon(sourceImageFolder, writer, sufAnnotation, sufImage);
+  }
+
+  public static boolean binaryToCarbon(String sourceImageFolder, CarbonWriter writer,
+      String sufAnnotation, final String sufImage) throws Exception {
+    int num = 1;
+
+    byte[] originBinary = null;
+
+    // read and write image data
+    for (int j = 0; j < num; j++) {
+
+      Object[] files = listFiles(sourceImageFolder, sufImage).toArray();
+
+      if (null != files) {
+        for (int i = 0; i < files.length; i++) {
+          // read the raw image bytes
+          BufferedInputStream bis = new BufferedInputStream(
+              new FileInputStream(new File((String) files[i])));
+          originBinary = new byte[bis.available()];
+          while ((bis.read(originBinary)) != -1) {
+          }
+
+          String labelFileName = ((String) files[i]).split(sufImage)[0] + sufAnnotation;
+          BufferedInputStream txtBis = new BufferedInputStream(new FileInputStream(labelFileName));
+          String labelValue = null;
+          byte[] labelBinary = null;
+          labelBinary = new byte[txtBis.available()];
+          while ((txtBis.read(labelBinary)) != -1) {
+            labelValue = new String(labelBinary, "UTF-8");
+          }
+          // write data
+          writer.write(new Object[]{i, (String) files[i], originBinary,
+              labelFileName, labelValue});
+          bis.close();
+          txtBis.close();
+        }
+      }
+      writer.close();
+    }
+    return true;
+  }
+
+  public static boolean carbonToBinary(String carbonPath, String outputPath)
+      throws IOException, InterruptedException {
+    CarbonReader reader = CarbonReader
+        .builder(carbonPath)
+        .build();
+    return carbonToBinary(reader, outputPath);
+  }
+
+  public static boolean carbonToBinary(CarbonReader reader, String outputPath)
+      throws IOException, InterruptedException {
+    System.out.println("\nData:");
+    int i = 0;
+    while (i < 20 && reader.hasNext()) {
+      Object[] row = (Object[]) reader.readNextRow();
+
+      byte[] outputBinary = (byte[]) row[1];
+      System.out.println(row[0] + " " + row[2] + " image size:" + outputBinary.length);
+
+      // save the image so the user can compare the saved image with the original
+      String originalPath = (String) row[0];
+      int index = originalPath.lastIndexOf("/");
+      File file = new File(outputPath);
+      if (!file.exists()) {
+        assert file.mkdir();
+      }
+      String destString = outputPath + originalPath.substring(index, originalPath.length());
+      BufferedOutputStream bos = new BufferedOutputStream(new FileOutputStream(destString));
+      bos.write(outputBinary);
+      bos.close();
+      i++;
+    }
+    System.out.println("number of reading: " + i);
+    System.out.println("\nFinished");
+    reader.close();
+    return true;
+  }
+}
diff --git a/store/sdk/src/test/resources/image/carbondatalogo.jpg b/store/sdk/src/test/resources/image/carbondatalogo.jpg
new file mode 100644
index 0000000..3469469
Binary files /dev/null and b/store/sdk/src/test/resources/image/carbondatalogo.jpg differ
diff --git a/store/sdk/src/test/resources/image/flowers/10686568196_b1915544a8.jpg b/store/sdk/src/test/resources/image/flowers/10686568196_b1915544a8.jpg
new file mode 100644
index 0000000..12937a0
Binary files /dev/null and b/store/sdk/src/test/resources/image/flowers/10686568196_b1915544a8.jpg differ
diff --git a/store/sdk/src/test/resources/image/flowers/10686568196_b1915544a8.txt b/store/sdk/src/test/resources/image/flowers/10686568196_b1915544a8.txt
new file mode 100644
index 0000000..12f7d78
--- /dev/null
+++ b/store/sdk/src/test/resources/image/flowers/10686568196_b1915544a8.txt
@@ -0,0 +1 @@
+tulips
\ No newline at end of file
diff --git a/store/sdk/src/test/resources/image/flowers/10712722853_5632165b04.jpg b/store/sdk/src/test/resources/image/flowers/10712722853_5632165b04.jpg
new file mode 100644
index 0000000..48591bf
Binary files /dev/null and b/store/sdk/src/test/resources/image/flowers/10712722853_5632165b04.jpg differ
diff --git a/store/sdk/src/test/resources/image/flowers/10712722853_5632165b04.txt b/store/sdk/src/test/resources/image/flowers/10712722853_5632165b04.txt
new file mode 100644
index 0000000..84bd766
--- /dev/null
+++ b/store/sdk/src/test/resources/image/flowers/10712722853_5632165b04.txt
@@ -0,0 +1 @@
+daisy
\ No newline at end of file
diff --git a/store/sdk/src/test/resources/image/flowers/subfolder/10841136265_af473efc60.jpg b/store/sdk/src/test/resources/image/flowers/subfolder/10841136265_af473efc60.jpg
new file mode 100644
index 0000000..0822034
Binary files /dev/null and b/store/sdk/src/test/resources/image/flowers/subfolder/10841136265_af473efc60.jpg differ
diff --git a/store/sdk/src/test/resources/image/flowers/subfolder/10841136265_af473efc60.txt b/store/sdk/src/test/resources/image/flowers/subfolder/10841136265_af473efc60.txt
new file mode 100644
index 0000000..84bd766
--- /dev/null
+++ b/store/sdk/src/test/resources/image/flowers/subfolder/10841136265_af473efc60.txt
@@ -0,0 +1 @@
+daisy
\ No newline at end of file
diff --git a/store/sdk/src/test/resources/image/voc/2007_000027.jpg b/store/sdk/src/test/resources/image/voc/2007_000027.jpg
new file mode 100755
index 0000000..fe9ba8c
Binary files /dev/null and b/store/sdk/src/test/resources/image/voc/2007_000027.jpg differ
diff --git a/store/sdk/src/test/resources/image/voc/2007_000027.xml b/store/sdk/src/test/resources/image/voc/2007_000027.xml
new file mode 100755
index 0000000..576da53
--- /dev/null
+++ b/store/sdk/src/test/resources/image/voc/2007_000027.xml
@@ -0,0 +1,63 @@
+<annotation>
+	<folder>VOC2012</folder>
+	<filename>2007_000027.jpg</filename>
+	<source>
+		<database>The VOC2007 Database</database>
+		<annotation>PASCAL VOC2007</annotation>
+		<image>flickr</image>
+	</source>
+	<size>
+		<width>486</width>
+		<height>500</height>
+		<depth>3</depth>
+	</size>
+	<segmented>0</segmented>
+	<object>
+		<name>person</name>
+		<pose>Unspecified</pose>
+		<truncated>0</truncated>
+		<difficult>0</difficult>
+		<bndbox>
+			<xmin>174</xmin>
+			<ymin>101</ymin>
+			<xmax>349</xmax>
+			<ymax>351</ymax>
+		</bndbox>
+		<part>
+			<name>head</name>
+			<bndbox>
+				<xmin>169</xmin>
+				<ymin>104</ymin>
+				<xmax>209</xmax>
+				<ymax>146</ymax>
+			</bndbox>
+		</part>
+		<part>
+			<name>hand</name>
+			<bndbox>
+				<xmin>278</xmin>
+				<ymin>210</ymin>
+				<xmax>297</xmax>
+				<ymax>233</ymax>
+			</bndbox>
+		</part>
+		<part>
+			<name>foot</name>
+			<bndbox>
+				<xmin>273</xmin>
+				<ymin>333</ymin>
+				<xmax>297</xmax>
+				<ymax>354</ymax>
+			</bndbox>
+		</part>
+		<part>
+			<name>foot</name>
+			<bndbox>
+				<xmin>319</xmin>
+				<ymin>307</ymin>
+				<xmax>340</xmax>
+				<ymax>326</ymax>
+			</bndbox>
+		</part>
+	</object>
+</annotation>
diff --git a/store/sdk/src/test/resources/image/voc/2007_000032.jpg b/store/sdk/src/test/resources/image/voc/2007_000032.jpg
new file mode 100755
index 0000000..b111b5a
Binary files /dev/null and b/store/sdk/src/test/resources/image/voc/2007_000032.jpg differ
diff --git a/store/sdk/src/test/resources/image/voc/2007_000032.xml b/store/sdk/src/test/resources/image/voc/2007_000032.xml
new file mode 100755
index 0000000..779abb6
--- /dev/null
+++ b/store/sdk/src/test/resources/image/voc/2007_000032.xml
@@ -0,0 +1,63 @@
+<annotation>
+	<folder>VOC2012</folder>
+	<filename>2007_000032.jpg</filename>
+	<source>
+		<database>The VOC2007 Database</database>
+		<annotation>PASCAL VOC2007</annotation>
+		<image>flickr</image>
+	</source>
+	<size>
+		<width>500</width>
+		<height>281</height>
+		<depth>3</depth>
+	</size>
+	<segmented>1</segmented>
+	<object>
+		<name>aeroplane</name>
+		<pose>Frontal</pose>
+		<truncated>0</truncated>
+		<difficult>0</difficult>
+		<bndbox>
+			<xmin>104</xmin>
+			<ymin>78</ymin>
+			<xmax>375</xmax>
+			<ymax>183</ymax>
+		</bndbox>
+	</object>
+	<object>
+		<name>aeroplane</name>
+		<pose>Left</pose>
+		<truncated>0</truncated>
+		<difficult>0</difficult>
+		<bndbox>
+			<xmin>133</xmin>
+			<ymin>88</ymin>
+			<xmax>197</xmax>
+			<ymax>123</ymax>
+		</bndbox>
+	</object>
+	<object>
+		<name>person</name>
+		<pose>Rear</pose>
+		<truncated>0</truncated>
+		<difficult>0</difficult>
+		<bndbox>
+			<xmin>195</xmin>
+			<ymin>180</ymin>
+			<xmax>213</xmax>
+			<ymax>229</ymax>
+		</bndbox>
+	</object>
+	<object>
+		<name>person</name>
+		<pose>Rear</pose>
+		<truncated>0</truncated>
+		<difficult>0</difficult>
+		<bndbox>
+			<xmin>26</xmin>
+			<ymin>189</ymin>
+			<xmax>44</xmax>
+			<ymax>238</ymax>
+		</bndbox>
+	</object>
+</annotation>
diff --git a/store/sdk/src/test/resources/image/voc/2007_000033.jpg b/store/sdk/src/test/resources/image/voc/2007_000033.jpg
new file mode 100755
index 0000000..01f478f
Binary files /dev/null and b/store/sdk/src/test/resources/image/voc/2007_000033.jpg differ
diff --git a/store/sdk/src/test/resources/image/voc/2007_000033.xml b/store/sdk/src/test/resources/image/voc/2007_000033.xml
new file mode 100755
index 0000000..61899d6
--- /dev/null
+++ b/store/sdk/src/test/resources/image/voc/2007_000033.xml
@@ -0,0 +1,51 @@
+<annotation>
+	<folder>VOC2012</folder>
+	<filename>2007_000033.jpg</filename>
+	<source>
+		<database>The VOC2007 Database</database>
+		<annotation>PASCAL VOC2007</annotation>
+		<image>flickr</image>
+	</source>
+	<size>
+		<width>500</width>
+		<height>366</height>
+		<depth>3</depth>
+	</size>
+	<segmented>1</segmented>
+	<object>
+		<name>aeroplane</name>
+		<pose>Unspecified</pose>
+		<truncated>0</truncated>
+		<difficult>0</difficult>
+		<bndbox>
+			<xmin>9</xmin>
+			<ymin>107</ymin>
+			<xmax>499</xmax>
+			<ymax>263</ymax>
+		</bndbox>
+	</object>
+	<object>
+		<name>aeroplane</name>
+		<pose>Left</pose>
+		<truncated>0</truncated>
+		<difficult>0</difficult>
+		<bndbox>
+			<xmin>421</xmin>
+			<ymin>200</ymin>
+			<xmax>482</xmax>
+			<ymax>226</ymax>
+		</bndbox>
+	</object>
+	<object>
+		<name>aeroplane</name>
+		<pose>Left</pose>
+		<truncated>1</truncated>
+		<difficult>0</difficult>
+		<bndbox>
+			<xmin>325</xmin>
+			<ymin>188</ymin>
+			<xmax>411</xmax>
+			<ymax>223</ymax>
+		</bndbox>
+	</object>
+</annotation>
diff --git a/store/sdk/src/test/resources/image/voc/2007_000039.jpg b/store/sdk/src/test/resources/image/voc/2007_000039.jpg
new file mode 100755
index 0000000..1a3b717
Binary files /dev/null and b/store/sdk/src/test/resources/image/voc/2007_000039.jpg differ
diff --git a/store/sdk/src/test/resources/image/voc/2007_000039.xml b/store/sdk/src/test/resources/image/voc/2007_000039.xml
new file mode 100755
index 0000000..bc73f4e
--- /dev/null
+++ b/store/sdk/src/test/resources/image/voc/2007_000039.xml
@@ -0,0 +1,27 @@
+<annotation>
+	<folder>VOC2012</folder>
+	<filename>2007_000039.jpg</filename>
+	<source>
+		<database>The VOC2007 Database</database>
+		<annotation>PASCAL VOC2007</annotation>
+		<image>flickr</image>
+	</source>
+	<size>
+		<width>500</width>
+		<height>375</height>
+		<depth>3</depth>
+	</size>
+	<segmented>1</segmented>
+	<object>
+		<name>tvmonitor</name>
+		<pose>Frontal</pose>
+		<truncated>0</truncated>
+		<difficult>0</difficult>
+		<bndbox>
+			<xmin>156</xmin>
+			<ymin>89</ymin>
+			<xmax>344</xmax>
+			<ymax>279</ymax>
+		</bndbox>
+	</object>
+</annotation>
diff --git a/store/sdk/src/test/resources/image/voc/2009_001444.jpg b/store/sdk/src/test/resources/image/voc/2009_001444.jpg
new file mode 100755
index 0000000..f01c62c
Binary files /dev/null and b/store/sdk/src/test/resources/image/voc/2009_001444.jpg differ
diff --git a/store/sdk/src/test/resources/image/voc/2009_001444.xml b/store/sdk/src/test/resources/image/voc/2009_001444.xml
new file mode 100755
index 0000000..9a68cbc
--- /dev/null
+++ b/store/sdk/src/test/resources/image/voc/2009_001444.xml
@@ -0,0 +1,28 @@
+<annotation>
+	<filename>2009_001444.jpg</filename>
+	<folder>VOC2012</folder>
+	<object>
+		<name>cat</name>
+		<bndbox>
+			<xmax>344</xmax>
+			<xmin>1</xmin>
+			<ymax>388</ymax>
+			<ymin>1</ymin>
+		</bndbox>
+		<difficult>0</difficult>
+		<occluded>0</occluded>
+		<pose>Unspecified</pose>
+		<truncated>1</truncated>
+	</object>
+	<segmented>1</segmented>
+	<size>
+		<depth>3</depth>
+		<height>388</height>
+		<width>500</width>
+	</size>
+	<source>
+		<annotation>PASCAL VOC2009</annotation>
+		<database>The VOC2009 Database</database>
+		<image>flickr</image>
+	</source>
+</annotation>
diff --git a/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000032.jpg b/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000032.jpg
new file mode 100755
index 0000000..b111b5a
Binary files /dev/null and b/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000032.jpg differ
diff --git a/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000032.png b/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000032.png
new file mode 100755
index 0000000..1f7181c
Binary files /dev/null and b/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000032.png differ
diff --git a/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000033.jpg b/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000033.jpg
new file mode 100755
index 0000000..01f478f
Binary files /dev/null and b/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000033.jpg differ
diff --git a/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000033.png b/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000033.png
new file mode 100755
index 0000000..bbeb3f4
Binary files /dev/null and b/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000033.png differ
diff --git a/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000042.jpg b/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000042.jpg
new file mode 100755
index 0000000..2188d51
Binary files /dev/null and b/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000042.jpg differ
diff --git a/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000042.png b/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000042.png
new file mode 100755
index 0000000..73b6059
Binary files /dev/null and b/store/sdk/src/test/resources/image/vocForSegmentationClass/2007_000042.png differ
diff --git a/tools/cli/src/test/java/org/apache/carbondata/tool/CarbonCliTest.java b/tools/cli/src/test/java/org/apache/carbondata/tool/CarbonCliTest.java
index fdd57c0..601fa40 100644
--- a/tools/cli/src/test/java/org/apache/carbondata/tool/CarbonCliTest.java
+++ b/tools/cli/src/test/java/org/apache/carbondata/tool/CarbonCliTest.java
@@ -22,12 +22,11 @@ import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 
+import org.apache.carbondata.common.exceptions.sql.InvalidLoadOptionException;
 import org.apache.carbondata.core.constants.CarbonVersionConstants;
 import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.util.CarbonUtil;
-import org.apache.carbondata.sdk.file.Field;
-import org.apache.carbondata.sdk.file.Schema;
-import org.apache.carbondata.sdk.file.TestUtil;
+import org.apache.carbondata.sdk.file.*;
 
 import org.apache.commons.io.FileUtils;
 import org.junit.After;
@@ -38,6 +37,7 @@ import org.junit.Test;
 public class CarbonCliTest {
 
   private String path = "./CarbonCliTest";
+  private String pathBinary = "./CarbonCliTestBinary";
 
   private String buildLines(String... lines) {
     ByteArrayOutputStream expectedOut = null;
@@ -67,6 +67,35 @@ public class CarbonCliTest {
     TestUtil.writeFilesAndVerify(5000000, new Schema(fields), path, new String[]{"name"}, 3, 8);
   }
 
+  public void buildBinaryData(int rows, Schema schema, String path, String[] sortColumns,
+                              int blockletSize, int blockSize)
+      throws IOException, InvalidLoadOptionException {
+
+    CarbonWriterBuilder builder = CarbonWriter.builder()
+        .outputPath(path);
+    if (sortColumns != null) {
+      builder = builder.sortBy(sortColumns);
+    }
+    if (blockletSize != -1) {
+      builder = builder.withBlockletSize(blockletSize);
+    }
+    if (blockSize != -1) {
+      builder = builder.withBlockSize(blockSize);
+    }
+
+    CarbonWriter writer = builder.withCsvInput(schema).writtenBy("TestUtil").build();
+
+    for (int i = 0; i < rows; i++) {
+      writer.write(new String[]{
+          "robot" + (i % 10), String.valueOf(i % 3000000), String.valueOf((double) i / 2)});
+    }
+    for (int i = 0; i < rows; i++) {
+      writer.write(new String[]{
+          "robot" + (i % 10), String.valueOf(i % 3000000), String.valueOf("robot" + i / 2)});
+    }
+    writer.close();
+  }
+
   @Test
   public void testInvalidCmd() {
     String[] args = {"-cmd", "DD", "-p", path};
@@ -231,9 +260,35 @@ public class CarbonCliTest {
     System.out.println(output);
   }
 
+  @Test
+  public void testBinary() throws IOException, InvalidLoadOptionException {
+    FileUtils.deleteDirectory(new File(pathBinary));
+    Field[] fields = new Field[3];
+    fields[0] = new Field("name", DataTypes.STRING);
+    fields[1] = new Field("age", DataTypes.INT);
+    fields[2] = new Field("binaryField", DataTypes.BINARY);
+
+    buildBinaryData(5000000, new Schema(fields), pathBinary, new String[]{"name"}, 3, 8);
+    String[] args = {"-cmd", "summary", "-p", pathBinary};
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    PrintStream stream = new PrintStream(out);
+    CarbonCli.run(args, stream);
+
+    String[] args2 = {"-cmd", "summary", "-p", pathBinary, "-s"};
+    out = new ByteArrayOutputStream();
+    stream = new PrintStream(out);
+    CarbonCli.run(args2, stream);
+    String output = new String(out.toByteArray());
+
+    Assert.assertTrue(output.contains("binaryfield") && output.contains("BINARY"));
+    FileUtils.deleteDirectory(new File(pathBinary));
+  }
+
+
   @After
   public void after() throws IOException {
     FileUtils.deleteDirectory(new File(path));
+    FileUtils.deleteDirectory(new File(pathBinary));
   }
 
 }


[carbondata] 02/22: [CARBONDATA-3001] configurable page size in MB

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit eeb7e3a9b52f07e8298091252638175328a7aa9c
Author: ajantha-bhat <aj...@gmail.com>
AuthorDate: Mon Oct 15 18:49:33 2018 +0530

    [CARBONDATA-3001] configurable page size in MB
    
    Changes proposed in this PR:
    
    Added a table property table_page_size_inmb (1 MB to 1755 MB) so that the page size can be configured per table in the following scenarios:
    
    TBLPROPERTIES when creating a table
    The SDK writer API (withPageSizeInMb)
    Options when creating a table using the Spark file format
    Options in DataFrameWriter
    If this table property is not configured, a default value of 1 MB will be taken [currently there is no default value; it will be set in the next version].
    Based on this property value, a page is cut as soon as it crosses the configured size, even before reaching 32000 rows. This helps in fitting pages into the cache.
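    For reference, a minimal SDK usage sketch (the output path, schema and row value below are illustrative; the withPageSizeInMb builder call is the API added in this change):
    
        import org.apache.carbondata.core.metadata.datatype.DataTypes;
        import org.apache.carbondata.sdk.file.CarbonWriter;
        import org.apache.carbondata.sdk.file.Field;
        import org.apache.carbondata.sdk.file.Schema;
    
        Field[] fields = new Field[]{new Field("name", DataTypes.STRING)};
        CarbonWriter writer = CarbonWriter.builder()
            .outputPath("/tmp/pageSizeExample")  // illustrative output location
            .withCsvInput(new Schema(fields))
            .withPageSizeInMb(1)                 // cut each page once it reaches ~1 MB, even before 32000 rows
            .writtenBy("pageSizeExample")
            .build();
        writer.write(new String[]{"robot0"});    // one CSV-style row matching the schema
        writer.close();
    
    The equivalent table-level setting in DDL is TBLPROPERTIES ('table_page_size_inmb'='1').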
    
    This closes #2814
---
 .../core/constants/CarbonCommonConstants.java      |  14 ++
 .../core/datastore/blocklet/EncodedBlocklet.java   |  19 +++
 .../blockletindex/BlockletDataRefNode.java         |   6 +-
 .../core/metadata/blocklet/BlockletInfo.java       |  10 ++
 .../metadata/schema/table/TableSchemaBuilder.java  |  10 ++
 .../carbondata/core/util/CarbonMetadataUtil.java   |  15 +-
 .../core/util/DataFileFooterConverterV3.java       |   8 +
 docs/carbon-as-spark-datasource-guide.md           |   1 +
 docs/ddl-of-carbondata.md                          |  13 ++
 docs/sdk-guide.md                                  |   1 +
 format/src/main/thrift/carbondata.thrift           |   1 +
 .../TestCreateTableWithPageSizeInMb.scala          |  67 ++++++++
 .../TestNonTransactionalCarbonTable.scala          |  49 ++++++
 .../org/apache/carbondata/spark/CarbonOption.scala |   2 +
 .../apache/carbondata/spark/util/CommonUtil.scala  |  32 ++++
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala    |   3 +-
 .../datasources/CarbonSparkDataSourceUtil.scala    |   4 +
 .../apache/spark/sql/CarbonDataFrameWriter.scala   |   1 +
 .../table/CarbonDescribeFormattedCommand.scala     |   9 +-
 .../sql/CarbonGetTableDetailComandTestCase.scala   |   0
 .../processing/datatypes/ArrayDataType.java        |  15 ++
 .../processing/datatypes/GenericDataType.java      |   5 +
 .../processing/datatypes/PrimitiveDataType.java    |   6 +
 .../processing/datatypes/StructDataType.java       |  14 ++
 .../store/CarbonFactDataHandlerColumnar.java       | 190 ++++++++++++++++-----
 .../store/CarbonFactDataHandlerModel.java          | 106 ++++++------
 .../carbondata/processing/store/TablePage.java     |  36 +---
 .../writer/v3/CarbonFactDataWriterImplV3.java      |   7 +
 .../carbondata/sdk/file/CarbonWriterBuilder.java   |  26 ++-
 29 files changed, 540 insertions(+), 130 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index 69374ad..e02241e 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -1999,6 +1999,20 @@ public final class CarbonCommonConstants {
    */
   public static final int CARBON_ALLOW_DIRECT_FILL_DICT_COLS_LIMIT = 100;
 
+  /**
+   * Page size in MB. If the page size exceeds this value before reaching 32000 rows, the page will be cut
+   * and the remaining rows will be written in the next page.
+   */
+  public static final String TABLE_PAGE_SIZE_INMB = "table_page_size_inmb";
+
+  public static final int TABLE_PAGE_SIZE_MIN_INMB = 1;
+
+  // default 1 MB
+  public static final int TABLE_PAGE_SIZE_INMB_DEFAULT = 1;
+
+  // Capped at 1755 MB because SnappyCompressor.MAX_BYTE_TO_COMPRESS is 1.75 GB
+  public static final int TABLE_PAGE_SIZE_MAX_INMB = 1755;
+
   //////////////////////////////////////////////////////////////////////////////////////////
   // Unused constants and parameters start here
   //////////////////////////////////////////////////////////////////////////////////////////
diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/blocklet/EncodedBlocklet.java b/core/src/main/java/org/apache/carbondata/core/datastore/blocklet/EncodedBlocklet.java
index d017145..8a19522 100644
--- a/core/src/main/java/org/apache/carbondata/core/datastore/blocklet/EncodedBlocklet.java
+++ b/core/src/main/java/org/apache/carbondata/core/datastore/blocklet/EncodedBlocklet.java
@@ -63,6 +63,11 @@ public class EncodedBlocklet {
   private int numberOfPages;
 
   /**
+   * row count in each page
+   */
+  private List<Short> rowCountInPage;
+
+  /**
    * is decoder based fallback is enabled or not
    */
   private boolean isDecoderBasedFallBackEnabled;
@@ -77,6 +82,7 @@ public class EncodedBlocklet {
     this.executorService = executorService;
     this.isDecoderBasedFallBackEnabled = isDecoderBasedFallBackEnabled;
     this.localDictionaryGeneratorMap = localDictionaryGeneratorMap;
+    this.rowCountInPage = new ArrayList<>();
   }
 
   /**
@@ -90,10 +96,14 @@ public class EncodedBlocklet {
     if (null == pageMetadataList) {
       pageMetadataList = new ArrayList<>();
     }
+    if (null == rowCountInPage) {
+      rowCountInPage = new ArrayList<>();
+    }
     // update details
     blockletSize += encodedTablePage.getPageSize();
     pageMetadataList.add(encodedTablePage.getPageKey());
     this.numberOfPages++;
+    rowCountInPage.add((short)encodedTablePage.getPageSize());
   }
 
   /**
@@ -187,11 +197,20 @@ public class EncodedBlocklet {
     return this.numberOfPages;
   }
 
+  public List<Short> getRowCountInPage() {
+    return rowCountInPage;
+  }
+
+  public void setRowCountInPage(List<Short> rowCountInPage) {
+    this.rowCountInPage = rowCountInPage;
+  }
+
   public void clear() {
     this.numberOfPages = 0;
     this.encodedDimensionColumnPages = null;
     this.blockletSize = 0;
     this.encodedMeasureColumnPages = null;
     this.pageMetadataList = null;
+    this.rowCountInPage = null;
   }
 }
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java
index 9046ade..fe372e7 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataRefNode.java
@@ -85,7 +85,11 @@ public class BlockletDataRefNode implements DataRefNode {
       if (lastPageRowCount > 0) {
         pageRowCount[pageRowCount.length - 1] = lastPageRowCount;
       }
-      detailInfo.getBlockletInfo().setNumberOfRowsPerPage(pageRowCount);
+      // V3 old store to V3 new store compatibility. V3 new store will get this info in thrift.
+      // so don't overwrite it with hardcoded values.
+      if (detailInfo.getBlockletInfo().getNumberOfRowsPerPage() == null) {
+        detailInfo.getBlockletInfo().setNumberOfRowsPerPage(pageRowCount);
+      }
     }
     this.index = index;
     this.dimensionLens = dimensionLens;
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/BlockletInfo.java b/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/BlockletInfo.java
index 104ef1a..717bdbf 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/BlockletInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/blocklet/BlockletInfo.java
@@ -230,6 +230,12 @@ public class BlockletInfo implements Serializable, Writable {
     if (isSortedPresent) {
       output.writeBoolean(isSorted);
     }
+    if (null != getNumberOfRowsPerPage()) {
+      output.writeShort(getNumberOfRowsPerPage().length);
+      for (int i = 0; i < getNumberOfRowsPerPage().length; i++) {
+        output.writeInt(getNumberOfRowsPerPage()[i]);
+      }
+    }
   }
 
   /**
@@ -301,6 +307,10 @@ public class BlockletInfo implements Serializable, Writable {
     if (isSortedPresent) {
       this.isSorted = input.readBoolean();
     }
+    numberOfRowsPerPage = new int[input.readShort()];
+    for (int i = 0; i < numberOfRowsPerPage.length; i++) {
+      numberOfRowsPerPage[i] = input.readInt();
+    }
   }
 
   /**
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
index 3c290af..53542d5 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableSchemaBuilder.java
@@ -60,6 +60,8 @@ public class TableSchemaBuilder {
 
   private int blockletSize;
 
+  private int pageSizeInMb;
+
   private String tableName;
   private boolean isLocalDictionaryEnabled;
   private String localDictionaryThreshold;
@@ -80,6 +82,11 @@ public class TableSchemaBuilder {
     return this;
   }
 
+  public TableSchemaBuilder pageSizeInMb(int pageSizeInMb) {
+    this.pageSizeInMb = pageSizeInMb;
+    return this;
+  }
+
   public TableSchemaBuilder localDictionaryThreshold(int localDictionaryThreshold) {
     this.localDictionaryThreshold = String.valueOf(localDictionaryThreshold);
     return this;
@@ -121,6 +128,9 @@ public class TableSchemaBuilder {
     if (blockletSize > 0) {
       property.put(CarbonCommonConstants.TABLE_BLOCKLET_SIZE, String.valueOf(blockletSize));
     }
+    if (pageSizeInMb > 0) {
+      property.put(CarbonCommonConstants.TABLE_PAGE_SIZE_INMB, String.valueOf(pageSizeInMb));
+    }
 
     // Adding local dictionary, applicable only for String(dictionary exclude)
     if (isLocalDictionaryEnabled) {
diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonMetadataUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonMetadataUtil.java
index 0fe33b0..f35afc0 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/CarbonMetadataUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonMetadataUtil.java
@@ -403,9 +403,18 @@ public class CarbonMetadataUtil {
     dimensionChunkOffsets.addAll(blockletInfo.getMeasureChunkOffsets());
     List<Integer> dimensionChunksLength = blockletInfo.getDimensionChunksLength();
     dimensionChunksLength.addAll(blockletInfo.getMeasureChunksLength());
-    return new BlockletInfo3(blockletInfo.getNumberOfRows(), dimensionChunkOffsets,
-        dimensionChunksLength, blockletInfo.getDimensionOffset(), blockletInfo.getMeasureOffsets(),
-        blockletInfo.getNumberOfPages());
+    BlockletInfo3 blockletInfo3 =
+        new BlockletInfo3(blockletInfo.getNumberOfRows(), dimensionChunkOffsets,
+            dimensionChunksLength, blockletInfo.getDimensionOffset(),
+            blockletInfo.getMeasureOffsets(), blockletInfo.getNumberOfPages());
+    List<Integer> rowsPerPage = new ArrayList<>();
+    if (null != blockletInfo.getNumberOfRowsPerPage()) {
+      for (int i = 0; i < blockletInfo.getNumberOfRowsPerPage().length; i++) {
+        rowsPerPage.add(blockletInfo.getNumberOfRowsPerPage()[i]);
+      }
+      blockletInfo3.setRow_count_in_page(rowsPerPage);
+    }
+    return blockletInfo3;
   }
 
   /**
diff --git a/core/src/main/java/org/apache/carbondata/core/util/DataFileFooterConverterV3.java b/core/src/main/java/org/apache/carbondata/core/util/DataFileFooterConverterV3.java
index d6d91ed..d278f7a 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/DataFileFooterConverterV3.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/DataFileFooterConverterV3.java
@@ -144,6 +144,14 @@ public class DataFileFooterConverterV3 extends AbstractDataFileFooterConverter {
     blockletInfo.setDimensionOffset(blockletInfoThrift.getDimension_offsets());
     blockletInfo.setMeasureOffsets(blockletInfoThrift.getMeasure_offsets());
     blockletInfo.setNumberOfPages(blockletInfoThrift.getNumber_number_of_pages());
+    if (blockletInfoThrift.getRow_count_in_page() != null
+        && blockletInfoThrift.getRow_count_in_page().size() != 0) {
+      int[] rowCountInPages = new int[blockletInfoThrift.getRow_count_in_page().size()];
+      for (int i = 0; i < blockletInfoThrift.getRow_count_in_page().size(); i++) {
+        rowCountInPages[i] = blockletInfoThrift.getRow_count_in_page().get(i);
+      }
+      blockletInfo.setNumberOfRowsPerPage(rowCountInPages);
+    }
     return blockletInfo;
   }
 
diff --git a/docs/carbon-as-spark-datasource-guide.md b/docs/carbon-as-spark-datasource-guide.md
index bc56a54..598acb0 100644
--- a/docs/carbon-as-spark-datasource-guide.md
+++ b/docs/carbon-as-spark-datasource-guide.md
@@ -44,6 +44,7 @@ Now you can create Carbon table using Spark's datasource DDL syntax.
 |-----------|--------------|------------|
 | table_blocksize | 1024 | Size of blocks to write onto hdfs. For  more details, see [Table Block Size Configuration](./ddl-of-carbondata.md#table-block-size-configuration). |
 | table_blocklet_size | 64 | Size of blocklet to write. |
+| table_page_size_inmb | 0 | Size of each page in the carbon table; if the page size crosses this value before 32000 rows, the page is cut at that many rows. Helps in keeping the page size small enough to fit in the cache. |
 | local_dictionary_threshold | 10000 | Cardinality upto which the local dictionary can be generated. For  more details, see [Local Dictionary Configuration](./ddl-of-carbondata.md#local-dictionary-configuration). |
 | local_dictionary_enable | false | Enable local dictionary generation. For  more details, see [Local Dictionary Configuration](./ddl-of-carbondata.md#local-dictionary-configuration). |
 | sort_columns | all dimensions are sorted | Columns to include in sort and its order of sort. For  more details, see [Sort Columns Configuration](./ddl-of-carbondata.md#sort-columns-configuration). |
diff --git a/docs/ddl-of-carbondata.md b/docs/ddl-of-carbondata.md
index 07a2670..88615a2 100644
--- a/docs/ddl-of-carbondata.md
+++ b/docs/ddl-of-carbondata.md
@@ -94,6 +94,7 @@ CarbonData DDL statements are documented here,which includes:
 | [SORT_SCOPE](#sort-scope-configuration)                      | Sort scope of the load.Options include no sort, local sort ,batch sort and global sort |
 | [TABLE_BLOCKSIZE](#table-block-size-configuration)           | Size of blocks to write onto hdfs                            |
 | [TABLE_BLOCKLET_SIZE](#table-blocklet-size-configuration)    | Size of blocklet to write in the file                        |
+| [TABLE_PAGE_SIZE_INMB](#table-page-size-configuration)       | Size of page in MB; if the page size crosses this value before 32000 rows, the page is cut at that many rows and the remaining rows are processed in subsequent pages. This helps in keeping the page size small enough to fit in the CPU cache |
 | [MAJOR_COMPACTION_SIZE](#table-compaction-configuration)     | Size upto which the segments can be combined into one        |
 | [AUTO_LOAD_MERGE](#table-compaction-configuration)           | Whether to auto compact the segments                         |
 | [COMPACTION_LEVEL_THRESHOLD](#table-compaction-configuration) | Number of segments to compact into one segment               |
@@ -283,6 +284,18 @@ CarbonData DDL statements are documented here,which includes:
      TBLPROPERTIES ('TABLE_BLOCKLET_SIZE'='8')
      ```
 
+   - ##### Table Page Size Configuration
+
+     This property sets the page size in the carbondata file
+     and supports a range of 1 MB to 1755 MB.
+     If the page size crosses this value before 32000 rows, the page is cut at that many rows.
+     This helps in keeping the page size small enough to fit in the CPU cache.
+
+     Example usage:
+     ```
+     TBLPROPERTIES ('TABLE_PAGE_SIZE_INMB'='5')
+     ```
+
    - ##### Table Compaction Configuration
    
      These properties are table level compaction configurations, if not specified, system level configurations in carbon.properties will be used.
diff --git a/docs/sdk-guide.md b/docs/sdk-guide.md
index 573b595..e040e64 100644
--- a/docs/sdk-guide.md
+++ b/docs/sdk-guide.md
@@ -382,6 +382,7 @@ public CarbonWriterBuilder withLoadOptions(Map<String, String> options);
  *                           default value is null.
  * l. inverted_index -- comma separated string columns for which inverted index needs to be
  *                      generated
+ * m. table_page_size_inmb -- page size in MB, valid range [1-1755] MB.
  *
  * @return updated CarbonWriterBuilder
  */
diff --git a/format/src/main/thrift/carbondata.thrift b/format/src/main/thrift/carbondata.thrift
index 5cad5ac..7dcd4d3 100644
--- a/format/src/main/thrift/carbondata.thrift
+++ b/format/src/main/thrift/carbondata.thrift
@@ -180,6 +180,7 @@ struct BlockletInfo3{
     4: required i64 dimension_offsets;
     5: required i64 measure_offsets;
     6: required i32 number_number_of_pages; // This is rquired for alter table, in case of alter table when filter is only selected on new added column this will help
+    7: optional list<i32> row_count_in_page; // This will contain the row count in each page.
   }
 
 /**
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithPageSizeInMb.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithPageSizeInMb.scala
new file mode 100644
index 0000000..ce374eb
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestCreateTableWithPageSizeInMb.scala
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.createTable
+
+import org.apache.spark.sql.test.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
+
+/**
+ * Test functionality of create table with page size
+ */
+class TestCreateTableWithPageSizeInMb extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll {
+    sql("use default")
+    sql("drop table if exists source")
+  }
+
+  test("test create table with invalid page size") {
+    val ex = intercept[MalformedCarbonCommandException] {
+      sql(
+        "CREATE TABLE T1(name String) STORED AS CARBONDATA TBLPROPERTIES" +
+        "('table_page_size_inmb'='3X')")
+    }
+    assert(ex.getMessage.toLowerCase.contains("invalid table_page_size_inmb"))
+    val ex1 = intercept[MalformedCarbonCommandException] {
+      sql(
+        "CREATE TABLE T1(name String) STORED AS CARBONDATA TBLPROPERTIES" +
+        "('table_page_size_inmb'='0')")
+    }
+    assert(ex1.getMessage.toLowerCase.contains("invalid table_page_size_inmb"))
+    val ex2 = intercept[MalformedCarbonCommandException] {
+      sql(
+        "CREATE TABLE T1(name String) STORED AS CARBONDATA TBLPROPERTIES" +
+        "('table_page_size_inmb'='-1')")
+    }
+    assert(ex2.getMessage.toLowerCase.contains("invalid table_page_size_inmb"))
+    val ex3 = intercept[MalformedCarbonCommandException] {
+      sql(
+        "CREATE TABLE T1(name String) STORED AS CARBONDATA TBLPROPERTIES" +
+        "('table_page_size_inmb'='1999')")
+    }
+    assert(ex3.getMessage.toLowerCase.contains("invalid table_page_size_inmb"))
+  }
+
+  override def afterAll {
+    sql("use default")
+    sql("drop table if exists source")
+  }
+
+}
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
index 06d41b1..274cca9 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTable.scala
@@ -33,6 +33,7 @@ import org.apache.avro.file.DataFileWriter
 import org.apache.avro.generic.{GenericDatumReader, GenericDatumWriter, GenericRecord}
 import org.apache.avro.io.{DecoderFactory, Encoder}
 import org.apache.commons.io.FileUtils
+import org.apache.commons.lang.RandomStringUtils
 import org.apache.spark.sql.test.util.QueryTest
 import org.apache.spark.sql.{AnalysisException, CarbonEnv, Row}
 import org.junit.Assert
@@ -51,6 +52,8 @@ import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.datastore.page.encoding.DefaultEncodingFactory
 import org.apache.carbondata.core.metadata.ColumnarFormatVersion
 import org.apache.carbondata.core.metadata.datatype.DataTypes
+import org.apache.carbondata.core.reader.CarbonFooterReaderV3
+import org.apache.carbondata.core.util.path.CarbonTablePath
 import org.apache.carbondata.core.util.{CarbonMetadataUtil, CarbonProperties, CarbonUtil, DataFileFooterConverterV3}
 import org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException
 import org.apache.carbondata.sdk.file._
@@ -2537,6 +2540,52 @@ class TestNonTransactionalCarbonTable extends QueryTest with BeforeAndAfterAll {
     FileUtils.deleteDirectory(new File(writerPath))
   }
 
+  test("Test with long string columns with 1 MB pageSize") {
+    FileUtils.deleteDirectory(new File(writerPath))
+    // here we specify the long string column as varchar
+    val schema = new StringBuilder()
+      .append("[ \n")
+      .append("   {\"name\":\"string\"},\n")
+      .append("   {\"  address    \":\"varchar\"},\n")
+      .append("   {\"age\":\"int\"},\n")
+      .append("   {\"note\":\"varchar\"}\n")
+      .append("]")
+      .toString()
+    val builder = CarbonWriter.builder()
+    val writer = builder.outputPath(writerPath).withCsvInput(Schema.parseJson(schema))
+      .withPageSizeInMb(1)
+      .writtenBy("TestCreateTableUsingSparkCarbonFileFormat").build()
+    val totalRecordsNum = 10
+    for (i <- 0 until totalRecordsNum) {
+      // write a varchar with 250,000 length
+      writer
+        .write(Array[String](s"name_$i",
+          RandomStringUtils.randomAlphabetic(250000),
+          i.toString,
+          RandomStringUtils.randomAlphabetic(250000)))
+    }
+    writer.close()
+
+    // read footer and verify number of pages
+    val folder = FileFactory.getCarbonFile(writerPath)
+    val files = folder.listFiles(true)
+    import scala.collection.JavaConverters._
+    val dataFiles = files.asScala.filter(_.getName.endsWith(CarbonTablePath.CARBON_DATA_EXT))
+    dataFiles.foreach { dataFile =>
+      val fileReader = FileFactory
+        .getFileHolder(FileFactory.getFileType(dataFile.getPath))
+      val buffer = fileReader
+        .readByteBuffer(FileFactory.getUpdatedFilePath(dataFile.getPath), dataFile.getSize - 8, 8)
+      val footerReader = new CarbonFooterReaderV3(
+        dataFile.getAbsolutePath,
+        buffer.getLong)
+      val footer = footerReader.readFooterVersion3
+      // without the page_size configuration there would be only 1 page; with it there will be more.
+      assert(footer.getBlocklet_info_list3.get(0).number_number_of_pages != 1)
+    }
+
+  }
+
   def generateCarbonData(builder :CarbonWriterBuilder): Unit ={
     val fields = new Array[Field](3)
     fields(0) = new Field("name", DataTypes.STRING)
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonOption.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonOption.scala
index fbbca56..a22c8cb 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonOption.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/CarbonOption.scala
@@ -62,6 +62,8 @@ class CarbonOption(options: Map[String, String]) {
 
   lazy val tableBlockletSize: Option[String] = options.get("table_blocklet_size")
 
+  lazy val tablePageSizeInMb: Option[String] = options.get("table_page_size_inmb")
+
   lazy val bucketNumber: Int = options.getOrElse("bucketnumber", "0").toInt
 
   lazy val bucketColumns: String = options.getOrElse("bucketcolumns", "")
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
index 7887d87..d90c6b2 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
@@ -543,6 +543,38 @@ object CommonUtil {
   }
 
   /**
+   * This method will validate the table page size
+   *
+   * @param tableProperties table property specified by user
+   * @param propertyName property name
+   */
+  def validatePageSizeInmb(tableProperties: Map[String, String], propertyName: String): Unit = {
+    var size: Integer = 0
+    if (tableProperties.get(propertyName).isDefined) {
+      val pageSize: String =
+        parsePropertyValueStringInMB(tableProperties(propertyName))
+      val minPageSize = CarbonCommonConstants.TABLE_PAGE_SIZE_MIN_INMB
+      val maxPageSize = CarbonCommonConstants.TABLE_PAGE_SIZE_MAX_INMB
+      try {
+        size = Integer.parseInt(pageSize)
+      } catch {
+        case e: NumberFormatException =>
+          throw new MalformedCarbonCommandException(s"Invalid $propertyName value found: " +
+                                                    s"$pageSize, only int value from $minPageSize" +
+                                                    s" to " +
+                                                    s"$maxPageSize is supported.")
+      }
+      if (size < minPageSize || size > maxPageSize) {
+        throw new MalformedCarbonCommandException(s"Invalid $propertyName value found: " +
+                                                  s"$pageSize, only int value from $minPageSize " +
+                                                  s"to " +
+                                                  s"$maxPageSize is supported.")
+      }
+      tableProperties.put(propertyName, pageSize)
+    }
+  }
+
+  /**
    * This method will parse the configure string from 'XX MB/M' to 'XX'
    *
    * @param propertyValueString
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index e03bebd..3cb068f 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -450,9 +450,10 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
           partitionColIntersecLongStrCols.mkString(",")
         } both in partition and long_string_columns which is not allowed.")
     }
-    // validate the block size and blocklet size in table properties
+    // validate the block size, blocklet size and page size in table properties
     CommonUtil.validateSize(tableProperties, CarbonCommonConstants.TABLE_BLOCKSIZE)
     CommonUtil.validateSize(tableProperties, CarbonCommonConstants.TABLE_BLOCKLET_SIZE)
+    CommonUtil.validatePageSizeInmb(tableProperties, CarbonCommonConstants.TABLE_PAGE_SIZE_INMB)
     // validate table level properties for compaction
     CommonUtil.validateTableLevelCompactionProperties(tableProperties)
     // validate flat folder property.
diff --git a/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonSparkDataSourceUtil.scala b/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonSparkDataSourceUtil.scala
index 1649afd..71dba3d 100644
--- a/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonSparkDataSourceUtil.scala
+++ b/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/CarbonSparkDataSourceUtil.scala
@@ -229,6 +229,10 @@ object CarbonSparkDataSourceUtil {
     if (blockletSize.isDefined) {
       builder.withBlockletSize(blockletSize.get)
     }
+    val pageSizeInMb = options.get("table_page_size_inmb").map(_.toInt)
+    if (pageSizeInMb.isDefined) {
+      builder.withPageSizeInMb(pageSizeInMb.get)
+    }
     builder.enableLocalDictionary(options.getOrElse(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE,
       CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE_DEFAULT).toBoolean)
     builder.localDictionaryThreshold(
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
index f335509..8885f4a 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonDataFrameWriter.scala
@@ -87,6 +87,7 @@ class CarbonDataFrameWriter(sqlContext: SQLContext, val dataFrame: DataFrame) {
       "LONG_STRING_COLUMNS" -> options.longStringColumns,
       "TABLE_BLOCKSIZE" -> options.tableBlockSize,
       "TABLE_BLOCKLET_SIZE" -> options.tableBlockletSize,
+      "TABLE_PAGE_SIZE_INMB" -> options.tablePageSizeInMb,
       "STREAMING" -> Option(options.isStreaming.toString)
     ).filter(_._2.isDefined)
       .map(property => s"'${property._1}' = '${property._2.get}'").mkString(",")
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
index e2a2451..e8f0f23 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/table/CarbonDescribeFormattedCommand.scala
@@ -83,6 +83,12 @@ private[sql] case class CarbonDescribeFormattedCommand(
     val catalog = sparkSession.sessionState.catalog
     val catalogTable = catalog.getTableMetadata(tblIdentifier)
 
+    val pageSizeInMb: String = if (tblProps.get(CarbonCommonConstants.TABLE_PAGE_SIZE_INMB)
+      .isDefined) {
+      tblProps(CarbonCommonConstants.TABLE_PAGE_SIZE_INMB)
+    } else {
+      ""
+    }
     //////////////////////////////////////////////////////////////////////////////
     // Table Basic Information
     //////////////////////////////////////////////////////////////////////////////
@@ -122,7 +128,8 @@ private[sql] case class CarbonDescribeFormattedCommand(
         carbonTable.getMinMaxCachedColumnsInCreateOrder.asScala.mkString(", "), ""),
       ("Min/Max Index Cache Level",
         tblProps.getOrElse(CarbonCommonConstants.CACHE_LEVEL,
-          CarbonCommonConstants.CACHE_LEVEL_DEFAULT_VALUE), "")
+          CarbonCommonConstants.CACHE_LEVEL_DEFAULT_VALUE), ""),
+      ("Table page size in mb", pageSizeInMb, "")
     )
 
     //////////////////////////////////////////////////////////////////////////////
diff --git a/integration/spark2/src/test/scala/org/apache/spark/sql/CarbonGetTableDetailComandTestCase.scala b/integration/spark2/src/test/scala/org/apache/spark/sql/CarbonGetTableDetailComandTestCase.scala
new file mode 100644
index 0000000..e69de29
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
index 4cdd2b9..b64e2a8 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/ArrayDataType.java
@@ -72,6 +72,9 @@ public class ArrayDataType implements GenericDataType<ArrayObject> {
    */
   private int dataCounter;
 
+  /* flattened length of the complex datatype, including its children */
+  private int depth;
+
   private ArrayDataType(int outputArrayIndex, int dataCounter, GenericDataType children,
       String name) {
     this.outputArrayIndex = outputArrayIndex;
@@ -322,4 +325,16 @@ public class ArrayDataType implements GenericDataType<ArrayObject> {
             name, false));
     children.getComplexColumnInfo(columnInfoList);
   }
+
+  @Override
+  public int getDepth() {
+    if (depth == 0) {
+      // calculate only one time
+      List<ComplexColumnInfo> complexColumnInfoList = new ArrayList<>();
+      getComplexColumnInfo(complexColumnInfoList);
+      depth = complexColumnInfoList.size();
+    }
+    return depth;
+  }
+
 }
\ No newline at end of file
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java
index 8fe4923..fb8b513 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/GenericDataType.java
@@ -158,4 +158,9 @@ public interface GenericDataType<T> {
   GenericDataType<T> deepCopy();
 
   void getComplexColumnInfo(List<ComplexColumnInfo> columnInfoList);
+
+  /**
+   * @return depth of the complex column, i.e. the length of the flattened complex data.
+   */
+  int getDepth();
 }
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
index 18dc89d..200a9f6 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/PrimitiveDataType.java
@@ -581,4 +581,10 @@ public class PrimitiveDataType implements GenericDataType<Object> {
             name, !isDictionary));
   }
 
+  @Override
+  public int getDepth() {
+    // primitive type has no children
+    return 1;
+  }
+
 }
\ No newline at end of file
diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
index 3697a09..76ccf17 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java
@@ -68,6 +68,9 @@ public class StructDataType implements GenericDataType<StructObject> {
    */
   private int dataCounter;
 
+  /* flattened complex datatype length, including the children */
+  private int depth;
+
   private StructDataType(List<GenericDataType> children, int outputArrayIndex, int dataCounter,
       String name) {
     this.children = children;
@@ -357,4 +360,15 @@ public class StructDataType implements GenericDataType<StructObject> {
       children.get(i).getComplexColumnInfo(columnInfoList);
     }
   }
+
+  @Override
+  public int getDepth() {
+    if (depth == 0) {
+      // calculate only one time
+      List<ComplexColumnInfo> complexColumnInfoList = new ArrayList<>();
+      getComplexColumnInfo(complexColumnInfoList);
+      depth = complexColumnInfoList.size();
+    }
+    return depth;
+  }
 }
\ No newline at end of file
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java b/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
index 1270b1f..76c5613 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerColumnar.java
@@ -17,9 +17,14 @@
 
 package org.apache.carbondata.processing.store;
 
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
@@ -47,7 +52,6 @@ import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.core.util.CarbonThreadFactory;
 import org.apache.carbondata.core.util.CarbonUtil;
-import org.apache.carbondata.core.util.DataTypeUtil;
 import org.apache.carbondata.processing.datatypes.GenericDataType;
 import org.apache.carbondata.processing.store.writer.CarbonFactDataWriter;
 
@@ -88,7 +92,7 @@ public class CarbonFactDataHandlerColumnar implements CarbonFactHandler {
   private ExecutorService consumerExecutorService;
   private List<Future<Void>> consumerExecutorServiceTaskList;
   private List<CarbonRow> dataRows;
-  private int[] varcharColumnSizeInByte;
+  private int[] noDictColumnPageSize;
   /**
    * semaphore which will used for managing node holder objects
    */
@@ -120,6 +124,16 @@ public class CarbonFactDataHandlerColumnar implements CarbonFactHandler {
    */
   private ColumnarFormatVersion version;
 
+  /*
+   * Cannot use the indexMap of the model directly:
+   * modifying the map in the model will create problems if it is accessed later.
+   * Hence take a copy and work on it.
+   */
+  private Map<Integer, GenericDataType> complexIndexMapCopy = null;
+
+  /* configured page size, converted to bytes */
+  private int configuredPageSizeInBytes = 0;
+
   /**
    * CarbonFactDataHandler constructor
    */
@@ -137,6 +151,16 @@ public class CarbonFactDataHandlerColumnar implements CarbonFactHandler {
     if (LOGGER.isDebugEnabled()) {
       LOGGER.debug("Columns considered as NoInverted Index are " + noInvertedIdxCol.toString());
     }
+    this.complexIndexMapCopy = new HashMap<>();
+    for (Map.Entry<Integer, GenericDataType> entry: model.getComplexIndexMap().entrySet()) {
+      this.complexIndexMapCopy.put(entry.getKey(), entry.getValue().deepCopy());
+    }
+    String pageSizeStrInBytes =
+        model.getTableSpec().getCarbonTable().getTableInfo().getFactTable().getTableProperties()
+            .get(CarbonCommonConstants.TABLE_PAGE_SIZE_INMB);
+    if (pageSizeStrInBytes != null) {
+      configuredPageSizeInBytes = Integer.parseInt(pageSizeStrInBytes) * 1024 * 1024;
+    }
   }
 
   private void initParameters(CarbonFactDataHandlerModel model) {
@@ -196,11 +220,21 @@ public class CarbonFactDataHandlerColumnar implements CarbonFactHandler {
    * @throws CarbonDataWriterException
    */
   public void addDataToStore(CarbonRow row) throws CarbonDataWriterException {
+    int totalComplexColumnDepth = setFlatCarbonRowForComplex(row);
+    if (noDictColumnPageSize == null) {
+      // initialization using first row.
+      model.setNoDictAllComplexColumnDepth(totalComplexColumnDepth);
+      if (model.getNoDictDataTypesList().size() + model.getNoDictAllComplexColumnDepth() > 0) {
+        noDictColumnPageSize =
+            new int[model.getNoDictDataTypesList().size() + model.getNoDictAllComplexColumnDepth()];
+      }
+    }
+
     dataRows.add(row);
     this.entryCount++;
     // if entry count reaches to leaf node size then we are ready to write
     // this to leaf node file and update the intermediate files
-    if (this.entryCount == this.pageSize || isVarcharColumnFull(row)) {
+    if (this.entryCount == this.pageSize || needToCutThePage(row)) {
       try {
         semaphore.acquire();
 
@@ -218,6 +252,13 @@ public class CarbonFactDataHandlerColumnar implements CarbonFactHandler {
         }
         dataRows = new ArrayList<>(this.pageSize);
         this.entryCount = 0;
+        // re-init the complexIndexMap
+        this.complexIndexMapCopy = new HashMap<>();
+        for (Map.Entry<Integer, GenericDataType> entry : model.getComplexIndexMap().entrySet()) {
+          this.complexIndexMapCopy.put(entry.getKey(), entry.getValue().deepCopy());
+        }
+        noDictColumnPageSize =
+            new int[model.getNoDictDataTypesList().size() + model.getNoDictAllComplexColumnDepth()];
       } catch (InterruptedException e) {
         LOGGER.error(e.getMessage(), e);
         throw new CarbonDataWriterException(e);
@@ -227,50 +268,120 @@ public class CarbonFactDataHandlerColumnar implements CarbonFactHandler {
 
   /**
    * Check if column page can be added more rows after adding this row to page.
+   * Only a few no-dictionary dimension columns (string, varchar,
+   * complex columns) can grow huge in size.
    *
-   * A varchar column page uses SafeVarLengthColumnPage/UnsafeVarLengthColumnPage to store data
-   * and encoded using HighCardDictDimensionIndexCodec which will call getByteArrayPage() from
-   * column page and flatten into byte[] for compression.
-   * Limited by the index of array, we can only put number of Integer.MAX_VALUE bytes in a page.
-   *
-   * Another limitation is from Compressor. Currently we use snappy as default compressor,
-   * and it will call MaxCompressedLength method to estimate the result size for preparing output.
-   * For safety, the estimate result is oversize: `32 + source_len + source_len/6`.
-   * So the maximum bytes to compress by snappy is (2GB-32)*6/7≈1.71GB.
    *
-   * Size of a row does not exceed 2MB since UnsafeSortDataRows uses 2MB byte[] as rowBuffer.
-   * Such that we can stop adding more row here if any long string column reach this limit.
-   *
-   * If use unsafe column page, please ensure the memory configured is enough.
-   * @param row
-   * @return false if any varchar column page cannot add one more value(2MB)
+   * @param row carbonRow
+   * @return false if next rows can be added to same page.
+   * true if next rows cannot be added to same page
    */
-  private boolean isVarcharColumnFull(CarbonRow row) {
-    //TODO: test and remove this as now  UnsafeSortDataRows can exceed 2MB
-    if (model.getVarcharDimIdxInNoDict().size() > 0) {
+  private boolean needToCutThePage(CarbonRow row) {
+    List<DataType> noDictDataTypesList = model.getNoDictDataTypesList();
+    int totalNoDictPageCount = noDictDataTypesList.size() + model.getNoDictAllComplexColumnDepth();
+    if (totalNoDictPageCount > 0) {
+      int currentElementLength;
+      int bucketCounter = 0;
+      if (configuredPageSizeInBytes == 0) {
+        // no need to cut the page
+        // use default value
+        /*configuredPageSizeInBytes =
+            CarbonCommonConstants.TABLE_PAGE_SIZE_INMB_DEFAULT * 1024 * 1024;*/
+        return false;
+      }
       Object[] nonDictArray = WriteStepRowUtil.getNoDictAndComplexDimension(row);
-      for (int i = 0; i < model.getVarcharDimIdxInNoDict().size(); i++) {
-        if (DataTypeUtil
-            .isPrimitiveColumn(model.getNoDictAndComplexColumns()[i].getDataType())) {
-          // get the size from the data type
-          varcharColumnSizeInByte[i] +=
-              model.getNoDictAndComplexColumns()[i].getDataType().getSizeInBytes();
-        } else {
-          varcharColumnSizeInByte[i] +=
-              ((byte[]) nonDictArray[model.getVarcharDimIdxInNoDict().get(i)]).length;
-        }
-        if (SnappyCompressor.MAX_BYTE_TO_COMPRESS -
-                (varcharColumnSizeInByte[i] + dataRows.size() * 4) < (2 << 20)) {
-          LOGGER.debug("Limited by varchar column, page size is " + dataRows.size());
-          // re-init for next page
-          varcharColumnSizeInByte = new int[model.getVarcharDimIdxInNoDict().size()];
-          return true;
+      for (int i = 0; i < noDictDataTypesList.size(); i++) {
+        DataType columnType = noDictDataTypesList.get(i);
+        if ((columnType == DataTypes.STRING) || (columnType == DataTypes.VARCHAR)) {
+          currentElementLength = ((byte[]) nonDictArray[i]).length;
+          noDictColumnPageSize[bucketCounter] += currentElementLength;
+          canSnappyHandleThisRow(noDictColumnPageSize[bucketCounter]);
+          // If current page size is more than configured page size, cut the page here.
+          if (noDictColumnPageSize[bucketCounter] + dataRows.size() * 4
+              >= configuredPageSizeInBytes) {
+            if (LOGGER.isDebugEnabled()) {
+              LOGGER.debug("cutting the page. Rows count in this page: " + dataRows.size());
+            }
+            // re-init for next page
+            noDictColumnPageSize = new int[totalNoDictPageCount];
+            return true;
+          }
+          bucketCounter++;
+        } else if (columnType.isComplexType()) {
+          // this is for depth of each complex column, model is having only total depth.
+          GenericDataType genericDataType = complexIndexMapCopy
+              .get(i - model.getNoDictionaryCount() + model.getPrimitiveDimLens().length);
+          int depth = genericDataType.getDepth();
+          List<ArrayList<byte[]>> flatComplexColumnList = (List<ArrayList<byte[]>>) nonDictArray[i];
+          for (int k = 0; k < depth; k++) {
+            ArrayList<byte[]> children = flatComplexColumnList.get(k);
+            // Add child element from inner list.
+            int complexElementSize = 0;
+            for (byte[] child : children) {
+              complexElementSize += child.length;
+            }
+            noDictColumnPageSize[bucketCounter] += complexElementSize;
+            canSnappyHandleThisRow(noDictColumnPageSize[bucketCounter]);
+            // If current page size is more than configured page size, cut the page here.
+            if (noDictColumnPageSize[bucketCounter] + dataRows.size() * 4
+                >= configuredPageSizeInBytes) {
+              LOGGER.info("cutting the page. Rows count: " + dataRows.size());
+              // re-init for next page
+              noDictColumnPageSize = new int[totalNoDictPageCount];
+              return true;
+            }
+            bucketCounter++;
+          }
         }
       }
     }
     return false;
   }
 
+  private int setFlatCarbonRowForComplex(CarbonRow row) {
+    int noDictTotalComplexChildDepth = 0;
+    Object[] noDictAndComplexDimension = WriteStepRowUtil.getNoDictAndComplexDimension(row);
+    for (int i = 0; i < noDictAndComplexDimension.length; i++) {
+      // complex types starts after no dictionary dimensions
+      if (i >= model.getNoDictionaryCount() && (model.getTableSpec().getNoDictionaryDimensionSpec()
+          .get(i).getSchemaDataType().isComplexType())) {
+        // this is for depth of each complex column, model is having only total depth.
+        GenericDataType genericDataType = complexIndexMapCopy
+            .get(i - model.getNoDictionaryCount() + model.getPrimitiveDimLens().length);
+        int depth = genericDataType.getDepth();
+        // initialize flatComplexColumnList
+        List<ArrayList<byte[]>> flatComplexColumnList = new ArrayList<>(depth);
+        for (int k = 0; k < depth; k++) {
+          flatComplexColumnList.add(new ArrayList<byte[]>());
+        }
+        // flatten the complex byteArray as per depth
+        try {
+          ByteBuffer byteArrayInput = ByteBuffer.wrap((byte[])noDictAndComplexDimension[i]);
+          ByteArrayOutputStream byteArrayOutput = new ByteArrayOutputStream();
+          DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutput);
+          genericDataType.parseComplexValue(byteArrayInput, dataOutputStream,
+              model.getComplexDimensionKeyGenerator());
+          genericDataType.getColumnarDataForComplexType(flatComplexColumnList,
+              ByteBuffer.wrap(byteArrayOutput.toByteArray()));
+          byteArrayOutput.close();
+        } catch (IOException | KeyGenException e) {
+          throw new CarbonDataWriterException("Problem in splitting and writing complex data", e);
+        }
+        noDictTotalComplexChildDepth += flatComplexColumnList.size();
+        // update the complex column data with the flat data
+        noDictAndComplexDimension[i] = flatComplexColumnList;
+      }
+    }
+    return noDictTotalComplexChildDepth;
+  }
+
+  private void canSnappyHandleThisRow(int currentRowSize) {
+    if (currentRowSize > SnappyCompressor.MAX_BYTE_TO_COMPRESS) {
+      throw new RuntimeException(" page size: " + currentRowSize + " exceed snappy size: "
+          + SnappyCompressor.MAX_BYTE_TO_COMPRESS + " Bytes. Snappy cannot compress it ");
+    }
+  }
+
   /**
    * generate the EncodedTablePage from the input rows (one page in case of V3 format)
    */
@@ -419,11 +530,6 @@ public class CarbonFactDataHandlerColumnar implements CarbonFactHandler {
       LOGGER.debug("Number of rows per column page is configured as pageSize = " + pageSize);
     }
     dataRows = new ArrayList<>(this.pageSize);
-
-    if (model.getVarcharDimIdxInNoDict().size() > 0) {
-      varcharColumnSizeInByte = new int[model.getVarcharDimIdxInNoDict().size()];
-    }
-
     int dimSet =
         Integer.parseInt(CarbonCommonConstants.DIMENSION_SPLIT_VALUE_IN_COLUMNAR_DEFAULTVALUE);
     // if at least one dimension is present then initialize column splitter otherwise null
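
    A rough, back-of-the-envelope sketch of the page-cut condition used in needToCutThePage above
    (accumulated no-dictionary bytes plus 4 bytes of offset per buffered row, compared against the
    configured size). The sizes below are assumptions for illustration, not values from this patch:

        // Scala sketch; all numbers are assumed.
        val configuredPageSizeInBytes = 1 * 1024 * 1024   // table_page_size_inmb = '1'
        val avgValueSizeInBytes = 500                      // assumed average string/varchar value size
        // each buffered row also adds 4 bytes of offset overhead (dataRows.size() * 4 in the code)
        val approxRowsPerPage = configuredPageSizeInBytes / (avgValueSizeInBytes + 4)
        println(approxRowsPerPage)                         // ~2080 rows before the page is cut
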
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java b/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java
index 9d8202e..e66e233 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/CarbonFactDataHandlerModel.java
@@ -33,7 +33,6 @@ import org.apache.carbondata.core.localdictionary.generator.LocalDictionaryGener
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.metadata.CarbonTableIdentifier;
 import org.apache.carbondata.core.metadata.datatype.DataType;
-import org.apache.carbondata.core.metadata.datatype.DataTypes;
 import org.apache.carbondata.core.metadata.encoder.Encoding;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonColumn;
@@ -99,7 +98,7 @@ public class CarbonFactDataHandlerModel {
   private int[] dimLens;
 
   /**
-   * total number of no dictionary dimension in the table
+   * total number of no-dictionary dimensions in the table (excluding complex types)
    */
   private int noDictionaryCount;
   /**
@@ -183,10 +182,14 @@ public class CarbonFactDataHandlerModel {
 
   private int numberOfCores;
 
-  private List<Integer> varcharDimIdxInNoDict;
-
   private String columnCompressor;
 
+  private List<DataType> noDictDataTypesList;
+
+  // Each complex column can have multiple children, so this holds the count of all children.
+  // It helps in knowing how many new pages the complex byte array will be divided into.
+  private int noDictAllComplexColumnDepth;
+
   /**
    * Create the model using @{@link CarbonDataLoadConfiguration}
    */
@@ -230,25 +233,32 @@ public class CarbonFactDataHandlerModel {
     for (int i = 0; i < simpleDimsCount; i++) {
       simpleDimsLen[i] = dimLens[i];
     }
-
-    int noDictionayDimensionIndex = 0;
-    // for dynamic page size in write step if varchar columns exist
-    List<Integer> varcharDimIdxInNoDict = new ArrayList<>();
-    for (DataField dataField : configuration.getDataFields()) {
-      CarbonColumn column = dataField.getColumn();
-      if (!dataField.hasDictionaryEncoding()) {
-        if (!column.isComplex() && column.getDataType() == DataTypes.VARCHAR) {
-          varcharDimIdxInNoDict.add(noDictionayDimensionIndex);
+    //To Set MDKey Index of each primitive type in complex type
+    int surrIndex = simpleDimsCount;
+    Iterator<Map.Entry<String, GenericDataType>> complexMap = CarbonDataProcessorUtil
+        .getComplexTypesMap(configuration.getDataFields(),
+            configuration.getDataLoadProperty(DataLoadProcessorConstants.SERIALIZATION_NULL_FORMAT)
+                .toString()).entrySet().iterator();
+    Map<Integer, GenericDataType> complexIndexMap = new HashMap<>(complexDimensionCount);
+    while (complexMap.hasNext()) {
+      Map.Entry<String, GenericDataType> complexDataType = complexMap.next();
+      complexDataType.getValue().setOutputArrayIndex(0);
+      complexIndexMap.put(simpleDimsCount, complexDataType.getValue());
+      simpleDimsCount++;
+      List<GenericDataType> primitiveTypes = new ArrayList<GenericDataType>();
+      complexDataType.getValue().getAllPrimitiveChildren(primitiveTypes);
+      for (GenericDataType eachPrimitive : primitiveTypes) {
+        if (eachPrimitive.getIsColumnDictionary()) {
+          eachPrimitive.setSurrogateIndex(surrIndex++);
         }
-        noDictionayDimensionIndex++;
       }
     }
-
-    //To Set MDKey Index of each primitive type in complex type
-    Map<Integer, GenericDataType> complexIndexMap = getComplexMap(
-        configuration.getDataLoadProperty(DataLoadProcessorConstants.SERIALIZATION_NULL_FORMAT)
-            .toString(), simpleDimsCount, configuration.getDataFields());
-
+    List<DataType> noDictDataTypesList = new ArrayList<>();
+    for (DataField dataField : configuration.getDataFields()) {
+      if (!dataField.hasDictionaryEncoding() && dataField.getColumn().isDimension()) {
+        noDictDataTypesList.add(dataField.getColumn().getDataType());
+      }
+    }
     CarbonDataFileAttributes carbonDataFileAttributes =
         new CarbonDataFileAttributes(Long.parseLong(configuration.getTaskNo()),
             (Long) configuration.getDataLoadProperty(DataLoadProcessorConstants.FACT_TIME_STAMP));
@@ -264,6 +274,7 @@ public class CarbonFactDataHandlerModel {
     carbonFactDataHandlerModel.setNoDictionaryCount(noDictionaryCount);
     carbonFactDataHandlerModel.setDimensionCount(
         configuration.getDimensionCount() - noDictionaryCount);
+    carbonFactDataHandlerModel.setNoDictDataTypesList(noDictDataTypesList);
     carbonFactDataHandlerModel.setComplexIndexMap(complexIndexMap);
     carbonFactDataHandlerModel.setSegmentProperties(segmentProperties);
     carbonFactDataHandlerModel.setColCardinality(colCardinality);
@@ -301,7 +312,6 @@ public class CarbonFactDataHandlerModel {
     carbonFactDataHandlerModel.dataMapWriterlistener = listener;
     carbonFactDataHandlerModel.writingCoresCount = configuration.getWritingCoresCount();
     carbonFactDataHandlerModel.initNumberOfCores();
-    carbonFactDataHandlerModel.setVarcharDimIdxInNoDict(varcharDimIdxInNoDict);
     return carbonFactDataHandlerModel;
   }
 
@@ -316,31 +326,19 @@ public class CarbonFactDataHandlerModel {
       String[] tempStoreLocation, String carbonDataDirectoryPath) {
 
     // for dynamic page size in write step if varchar columns exist
-    List<Integer> varcharDimIdxInNoDict = new ArrayList<>();
-    List<CarbonDimension> allDimensions = carbonTable.getAllDimensions();
-    int dictDimCount = allDimensions.size() - segmentProperties.getNumberOfNoDictionaryDimension()
-            - segmentProperties.getComplexDimensions().size();
+    List<CarbonDimension> allDimensions = carbonTable.getDimensions();
     CarbonColumn[] noDicAndComplexColumns =
         new CarbonColumn[segmentProperties.getNumberOfNoDictionaryDimension() + segmentProperties
             .getComplexDimensions().size()];
     int noDicAndComp = 0;
-    int invisibleCount = 0;
+    List<DataType> noDictDataTypesList = new ArrayList<>();
     for (CarbonDimension dim : allDimensions) {
-      if (dim.isInvisible()) {
-        invisibleCount++;
-        continue;
-      }
-      if (!dim.isComplex() && !dim.hasEncoding(Encoding.DICTIONARY) &&
-          dim.getDataType() == DataTypes.VARCHAR) {
-        // ordinal is set in CarbonTable.fillDimensionsAndMeasuresForTables()
-        varcharDimIdxInNoDict.add(dim.getOrdinal() - dictDimCount - invisibleCount);
-      }
       if (!dim.hasEncoding(Encoding.DICTIONARY)) {
         noDicAndComplexColumns[noDicAndComp++] =
             new CarbonColumn(dim.getColumnSchema(), dim.getOrdinal(), dim.getSchemaOrdinal());
+        noDictDataTypesList.add(dim.getDataType());
       }
     }
-
     CarbonFactDataHandlerModel carbonFactDataHandlerModel = new CarbonFactDataHandlerModel();
     carbonFactDataHandlerModel.setSchemaUpdatedTimeStamp(carbonTable.getTableLastUpdatedTime());
     carbonFactDataHandlerModel.setDatabaseName(loadModel.getDatabaseName());
@@ -385,6 +383,7 @@ public class CarbonFactDataHandlerModel {
     }
     carbonFactDataHandlerModel.setMeasureDataType(measureDataTypes);
     carbonFactDataHandlerModel.setNoDictAndComplexColumns(noDicAndComplexColumns);
+    carbonFactDataHandlerModel.setNoDictDataTypesList(noDictDataTypesList);
     CarbonUtil.checkAndCreateFolderWithPermission(carbonDataDirectoryPath);
     carbonFactDataHandlerModel.setCarbonDataDirectoryPath(carbonDataDirectoryPath);
     carbonFactDataHandlerModel.setPrimitiveDimLens(segmentProperties.getDimColumnsCardinality());
@@ -410,7 +409,6 @@ public class CarbonFactDataHandlerModel {
     carbonFactDataHandlerModel.initNumberOfCores();
     carbonFactDataHandlerModel
         .setColumnLocalDictGenMap(CarbonUtil.getLocalDictionaryModel(carbonTable));
-    carbonFactDataHandlerModel.setVarcharDimIdxInNoDict(varcharDimIdxInNoDict);
     carbonFactDataHandlerModel.sortScope = carbonTable.getSortScope();
     return carbonFactDataHandlerModel;
   }
@@ -419,11 +417,11 @@ public class CarbonFactDataHandlerModel {
    * This routine takes the Complex Dimension and convert into generic DataType.
    *
    * @param segmentProperties
-   * @param isNullFormat
+   * @param nullFormat
    * @return
    */
   private static Map<Integer, GenericDataType> convertComplexDimensionToComplexIndexMap(
-      SegmentProperties segmentProperties, String isNullFormat) {
+      SegmentProperties segmentProperties, String nullFormat) {
     List<CarbonDimension> complexDimensions = segmentProperties.getComplexDimensions();
     int simpleDimsCount = segmentProperties.getDimensions().size() - segmentProperties
         .getNumberOfNoDictionaryDimension();
@@ -432,14 +430,14 @@ public class CarbonFactDataHandlerModel {
     for (CarbonColumn complexDimension : complexDimensions) {
       dataFields[i++] = new DataField(complexDimension);
     }
-    return getComplexMap(isNullFormat, simpleDimsCount, dataFields);
+    return getComplexMap(nullFormat, simpleDimsCount, dataFields);
   }
 
-  private static Map<Integer, GenericDataType> getComplexMap(String isNullFormat,
+  private static Map<Integer, GenericDataType> getComplexMap(String nullFormat,
       int simpleDimsCount, DataField[] dataFields) {
     int surrIndex = 0;
     Iterator<Map.Entry<String, GenericDataType>> complexMap =
-        CarbonDataProcessorUtil.getComplexTypesMap(dataFields, isNullFormat).entrySet().iterator();
+        CarbonDataProcessorUtil.getComplexTypesMap(dataFields, nullFormat).entrySet().iterator();
     Map<Integer, GenericDataType> complexIndexMap = new HashMap<>(dataFields.length);
     while (complexMap.hasNext()) {
       Map.Entry<String, GenericDataType> complexDataType = complexMap.next();
@@ -756,14 +754,6 @@ public class CarbonFactDataHandlerModel {
     return numberOfCores;
   }
 
-  public void setVarcharDimIdxInNoDict(List<Integer> varcharDimIdxInNoDict) {
-    this.varcharDimIdxInNoDict = varcharDimIdxInNoDict;
-  }
-
-  public List<Integer> getVarcharDimIdxInNoDict() {
-    return varcharDimIdxInNoDict;
-  }
-
   public String getColumnCompressor() {
     return columnCompressor;
   }
@@ -779,5 +769,21 @@ public class CarbonFactDataHandlerModel {
   public void setNoDictAndComplexColumns(CarbonColumn[] noDictAndComplexColumns) {
     this.noDictAndComplexColumns = noDictAndComplexColumns;
   }
+
+  public List<DataType> getNoDictDataTypesList() {
+    return this.noDictDataTypesList;
+  }
+
+  public void setNoDictDataTypesList(List<DataType> noDictDataTypesList) {
+    this.noDictDataTypesList = noDictDataTypesList;
+  }
+
+  public int getNoDictAllComplexColumnDepth() {
+    return noDictAllComplexColumnDepth;
+  }
+
+  public void setNoDictAllComplexColumnDepth(int noDictAllComplexColumnDepth) {
+    this.noDictAllComplexColumnDepth = noDictAllComplexColumnDepth;
+  }
 }
 
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java b/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
index 7cc8932..a201679 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/TablePage.java
@@ -17,8 +17,6 @@
 
 package org.apache.carbondata.processing.store;
 
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -30,7 +28,6 @@ import java.util.Map;
 import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.datastore.ColumnType;
 import org.apache.carbondata.core.datastore.TableSpec;
-import org.apache.carbondata.core.datastore.exception.CarbonDataWriterException;
 import org.apache.carbondata.core.datastore.page.ColumnPage;
 import org.apache.carbondata.core.datastore.page.ComplexColumnPage;
 import org.apache.carbondata.core.datastore.page.EncodedTablePage;
@@ -252,11 +249,11 @@ public class TablePage {
           }
         } else {
           // complex columns
-          addComplexColumn(i - noDictionaryCount, rowId, (byte[]) noDictAndComplex[i]);
+          addComplexColumn(i - noDictionaryCount, rowId,
+              (List<ArrayList<byte[]>>) noDictAndComplex[i]);
         }
       }
     }
-
     // 3. convert measure columns
     Object[] measureColumns = WriteStepRowUtil.getMeasure(row);
     for (int i = 0; i < measurePages.length; i++) {
@@ -278,14 +275,14 @@ public class TablePage {
    *
    * @param index          index of the complexDimensionPage
    * @param rowId          Id of the input row
-   * @param complexColumns byte array the complex columm to be added, extracted of input row
+   * @param encodedComplexColumnar flatten data of complex column
    */
   // TODO: this function should be refactoried, ColumnPage should support complex type encoding
   // directly instead of doing it here
-  private void addComplexColumn(int index, int rowId, byte[] complexColumns) {
+  private void addComplexColumn(int index, int rowId,
+      List<ArrayList<byte[]>> encodedComplexColumnar) {
     GenericDataType complexDataType = complexIndexMap.get(
         index + model.getPrimitiveDimLens().length);
-
     // initialize the page if first row
     if (rowId == 0) {
       List<ComplexColumnInfo> complexColumnInfoList = new ArrayList<>();
@@ -298,30 +295,7 @@ public class TablePage {
         throw new RuntimeException(e);
       }
     }
-
     int depthInComplexColumn = complexDimensionPages[index].getComplexColumnIndex();
-    // this is the result columnar data which will be added to page,
-    // size of this list is the depth of complex column, we will fill it by input data
-    List<ArrayList<byte[]>> encodedComplexColumnar = new ArrayList<>(depthInComplexColumn);
-    for (int k = 0; k < depthInComplexColumn; k++) {
-      encodedComplexColumnar.add(new ArrayList<byte[]>());
-    }
-
-    // apply the complex type data and fill columnsArray
-    try {
-      ByteBuffer byteArrayInput = ByteBuffer.wrap(complexColumns);
-      ByteArrayOutputStream byteArrayOutput = new ByteArrayOutputStream();
-      DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutput);
-      complexDataType.parseComplexValue(byteArrayInput, dataOutputStream,
-          model.getComplexDimensionKeyGenerator());
-      complexDataType.getColumnarDataForComplexType(encodedComplexColumnar,
-          ByteBuffer.wrap(byteArrayOutput.toByteArray()));
-      byteArrayOutput.close();
-    } catch (IOException | KeyGenException e) {
-      throw new CarbonDataWriterException("Problem while bit packing and writing complex datatype",
-          e);
-    }
-
     for (int depth = 0; depth < depthInComplexColumn; depth++) {
       complexDimensionPages[index].putComplexData(depth, encodedComplexColumnar.get(depth));
     }
diff --git a/processing/src/main/java/org/apache/carbondata/processing/store/writer/v3/CarbonFactDataWriterImplV3.java b/processing/src/main/java/org/apache/carbondata/processing/store/writer/v3/CarbonFactDataWriterImplV3.java
index dc2268c..cac0e8b 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/store/writer/v3/CarbonFactDataWriterImplV3.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/store/writer/v3/CarbonFactDataWriterImplV3.java
@@ -331,6 +331,13 @@ public class CarbonFactDataWriterImplV3 extends AbstractFactDataWriter {
         new BlockletInfo3(encodedBlocklet.getBlockletSize(), currentDataChunksOffset,
             currentDataChunksLength, dimensionOffset, measureOffset,
             encodedBlocklet.getNumberOfPages());
+    // Avoid storing the row count as Integer inside EncodedBlocklet,
+    // but store it as int in thrift to support a large number of rows in the future
+    List<Integer> rowList = new ArrayList<>(encodedBlocklet.getRowCountInPage().size());
+    for (int rows : encodedBlocklet.getRowCountInPage()) {
+      rowList.add(rows);
+    }
+    blockletInfo3.setRow_count_in_page(rowList);
     blockletMetadata.add(blockletInfo3);
   }
 
diff --git a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
index cdb610d..cfae2ae 100644
--- a/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
+++ b/store/sdk/src/main/java/org/apache/carbondata/sdk/file/CarbonWriterBuilder.java
@@ -62,6 +62,7 @@ public class CarbonWriterBuilder {
   //initialize with empty array , as no columns should be selected for sorting in NO_SORT
   private String[] sortColumns = new String[0];
   private int blockletSize;
+  private int pageSizeInMb;
   private int blockSize;
   private long timestamp;
   private Map<String, String> options;
@@ -269,6 +270,7 @@ public class CarbonWriterBuilder {
    *                           default value is null.
    * l. inverted_index -- comma separated string columns for which inverted index needs to be
    *                      generated
+   * m. table_page_size_inmb -- [1-1755] MB.
    *
    * @return updated CarbonWriterBuilder
    */
@@ -277,7 +279,7 @@ public class CarbonWriterBuilder {
     Set<String> supportedOptions = new HashSet<>(Arrays
         .asList("table_blocksize", "table_blocklet_size", "local_dictionary_threshold",
             "local_dictionary_enable", "sort_columns", "sort_scope", "long_string_columns",
-            "inverted_index"));
+            "inverted_index","table_page_size_inmb"));
 
     for (String key : options.keySet()) {
       if (!supportedOptions.contains(key.toLowerCase())) {
@@ -317,6 +319,8 @@ public class CarbonWriterBuilder {
           invertedIndexColumns = entry.getValue().split(",");
         }
         this.invertedIndexFor(invertedIndexColumns);
+      } else if (entry.getKey().equalsIgnoreCase("table_page_size_inmb")) {
+        this.withPageSizeInMb(Integer.parseInt(entry.getValue()));
       }
     }
     return this;
@@ -446,6 +450,21 @@ public class CarbonWriterBuilder {
   }
 
   /**
+   * To set the page size (in MB) of the CarbonData file
+   *
+   * @param pageSizeInMb is page size in MB
+   *
+   * @return updated CarbonWriterBuilder
+   */
+  public CarbonWriterBuilder withPageSizeInMb(int pageSizeInMb) {
+    if (pageSizeInMb < 1 || pageSizeInMb > 1755) {
+      throw new IllegalArgumentException("pageSizeInMb must be 1 MB - 1755 MB");
+    }
+    this.pageSizeInMb = pageSizeInMb;
+    return this;
+  }
+
+  /**
    * to build a {@link CarbonWriter}, which accepts row in CSV format
    *
    * @param schema carbon Schema object {org.apache.carbondata.sdk.file.Schema}
@@ -626,6 +645,11 @@ public class CarbonWriterBuilder {
     if (blockletSize > 0) {
       tableSchemaBuilder = tableSchemaBuilder.blockletSize(blockletSize);
     }
+
+    if (pageSizeInMb > 0) {
+      tableSchemaBuilder = tableSchemaBuilder.pageSizeInMb(pageSizeInMb);
+    }
+
     tableSchemaBuilder.enableLocalDictionary(isLocalDictionaryEnabled);
     tableSchemaBuilder.localDictionaryThreshold(localDictionaryThreshold);
     List<String> sortColumnsList = new ArrayList<>();


[carbondata] 03/22: [CARBONDATA-3331] Fix for external table in Show Metacache

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit f0e270667b0be7c7b66d922ddc176ebe4334a695
Author: namanrastogi <na...@gmail.com>
AuthorDate: Tue Mar 26 19:39:26 2019 +0530

    [CARBONDATA-3331] Fix for external table in Show Metacache
    
    Problem: In the SHOW METACACHE command, when an external table is queried, the index size reported for the current database is larger than the size reported for ALL. The external table is created on the store of a table that is also present in the current database.
    
    Bug: The index size for the database was being summed up blindly across all the tables, so some cache entries were counted multiple times.
    
    Solution: Compute the database index size while iterating over the cache, counting an entry only when its cache key starts with the path of one of the database's tables.
    
    This closes #3164
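
    A simplified Scala sketch of this approach (identifiers assumed, not the actual patch): only
    cache entries whose key starts with the path of a table in the current database contribute to
    the database index size.

        def dbIndexSize(cacheEntries: Map[String, Long], tablePaths: Seq[String]): Long =
          cacheEntries.collect {
            case (key, size) if tablePaths.exists(path => key.startsWith(path)) => size
          }.sum

        // e.g. dbIndexSize(Map("/store/db1/t1/index" -> 1024L), Seq("/store/db1/t1")) == 1024L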
---
 .../command/cache/CarbonShowCacheCommand.scala     | 91 ++++++++++++----------
 1 file changed, 52 insertions(+), 39 deletions(-)

diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
index 8461bf3..3b85313 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/cache/CarbonShowCacheCommand.scala
@@ -20,7 +20,6 @@ package org.apache.spark.sql.execution.command.cache
 import scala.collection.mutable
 import scala.collection.JavaConverters._
 
-import org.apache.hadoop.mapred.JobConf
 import org.apache.spark.sql.{CarbonEnv, Row, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
@@ -28,16 +27,13 @@ import org.apache.spark.sql.catalyst.expressions.AttributeReference
 import org.apache.spark.sql.execution.command.{Checker, MetadataCommand}
 import org.apache.spark.sql.types.StringType
 
-import org.apache.carbondata.core.cache.{CacheProvider, CacheType}
+import org.apache.carbondata.common.logging.LogServiceFactory
+import org.apache.carbondata.core.cache.CacheProvider
 import org.apache.carbondata.core.cache.dictionary.AbstractColumnDictionaryInfo
-import org.apache.carbondata.core.constants.CarbonCommonConstants
-import org.apache.carbondata.core.datamap.Segment
 import org.apache.carbondata.core.indexstore.BlockletDataMapIndexWrapper
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable
-import org.apache.carbondata.core.readcommitter.LatestFilesReadCommittedScope
 import org.apache.carbondata.datamap.bloom.BloomCacheKeyValue
 import org.apache.carbondata.events.{OperationContext, OperationListenerBus, ShowTableCacheEvent}
-import org.apache.carbondata.processing.merger.CarbonDataMergerUtil
 import org.apache.carbondata.spark.util.CommonUtil.bytesToDisplaySize
 
 
@@ -45,6 +41,8 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
     internalCall: Boolean = false)
   extends MetadataCommand {
 
+  val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
+
   override def output: Seq[AttributeReference] = {
     if (tableIdentifier.isEmpty) {
       Seq(
@@ -71,38 +69,48 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
         Row("ALL", "ALL", 0L, 0L, 0L),
         Row(currentDatabase, "ALL", 0L, 0L, 0L))
     } else {
-      val carbonTables = CarbonEnv.getInstance(sparkSession).carbonMetaStore
-        .listAllTables(sparkSession).filter {
-        carbonTable =>
-          carbonTable.getDatabaseName.equalsIgnoreCase(currentDatabase) &&
-          isValidTable(carbonTable, sparkSession) &&
-          !carbonTable.isChildDataMap
+      var carbonTables = mutable.ArrayBuffer[CarbonTable]()
+      sparkSession.sessionState.catalog.listTables(currentDatabase).foreach {
+        tableIdent =>
+          try {
+            val carbonTable = CarbonEnv.getCarbonTable(tableIdent)(sparkSession)
+            if (!carbonTable.isChildDataMap) {
+              carbonTables += carbonTable
+            }
+          } catch {
+            case ex: NoSuchTableException =>
+              LOGGER.debug("Ignoring non-carbon table " + tableIdent.table)
+          }
       }
 
       // All tables of current database
-      var (dbIndexSize, dbDatamapSize, dbDictSize) = (0L, 0L, 0L)
-      val tableList: Seq[Row] = carbonTables.map {
+      var (dbDatamapSize, dbDictSize) = (0L, 0L)
+      val tableList = carbonTables.flatMap {
         carbonTable =>
-          val tableResult = getTableCache(sparkSession, carbonTable)
-          var (indexSize, datamapSize) = (tableResult(0).getLong(1), 0L)
-          tableResult.drop(2).foreach {
-            row =>
-              indexSize += row.getLong(1)
-              datamapSize += row.getLong(2)
-          }
-          val dictSize = tableResult(1).getLong(1)
-
-          dbIndexSize += indexSize
-          dbDictSize += dictSize
-          dbDatamapSize += datamapSize
-
-          val tableName = if (!carbonTable.isTransactionalTable) {
-            carbonTable.getTableName + " (external table)"
+          try {
+            val tableResult = getTableCache(sparkSession, carbonTable)
+            var (indexSize, datamapSize) = (tableResult(0).getLong(1), 0L)
+            tableResult.drop(2).foreach {
+              row =>
+                indexSize += row.getLong(1)
+                datamapSize += row.getLong(2)
+            }
+            val dictSize = tableResult(1).getLong(1)
+
+            dbDictSize += dictSize
+            dbDatamapSize += datamapSize
+
+            val tableName = if (!carbonTable.isTransactionalTable) {
+              carbonTable.getTableName + " (external table)"
+            }
+            else {
+              carbonTable.getTableName
+            }
+            Seq((currentDatabase, tableName, indexSize, datamapSize, dictSize))
+          } catch {
+            case ex: UnsupportedOperationException =>
+              Seq.empty
           }
-          else {
-            carbonTable.getTableName
-          }
-          (currentDatabase, tableName, indexSize, datamapSize, dictSize)
       }.collect {
         case (db, table, indexSize, datamapSize, dictSize) if !((indexSize == 0) &&
                                                                 (datamapSize == 0) &&
@@ -110,13 +118,23 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
           Row(db, table, indexSize, datamapSize, dictSize)
       }
 
+      val tablePaths = carbonTables.map {
+        carbonTable =>
+          carbonTable.getTablePath
+      }
+
       // Scan whole cache and fill the entries for All-Database-All-Tables
+      // and Current-Database-All-Tables
       var (allIndexSize, allDatamapSize, allDictSize) = (0L, 0L, 0L)
+      var dbIndexSize = 0L
       cache.getCacheMap.asScala.foreach {
-        case (_, cacheable) =>
+        case (key, cacheable) =>
           cacheable match {
             case _: BlockletDataMapIndexWrapper =>
               allIndexSize += cacheable.getMemorySize
+              if (tablePaths.exists { path => key.startsWith(path) }) {
+                dbIndexSize += cacheable.getMemorySize
+              }
             case _: BloomCacheKeyValue.CacheValue =>
               allDatamapSize += cacheable.getMemorySize
             case _: AbstractColumnDictionaryInfo =>
@@ -217,9 +235,4 @@ case class CarbonShowCacheCommand(tableIdentifier: Option[TableIdentifier],
       }
     }
   }
-
-  def isValidTable(carbonTable: CarbonTable, sparkSession: SparkSession): Boolean = {
-    CarbonEnv.getInstance(sparkSession).carbonMetaStore.tableExists(carbonTable.getTableName,
-      Some(carbonTable.getDatabaseName))(sparkSession)
-  }
 }


[carbondata] 08/22: [CARBONDATA-3348] Support alter SORT_COLUMNS property

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit d1b455f09590b48a4ba3709fa29635a18da1d790
Author: QiangCai <qi...@qq.com>
AuthorDate: Tue Apr 16 20:27:31 2019 +0800

    [CARBONDATA-3348] Support alter SORT_COLUMNS property
    
    Modification
    
    Support altering SORT_COLUMNS:
    alter table <table name> set tblproperties('sort_scope'='<sort scope type>', 'sort_columns'='[c1][,...cn ]')
    Limitation
    
    When a measure becomes a dimension and a query references that column, the task distribution for the query supports only block and blocklet, not merge_small_files or custom.
    
    This closes #3178
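
    For illustration, the new syntax could be issued from a Carbon-enabled Spark session like this
    (a sketch only: the session is assumed to be created via getOrCreateCarbonSession, and the
    table, column and path names are assumptions, not from this patch):

        import org.apache.spark.sql.SparkSession
        import org.apache.spark.sql.CarbonSession._

        val spark = SparkSession.builder()
          .master("local[2]")
          .appName("AlterSortColumnsExample")
          .getOrCreateCarbonSession("/tmp/carbon-store")
        spark.sql("ALTER TABLE sales SET TBLPROPERTIES('sort_scope'='local_sort', 'sort_columns'='country, city')")
        // clearing sort_columns is also allowed
        spark.sql("ALTER TABLE sales SET TBLPROPERTIES('sort_columns'='')")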
---
 .../core/constants/CarbonCommonConstants.java      |   5 +
 .../carbondata/core/datamap/DataMapFilter.java     |  89 ++++
 .../carbondata/core/datamap/TableDataMap.java      |  91 ++--
 .../datamap/dev/expr/DataMapExprWrapperImpl.java   |   3 +-
 .../core/metadata/schema/table/CarbonTable.java    |  20 +
 .../core/metadata/schema/table/TableInfo.java      |  23 +
 .../scan/executor/impl/AbstractQueryExecutor.java  |  62 +--
 .../executor/impl/QueryExecutorProperties.java     |   5 -
 .../core/scan/executor/util/RestructureUtil.java   |  75 ++-
 .../core/scan/model/QueryModelBuilder.java         |   2 +-
 .../scan/executor/util/RestructureUtilTest.java    |  11 +-
 .../carbondata/hadoop/api/CarbonInputFormat.java   |  29 +-
 .../test/resources/sort_columns/alldatatype1.csv   |  13 +
 .../test/resources/sort_columns/alldatatype2.csv   |  13 +
 .../TestAlterTableSortColumnsProperty.scala        | 541 +++++++++++++++++++++
 .../carbondata/spark/rdd/CarbonScanRDD.scala       |  10 +-
 .../apache/carbondata/spark/util/CommonUtil.scala  |  80 ++-
 .../spark/sql/catalyst/CarbonDDLSqlParser.scala    |  31 +-
 .../org/apache/spark/util/AlterTableUtil.scala     | 126 ++++-
 19 files changed, 1039 insertions(+), 190 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
index c9efc34..608b5fb 100644
--- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
+++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java
@@ -478,6 +478,11 @@ public final class CarbonCommonConstants {
    */
   public static final String CACHE_LEVEL_DEFAULT_VALUE = "BLOCK";
 
+  /**
+   * column level property: the measure is changed to the dimension
+   */
+  public static final String COLUMN_DRIFT = "column_drift";
+
   //////////////////////////////////////////////////////////////////////////////////////////
   // Data loading parameter start here
   //////////////////////////////////////////////////////////////////////////////////////////
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/DataMapFilter.java b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapFilter.java
new file mode 100644
index 0000000..c20d0d5
--- /dev/null
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/DataMapFilter.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.core.datamap;
+
+import java.io.Serializable;
+
+import org.apache.carbondata.core.datastore.block.SegmentProperties;
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
+import org.apache.carbondata.core.scan.executor.util.RestructureUtil;
+import org.apache.carbondata.core.scan.expression.Expression;
+import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
+
+/**
+ * the filter of DataMap
+ */
+public class DataMapFilter implements Serializable {
+
+  private CarbonTable table;
+
+  private Expression expression;
+
+  private FilterResolverIntf resolver;
+
+  public DataMapFilter(CarbonTable table, Expression expression) {
+    this.table = table;
+    this.expression = expression;
+    resolve();
+  }
+
+  public DataMapFilter(FilterResolverIntf resolver) {
+    this.resolver = resolver;
+  }
+
+  private void resolve() {
+    if (expression != null) {
+      table.processFilterExpression(expression, null, null);
+      resolver = CarbonTable.resolveFilter(expression, table.getAbsoluteTableIdentifier());
+    }
+  }
+
+  public Expression getExpression() {
+    return expression;
+  }
+
+  public void setExpression(Expression expression) {
+    this.expression = expression;
+  }
+
+  public FilterResolverIntf getResolver() {
+    return resolver;
+  }
+
+  public void setResolver(FilterResolverIntf resolver) {
+    this.resolver = resolver;
+  }
+
+  public boolean isEmpty() {
+    return resolver == null;
+  }
+
+  public boolean isResolvedOnSegment(SegmentProperties segmentProperties) {
+    if (expression == null || table == null) {
+      return true;
+    }
+    if (!table.isTransactionalTable()) {
+      return false;
+    }
+    if (table.hasColumnDrift() && RestructureUtil
+        .hasColumnDriftOnSegment(table, segmentProperties)) {
+      return false;
+    }
+    return true;
+  }
+}
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java b/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java
index f9020bd..4375abb 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/TableDataMap.java
@@ -47,7 +47,6 @@ import org.apache.carbondata.core.indexstore.SegmentPropertiesFetcher;
 import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.DataMapSchema;
-import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.util.CarbonProperties;
 import org.apache.carbondata.events.Event;
@@ -100,38 +99,6 @@ public final class TableDataMap extends OperationEventListener {
     return blockletDetailsFetcher;
   }
 
-
-  /**
-   * Pass the valid segments and prune the datamap using filter expression
-   *
-   * @param segments
-   * @param filterExp
-   * @return
-   */
-  public List<ExtendedBlocklet> prune(List<Segment> segments, Expression filterExp,
-      List<PartitionSpec> partitions) throws IOException {
-    List<ExtendedBlocklet> blocklets = new ArrayList<>();
-    SegmentProperties segmentProperties;
-    Map<Segment, List<DataMap>> dataMaps = dataMapFactory.getDataMaps(segments);
-    for (Segment segment : segments) {
-      List<Blocklet> pruneBlocklets = new ArrayList<>();
-      // if filter is not passed then return all the blocklets
-      if (filterExp == null) {
-        pruneBlocklets = blockletDetailsFetcher.getAllBlocklets(segment, partitions);
-      } else {
-        segmentProperties = segmentPropertiesFetcher.getSegmentProperties(segment);
-        for (DataMap dataMap : dataMaps.get(segment)) {
-          pruneBlocklets.addAll(dataMap
-              .prune(filterExp, segmentProperties, partitions, table));
-        }
-      }
-      blocklets.addAll(addSegmentId(
-          blockletDetailsFetcher.getExtendedBlocklets(pruneBlocklets, segment),
-          segment));
-    }
-    return blocklets;
-  }
-
   public CarbonTable getTable() {
     return table;
   }
@@ -140,10 +107,10 @@ public final class TableDataMap extends OperationEventListener {
    * Pass the valid segments and prune the datamap using filter expression
    *
    * @param segments
-   * @param filterExp
+   * @param filter
    * @return
    */
-  public List<ExtendedBlocklet> prune(List<Segment> segments, final FilterResolverIntf filterExp,
+  public List<ExtendedBlocklet> prune(List<Segment> segments, final DataMapFilter filter,
       final List<PartitionSpec> partitions) throws IOException {
     final List<ExtendedBlocklet> blocklets = new ArrayList<>();
     final Map<Segment, List<DataMap>> dataMaps = dataMapFactory.getDataMaps(segments);
@@ -164,15 +131,15 @@ public final class TableDataMap extends OperationEventListener {
       // As 0.1 million files block pruning can take only 1 second.
       // Doing multi-thread for smaller values is not recommended as
       // driver should have minimum threads opened to support multiple concurrent queries.
-      if (filterExp == null) {
+      if (filter.isEmpty()) {
         // if filter is not passed, then return all the blocklets.
         return pruneWithoutFilter(segments, partitions, blocklets);
       }
-      return pruneWithFilter(segments, filterExp, partitions, blocklets, dataMaps);
+      return pruneWithFilter(segments, filter, partitions, blocklets, dataMaps);
     }
     // handle by multi-thread
-    List<ExtendedBlocklet> extendedBlocklets =
-        pruneMultiThread(segments, filterExp, partitions, blocklets, dataMaps, totalFiles);
+    List<ExtendedBlocklet> extendedBlocklets = pruneMultiThread(
+        segments, filter, partitions, blocklets, dataMaps, totalFiles);
     return extendedBlocklets;
   }
 
@@ -187,14 +154,22 @@ public final class TableDataMap extends OperationEventListener {
     return blocklets;
   }
 
-  private List<ExtendedBlocklet> pruneWithFilter(List<Segment> segments,
-      FilterResolverIntf filterExp, List<PartitionSpec> partitions,
-      List<ExtendedBlocklet> blocklets, Map<Segment, List<DataMap>> dataMaps) throws IOException {
+  private List<ExtendedBlocklet> pruneWithFilter(List<Segment> segments, DataMapFilter filter,
+      List<PartitionSpec> partitions, List<ExtendedBlocklet> blocklets,
+      Map<Segment, List<DataMap>> dataMaps) throws IOException {
     for (Segment segment : segments) {
       List<Blocklet> pruneBlocklets = new ArrayList<>();
       SegmentProperties segmentProperties = segmentPropertiesFetcher.getSegmentProperties(segment);
-      for (DataMap dataMap : dataMaps.get(segment)) {
-        pruneBlocklets.addAll(dataMap.prune(filterExp, segmentProperties, partitions));
+      if (filter.isResolvedOnSegment(segmentProperties)) {
+        for (DataMap dataMap : dataMaps.get(segment)) {
+          pruneBlocklets.addAll(
+              dataMap.prune(filter.getResolver(), segmentProperties, partitions));
+        }
+      } else {
+        for (DataMap dataMap : dataMaps.get(segment)) {
+          pruneBlocklets.addAll(
+              dataMap.prune(filter.getExpression(), segmentProperties, partitions, table));
+        }
       }
       blocklets.addAll(
           addSegmentId(blockletDetailsFetcher.getExtendedBlocklets(pruneBlocklets, segment),
@@ -204,7 +179,7 @@ public final class TableDataMap extends OperationEventListener {
   }
 
   private List<ExtendedBlocklet> pruneMultiThread(List<Segment> segments,
-      final FilterResolverIntf filterExp, final List<PartitionSpec> partitions,
+      final DataMapFilter filter, final List<PartitionSpec> partitions,
       List<ExtendedBlocklet> blocklets, final Map<Segment, List<DataMap>> dataMaps,
       int totalFiles) {
     /*
@@ -295,14 +270,24 @@ public final class TableDataMap extends OperationEventListener {
             SegmentProperties segmentProperties =
                 segmentPropertiesFetcher.getSegmentPropertiesFromDataMap(dataMapList.get(0));
             Segment segment = segmentDataMapGroup.getSegment();
-            for (int i = segmentDataMapGroup.getFromIndex();
-                 i <= segmentDataMapGroup.getToIndex(); i++) {
-              List<Blocklet> dmPruneBlocklets  = dataMapList.get(i).prune(filterExp,
-                  segmentProperties,
-                  partitions);
-              pruneBlocklets.addAll(addSegmentId(blockletDetailsFetcher
-                      .getExtendedBlocklets(dmPruneBlocklets, segment),
-                  segment));
+            if (filter.isResolvedOnSegment(segmentProperties)) {
+              for (int i = segmentDataMapGroup.getFromIndex();
+                   i <= segmentDataMapGroup.getToIndex(); i++) {
+                List<Blocklet> dmPruneBlocklets = dataMapList.get(i).prune(
+                    filter.getResolver(), segmentProperties, partitions);
+                pruneBlocklets.addAll(addSegmentId(
+                    blockletDetailsFetcher.getExtendedBlocklets(dmPruneBlocklets, segment),
+                    segment));
+              }
+            } else {
+              for (int i = segmentDataMapGroup.getFromIndex();
+                   i <= segmentDataMapGroup.getToIndex(); i++) {
+                List<Blocklet> dmPruneBlocklets = dataMapList.get(i).prune(
+                    filter.getExpression(), segmentProperties, partitions, table);
+                pruneBlocklets.addAll(addSegmentId(
+                    blockletDetailsFetcher.getExtendedBlocklets(dmPruneBlocklets, segment),
+                    segment));
+              }
             }
             synchronized (prunedBlockletMap) {
               List<ExtendedBlocklet> pruneBlockletsExisting =
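
Note: the per-segment branch above is the core of the DataMapFilter change. When the filter can be resolved against a segment's current schema, the pre-resolved filter tree is reused; when the segment was written with an older schema (for example after a SORT_COLUMNS or column-drift change), pruning falls back to the raw expression for that segment. Below is a minimal, self-contained sketch of that dispatch, using simplified stand-in types rather than the actual CarbonData classes; names such as SimpleFilter and SegmentPruner are illustrative only.

    import java.util.Arrays;
    import java.util.List;

    public final class PruneDispatchSketch {

      // Stand-in for SegmentProperties: just the schema "version" the segment was written with.
      static final class SegmentSchema {
        final int schemaVersion;
        SegmentSchema(int schemaVersion) { this.schemaVersion = schemaVersion; }
      }

      // Stand-ins for DataMap.prune(resolver, ...) and DataMap.prune(expression, ..., table).
      interface SegmentPruner {
        List<String> pruneWithResolvedFilter(SegmentSchema schema);
        List<String> pruneWithRawExpression(SegmentSchema schema);
      }

      // Stand-in for DataMapFilter: remembers which schema the filter was resolved against.
      static final class SimpleFilter {
        final int resolvedAgainstVersion;
        SimpleFilter(int resolvedAgainstVersion) { this.resolvedAgainstVersion = resolvedAgainstVersion; }
        boolean isResolvedOnSegment(SegmentSchema schema) {
          return schema.schemaVersion == resolvedAgainstVersion;
        }
      }

      // Same shape as the per-segment loop above: reuse the pre-resolved filter when the
      // segment schema matches, otherwise fall back to re-resolving the raw expression.
      static List<String> prune(SimpleFilter filter, SegmentSchema schema, SegmentPruner pruner) {
        return filter.isResolvedOnSegment(schema)
            ? pruner.pruneWithResolvedFilter(schema)
            : pruner.pruneWithRawExpression(schema);
      }

      public static void main(String[] args) {
        SegmentPruner pruner = new SegmentPruner() {
          public List<String> pruneWithResolvedFilter(SegmentSchema s) { return Arrays.asList("blocklet-0"); }
          public List<String> pruneWithRawExpression(SegmentSchema s) { return Arrays.asList("blocklet-0", "blocklet-1"); }
        };
        SimpleFilter filter = new SimpleFilter(2);
        System.out.println(prune(filter, new SegmentSchema(2), pruner)); // resolved path
        System.out.println(prune(filter, new SegmentSchema(1), pruner)); // expression fallback
      }
    }
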
diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapperImpl.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapperImpl.java
index 4643b47..bb2662b 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapperImpl.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/expr/DataMapExprWrapperImpl.java
@@ -22,6 +22,7 @@ import java.util.List;
 import java.util.UUID;
 
 import org.apache.carbondata.core.datamap.DataMapDistributable;
+import org.apache.carbondata.core.datamap.DataMapFilter;
 import org.apache.carbondata.core.datamap.DataMapLevel;
 import org.apache.carbondata.core.datamap.Segment;
 import org.apache.carbondata.core.datamap.TableDataMap;
@@ -50,7 +51,7 @@ public class DataMapExprWrapperImpl implements DataMapExprWrapper {
   @Override
   public List<ExtendedBlocklet> prune(List<Segment> segments, List<PartitionSpec> partitionsToPrune)
       throws IOException {
-    return dataMap.prune(segments, expression, partitionsToPrune);
+    return dataMap.prune(segments, new DataMapFilter(expression), partitionsToPrune);
   }
 
   public List<ExtendedBlocklet> prune(DataMapDistributable distributable,
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
index 3623147..54ea772 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java
@@ -121,6 +121,11 @@ public class CarbonTable implements Serializable {
   private List<CarbonMeasure> allMeasures;
 
   /**
+   * list of column drift
+   */
+  private List<CarbonDimension> columnDrift;
+
+  /**
    * table bucket map.
    */
   private Map<String, BucketingInfo> tableBucketMap;
@@ -189,6 +194,7 @@ public class CarbonTable implements Serializable {
     this.tablePartitionMap = new HashMap<>();
     this.createOrderColumn = new HashMap<String, List<CarbonColumn>>();
     this.tablePrimitiveDimensionsMap = new HashMap<String, List<CarbonDimension>>();
+    this.columnDrift = new ArrayList<CarbonDimension>();
   }
 
   /**
@@ -898,6 +904,12 @@ public class CarbonTable implements Serializable {
     for (CarbonDimension dimension : allDimensions) {
       if (!dimension.isInvisible()) {
         visibleDimensions.add(dimension);
+        Map<String, String> columnProperties = dimension.getColumnProperties();
+        if (columnProperties != null) {
+          if (columnProperties.get(CarbonCommonConstants.COLUMN_DRIFT) != null) {
+            columnDrift.add(dimension);
+          }
+        }
       }
     }
     tableDimensionsMap.put(tableName, visibleDimensions);
@@ -912,6 +924,14 @@ public class CarbonTable implements Serializable {
     return allMeasures;
   }
 
+  public List<CarbonDimension> getColumnDrift() {
+    return columnDrift;
+  }
+
+  public boolean hasColumnDrift() {
+    return tableInfo.hasColumnDrift();
+  }
+
   /**
    * This method will all the visible allMeasures
    *
diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java
index daba29b..ec9d311 100644
--- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java
+++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/TableInfo.java
@@ -91,6 +91,8 @@ public class TableInfo implements Serializable, Writable {
    */
   private boolean isTransactionalTable = true;
 
+  private boolean hasColumnDrift = false;
+
   // this identifier is a lazy field which will be created when it is used first time
   private AbsoluteTableIdentifier identifier;
 
@@ -122,6 +124,7 @@ public class TableInfo implements Serializable, Writable {
     this.factTable = factTable;
     updateParentRelationIdentifier();
     updateIsSchemaModified();
+    updateHasColumnDrift();
   }
 
   private void updateIsSchemaModified() {
@@ -276,6 +279,7 @@ public class TableInfo implements Serializable, Writable {
     out.writeLong(lastUpdatedTime);
     out.writeUTF(getOrCreateAbsoluteTableIdentifier().getTablePath());
     out.writeBoolean(isTransactionalTable);
+    out.writeBoolean(hasColumnDrift);
     boolean isChildSchemaExists =
         null != dataMapSchemaList && dataMapSchemaList.size() > 0;
     out.writeBoolean(isChildSchemaExists);
@@ -305,6 +309,7 @@ public class TableInfo implements Serializable, Writable {
     this.lastUpdatedTime = in.readLong();
     this.tablePath = in.readUTF();
     this.isTransactionalTable = in.readBoolean();
+    this.hasColumnDrift = in.readBoolean();
     boolean isChildSchemaExists = in.readBoolean();
     this.dataMapSchemaList = new ArrayList<>();
     if (isChildSchemaExists) {
@@ -371,4 +376,22 @@ public class TableInfo implements Serializable, Writable {
     return isSchemaModified;
   }
 
+  private void updateHasColumnDrift() {
+    this.hasColumnDrift = false;
+    for (ColumnSchema columnSchema : factTable.getListOfColumns()) {
+      if (columnSchema.isDimensionColumn() && !columnSchema.isInvisible()) {
+        Map<String, String> columnProperties = columnSchema.getColumnProperties();
+        if (columnProperties != null) {
+          if (columnProperties.get(CarbonCommonConstants.COLUMN_DRIFT) != null) {
+            this.hasColumnDrift = true;
+            break;
+          }
+        }
+      }
+    }
+  }
+
+  public boolean hasColumnDrift() {
+    return hasColumnDrift;
+  }
 }
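
Note: the new hasColumnDrift flag is computed once from the fact table's column schemas and then serialized with TableInfo, so drivers and executors see the same value. Any visible dimension carrying the column-drift property marks the table as drifted. A small sketch of that detection rule follows, using a plain class in place of ColumnSchema; the property key string "columnDrift" is an assumption here, the patch reads it from CarbonCommonConstants.COLUMN_DRIFT.

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public final class ColumnDriftSketch {

      // Stand-in for ColumnSchema: a name, flags, and its column-property map.
      static final class Column {
        final String name;
        final boolean dimension;
        final boolean invisible;
        final Map<String, String> properties;
        Column(String name, boolean dimension, boolean invisible, Map<String, String> properties) {
          this.name = name;
          this.dimension = dimension;
          this.invisible = invisible;
          this.properties = properties;
        }
      }

      // Same rule as TableInfo.updateHasColumnDrift(): any visible dimension that carries the
      // column-drift property marks the whole table as drifted.
      static boolean hasColumnDrift(List<Column> columns) {
        for (Column c : columns) {
          if (c.dimension && !c.invisible && c.properties != null
              && c.properties.get("columnDrift") != null) {
            return true;
          }
        }
        return false;
      }

      public static void main(String[] args) {
        Map<String, String> drift = new HashMap<>();
        drift.put("columnDrift", "true");
        List<Column> columns = Arrays.asList(
            new Column("name", true, false, Collections.<String, String>emptyMap()),
            new Column("age", true, false, drift),                       // drifted dimension
            new Column("salary", false, false, Collections.<String, String>emptyMap()));
        System.out.println(hasColumnDrift(columns));                     // true
      }
    }
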
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
index b15bdb5..f06f5c3 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/AbstractQueryExecutor.java
@@ -46,7 +46,6 @@ import org.apache.carbondata.core.keygenerator.KeyGenException;
 import org.apache.carbondata.core.memory.UnsafeMemoryManager;
 import org.apache.carbondata.core.metadata.blocklet.BlockletInfo;
 import org.apache.carbondata.core.metadata.blocklet.DataFileFooter;
-import org.apache.carbondata.core.metadata.datatype.DataType;
 import org.apache.carbondata.core.metadata.encoder.Encoding;
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
@@ -139,20 +138,7 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
     queryStatistic
         .addStatistics(QueryStatisticsConstants.LOAD_BLOCKS_EXECUTOR, System.currentTimeMillis());
     queryProperties.queryStatisticsRecorder.recordStatistics(queryStatistic);
-    // calculating the total number of aggregated columns
-    int measureCount = queryModel.getProjectionMeasures().size();
-
-    int currentIndex = 0;
-    DataType[] dataTypes = new DataType[measureCount];
-
-    for (ProjectionMeasure carbonMeasure : queryModel.getProjectionMeasures()) {
-      // adding the data type and aggregation type of all the measure this
-      // can be used
-      // to select the aggregator
-      dataTypes[currentIndex] = carbonMeasure.getMeasure().getDataType();
-      currentIndex++;
-    }
-    queryProperties.measureDataTypes = dataTypes;
+
     // as aggregation will be executed in following order
     // 1.aggregate dimension expression
     // 2. expression
@@ -461,14 +447,15 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
       throws QueryExecutionException {
     BlockExecutionInfo blockExecutionInfo = new BlockExecutionInfo();
     SegmentProperties segmentProperties = blockIndex.getSegmentProperties();
-    List<CarbonDimension> tableBlockDimensions = segmentProperties.getDimensions();
-
+    // set actual query dimensions and measures. It may differ in case of restructure scenarios
+    RestructureUtil.actualProjectionOfSegment(blockExecutionInfo, queryModel, segmentProperties);
     // below is to get only those dimension in query which is present in the
     // table block
     List<ProjectionDimension> projectDimensions = RestructureUtil
         .createDimensionInfoAndGetCurrentBlockQueryDimension(blockExecutionInfo,
-            queryModel.getProjectionDimensions(), tableBlockDimensions,
-            segmentProperties.getComplexDimensions(), queryModel.getProjectionMeasures().size(),
+            blockExecutionInfo.getActualQueryDimensions(), segmentProperties.getDimensions(),
+            segmentProperties.getComplexDimensions(),
+            blockExecutionInfo.getActualQueryMeasures().length,
             queryModel.getTable().getTableInfo().isTransactionalTable());
     boolean isStandardTable = CarbonUtil.isStandardCarbonTable(queryModel.getTable());
     String blockId = CarbonUtil
@@ -486,10 +473,12 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
     blockExecutionInfo.setProjectionDimensions(projectDimensions
         .toArray(new ProjectionDimension[projectDimensions.size()]));
     // get measures present in the current block
-    List<ProjectionMeasure> currentBlockQueryMeasures =
-        getCurrentBlockQueryMeasures(blockExecutionInfo, queryModel, blockIndex);
+    List<ProjectionMeasure> projectionMeasures = RestructureUtil
+        .createMeasureInfoAndGetCurrentBlockQueryMeasures(blockExecutionInfo,
+            blockExecutionInfo.getActualQueryMeasures(), segmentProperties.getMeasures(),
+            queryModel.getTable().getTableInfo().isTransactionalTable());
     blockExecutionInfo.setProjectionMeasures(
-        currentBlockQueryMeasures.toArray(new ProjectionMeasure[currentBlockQueryMeasures.size()]));
+        projectionMeasures.toArray(new ProjectionMeasure[projectionMeasures.size()]));
     blockExecutionInfo.setDataBlock(blockIndex);
     // setting whether raw record query or not
     blockExecutionInfo.setRawRecordDetailQuery(queryModel.isForcedDetailRawQuery());
@@ -581,7 +570,7 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
     // list of measures to be projected
     List<Integer> allProjectionListMeasureIndexes = new ArrayList<>();
     int[] measureChunkIndexes = QueryUtil.getMeasureChunkIndexes(
-        currentBlockQueryMeasures, expressionMeasures,
+        projectionMeasures, expressionMeasures,
         segmentProperties.getMeasuresOrdinalToChunkMapping(), filterMeasures,
         allProjectionListMeasureIndexes);
     reusableBufferSize = Math.max(segmentProperties.getMeasuresOrdinalToChunkMapping().size(),
@@ -637,11 +626,6 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
     blockExecutionInfo.setComplexColumnParentBlockIndexes(
         getComplexDimensionParentBlockIndexes(projectDimensions));
     blockExecutionInfo.setVectorBatchCollector(queryModel.isVectorReader());
-    // set actual query dimensions and measures. It may differ in case of restructure scenarios
-    blockExecutionInfo.setActualQueryDimensions(queryModel.getProjectionDimensions()
-        .toArray(new ProjectionDimension[queryModel.getProjectionDimensions().size()]));
-    blockExecutionInfo.setActualQueryMeasures(queryModel.getProjectionMeasures()
-        .toArray(new ProjectionMeasure[queryModel.getProjectionMeasures().size()]));
     DataTypeUtil.setDataTypeConverter(queryModel.getConverter());
     blockExecutionInfo.setRequiredRowId(queryModel.isRequiredRowId());
     return blockExecutionInfo;
@@ -691,28 +675,6 @@ public abstract class AbstractQueryExecutor<E> implements QueryExecutor<E> {
     return 0;
   }
 
-  /**
-   * Below method will be used to get the measures present in the current block
-   *
-   * @param executionInfo
-   * @param queryModel         query model
-   * @param tableBlock         table block
-   * @return
-   */
-  private List<ProjectionMeasure> getCurrentBlockQueryMeasures(BlockExecutionInfo executionInfo,
-      QueryModel queryModel, AbstractIndex tableBlock) throws QueryExecutionException {
-    // getting the measure info which will be used while filling up measure data
-    List<ProjectionMeasure> updatedQueryMeasures = RestructureUtil
-        .createMeasureInfoAndGetCurrentBlockQueryMeasures(executionInfo,
-            queryModel.getProjectionMeasures(),
-            tableBlock.getSegmentProperties().getMeasures(),
-            queryModel.getTable().getTableInfo().isTransactionalTable());
-    // setting the measure aggregator for all aggregation function selected
-    // in query
-    executionInfo.getMeasureInfo().setMeasureDataTypes(queryProperties.measureDataTypes);
-    return updatedQueryMeasures;
-  }
-
   private int[] getComplexDimensionParentBlockIndexes(List<ProjectionDimension> queryDimensions) {
     List<Integer> parentBlockIndexList = new ArrayList<Integer>();
     for (ProjectionDimension queryDimension : queryDimensions) {
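
Note: with the query-level measureDataTypes array removed from QueryExecutorProperties, the per-block path now records each projected measure's data type while matching measures against the block, alongside the existence flags and default values. A compact sketch of that pairing is shown below; the types and method names here are simplified stand-ins, not the actual MeasureInfo API.

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public final class MeasureInfoSketch {

      static final class Result {
        final boolean[] exists;
        final String[] dataTypes;   // stand-in for DataType[]
        Result(boolean[] exists, String[] dataTypes) { this.exists = exists; this.dataTypes = dataTypes; }
      }

      // Mirrors createMeasureInfoAndGetCurrentBlockQueryMeasures: walk the projected measures,
      // and for each one record whether the block contains it and which data type applies
      // (the block's type if present, the table-level type otherwise).
      static Result resolve(List<String> queryMeasures, Map<String, String> blockMeasureTypes,
          Map<String, String> tableMeasureTypes) {
        boolean[] exists = new boolean[queryMeasures.size()];
        String[] types = new String[queryMeasures.size()];
        for (int i = 0; i < queryMeasures.size(); i++) {
          String name = queryMeasures.get(i);
          if (blockMeasureTypes.containsKey(name)) {
            exists[i] = true;
            types[i] = blockMeasureTypes.get(name);
          } else {
            // restructured block: fall back to the table-level type and a default value
            types[i] = tableMeasureTypes.get(name);
          }
        }
        return new Result(exists, types);
      }

      public static void main(String[] args) {
        Map<String, String> block = new HashMap<>();
        block.put("salary", "DOUBLE");
        Map<String, String> table = new HashMap<>();
        table.put("salary", "DOUBLE");
        table.put("bonus", "DECIMAL");
        Result r = resolve(Arrays.asList("salary", "bonus"), block, table);
        System.out.println(Arrays.toString(r.exists));     // [true, false]
        System.out.println(Arrays.toString(r.dataTypes));  // [DOUBLE, DECIMAL]
      }
    }
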
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/QueryExecutorProperties.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/QueryExecutorProperties.java
index 4b59aa7..22939e1 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/QueryExecutorProperties.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/impl/QueryExecutorProperties.java
@@ -23,7 +23,6 @@ import java.util.concurrent.ExecutorService;
 
 import org.apache.carbondata.core.cache.dictionary.Dictionary;
 import org.apache.carbondata.core.datastore.block.AbstractIndex;
-import org.apache.carbondata.core.metadata.datatype.DataType;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonDimension;
 import org.apache.carbondata.core.metadata.schema.table.column.CarbonMeasure;
 import org.apache.carbondata.core.stats.QueryStatisticsRecorder;
@@ -40,10 +39,6 @@ public class QueryExecutorProperties {
   public Map<String, Dictionary> columnToDictionaryMapping;
 
   /**
-   * Measure datatypes
-   */
-  public DataType[] measureDataTypes;
-  /**
    * all the complex dimension which is on filter
    */
   public Set<CarbonDimension> complexFilterDimension;
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
index e823eb2..11b7372 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/executor/util/RestructureUtil.java
@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
+import org.apache.carbondata.core.datastore.block.SegmentProperties;
 import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryGenerator;
 import org.apache.carbondata.core.keygenerator.directdictionary.DirectDictionaryKeyGeneratorFactory;
 import org.apache.carbondata.core.metadata.datatype.DataType;
@@ -38,6 +39,7 @@ import org.apache.carbondata.core.scan.executor.infos.DimensionInfo;
 import org.apache.carbondata.core.scan.executor.infos.MeasureInfo;
 import org.apache.carbondata.core.scan.model.ProjectionDimension;
 import org.apache.carbondata.core.scan.model.ProjectionMeasure;
+import org.apache.carbondata.core.scan.model.QueryModel;
 import org.apache.carbondata.core.util.ByteUtil;
 import org.apache.carbondata.core.util.CarbonUtil;
 import org.apache.carbondata.core.util.DataTypeUtil;
@@ -63,16 +65,16 @@ public class RestructureUtil {
    * @return list of query dimension which is present in the table block
    */
   public static List<ProjectionDimension> createDimensionInfoAndGetCurrentBlockQueryDimension(
-      BlockExecutionInfo blockExecutionInfo, List<ProjectionDimension> queryDimensions,
+      BlockExecutionInfo blockExecutionInfo, ProjectionDimension[] queryDimensions,
       List<CarbonDimension> tableBlockDimensions, List<CarbonDimension> tableComplexDimension,
       int measureCount, boolean isTransactionalTable) {
     List<ProjectionDimension> presentDimension =
         new ArrayList<>(CarbonCommonConstants.DEFAULT_COLLECTION_SIZE);
-    boolean[] isDimensionExists = new boolean[queryDimensions.size()];
-    Object[] defaultValues = new Object[queryDimensions.size()];
+    boolean[] isDimensionExists = new boolean[queryDimensions.length];
+    Object[] defaultValues = new Object[queryDimensions.length];
     // create dimension information instance
     DimensionInfo dimensionInfo = new DimensionInfo(isDimensionExists, defaultValues);
-    dimensionInfo.dataType = new DataType[queryDimensions.size() + measureCount];
+    dimensionInfo.dataType = new DataType[queryDimensions.length + measureCount];
     int newDictionaryColumnCount = 0;
     int newNoDictionaryColumnCount = 0;
     // selecting only those dimension which is present in the query
@@ -412,14 +414,15 @@ public class RestructureUtil {
    * @return measures present in the block
    */
   public static List<ProjectionMeasure> createMeasureInfoAndGetCurrentBlockQueryMeasures(
-      BlockExecutionInfo blockExecutionInfo, List<ProjectionMeasure> queryMeasures,
+      BlockExecutionInfo blockExecutionInfo, ProjectionMeasure[] queryMeasures,
       List<CarbonMeasure> currentBlockMeasures, boolean isTransactionalTable) {
     MeasureInfo measureInfo = new MeasureInfo();
-    List<ProjectionMeasure> presentMeasure = new ArrayList<>(queryMeasures.size());
-    int numberOfMeasureInQuery = queryMeasures.size();
+    List<ProjectionMeasure> presentMeasure = new ArrayList<>(queryMeasures.length);
+    int numberOfMeasureInQuery = queryMeasures.length;
     List<Integer> measureOrdinalList = new ArrayList<>(numberOfMeasureInQuery);
     Object[] defaultValues = new Object[numberOfMeasureInQuery];
     boolean[] measureExistsInCurrentBlock = new boolean[numberOfMeasureInQuery];
+    DataType[] measureDataTypes = new DataType[numberOfMeasureInQuery];
     int index = 0;
     for (ProjectionMeasure queryMeasure : queryMeasures) {
       // if query measure exists in current dimension measures
@@ -437,12 +440,14 @@ public class RestructureUtil {
           presentMeasure.add(currentBlockMeasure);
           measureOrdinalList.add(carbonMeasure.getOrdinal());
           measureExistsInCurrentBlock[index] = true;
+          measureDataTypes[index] = carbonMeasure.getDataType();
           break;
         }
       }
       if (!measureExistsInCurrentBlock[index]) {
         defaultValues[index] = getMeasureDefaultValue(queryMeasure.getMeasure().getColumnSchema(),
             queryMeasure.getMeasure().getDefaultValue());
+        measureDataTypes[index] = queryMeasure.getMeasure().getDataType();
         blockExecutionInfo.setRestructuredBlock(true);
       }
       index++;
@@ -452,7 +457,63 @@ public class RestructureUtil {
     measureInfo.setDefaultValues(defaultValues);
     measureInfo.setMeasureOrdinals(measureOrdinals);
     measureInfo.setMeasureExists(measureExistsInCurrentBlock);
+    measureInfo.setMeasureDataTypes(measureDataTypes);
     blockExecutionInfo.setMeasureInfo(measureInfo);
     return presentMeasure;
   }
+
+  /**
+   * set actual projection of blockExecutionInfo
+   */
+  public static void actualProjectionOfSegment(BlockExecutionInfo blockExecutionInfo,
+      QueryModel queryModel, SegmentProperties segmentProperties) {
+    List<ProjectionDimension> projectionDimensions = queryModel.getProjectionDimensions();
+    List<ProjectionMeasure> projectionMeasures = queryModel.getProjectionMeasures();
+    if (queryModel.getTable().hasColumnDrift()) {
+      List<CarbonMeasure> tableBlockMeasures = segmentProperties.getMeasures();
+      List<ProjectionMeasure> updatedProjectionMeasures =
+          new ArrayList<>(projectionMeasures.size() + tableBlockMeasures.size());
+      updatedProjectionMeasures.addAll(projectionMeasures);
+      List<ProjectionDimension> updatedProjectionDimensions =
+          new ArrayList<>(projectionDimensions.size());
+      for (ProjectionDimension projectionDimension : projectionDimensions) {
+        CarbonMeasure carbonMeasure = null;
+        for (CarbonMeasure tableBlockMeasure : tableBlockMeasures) {
+          if (isColumnMatches(queryModel.getTable().isTransactionalTable(),
+              projectionDimension.getDimension(), tableBlockMeasure)) {
+            carbonMeasure = tableBlockMeasure;
+            break;
+          }
+        }
+        if (carbonMeasure != null) {
+          ProjectionMeasure projectionMeasure = new ProjectionMeasure(carbonMeasure);
+          projectionMeasure.setOrdinal(projectionDimension.getOrdinal());
+          updatedProjectionMeasures.add(projectionMeasure);
+        } else {
+          updatedProjectionDimensions.add(projectionDimension);
+        }
+      }
+      blockExecutionInfo.setActualQueryDimensions(updatedProjectionDimensions
+          .toArray(new ProjectionDimension[updatedProjectionDimensions.size()]));
+      blockExecutionInfo.setActualQueryMeasures(updatedProjectionMeasures
+          .toArray(new ProjectionMeasure[updatedProjectionMeasures.size()]));
+    } else {
+      blockExecutionInfo.setActualQueryDimensions(
+          projectionDimensions.toArray(new ProjectionDimension[projectionDimensions.size()]));
+      blockExecutionInfo.setActualQueryMeasures(
+          projectionMeasures.toArray(new ProjectionMeasure[projectionMeasures.size()]));
+    }
+  }
+
+  public static boolean hasColumnDriftOnSegment(CarbonTable table,
+      SegmentProperties segmentProperties) {
+    for (CarbonDimension queryColumn : table.getColumnDrift()) {
+      for (CarbonMeasure tableColumn : segmentProperties.getMeasures()) {
+        if (isColumnMatches(table.isTransactionalTable(), queryColumn, tableColumn)) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
 }
diff --git a/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java b/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
index 4f934ce..d736805 100644
--- a/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
+++ b/core/src/main/java/org/apache/carbondata/core/scan/model/QueryModelBuilder.java
@@ -312,7 +312,7 @@ public class QueryModelBuilder {
     queryModel.setReadPageByPage(readPageByPage);
     queryModel.setProjection(projection);
 
-    if (table.isTransactionalTable()) {
+    if (table.isTransactionalTable() && !table.hasColumnDrift()) {
       // set the filter to the query model in order to filter blocklet before scan
       boolean[] isFilterDimensions = new boolean[table.getDimensionOrdinalMax()];
       boolean[] isFilterMeasures = new boolean[table.getAllMeasures().size()];
diff --git a/core/src/test/java/org/apache/carbondata/core/scan/executor/util/RestructureUtilTest.java b/core/src/test/java/org/apache/carbondata/core/scan/executor/util/RestructureUtilTest.java
index 7332614..80ec647 100644
--- a/core/src/test/java/org/apache/carbondata/core/scan/executor/util/RestructureUtilTest.java
+++ b/core/src/test/java/org/apache/carbondata/core/scan/executor/util/RestructureUtilTest.java
@@ -86,8 +86,8 @@ public class RestructureUtilTest {
     ProjectionMeasure queryMeasure2 = new ProjectionMeasure(new CarbonMeasure(columnSchema4, 4));
     List<ProjectionMeasure> queryMeasures = Arrays.asList(queryMeasure1, queryMeasure2);
 
-    List<ProjectionDimension> queryDimensions =
-        Arrays.asList(queryDimension1, queryDimension2, queryDimension3);
+    ProjectionDimension[] queryDimensions =
+        new ProjectionDimension[] { queryDimension1, queryDimension2, queryDimension3 };
 
     List<ProjectionDimension> result = null;
     result = RestructureUtil
@@ -124,10 +124,11 @@ public class RestructureUtilTest {
     ProjectionMeasure queryMeasure1 = new ProjectionMeasure(carbonMeasure1);
     ProjectionMeasure queryMeasure2 = new ProjectionMeasure(carbonMeasure2);
     ProjectionMeasure queryMeasure3 = new ProjectionMeasure(carbonMeasure3);
-    List<ProjectionMeasure> queryMeasures = Arrays.asList(queryMeasure1, queryMeasure2, queryMeasure3);
+    ProjectionMeasure[] queryMeasures =
+        new ProjectionMeasure[] { queryMeasure1, queryMeasure2, queryMeasure3 };
     BlockExecutionInfo blockExecutionInfo = new BlockExecutionInfo();
-    RestructureUtil.createMeasureInfoAndGetCurrentBlockQueryMeasures(blockExecutionInfo, queryMeasures,
-        currentBlockMeasures, true);
+    RestructureUtil.createMeasureInfoAndGetCurrentBlockQueryMeasures(blockExecutionInfo,
+        queryMeasures, currentBlockMeasures, true);
     MeasureInfo measureInfo = blockExecutionInfo.getMeasureInfo();
     boolean[] measuresExist = { true, true, false };
     assertThat(measureInfo.getMeasureExists(), is(equalTo(measuresExist)));
diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
index aba0ab7..90532fb 100644
--- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
+++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonInputFormat.java
@@ -34,6 +34,7 @@ import org.apache.carbondata.common.logging.LogServiceFactory;
 import org.apache.carbondata.core.constants.CarbonCommonConstants;
 import org.apache.carbondata.core.constants.CarbonCommonConstantsInternal;
 import org.apache.carbondata.core.datamap.DataMapChooser;
+import org.apache.carbondata.core.datamap.DataMapFilter;
 import org.apache.carbondata.core.datamap.DataMapJob;
 import org.apache.carbondata.core.datamap.DataMapStoreManager;
 import org.apache.carbondata.core.datamap.DataMapUtil;
@@ -54,7 +55,6 @@ import org.apache.carbondata.core.profiler.ExplainCollector;
 import org.apache.carbondata.core.readcommitter.ReadCommittedScope;
 import org.apache.carbondata.core.scan.expression.Expression;
 import org.apache.carbondata.core.scan.filter.FilterUtil;
-import org.apache.carbondata.core.scan.filter.resolver.FilterResolverIntf;
 import org.apache.carbondata.core.scan.model.QueryModel;
 import org.apache.carbondata.core.scan.model.QueryModelBuilder;
 import org.apache.carbondata.core.stats.QueryStatistic;
@@ -468,15 +468,8 @@ m filterExpression
   private List<ExtendedBlocklet> getPrunedBlocklets(JobContext job, CarbonTable carbonTable,
       Expression expression, List<Segment> segmentIds) throws IOException {
     ExplainCollector.addPruningInfo(carbonTable.getTableName());
-    FilterResolverIntf resolver = null;
-    if (expression != null) {
-      carbonTable.processFilterExpression(expression, null, null);
-      resolver = CarbonTable.resolveFilter(expression, carbonTable.getAbsoluteTableIdentifier());
-      ExplainCollector.setFilterStatement(expression.getStatement());
-    } else {
-      ExplainCollector.setFilterStatement("none");
-    }
-
+    final DataMapFilter filter = new DataMapFilter(carbonTable, expression);
+    ExplainCollector.setFilterStatement(expression == null ? "none" : expression.getStatement());
     boolean distributedCG = Boolean.parseBoolean(CarbonProperties.getInstance()
         .getProperty(CarbonCommonConstants.USE_DISTRIBUTED_DATAMAP,
             CarbonCommonConstants.USE_DISTRIBUTED_DATAMAP_DEFAULT));
@@ -487,11 +480,7 @@ m filterExpression
     List<ExtendedBlocklet> prunedBlocklets = null;
     // This is to log the event, so user will know what is happening by seeing logs.
     LOG.info("Started block pruning ...");
-    if (carbonTable.isTransactionalTable()) {
-      prunedBlocklets = defaultDataMap.prune(segmentIds, resolver, partitionsToPrune);
-    } else {
-      prunedBlocklets = defaultDataMap.prune(segmentIds, expression, partitionsToPrune);
-    }
+    prunedBlocklets = defaultDataMap.prune(segmentIds, filter, partitionsToPrune);
 
     if (ExplainCollector.enabled()) {
       ExplainCollector.setDefaultDataMapPruningBlockHit(getBlockCount(prunedBlocklets));
@@ -504,15 +493,15 @@ m filterExpression
     DataMapChooser chooser = new DataMapChooser(getOrCreateCarbonTable(job.getConfiguration()));
 
     // Get the available CG datamaps and prune further.
-    DataMapExprWrapper cgDataMapExprWrapper = chooser.chooseCGDataMap(resolver);
+    DataMapExprWrapper cgDataMapExprWrapper = chooser.chooseCGDataMap(filter.getResolver());
     if (cgDataMapExprWrapper != null) {
       // Prune segments from already pruned blocklets
       pruneSegments(segmentIds, prunedBlocklets);
       List<ExtendedBlocklet> cgPrunedBlocklets;
       // Again prune with CG datamap.
       if (distributedCG && dataMapJob != null) {
-        cgPrunedBlocklets = DataMapUtil.executeDataMapJob(carbonTable,
-            resolver, segmentIds, cgDataMapExprWrapper, dataMapJob, partitionsToPrune);
+        cgPrunedBlocklets = DataMapUtil.executeDataMapJob(carbonTable, filter.getResolver(),
+            segmentIds, cgDataMapExprWrapper, dataMapJob, partitionsToPrune);
       } else {
         cgPrunedBlocklets = cgDataMapExprWrapper.prune(segmentIds, partitionsToPrune);
       }
@@ -529,12 +518,12 @@ m filterExpression
     }
     // Now try to prune with FG DataMap.
     if (isFgDataMapPruningEnable(job.getConfiguration()) && dataMapJob != null) {
-      DataMapExprWrapper fgDataMapExprWrapper = chooser.chooseFGDataMap(resolver);
+      DataMapExprWrapper fgDataMapExprWrapper = chooser.chooseFGDataMap(filter.getResolver());
       if (fgDataMapExprWrapper != null) {
         // Prune segments from already pruned blocklets
         pruneSegments(segmentIds, prunedBlocklets);
         List<ExtendedBlocklet> fgPrunedBlocklets = DataMapUtil.executeDataMapJob(carbonTable,
-            resolver, segmentIds, fgDataMapExprWrapper, dataMapJob, partitionsToPrune);
+            filter.getResolver(), segmentIds, fgDataMapExprWrapper, dataMapJob, partitionsToPrune);
         // note that the 'fgPrunedBlocklets' has extra datamap related info compared with
         // 'prunedBlocklets', so the intersection should keep the elements in 'fgPrunedBlocklets'
         prunedBlocklets = intersectFilteredBlocklets(carbonTable, prunedBlocklets,
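
Note: getPrunedBlocklets now builds one DataMapFilter up front and drives all pruning stages from it, the default blocklet datamap first, then the optional CG datamap, then the FG datamap, each stage narrowing the blocklet set produced by the previous one. A stripped-down sketch of that cascade is shown below; the Stage interface is a hypothetical stand-in, not the CarbonInputFormat or DataMapExprWrapper API.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public final class PruningCascadeSketch {

      // A pruning stage takes the surviving blocklets and returns a (possibly) smaller set.
      interface Stage {
        List<String> prune(List<String> blocklets);
      }

      // Same ordering as getPrunedBlocklets: default datamap, then CG, then FG.
      // CG/FG stages may be absent (null) when no matching datamap is chosen.
      static List<String> prune(List<String> allBlocklets, Stage defaultStage, Stage cg, Stage fg) {
        List<String> pruned = defaultStage.prune(allBlocklets);
        if (cg != null) {
          pruned = cg.prune(pruned);
        }
        if (fg != null) {
          pruned = fg.prune(pruned);
        }
        return pruned;
      }

      public static void main(String[] args) {
        Stage byMinMax = in -> new ArrayList<>(in.subList(0, Math.min(3, in.size())));
        Stage byBloom  = in -> new ArrayList<>(in.subList(0, Math.min(2, in.size())));
        List<String> all = Arrays.asList("b0", "b1", "b2", "b3", "b4");
        System.out.println(prune(all, byMinMax, byBloom, null)); // [b0, b1]
      }
    }
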
diff --git a/integration/spark-common-test/src/test/resources/sort_columns/alldatatype1.csv b/integration/spark-common-test/src/test/resources/sort_columns/alldatatype1.csv
new file mode 100644
index 0000000..1176363
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/sort_columns/alldatatype1.csv
@@ -0,0 +1,13 @@
+smallIntField,intField,bigIntField,floatField,doubleField,decimalField,timestampField,dateField,stringField,varcharField,charField,arrayField,structField
+1,1,2,1.1,12.12,1.123,2017-01-11 00:00:01,2017-01-11,abc1,abcd2,abcde1,a$b$c$1,a$b$1
+1,1,2,2.1,11.12,2.123,2017-01-11 00:00:01,2017-01-11,abc1,abcd2,abcde1,a$b$c$1,a$b$1
+2,1,2,3.1,10.12,3.123,2017-01-11 00:00:01,2017-01-11,abc1,abcd2,abcde2,a$b$c$2,a$b$2
+2,1,2,4.1,9.12,4.123,2017-01-11 00:00:01,2017-01-11,abc2,abcd2,abcde2,a$b$c$2,a$b$2
+2,2,3,5.2,8.12,5.123,2017-02-12 00:00:02,2017-02-12,abc2,abcd1,abcde2,a$b$c$2,a$b$3
+2,2,3,6.2,7.12,6.123,2017-02-12 00:00:02,2017-02-12,abc2,abcd1,abcde2,a$b$c$2,a$b$3
+4,2,3,7.2,6.12,7.123,2017-02-12 00:00:02,2017-02-12,abc2,abcd1,abcde1,a$b$c$4,a$b$1
+4,2,3,8.1,5.12,8.123,2017-02-12 00:00:02,2017-02-12,abc4,abcd1,abcde1,a$b$c$4,a$b$1
+4,4,1,9.1,4.12,9.123,2017-03-13 00:00:04,2017-03-14,abc4,abcd4,abcde4,a$b$c$4,a$b$2
+4,4,1,10.1,3.12,10.123,2017-03-13 00:00:04,2017-03-14,abc4,abcd4,abcde4,a$b$c$4,a$b$2
+1,4,1,11.1,2.12,11.123,2017-03-13 00:00:04,2017-03-14,abc4,abcd4,abcde4,a$b$c$1,a$b$3
+1,4,1,12.1,1.12,12.123,2017-03-13 00:00:04,2017-03-14,abc1,abcd4,abcde4,a$b$c$1,a$b$3
\ No newline at end of file
diff --git a/integration/spark-common-test/src/test/resources/sort_columns/alldatatype2.csv b/integration/spark-common-test/src/test/resources/sort_columns/alldatatype2.csv
new file mode 100644
index 0000000..649bbdc
--- /dev/null
+++ b/integration/spark-common-test/src/test/resources/sort_columns/alldatatype2.csv
@@ -0,0 +1,13 @@
+smallIntField,intField,bigIntField,floatField,doubleField,decimalField,timestampField,dateField,stringField,varcharField,charField,arrayField,structField
+1,1,1,13.2,6.12,7.123,2017-02-12 00:00:02,2017-02-12,abc2,abcd1,abcde1,a$b$c$1,a$b$1
+1,1,1,14.1,5.12,8.123,2017-02-12 00:00:02,2017-02-12,abc2,abcd1,abcde1,a$b$c$1,a$b$1
+1,1,2,15.1,4.12,9.123,2017-03-11 00:00:03,2017-03-11,abc2,abcd1,abcde1,a$b$c$2,a$b$2
+1,2,2,16.1,3.12,10.123,2017-03-11 00:00:03,2017-03-11,abc1,abcd1,abcde2,a$b$c$2,a$b$2
+1,2,2,17.1,2.12,11.123,2017-03-12 00:00:03,2017-03-12,abc1,abcd2,abcde2,a$b$c$1,a$b$1
+1,2,1,18.1,1.12,12.123,2017-03-12 00:00:03,2017-03-12,abc1,abcd2,abcde1,a$b$c$1,a$b$1
+2,2,1,19.1,12.12,1.123,2017-01-11 00:00:01,2017-01-11,abc1,abcd2,abcde1,a$b$c$1,a$b$1
+2,2,1,20.1,11.12,2.123,2017-01-11 00:00:01,2017-01-11,abc1,abcd2,abcde1,a$b$c$1,a$b$1
+2,2,2,21.1,10.12,3.123,2017-01-11 00:00:01,2017-01-11,abc1,abcd2,abcde2,a$b$c$2,a$b$2
+2,1,2,22.1,9.12,4.123,2017-01-11 00:00:01,2017-01-11,abc2,abcd2,abcde2,a$b$c$2,a$b$2
+2,1,2,23.2,8.12,5.123,2017-02-12 00:00:02,2017-02-12,abc2,abcd1,abcde2,a$b$c$2,a$b$2
+2,1,1,24.2,7.12,6.123,2017-02-12 00:00:02,2017-02-12,abc2,abcd1,abcde2,a$b$c$2,a$b$2
\ No newline at end of file
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/alterTable/TestAlterTableSortColumnsProperty.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/alterTable/TestAlterTableSortColumnsProperty.scala
new file mode 100644
index 0000000..bf4bae6
--- /dev/null
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/alterTable/TestAlterTableSortColumnsProperty.scala
@@ -0,0 +1,541 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.carbondata.spark.testsuite.alterTable
+
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.test.util.QueryTest
+import org.scalatest.BeforeAndAfterAll
+
+import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.util.CarbonProperties
+
+class TestAlterTableSortColumnsProperty extends QueryTest with BeforeAndAfterAll {
+
+  override def beforeAll(): Unit = {
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
+      "yyyy-MM-dd")
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+      "yyyy-MM-dd HH:mm:ss")
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_QUERY_STATISTICS, "true")
+    dropTable()
+    prepareTable()
+  }
+
+  override def afterAll(): Unit = {
+    dropTable()
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
+      CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
+    CarbonProperties.getInstance().addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
+      CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
+    CarbonProperties.getInstance()
+      .addProperty(CarbonCommonConstants.ENABLE_QUERY_STATISTICS,
+        CarbonCommonConstants.ENABLE_QUERY_STATISTICS_DEFAULT)
+  }
+
+  private def prepareTable(): Unit = {
+    createTable(
+      "alter_sc_base",
+      Map("sort_scope"->"local_sort", "sort_columns"->"stringField")
+    )
+    createTable(
+      "alter_sc_base_complex",
+      Map("sort_scope"->"local_sort", "sort_columns"->"stringField"),
+      true
+    )
+    createTable(
+      "alter_sc_validate",
+      Map("dictionary_include"->"charField"),
+      true
+    )
+    createTable(
+      "alter_sc_iud",
+      Map("dictionary_include"->"charField")
+    )
+    createTable(
+      "alter_sc_iud_complex",
+      Map("dictionary_include"->"charField"),
+      true
+    )
+    createTable(
+      "alter_sc_long_string",
+      Map("LONG_STRING_COLUMNS"->"stringField"),
+      true
+    )
+    createTable(
+      "alter_sc_insert",
+      Map("sort_scope"->"local_sort", "sort_columns"->"stringField")
+    )
+    loadData("alter_sc_insert")
+    createTable(
+      "alter_sc_insert_complex",
+      Map("sort_scope"->"local_sort", "sort_columns"->"stringField"),
+      true
+    )
+    loadData("alter_sc_insert_complex")
+    createTable(
+      "alter_sc_range_column",
+      Map("sort_scope"->"local_sort", "sort_columns"->"stringField", "range_column"->"smallIntField")
+    )
+    createTable(
+      "alter_sc_range_column_base",
+      Map("sort_scope"->"local_sort", "sort_columns"->"stringField")
+    )
+
+    Array("alter_sc_add_column", "alter_sc_add_column_base").foreach { tableName =>
+      sql(
+        s"""create table $tableName(
+           | smallIntField smallInt,
+           | intField int,
+           | bigIntField bigint,
+           | floatField float,
+           | doubleField double,
+           | timestampField timestamp,
+           | dateField date,
+           | stringField string
+           | )
+           | stored as carbondata
+      """.stripMargin)
+    }
+    // decimalField decimal(25, 4),
+
+    createTable(
+      "alter_sc_bloom",
+      Map("sort_scope"->"local_sort", "sort_columns"->"stringField")
+    )
+    createBloomDataMap("alter_sc_bloom", "alter_sc_bloom_dm1")
+    createTable(
+      "alter_sc_bloom_base",
+      Map("sort_scope"->"local_sort", "sort_columns"->"stringField")
+    )
+    createBloomDataMap("alter_sc_bloom_base", "alter_sc_bloom_base_dm1")
+    createTable(
+      "alter_sc_agg",
+      Map("sort_scope"->"local_sort", "sort_columns"->"intField")
+    )
+    createAggDataMap("alter_sc_agg", "alter_sc_agg_dm1")
+    createTable(
+      "alter_sc_agg_base",
+      Map("sort_scope"->"local_sort", "sort_columns"->"intField")
+    )
+    createAggDataMap("alter_sc_agg_base", "alter_sc_agg_base_dm1")
+  }
+
+  private def dropTable(): Unit = {
+    sql(s"drop table if exists alter_sc_base")
+    sql(s"drop table if exists alter_sc_base_complex")
+    sql(s"drop table if exists alter_sc_validate")
+    sql(s"drop table if exists alter_sc_iud")
+    sql(s"drop table if exists alter_sc_iud_complex")
+    sql(s"drop table if exists alter_sc_long_string")
+    sql(s"drop table if exists alter_sc_insert")
+    sql(s"drop table if exists alter_sc_insert_complex")
+    sql(s"drop table if exists alter_sc_range_column")
+    sql(s"drop table if exists alter_sc_range_column_base")
+    sql(s"drop table if exists alter_sc_add_column")
+    sql(s"drop table if exists alter_sc_add_column_base")
+    sql(s"drop table if exists alter_sc_bloom")
+    sql(s"drop table if exists alter_sc_bloom_base")
+    sql(s"drop table if exists alter_sc_agg")
+    sql(s"drop table if exists alter_sc_agg_base")
+  }
+
+  private def createTable(
+      tableName: String,
+      tblProperties: Map[String, String] = Map.empty,
+      withComplex: Boolean = false
+  ): Unit = {
+    val complexSql =
+      if (withComplex) {
+        ", arrayField array<string>, structField struct<col1:string, col2:string, col3:string>"
+      } else {
+        ""
+      }
+    val tblPropertiesSql =
+      if (tblProperties.isEmpty) {
+        ""
+      } else {
+        val propertiesString =
+          tblProperties
+            .map { entry =>
+              s"'${ entry._1 }'='${ entry._2 }'"
+            }
+            .mkString(",")
+        s"tblproperties($propertiesString)"
+      }
+
+    sql(
+      s"""create table $tableName(
+         | smallIntField smallInt,
+         | intField int,
+         | bigIntField bigint,
+         | floatField float,
+         | doubleField double,
+         | timestampField timestamp,
+         | dateField date,
+         | stringField string,
+         | varcharField varchar(10),
+         | charField char(10)
+         | $complexSql
+         | )
+         | stored as carbondata
+         | $tblPropertiesSql
+      """.stripMargin)
+    // decimalField decimal(25, 4),
+  }
+
+  private def createBloomDataMap(tableName: String, dataMapName: String): Unit = {
+    sql(
+      s"""
+         | CREATE DATAMAP $dataMapName ON TABLE $tableName
+         | USING 'bloomfilter'
+         | DMPROPERTIES(
+         | 'INDEX_COLUMNS'='smallIntField,floatField,timestampField,dateField,stringField',
+         | 'BLOOM_SIZE'='6400',
+         | 'BLOOM_FPP'='0.001',
+         | 'BLOOM_COMPRESS'='TRUE')
+       """.stripMargin)
+  }
+
+  private def createAggDataMap(tableName: String, dataMapName: String): Unit = {
+    sql(s"create datamap PreAggSum$dataMapName on table $tableName using 'preaggregate' as " +
+        s"select stringField,sum(intField) as sum from $tableName group by stringField")
+    sql(s"create datamap PreAggAvg$dataMapName on table $tableName using 'preaggregate' as " +
+        s"select stringField,avg(intField) as avg from $tableName group by stringField")
+    sql(s"create datamap PreAggCount$dataMapName on table $tableName using 'preaggregate' as " +
+        s"select stringField,count(intField) as count from $tableName group by stringField")
+    sql(s"create datamap PreAggMin$dataMapName on table $tableName using 'preaggregate' as " +
+        s"select stringField,min(intField) as min from $tableName group by stringField")
+    sql(s"create datamap PreAggMax$dataMapName on table $tableName using 'preaggregate' as " +
+        s"select stringField,max(intField) as max from $tableName group by stringField")
+  }
+
+  private def loadData(tableNames: String*): Unit = {
+    tableNames.foreach { tableName =>
+      sql(
+        s"""load data local inpath '$resourcesPath/sort_columns'
+           | into table $tableName
+           | options ('global_sort_partitions'='2', 'COMPLEX_DELIMITER_LEVEL_1'='$$', 'COMPLEX_DELIMITER_LEVEL_2'=':')
+      """.stripMargin)
+    }
+  }
+
+  private def insertData(insertTable: String, tableNames: String*): Unit = {
+    tableNames.foreach { tableName =>
+      sql(
+        s"""insert into table $tableName select * from $insertTable
+      """.stripMargin)
+    }
+  }
+
+  test("validate sort_scope and sort_columns") {
+    // invalid combination
+    var ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_scope'='local_sort')")
+    }
+    assert(ex.getMessage.contains("Cannot set SORT_SCOPE as local_sort when table has no SORT_COLUMNS"))
+
+    ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_scope'='global_sort')")
+    }
+    assert(ex.getMessage.contains("Cannot set SORT_SCOPE as global_sort when table has no SORT_COLUMNS"))
+
+    ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_scope'='local_sort', 'sort_columns'='')")
+    }
+    assert(ex.getMessage.contains("Cannot set SORT_COLUMNS as empty when setting SORT_SCOPE as local_sort"))
+
+    ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_scope'='global_sort', 'sort_columns'=' ')")
+    }
+    assert(ex.getMessage.contains("Cannot set SORT_COLUMNS as empty when setting SORT_SCOPE as global_sort"))
+
+    sql("alter table alter_sc_validate set tblproperties('sort_columns'='stringField', 'sort_scope'='local_sort')")
+    ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_columns'=' ')")
+    }
+    assert(ex.getMessage.contains("Cannot set SORT_COLUMNS as empty when SORT_SCOPE is LOCAL_SORT"))
+
+    sql("alter table alter_sc_validate set tblproperties('sort_scope'='global_sort')")
+    ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_columns'='')")
+    }
+    assert(ex.getMessage.contains("Cannot set SORT_COLUMNS as empty when SORT_SCOPE is GLOBAL_SORT"))
+
+    // wrong/duplicate sort_columns
+    ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_columns'=' stringField1 , intField')")
+    }
+    assert(ex.getMessage.contains("stringField1 does not exist in table"))
+
+    ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_columns'=' stringField1 , intField, stringField1')")
+    }
+    assert(ex.getMessage.contains("SORT_COLUMNS Either having duplicate columns : stringField1 or it contains illegal argumnet"))
+
+    ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_columns'=' stringField , intField, stringField')")
+    }
+    assert(ex.getMessage.contains("SORT_COLUMNS Either having duplicate columns : stringField or it contains illegal argumnet"))
+
+    // not supported data type
+//    ex = intercept[RuntimeException] {
+//      sql("alter table alter_sc_validate set tblproperties('sort_columns'='decimalField')")
+//    }
+//    assert(ex.getMessage.contains("sort_columns is unsupported for DECIMAL data type column: decimalField"))
+
+    ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_columns'='doubleField')")
+    }
+    assert(ex.getMessage.contains("sort_columns is unsupported for DOUBLE datatype column: doubleField"))
+
+    ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_columns'='arrayField')")
+    }
+    assert(ex.getMessage.contains("sort_columns is unsupported for ARRAY datatype column: arrayField"))
+
+    ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_columns'='structField')")
+    }
+    assert(ex.getMessage.contains("sort_columns is unsupported for STRUCT datatype column: structField"))
+
+    ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_validate set tblproperties('sort_columns'='structField.col1')")
+    }
+    assert(ex.getMessage.contains("sort_columns: structField.col1 does not exist in table"))
+  }
+
+  test("long string column") {
+    val ex = intercept[RuntimeException] {
+      sql("alter table alter_sc_long_string set tblproperties('sort_columns'='intField, stringField')")
+    }
+    assert(ex.getMessage.contains("sort_columns is unsupported for long string datatype column: stringField"))
+  }
+
+  test("describe formatted") {
+    // valid combination
+    sql("alter table alter_sc_validate set tblproperties('sort_scope'='no_sort', 'sort_columns'='')")
+    checkExistence(sql("describe formatted alter_sc_validate"), true, "NO_SORT")
+
+    sql("alter table alter_sc_validate set tblproperties('sort_scope'='no_sort', 'sort_columns'='bigIntField,stringField')")
+    checkExistence(sql("describe formatted alter_sc_validate"), true, "no_sort", "bigIntField, stringField".toLowerCase())
+
+    sql("alter table alter_sc_validate set tblproperties('sort_scope'='local_sort', 'sort_columns'='stringField,bigIntField')")
+    checkExistence(sql("describe formatted alter_sc_validate"), true, "local_sort", "stringField, bigIntField".toLowerCase())
+
+    // global dictionary or direct dictionary
+    sql("alter table alter_sc_validate set tblproperties('sort_scope'='global_sort', 'sort_columns'=' charField , bigIntField , timestampField ')")
+    checkExistence(sql("describe formatted alter_sc_validate"), true, "global_sort", "charField, bigIntField, timestampField".toLowerCase())
+
+    // supported data type
+    sql("alter table alter_sc_validate set tblproperties('sort_scope'='local_sort', 'sort_columns'='smallIntField, intField, bigIntField, timestampField, dateField, stringField, varcharField, charField')")
+    checkExistence(sql("describe formatted alter_sc_validate"), true, "local_sort", "smallIntField, intField, bigIntField, timestampField, dateField, stringField, varcharField, charField".toLowerCase())
+  }
+
+  test("IUD and Query") {
+    testIUDAndQuery("alter_sc_iud", "alter_sc_base", "alter_sc_insert")
+  }
+
+  test("IUD and Query with complex data type") {
+    testIUDAndQuery("alter_sc_iud_complex", "alter_sc_base_complex", "alter_sc_insert_complex")
+  }
+
+  private def testIUDAndQuery(tableName: String, baseTableName: String, insertTableName: String): Unit = {
+    loadData(tableName, baseTableName)
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+
+    // alter table to local_sort with new SORT_COLUMNS
+    sql(s"alter table $tableName set tblproperties('sort_scope'='local_sort', 'sort_columns'='timestampField, intField, stringField')")
+    loadData(tableName, baseTableName)
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+
+    // alter table to revert SORT_COLUMNS
+    sql(s"alter table $tableName set tblproperties('sort_columns'='stringField, intField, timestampField')")
+    loadData(tableName, baseTableName)
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+
+    // alter table to change SORT_COLUMNS
+    sql(s"alter table $tableName set tblproperties('sort_columns'='smallIntField, stringField, intField')")
+    loadData(tableName, baseTableName)
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+
+    // alter table to change SORT_SCOPE and SORT_COLUMNS
+    sql(s"alter table $tableName set tblproperties('sort_scope'='global_sort', 'sort_columns'='charField, bigIntField, smallIntField')")
+    loadData(tableName, baseTableName)
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+
+    // alter table to change SORT_SCOPE
+    sql(s"alter table $tableName set tblproperties('sort_scope'='local_sort', 'sort_columns'='charField, bigIntField, smallIntField')")
+    loadData(tableName, baseTableName)
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+
+    // query
+    checkAnswer(sql(s"select count(*) from $tableName"), sql(s"select count(*) from $baseTableName"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where intField >= 2 order by floatField"), sql(s"select * from $baseTableName where intField >= 2 order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 or intField >= 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 or intField >= 2 order by floatField"))
+
+    // set input segments
+    (0 to 5).foreach { segment =>
+      sql(s"set carbon.input.segments.default.$tableName=$segment").show(100, false)
+      sql(s"set carbon.input.segments.default.$baseTableName=$segment").show(100, false)
+      checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField"))
+    }
+    sql(s"set carbon.input.segments.default.$tableName=*").show(100, false)
+    sql(s"set carbon.input.segments.default.$baseTableName=*").show(100, false)
+
+    // delete
+    sql(s"delete from $tableName where smallIntField = 2")
+    sql(s"delete from $baseTableName where smallIntField = 2")
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+
+    sql(s"delete from $tableName")
+    checkAnswer(sql(s"select count(*) from $tableName"), Seq(Row(0)))
+    sql(s"delete from $baseTableName")
+    checkAnswer(sql(s"select count(*) from $baseTableName"), Seq(Row(0)))
+
+    // insert & load data
+    sql(s"alter table $tableName set tblproperties('sort_scope'='global_sort', 'sort_columns'='timestampField')")
+    insertData(insertTableName, tableName, baseTableName)
+    loadData(tableName, baseTableName)
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+
+    sql(s"alter table $tableName set tblproperties('sort_scope'='no_sort', 'sort_columns'='')")
+    insertData(insertTableName, tableName, baseTableName)
+    loadData(tableName, baseTableName)
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+
+    sql(s"alter table $tableName set tblproperties('sort_scope'='local_sort', 'sort_columns'='charField, bigIntField, smallIntField')")
+    insertData(insertTableName, tableName, baseTableName)
+    loadData(tableName, baseTableName)
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+
+    // update
+    sql(s"update $tableName set (smallIntField, intField, bigIntField, floatField, doubleField) = (smallIntField + 3, intField + 3, bigIntField + 3, floatField + 3, doubleField + 3) where smallIntField = 2").show()
+    sql(s"update $baseTableName set (smallIntField, intField, bigIntField, floatField, doubleField) = (smallIntField + 3, intField + 3, bigIntField + 3, floatField + 3, doubleField + 3) where smallIntField = 2").show()
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+
+    // query
+    checkAnswer(sql(s"select count(*) from $tableName"), sql(s"select count(*) from $baseTableName"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where intField >= 2 order by floatField"), sql(s"select * from $baseTableName where intField >= 2 order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 or intField >= 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 or intField >= 2 order by floatField"))
+
+    // set input segments
+    (6 to 11).foreach { segment =>
+      sql(s"set carbon.input.segments.default.$tableName=$segment").show(100, false)
+      sql(s"set carbon.input.segments.default.$baseTableName=$segment").show(100, false)
+      checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField"))
+    }
+    sql(s"set carbon.input.segments.default.$tableName=*").show(100, false)
+    sql(s"set carbon.input.segments.default.$baseTableName=*").show(100, false)
+
+    // compaction
+    sql(s"show segments for table $tableName").show(100, false)
+    sql(s"show segments for table $baseTableName").show(100, false)
+    sql(s"alter table $tableName compact 'minor'")
+    sql(s"alter table $baseTableName compact 'minor'")
+    sql(s"show segments for table $tableName").show(100, false)
+    sql(s"show segments for table $baseTableName").show(100, false)
+    checkAnswer(sql(s"select count(*) from $tableName"), sql(s"select count(*) from $baseTableName"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where intField >= 2 order by floatField"), sql(s"select * from $baseTableName where intField >= 2 order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 or intField >= 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 or intField >= 2 order by floatField"))
+  }
+
+  test("range column") {
+    val tableName = "alter_sc_range_column"
+    val baseTableName = "alter_sc_range_column_base"
+    loadData(tableName, baseTableName)
+    sql(s"alter table $tableName set tblproperties('sort_scope'='local_sort', 'sort_columns'='smallIntField, charField')")
+    loadData(tableName, baseTableName)
+
+    checkAnswer(sql(s"select count(*) from $tableName"), sql(s"select count(*) from $baseTableName"))
+    checkAnswer(sql(s"select * from $tableName order by floatField"), sql(s"select * from $baseTableName order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField"))
+  }
+
+  test("add/drop column for sort_columns") {
+    val tableName = "alter_sc_add_column"
+    val baseTableName = "alter_sc_add_column_base"
+    loadData(tableName, baseTableName)
+    sql(s"alter table $tableName set tblproperties('sort_scope'='local_sort', 'sort_columns'='smallIntField, stringField')")
+    loadData(tableName, baseTableName)
+    // add column
+    sql(s"alter table $tableName add columns( varcharField varchar(10), charField char(10))")
+    sql(s"alter table $baseTableName add columns( varcharField varchar(10), charField char(10))")
+    loadData(tableName, baseTableName)
+
+    checkAnswer(sql(s"select count(*) from $tableName"), sql(s"select count(*) from $baseTableName"))
+    checkAnswer(sql(s"select * from $tableName order by floatField, charField"), sql(s"select * from $baseTableName order by floatField, charField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField, charField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField, charField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 and charField is null order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 and charField is null order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 and charField is not null order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 and charField is not null order by floatField"))
+
+    // add new column to sort_columns
+    sql(s"alter table $tableName set tblproperties('sort_scope'='local_sort', 'sort_columns'='smallIntField, charField')")
+    loadData(tableName, baseTableName)
+    checkAnswer(sql(s"select count(*) from $tableName"), sql(s"select count(*) from $baseTableName"))
+    checkAnswer(sql(s"select * from $tableName order by floatField, charField"), sql(s"select * from $baseTableName order by floatField, charField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField, charField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField, charField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 and charField is null order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 and charField is null order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 and charField is not null order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 and charField is not null order by floatField"))
+
+    // drop column of old sort_columns
+    sql(s"alter table $tableName drop columns(stringField)")
+    sql(s"alter table $baseTableName drop columns(stringField)")
+    loadData(tableName, baseTableName)
+    checkAnswer(sql(s"select count(*) from $tableName"), sql(s"select count(*) from $baseTableName"))
+    checkAnswer(sql(s"select * from $tableName order by floatField, charField"), sql(s"select * from $baseTableName order by floatField, charField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 order by floatField, charField"), sql(s"select * from $baseTableName where smallIntField = 2 order by floatField, charField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 and charField is null order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 and charField is null order by floatField"))
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 2 and charField is not null order by floatField"), sql(s"select * from $baseTableName where smallIntField = 2 and charField is not null order by floatField"))
+  }
+
+  test("bloom filter") {
+    val tableName = "alter_sc_bloom"
+    val dataMapName = "alter_sc_bloom_dm1"
+    val baseTableName = "alter_sc_bloom_base"
+    loadData(tableName, baseTableName)
+    checkExistence(sql(s"SHOW DATAMAP ON TABLE $tableName"), true, "bloomfilter", dataMapName)
+    checkExistence(sql(s"EXPLAIN SELECT * FROM $tableName WHERE smallIntField = 3"), true, "bloomfilter", dataMapName)
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 3 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 3 order by floatField"))
+
+    sql(s"alter table $tableName set tblproperties('sort_scope'='global_sort', 'sort_columns'='smallIntField, charField')")
+    loadData(tableName, baseTableName)
+    checkExistence(sql(s"EXPLAIN SELECT * FROM $tableName WHERE smallIntField = 3"), true, "bloomfilter", dataMapName)
+    checkAnswer(sql(s"select * from $tableName where smallIntField = 3 order by floatField"), sql(s"select * from $baseTableName where smallIntField = 3 order by floatField"))
+  }
+
+  test("pre-aggregate") {
+    val tableName = "alter_sc_agg"
+    val dataMapName = "alter_sc_agg_dm1"
+    val baseTableName = "alter_sc_agg_base"
+    loadData(tableName, baseTableName)
+    sql(s"SHOW DATAMAP ON TABLE $tableName").show(100, false)
+    checkExistence(sql(s"SHOW DATAMAP ON TABLE $tableName"), true, "preaggregate", dataMapName)
+    checkExistence(sql(s"EXPLAIN select stringField,sum(intField) as sum from $tableName where stringField = 'abc2' group by stringField"), true, "preaggregate", dataMapName)
+    checkAnswer(sql(s"select stringField,sum(intField) as sum from $tableName where stringField = 'abc2' group by stringField"), sql(s"select stringField,sum(intField) as sum from $baseTableName where stringField = 'abc2' group by stringField"))
+
+    sql(s"alter table $tableName set tblproperties('sort_scope'='global_sort', 'sort_columns'='smallIntField, charField')")
+    loadData(tableName, baseTableName)
+    sql(s"EXPLAIN select stringField,max(intField) as sum from $tableName where stringField = 'abc2' group by stringField").show(100, false)
+    checkExistence(sql(s"EXPLAIN select stringField,max(intField) as sum from $tableName where stringField = 'abc2' group by stringField"), true, "preaggregate", dataMapName)
+    checkAnswer(sql(s"select stringField,max(intField) as sum from $tableName where stringField = 'abc2' group by stringField"), sql(s"select stringField,max(intField) as sum from $baseTableName where stringField = 'abc2' group by stringField"))
+  }
+}
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
index 6cee8dc..d0ed815 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/CarbonScanRDD.scala
@@ -232,7 +232,11 @@ class CarbonScanRDD[T: ClassTag](
       statistic.addStatistics(QueryStatisticsConstants.BLOCK_ALLOCATION, System.currentTimeMillis)
       statisticRecorder.recordStatisticsForDriver(statistic, queryId)
       statistic = new QueryStatistic()
-      val carbonDistribution = if (directFill) {
+      // When the table has column drift, different blocks may have different schemas.
+      // A query cannot scan blocks with different schemas within a single task,
+      // so when the table has column drift, CARBON_TASK_DISTRIBUTION_MERGE_FILES and
+      // CARBON_TASK_DISTRIBUTION_CUSTOM cannot be used.
+      val carbonDistribution = if (directFill && !tableInfo.hasColumnDrift) {
         CarbonCommonConstants.CARBON_TASK_DISTRIBUTION_MERGE_FILES
       } else {
         CarbonProperties.getInstance().getProperty(
@@ -260,7 +264,7 @@ class CarbonScanRDD[T: ClassTag](
             CarbonCommonConstants.CARBON_CUSTOM_BLOCK_DISTRIBUTION,
             "false").toBoolean ||
           carbonDistribution.equalsIgnoreCase(CarbonCommonConstants.CARBON_TASK_DISTRIBUTION_CUSTOM)
-        if (useCustomDistribution) {
+        if (useCustomDistribution && !tableInfo.hasColumnDrift) {
           // create a list of block based on split
           val blockList = splits.asScala.map(_.asInstanceOf[Distributable])
 
@@ -297,7 +301,7 @@ class CarbonScanRDD[T: ClassTag](
             val partition = new CarbonSparkPartition(id, splitWithIndex._2, multiBlockSplit)
             result.add(partition)
           }
-        } else if (carbonDistribution.equalsIgnoreCase(
+        } else if (!tableInfo.hasColumnDrift && carbonDistribution.equalsIgnoreCase(
             CarbonCommonConstants.CARBON_TASK_DISTRIBUTION_MERGE_FILES)) {
 
           // sort blocks in reverse order of length
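
A minimal sketch of the distribution choice described in the comment above. It is not the
CarbonScanRDD code itself; the names (hasColumnDrift, directFill and the distribution constant)
are stand-ins for the real tableInfo/CarbonCommonConstants members:

    // Hypothetical, simplified selection of the task distribution mode. Column drift means
    // blocks of one table may carry different schemas, and a single task cannot scan blocks
    // with different schemas, so the merge-files distribution is skipped in that case.
    object DistributionChooser {
      val MergeFiles = "MERGE_SMALL_FILES" // assumed constant value, for illustration only

      def choose(directFill: Boolean, hasColumnDrift: Boolean, configured: String): String =
        if (directFill && !hasColumnDrift) MergeFiles else configured
    }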
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
index d90c6b2..da42363 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/CommonUtil.scala
@@ -17,7 +17,6 @@
 
 package org.apache.carbondata.spark.util
 
-
 import java.io.File
 import java.math.BigDecimal
 import java.text.SimpleDateFormat
@@ -47,16 +46,16 @@ import org.apache.carbondata.core.metadata.CarbonMetadata
 import org.apache.carbondata.core.metadata.datatype.{DataType, DataTypes}
 import org.apache.carbondata.core.metadata.schema.PartitionInfo
 import org.apache.carbondata.core.metadata.schema.partition.PartitionType
+import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.scan.partition.PartitionUtil
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager
-import org.apache.carbondata.core.util.{ByteUtil, CarbonProperties, ThreadLocalTaskInfo}
+import org.apache.carbondata.core.util.{ByteUtil, CarbonProperties, CarbonUtil, ThreadLocalTaskInfo}
 import org.apache.carbondata.core.util.comparator.Comparator
 import org.apache.carbondata.core.util.path.CarbonTablePath
 import org.apache.carbondata.processing.loading.csvinput.CSVInputFormat
 import org.apache.carbondata.processing.loading.model.CarbonLoadModel
 import org.apache.carbondata.processing.util.CarbonDataProcessorUtil
 
-
 object CommonUtil {
   private val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
 
@@ -794,6 +793,7 @@ object CommonUtil {
     }
     storeLocation
   }
+
   /**
    * This method will validate the cache level
    *
@@ -909,6 +909,80 @@ object CommonUtil {
     }
   }
 
+  def isDataTypeSupportedForSortColumn(columnDataType: String): Boolean = {
+    val dataTypes = Array("array", "struct", "map", "double", "float", "decimal", "binary")
+    dataTypes.exists(x => x.equalsIgnoreCase(columnDataType))
+  }
+
+  def validateSortScope(newProperties: Map[String, String]): Unit = {
+    val sortScopeOption = newProperties.get(CarbonCommonConstants.SORT_SCOPE)
+    if (sortScopeOption.isDefined) {
+      if (!CarbonUtil.isValidSortOption(sortScopeOption.get)) {
+        throw new MalformedCarbonCommandException(
+          s"Invalid SORT_SCOPE ${ sortScopeOption.get }, " +
+          s"valid SORT_SCOPE are 'NO_SORT', 'BATCH_SORT', 'LOCAL_SORT' and 'GLOBAL_SORT'")
+      }
+    }
+  }
+
+  def validateSortColumns(
+      sortKey: Array[String],
+      fields: Seq[(String, String)],
+      varcharCols: Seq[String]
+  ): Unit = {
+    if (sortKey.diff(sortKey.distinct).length > 0 ||
+        (sortKey.length > 1 && sortKey.contains(""))) {
+      throw new MalformedCarbonCommandException(
+        "SORT_COLUMNS Either having duplicate columns : " +
+        sortKey.diff(sortKey.distinct).mkString(",") + " or it contains illegal argumnet.")
+    }
+
+    sortKey.foreach { column =>
+      if (!fields.exists(x => x._1.equalsIgnoreCase(column))) {
+        val errorMsg = "sort_columns: " + column +
+                       " does not exist in table. Please check the create table statement."
+        throw new MalformedCarbonCommandException(errorMsg)
+      } else {
+        val dataType = fields.find(x =>
+          x._1.equalsIgnoreCase(column)).get._2
+        if (isDataTypeSupportedForSortColumn(dataType)) {
+          val errorMsg = s"sort_columns is unsupported for $dataType datatype column: " + column
+          throw new MalformedCarbonCommandException(errorMsg)
+        }
+        if (varcharCols.exists(x => x.equalsIgnoreCase(column))) {
+          throw new MalformedCarbonCommandException(
+            s"sort_columns is unsupported for long string datatype column: $column")
+        }
+      }
+    }
+  }
+
+  def validateSortColumns(carbonTable: CarbonTable, newProperties: Map[String, String]): Unit = {
+    val fields = carbonTable.getCreateOrderColumn(carbonTable.getTableName).asScala
+    val tableProperties = carbonTable.getTableInfo.getFactTable.getTableProperties
+    var sortKeyOption = newProperties.get(CarbonCommonConstants.SORT_COLUMNS)
+    val varcharColsString = tableProperties.get(CarbonCommonConstants.LONG_STRING_COLUMNS)
+    val varcharCols: Seq[String] = if (varcharColsString == null) {
+      Seq.empty[String]
+    } else {
+      varcharColsString.split(",").map(_.trim)
+    }
+
+    if (!sortKeyOption.isDefined) {
+      // default no columns are selected for sorting in no_sort scope
+      sortKeyOption = Some("")
+    }
+    val sortKeyString = CarbonUtil.unquoteChar(sortKeyOption.get).trim
+    if (!sortKeyString.isEmpty) {
+      val sortKey = sortKeyString.split(',').map(_.trim)
+      validateSortColumns(
+        sortKey,
+        fields.map { field => (field.getColName, field.getDataType.getName) },
+        varcharCols
+      )
+    }
+  }
+
   def bytesToDisplaySize(size: Long): String = bytesToDisplaySize(BigDecimal.valueOf(size))
 
   // This method converts the bytes count to display size upto 2 decimal places
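
A hedged usage sketch of the two validators added to CommonUtil above. The signatures are taken
from the diff; the property map and column list are illustrative only and assume the property
keys are already lower-cased to match the CarbonCommonConstants values:

    import org.apache.carbondata.spark.util.CommonUtil

    // Validate a proposed SORT_SCOPE value; throws MalformedCarbonCommandException if invalid.
    CommonUtil.validateSortScope(Map("sort_scope" -> "local_sort"))

    // Validate the requested sort columns against the table's (name, dataType) pairs and its
    // long string (varchar) columns; throws if a column is missing, duplicated or unsupported.
    CommonUtil.validateSortColumns(
      Array("charField", "intField"),
      Seq(("charField", "string"), ("intField", "int"), ("payload", "binary")),
      Seq.empty[String])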
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index 3e80ea6..d978128 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -760,32 +760,11 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
     var sortKeyDimsTmp: Seq[String] = Seq[String]()
     if (!sortKeyString.isEmpty) {
       val sortKey = sortKeyString.split(',').map(_.trim)
-      if (sortKey.diff(sortKey.distinct).length > 0 ||
-          (sortKey.length > 1 && sortKey.contains(""))) {
-        throw new MalformedCarbonCommandException(
-          "SORT_COLUMNS Either having duplicate columns : " +
-          sortKey.diff(sortKey.distinct).mkString(",") + " or it contains illegal argumnet.")
-      }
-
-      sortKey.foreach { column =>
-        if (!fields.exists(x => x.column.equalsIgnoreCase(column))) {
-          val errorMsg = "sort_columns: " + column +
-            " does not exist in table. Please check the create table statement."
-          throw new MalformedCarbonCommandException(errorMsg)
-        } else {
-          val dataType = fields.find(x =>
-            x.column.equalsIgnoreCase(column)).get.dataType.get
-          if (isDataTypeSupportedForSortColumn(dataType)) {
-            val errorMsg = s"sort_columns is unsupported for $dataType datatype column: " + column
-            throw new MalformedCarbonCommandException(errorMsg)
-          }
-          if (varcharCols.exists(x => x.equalsIgnoreCase(column))) {
-            throw new MalformedCarbonCommandException(
-              s"sort_columns is unsupported for long string datatype column: $column")
-          }
-        }
-      }
-
+      CommonUtil.validateSortColumns(
+        sortKey,
+        fields.map { field => (field.column, field.dataType.get) },
+        varcharCols
+      )
       sortKey.foreach { dimension =>
         if (!sortKeyDimsTmp.exists(dimension.equalsIgnoreCase)) {
           fields.foreach { field =>
diff --git a/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala b/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
index 1dc562dc..99bc863 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/util/AlterTableUtil.scala
@@ -23,6 +23,7 @@ import scala.collection.JavaConverters._
 import scala.collection.mutable
 import scala.collection.mutable.ListBuffer
 
+import org.apache.commons.lang3.StringUtils
 import org.apache.spark.SparkConf
 import org.apache.spark.sql.{CarbonEnv, SparkSession}
 import org.apache.spark.sql.catalyst.TableIdentifier
@@ -32,13 +33,14 @@ import org.apache.spark.sql.hive.HiveExternalCatalog._
 import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
 import org.apache.carbondata.common.logging.LogServiceFactory
 import org.apache.carbondata.core.constants.CarbonCommonConstants
+import org.apache.carbondata.core.constants.SortScopeOptions.SortScope
 import org.apache.carbondata.core.datamap.DataMapStoreManager
 import org.apache.carbondata.core.datastore.block.SegmentPropertiesAndSchemaHolder
 import org.apache.carbondata.core.datastore.impl.FileFactory
 import org.apache.carbondata.core.exception.InvalidConfigurationException
 import org.apache.carbondata.core.locks.{CarbonLockUtil, ICarbonLock, LockUsage}
 import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTableIdentifier}
-import org.apache.carbondata.core.metadata.converter.ThriftWrapperSchemaConverterImpl
+import org.apache.carbondata.core.metadata.converter.{SchemaConverter, ThriftWrapperSchemaConverterImpl}
 import org.apache.carbondata.core.metadata.datatype.DataTypes
 import org.apache.carbondata.core.metadata.schema.table.CarbonTable
 import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema
@@ -101,6 +103,76 @@ object AlterTableUtil {
   }
 
   /**
+   * update the schema when SORT_COLUMNS is changed
+   */
+  private def updateSchemaForSortColumns(
+      thriftTable: TableInfo,
+      lowerCasePropertiesMap: mutable.Map[String, String],
+      schemaConverter: SchemaConverter
+  ) = {
+    val sortColumnsOption = lowerCasePropertiesMap.get(CarbonCommonConstants.SORT_COLUMNS)
+    if (sortColumnsOption.isDefined) {
+      val sortColumnsString = CarbonUtil.unquoteChar(sortColumnsOption.get).trim
+      val columns = thriftTable.getFact_table.getTable_columns
+      // remove old sort_columns property from ColumnSchema
+      val columnSeq =
+        columns
+          .asScala
+          .map { column =>
+            val columnProperties = column.getColumnProperties
+            if (columnProperties != null) {
+              columnProperties.remove(CarbonCommonConstants.SORT_COLUMNS)
+            }
+            column
+          }
+          .zipWithIndex
+      if (!sortColumnsString.isEmpty) {
+        val newSortColumns = sortColumnsString.split(',').map(_.trim)
+        // map sort_columns index in column list
+        val sortColumnsIndexMap = newSortColumns
+          .zipWithIndex
+          .map { entry =>
+            val column = columnSeq.find(_._1.getColumn_name.equalsIgnoreCase(entry._1)).get
+            var columnProperties = column._1.getColumnProperties
+            if (columnProperties == null) {
+              columnProperties = new util.HashMap[String, String]()
+              column._1.setColumnProperties(columnProperties)
+            }
+            // change sort_columns to dimension
+            if (!column._1.isDimension) {
+              column._1.setDimension(true)
+              columnProperties.put(CarbonCommonConstants.COLUMN_DRIFT, "true")
+            }
+            // add sort_columns property
+            columnProperties.put(CarbonCommonConstants.SORT_COLUMNS, "true")
+            (column._2, entry._2)
+          }
+          .toMap
+        var index = newSortColumns.length
+        // re-order all columns, move sort_columns to the head of column list
+        val newColumns = columnSeq
+          .map { entry =>
+            val sortColumnIndexOption = sortColumnsIndexMap.get(entry._2)
+            val newIndex = if (sortColumnIndexOption.isDefined) {
+              sortColumnIndexOption.get
+            } else {
+              val tempIndex = index
+              index += 1
+              tempIndex
+            }
+            (newIndex, entry._1)
+          }
+          .sortWith(_._1 < _._1)
+          .map(_._2)
+          .asJava
+        // use new columns
+        columns.clear()
+        columns.addAll(newColumns)
+      }
+    }
+  }
+
+  /**
    * @param carbonTable
    * @param schemaEvolutionEntry
    * @param thriftTable
@@ -361,9 +433,10 @@ object AlterTableUtil {
       // validate the range column properties
       validateRangeColumnProperties(carbonTable, lowerCasePropertiesMap)
 
-      // validate the Sort Scope
-      validateSortScopeProperty(carbonTable, lowerCasePropertiesMap)
-
+      // validate the Sort Scope and Sort Columns
+      validateSortScopeAndSortColumnsProperties(carbonTable, lowerCasePropertiesMap)
+      // if SORT_COLUMNS is changed, move the sort columns to the head of the column list
+      updateSchemaForSortColumns(thriftTable, lowerCasePropertiesMap, schemaConverter)
       // below map will be used for cache invalidation. As tblProperties map is getting modified
       // in the next few steps the original map need to be retained for any decision making
       val existingTablePropertiesMap = mutable.Map(tblPropertiesMap.toSeq: _*)
@@ -394,9 +467,13 @@ object AlterTableUtil {
             if (propKey.equalsIgnoreCase(CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE)) {
               tblPropertiesMap
                 .put(propKey.toLowerCase, CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE_DEFAULT)
-            } else if (propKey.equalsIgnoreCase("sort_scope")) {
+            } else if (propKey.equalsIgnoreCase(CarbonCommonConstants.SORT_SCOPE)) {
               tblPropertiesMap
                 .put(propKey.toLowerCase, CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)
+            } else if (propKey.equalsIgnoreCase(CarbonCommonConstants.SORT_COLUMNS)) {
+              val errorMessage = "Error: Invalid option(s): " + propKey +
+                                 ", please set SORT_COLUMNS as empty instead of unset"
+              throw new MalformedCarbonCommandException(errorMessage)
             } else {
               tblPropertiesMap.remove(propKey.toLowerCase)
             }
@@ -440,7 +517,8 @@ object AlterTableUtil {
       "LOCAL_DICTIONARY_EXCLUDE",
       "LOAD_MIN_SIZE_INMB",
       "RANGE_COLUMN",
-      "SORT_SCOPE")
+      "SORT_SCOPE",
+      "SORT_COLUMNS")
     supportedOptions.contains(propKey.toUpperCase)
   }
 
@@ -542,18 +620,34 @@ object AlterTableUtil {
     }
   }
 
-  def validateSortScopeProperty(carbonTable: CarbonTable,
+  def validateSortScopeAndSortColumnsProperties(carbonTable: CarbonTable,
       propertiesMap: mutable.Map[String, String]): Unit = {
-    propertiesMap.foreach { property =>
-      if (property._1.equalsIgnoreCase("SORT_SCOPE")) {
-        if (!CarbonUtil.isValidSortOption(property._2)) {
-          throw new MalformedCarbonCommandException(
-            s"Invalid SORT_SCOPE ${ property._2 }, valid SORT_SCOPE are 'NO_SORT', 'BATCH_SORT', " +
-            s"'LOCAL_SORT' and 'GLOBAL_SORT'")
-        } else if (!property._2.equalsIgnoreCase("NO_SORT") &&
-                   (carbonTable.getNumberOfSortColumns == 0)) {
+    CommonUtil.validateSortScope(propertiesMap)
+    CommonUtil.validateSortColumns(carbonTable, propertiesMap)
+    // match SORT_SCOPE and SORT_COLUMNS
+    val newSortScope = propertiesMap.get(CarbonCommonConstants.SORT_SCOPE)
+    val newSortColumns = propertiesMap.get(CarbonCommonConstants.SORT_COLUMNS)
+    if (newSortScope.isDefined) {
+      // 1. check SORT_COLUMNS when SORT_SCOPE is not changed to NO_SORT
+      if (!SortScope.NO_SORT.name().equalsIgnoreCase(newSortScope.get)) {
+        if (newSortColumns.isDefined) {
+          if (StringUtils.isBlank(CarbonUtil.unquoteChar(newSortColumns.get))) {
+            throw new InvalidConfigurationException(
+              s"Cannot set SORT_COLUMNS as empty when setting SORT_SCOPE as ${newSortScope.get} ")
+          }
+        } else {
+          if (carbonTable.getNumberOfSortColumns == 0) {
+            throw new InvalidConfigurationException(
+              s"Cannot set SORT_SCOPE as ${newSortScope.get} when table has no SORT_COLUMNS")
+          }
+        }
+      }
+    } else if (newSortColumns.isDefined) {
+      // 2. check SORT_SCOPE when SORT_COLUMNS is changed to empty
+      if (StringUtils.isBlank(CarbonUtil.unquoteChar(newSortColumns.get))) {
+        if (!SortScope.NO_SORT.equals(carbonTable.getSortScope)) {
           throw new InvalidConfigurationException(
-            s"Cannot set SORT_SCOPE as ${ property._2 } when table has no SORT_COLUMNS")
+            s"Cannot set SORT_COLUMNS as empty when SORT_SCOPE is ${carbonTable.getSortScope} ")
         }
       }
     }
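
A standalone sketch of the reordering idea in updateSchemaForSortColumns above: columns named in
the new SORT_COLUMNS move to the head of the column list in the given order, while the remaining
columns keep their relative order. This is an illustration over plain column names, not the
thrift ColumnSchema handling (the real code also compares names case-insensitively):

    def reorderForSortColumns(columns: Seq[String], newSortColumns: Seq[String]): Seq[String] = {
      // target position of each new sort column
      val sortIndex = newSortColumns.zipWithIndex.toMap
      var next = newSortColumns.length
      columns
        .map { col =>
          // sort columns get their fixed positions, the other columns follow in original order
          val pos = sortIndex.getOrElse(col, { val p = next; next += 1; p })
          (pos, col)
        }
        .sortBy(_._1)
        .map(_._2)
    }

    // reorderForSortColumns(Seq("a", "b", "c", "d"), Seq("c", "a")) returns Seq("c", "a", "b", "d")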


[carbondata] 12/22: [CARBONDATA-3345] A growing streaming ROW_V1 carbondata file could have some of its InputSplits ignored

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit f80a28dd9fee8c5d355b30d4e422b854a981b796
Author: junyan-zg <27...@qq.com>
AuthorDate: Wed Apr 24 22:46:51 2019 +0800

    [CARBONDATA-3345] A growing streaming ROW_V1 carbondata file could have some of its InputSplits ignored
    
    After inspecting the carbondata segments: when a streaming file grows beyond roughly 150 MB
    (possibly 128 MB), Presto queries it through several small InputSplits, including splits in
    ROW_V1 format. The previous grouping key caused some of these ROW_V1 splits to be ignored,
    resulting in inaccurate query results. The fix concatenates 'carbonInput.getStart()' into the
    Java map key for ROW_V1 InputSplits so that every required InputSplit is kept.
    
    This closes #3186
---
 .../org/apache/carbondata/presto/impl/CarbonTableReader.java     | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/integration/presto/src/main/java/org/apache/carbondata/presto/impl/CarbonTableReader.java b/integration/presto/src/main/java/org/apache/carbondata/presto/impl/CarbonTableReader.java
index 57d8d5e..7ffe053 100755
--- a/integration/presto/src/main/java/org/apache/carbondata/presto/impl/CarbonTableReader.java
+++ b/integration/presto/src/main/java/org/apache/carbondata/presto/impl/CarbonTableReader.java
@@ -46,6 +46,7 @@ import org.apache.carbondata.core.metadata.schema.table.CarbonTable;
 import org.apache.carbondata.core.metadata.schema.table.TableInfo;
 import org.apache.carbondata.core.reader.ThriftReader;
 import org.apache.carbondata.core.scan.expression.Expression;
+import org.apache.carbondata.core.statusmanager.FileFormat;
 import org.apache.carbondata.core.statusmanager.LoadMetadataDetails;
 import org.apache.carbondata.core.statusmanager.SegmentStatusManager;
 import org.apache.carbondata.core.util.CarbonProperties;
@@ -291,7 +292,13 @@ public class CarbonTableReader {
         // Use block distribution
         List<List<CarbonLocalInputSplit>> inputSplits = new ArrayList(
             result.stream().map(x -> (CarbonLocalInputSplit) x).collect(Collectors.groupingBy(
-                carbonInput -> carbonInput.getSegmentId().concat(carbonInput.getPath()))).values());
+                carbonInput -> {
+                  if (FileFormat.ROW_V1.equals(carbonInput.getFileFormat())) {
+                    return carbonInput.getSegmentId().concat(carbonInput.getPath())
+                      .concat(carbonInput.getStart() + "");
+                  }
+                  return carbonInput.getSegmentId().concat(carbonInput.getPath());
+                })).values());
         if (inputSplits != null) {
           for (int j = 0; j < inputSplits.size(); j++) {
             multiBlockSplitList.add(new CarbonLocalMultiBlockSplit(inputSplits.get(j),
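
A simplified Scala sketch of the grouping-key change above (the production code is the Java
stream in CarbonTableReader). For streaming ROW_V1 files the split's start offset becomes part
of the grouping key, so several InputSplits of one growing file are no longer collapsed into a
single multi-block split. The case class and field names are assumptions mirroring
CarbonLocalInputSplit:

    // Hypothetical, simplified split descriptor.
    case class LocalSplit(segmentId: String, path: String, start: Long, fileFormat: String)

    // Group splits for block distribution; ROW_V1 splits keep their start offset in the key.
    def groupSplits(splits: Seq[LocalSplit]): Seq[Seq[LocalSplit]] =
      splits.groupBy { s =>
        if (s.fileFormat == "ROW_V1") s.segmentId + s.path + s.start
        else s.segmentId + s.path
      }.values.toSeq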


[carbondata] 18/22: [DOC] Update doc for sort_columns modification

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit 4d21b6a8cb7fb3f5b70fd862bbbba78e5f0f12bd
Author: QiangCai <qi...@qq.com>
AuthorDate: Mon May 6 10:39:19 2019 +0800

    [DOC] Update doc for sort_columns modification
    
    Update doc for sort_columns modification
    
    This closes #3203
---
 docs/ddl-of-carbondata.md | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/docs/ddl-of-carbondata.md b/docs/ddl-of-carbondata.md
index 88615a2..5bc8f10 100644
--- a/docs/ddl-of-carbondata.md
+++ b/docs/ddl-of-carbondata.md
@@ -793,6 +793,27 @@ Users can specify which columns to include and exclude for local dictionary gene
        ALTER TABLE tablename UNSET TBLPROPERTIES('SORT_SCOPE')
        ```
 
+     - ##### SORT COLUMNS
+       Example to SET SORT COLUMNS:
+       ```
+       ALTER TABLE tablename SET TBLPROPERTIES('SORT_COLUMNS'='column1')
+       ```
+       After this operation, new data loads will use the new SORT_COLUMNS. Users can adjust 
+       the SORT_COLUMNS according to the query pattern, but the change does not rewrite the 
+       old data directly, so it does not affect the query performance of the old segments 
+       that are not sorted by the new SORT_COLUMNS.
+       
+       UNSET is not supported for SORT_COLUMNS; set SORT_COLUMNS to an empty string instead:
+       ```
+       ALTER TABLE tablename SET TBLPROPERTIES('SORT_COLUMNS'='')
+       ```
+
+       **NOTE:**
+        * A future version will enhance "custom" compaction to sort the old segments one by one.
+        * SORT_COLUMNS modification is not supported for streaming tables.
+        * If inverted index columns are removed from the new SORT_COLUMNS, the inverted index 
+        will no longer be created for them, but the old INVERTED_INDEX configuration is kept.
+
 ### DROP TABLE
 
   This command is used to delete an existing table.
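
A short, hedged spark-sql example of the flow described in the SORT_COLUMNS section of the doc
change above, assuming a CarbonSession available as `spark`, an existing carbon table `t` and a
hypothetical staging table `staging`:

    // Loads performed after this ALTER are sorted by the new SORT_COLUMNS; old segments stay as is.
    spark.sql("ALTER TABLE t SET TBLPROPERTIES('SORT_COLUMNS'='column1')")
    spark.sql("INSERT INTO t SELECT * FROM staging")

    // UNSET is rejected for SORT_COLUMNS; setting it to an empty string disables sorting instead.
    spark.sql("ALTER TABLE t SET TBLPROPERTIES('SORT_COLUMNS'='')")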


[carbondata] 20/22: [CARBONDATA-3374] Optimize documentation and fix some spell errors.

Posted by ra...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

ravipesala pushed a commit to branch branch-1.5
in repository https://gitbox.apache.org/repos/asf/carbondata.git

commit b42f1acfb056e9a1cfc5e19b9d652e4af4848aa6
Author: xubo245 <xu...@huawei.com>
AuthorDate: Tue May 7 20:47:13 2019 +0800

    [CARBONDATA-3374] Optimize documentation and fix some spell errors.
    
    Optimize documentation and fix some spell errors.
    
    This closes #3207
---
 .../apache/carbondata/core/datamap/dev/DataMapFactory.java   |  4 ++--
 .../carbondata/core/indexstore/BlockletDetailsFetcher.java   |  4 ++--
 .../indexstore/blockletindex/BlockletDataMapFactory.java     |  2 +-
 .../carbondata/core/indexstore/schema/SchemaGenerator.java   |  2 +-
 .../apache/carbondata/core/util/path/CarbonTablePath.java    |  2 +-
 .../carbondata/datamap/lucene/LuceneDataMapFactoryBase.java  |  2 +-
 .../datamap/lucene/LuceneFineGrainDataMapFactory.java        |  2 +-
 docs/carbon-as-spark-datasource-guide.md                     |  2 +-
 docs/ddl-of-carbondata.md                                    | 12 +++++++-----
 .../spark/testsuite/dataload/TestLoadDataGeneral.scala       |  4 ++--
 .../spark/testsuite/datamap/CGDataMapTestCase.scala          |  4 ++--
 .../spark/testsuite/datamap/DataMapWriterSuite.scala         |  2 +-
 .../spark/testsuite/datamap/FGDataMapTestCase.scala          |  4 ++--
 .../apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala   |  2 +-
 .../org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala   |  6 +++---
 .../execution/datasources/SparkCarbonFileFormat.scala        |  3 ++-
 .../scala/org/apache/spark/sql/CarbonCatalystOperators.scala |  4 ++--
 .../execution/command/management/CarbonLoadDataCommand.scala |  2 +-
 .../scala/org/apache/spark/sql/optimizer/CarbonFilters.scala |  2 +-
 19 files changed, 34 insertions(+), 31 deletions(-)

diff --git a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
index ee7914d..b32a482 100644
--- a/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/datamap/dev/DataMapFactory.java
@@ -88,7 +88,7 @@ public abstract class DataMapFactory<T extends DataMap> {
   }
 
   /**
-   * Get the datamap for segmentid
+   * Get the datamap for segmentId
    */
   public abstract List<T> getDataMaps(Segment segment) throws IOException;
 
@@ -99,7 +99,7 @@ public abstract class DataMapFactory<T extends DataMap> {
       throws IOException;
 
   /**
-   * Get all distributable objects of a segmentid
+   * Get all distributable objects of a segmentId
    * @return
    */
   public abstract List<DataMapDistributable> toDistributable(Segment segment);
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailsFetcher.java b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailsFetcher.java
index 1971f40..ae01e9e 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailsFetcher.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/BlockletDetailsFetcher.java
@@ -27,7 +27,7 @@ import org.apache.carbondata.core.datamap.Segment;
 public interface BlockletDetailsFetcher {
 
   /**
-   * Get the blocklet detail information based on blockletid, blockid and segmentid.
+   * Get the blocklet detail information based on blockletid, blockid and segmentId.
    *
    * @param blocklets
    * @param segment
@@ -38,7 +38,7 @@ public interface BlockletDetailsFetcher {
       throws IOException;
 
   /**
-   * Get the blocklet detail information based on blockletid, blockid and segmentid.
+   * Get the blocklet detail information based on blockletid, blockid and segmentId.
    *
    * @param blocklet
    * @param segment
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
index 2ef7b88..93be06e 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/blockletindex/BlockletDataMapFactory.java
@@ -185,7 +185,7 @@ public class BlockletDataMapFactory extends CoarseGrainDataMapFactory
   }
 
   /**
-   * Get the blocklet detail information based on blockletid, blockid and segmentid. This method is
+   * Get the blocklet detail information based on blockletid, blockid and segmentId. This method is
    * exclusively for BlockletDataMapFactory as detail information is only available in this
    * default datamap.
    */
diff --git a/core/src/main/java/org/apache/carbondata/core/indexstore/schema/SchemaGenerator.java b/core/src/main/java/org/apache/carbondata/core/indexstore/schema/SchemaGenerator.java
index 41c382b..288e062 100644
--- a/core/src/main/java/org/apache/carbondata/core/indexstore/schema/SchemaGenerator.java
+++ b/core/src/main/java/org/apache/carbondata/core/indexstore/schema/SchemaGenerator.java
@@ -189,7 +189,7 @@ public class SchemaGenerator {
     // for storing file name
     taskMinMaxSchemas
         .add(new CarbonRowSchema.VariableCarbonRowSchema(DataTypes.BYTE_ARRAY));
-    // for storing segmentid
+    // for storing segmentId
     taskMinMaxSchemas
         .add(new CarbonRowSchema.VariableCarbonRowSchema(DataTypes.BYTE_ARRAY));
     // for storing min max flag for each column which reflects whether min max for a column is
diff --git a/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java b/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
index da558be..422a6dc 100644
--- a/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
+++ b/core/src/main/java/org/apache/carbondata/core/util/path/CarbonTablePath.java
@@ -291,7 +291,7 @@ public class CarbonTablePath {
   }
 
   /**
-   * Return the segment path from table path and segmentid
+   * Return the segment path from table path and segmentId
    */
   public static String getSegmentPath(String tablePath, String segmentId) {
     return getPartitionDir(tablePath) + File.separator + SEGMENT_PREFIX + segmentId;
diff --git a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java
index 68c3bcc..3ae390d 100644
--- a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java
+++ b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneDataMapFactoryBase.java
@@ -209,7 +209,7 @@ abstract class LuceneDataMapFactoryBase<T extends DataMap> extends DataMapFactor
   }
 
   /**
-   * Get all distributable objects of a segmentid
+   * Get all distributable objects of a segmentId
    */
   @Override
   public List<DataMapDistributable> toDistributable(Segment segment) {
diff --git a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java
index a3c4063..7ee843b 100644
--- a/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java
+++ b/datamap/lucene/src/main/java/org/apache/carbondata/datamap/lucene/LuceneFineGrainDataMapFactory.java
@@ -47,7 +47,7 @@ public class LuceneFineGrainDataMapFactory extends LuceneDataMapFactoryBase<Fine
   }
 
   /**
-   * Get the datamap for segmentid
+   * Get the datamap for segmentId
    */
   @Override public List<FineGrainDataMap> getDataMaps(Segment segment) throws IOException {
     List<FineGrainDataMap> lstDataMap = new ArrayList<>();
diff --git a/docs/carbon-as-spark-datasource-guide.md b/docs/carbon-as-spark-datasource-guide.md
index fe46b09..66338f1 100644
--- a/docs/carbon-as-spark-datasource-guide.md
+++ b/docs/carbon-as-spark-datasource-guide.md
@@ -19,7 +19,7 @@
 
 The CarbonData fileformat is now integrated as Spark datasource for read and write operation without using CarbonSession. This is useful for users who wants to use carbondata as spark's data source. 
 
-**Note:** You can only apply the functions/features supported by spark datasource APIs, functionalities supported would be similar to Parquet. The carbon session features are not supported.
+**Note:** You can only apply the functions/features supported by spark datasource APIs; the supported functionality is similar to Parquet. The carbon session features are not supported. The result is displayed in byte array format when a select query is run on a binary column in spark-sql.
 
 # Create Table with DDL
 
diff --git a/docs/ddl-of-carbondata.md b/docs/ddl-of-carbondata.md
index 34eca8d..2d43645 100644
--- a/docs/ddl-of-carbondata.md
+++ b/docs/ddl-of-carbondata.md
@@ -157,6 +157,7 @@ CarbonData DDL statements are documented here,which includes:
       * BOOLEAN
       * FLOAT
       * BYTE
+      * BINARY
    * In case of multi-level complex dataType columns, primitive string/varchar/char columns are considered for local dictionary generation.
 
    System Level Properties for Local Dictionary: 
@@ -224,7 +225,7 @@ CarbonData DDL statements are documented here,which includes:
    - ##### Sort Columns Configuration
 
      This property is for users to specify which columns belong to the MDK(Multi-Dimensions-Key) index.
-     * If users don't specify "SORT_COLUMN" property, by default no columns are sorted 
+     * If users don't specify "SORT_COLUMNS" property, by default no columns are sorted 
      * If this property is specified but with empty argument, then the table will be loaded without sort.
      * This supports only string, date, timestamp, short, int, long, byte and boolean data types.
      Suggested use cases : Only build MDK index for required columns,it might help to improve the data loading performance.
@@ -233,7 +234,7 @@ CarbonData DDL statements are documented here,which includes:
      TBLPROPERTIES ('SORT_COLUMNS'='column1, column3')
      ```
 
-     **NOTE**: Sort_Columns for Complex datatype columns is not supported.
+     **NOTE**: SORT_COLUMNS is not supported for complex data type columns or the binary data type.
 
    - ##### Sort Scope Configuration
    
@@ -331,7 +332,7 @@ CarbonData DDL statements are documented here,which includes:
 
    - ##### Caching Min/Max Value for Required Columns
 
-     By default, CarbonData caches min and max values of all the columns in schema.  As the load increases, the memory required to hold the min and max values increases considerably. This feature enables you to configure min and max values only for the required columns, resulting in optimized memory usage. 
+     By default, CarbonData caches min and max values of all the columns in schema. As the load increases, the memory required to hold the min and max values increases considerably. This feature enables you to configure min and max values only for the required columns, resulting in optimized memory usage. This feature does not support the binary data type.
 
       Following are the valid values for COLUMN_META_CACHE:
       * If you want no column min/max values to be cached in the driver.
@@ -519,6 +520,7 @@ CarbonData DDL statements are documented here,which includes:
    - ##### Range Column
      This property is used to specify a column to partition the input data by range.
      Only one column can be configured. During data loading, you can use "global_sort_partitions" or "scale_factor" to avoid generating small files.
+     This feature does not support the binary data type.
 
      ```
      TBLPROPERTIES('RANGE_COLUMN'='col1')
@@ -916,7 +918,7 @@ Users can specify which columns to include and exclude for local dictionary gene
   PARTITIONED BY (productCategory STRING, productBatch STRING)
   STORED AS carbondata
   ```
-   **NOTE:** Hive partition is not supported on complex datatype columns.
+   **NOTE:** Hive partitioning is not supported on complex data type columns or the binary data type.
 
 
 #### Show Partitions
@@ -959,7 +961,7 @@ Users can specify which columns to include and exclude for local dictionary gene
 
 ### CARBONDATA PARTITION(HASH,RANGE,LIST) -- Alpha feature, this partition feature does not support update and delete data.
 
-  The partition supports three type:(Hash,Range,List), similar to other system's partition features, CarbonData's partition feature can be used to improve query performance by filtering on the partition column.
+  The partition feature supports three types (Hash, Range, List); similar to other systems' partition features, CarbonData's partitioning can be used to improve query performance by filtering on the partition column. The partition feature does not support the binary data type.
 
 ### Create Hash Partition Table
 
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
index 8361862..51e7f68 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala
@@ -45,9 +45,9 @@ class TestLoadDataGeneral extends QueryTest with BeforeAndAfterEach {
 
   private def checkSegmentExists(
       segmentId: String,
-      datbaseName: String,
+      databaseName: String,
       tableName: String): Boolean = {
-    val carbonTable = CarbonMetadata.getInstance().getCarbonTable(datbaseName, tableName)
+    val carbonTable = CarbonMetadata.getInstance().getCarbonTable(databaseName, tableName)
     val partitionPath =
       CarbonTablePath.getPartitionDir(carbonTable.getAbsoluteTableIdentifier.getTablePath)
     val fileType: FileFactory.FileType = FileFactory.getFileType(partitionPath)
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala
index a6bc30d..69334a0 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/CGDataMapTestCase.scala
@@ -62,7 +62,7 @@ class CGDataMapFactory(
   }
 
   /**
-   * Get the datamap for segmentid
+   * Get the datamap for segmentId
    */
   override def getDataMaps(segment: Segment): java.util.List[CoarseGrainDataMap] = {
     val path = identifier.getTablePath
@@ -98,7 +98,7 @@ class CGDataMapFactory(
   }
 
   /**
-   * Get all distributable objects of a segmentid
+   * Get all distributable objects of a segmentId
    *
    * @return
    */
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala
index c75649a..449ffa0 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/DataMapWriterSuite.scala
@@ -62,7 +62,7 @@ class C2DataMapFactory(
     new DataMapMeta(carbonTable.getIndexedColumns(dataMapSchema), List(ExpressionType.EQUALS).asJava)
 
   /**
-   * Get all distributable objects of a segmentid
+   * Get all distributable objects of a segmentId
    *
    * @return
    */
diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala
index 99e0509..ff77820 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/FGDataMapTestCase.scala
@@ -61,7 +61,7 @@ class FGDataMapFactory(carbonTable: CarbonTable,
   }
 
   /**
-   * Get the datamap for segmentid
+   * Get the datamap for segmentId
    */
   override def getDataMaps(segment: Segment): java.util.List[FineGrainDataMap] = {
     val path = CarbonTablePath.getSegmentPath(carbonTable.getTablePath, segment.getSegmentNo)
@@ -86,7 +86,7 @@ class FGDataMapFactory(carbonTable: CarbonTable,
   }
 
   /**
-   * Get all distributable objects of a segmentid
+   * Get all distributable objects of a segmentId
    *
    * @return
    */
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
index 1140e72..ac8224e 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/rdd/NewCarbonDataLoadRDD.scala
@@ -205,7 +205,7 @@ class NewCarbonDataLoadRDD[K, V](
        *
        * @return
        */
-      def gernerateBlocksID: String = {
+      def generateBlocksID: String = {
         carbonLoadModel.getDatabaseName + "_" + carbonLoadModel.getTableName + "_" +
         UUID.randomUUID()
       }
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
index a2b2af6..703ae37 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/catalyst/CarbonDDLSqlParser.scala
@@ -870,7 +870,7 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
                  !dictIncludeCols.exists(x => x.equalsIgnoreCase(field.column))) {
         noDictionaryDims :+= field.column
         dimFields += field
-      } else if (isDetectAsDimentionDataType(field.dataType.get)) {
+      } else if (isDetectAsDimensionDataType(field.dataType.get)) {
         dimFields += field
         // consider all String and binary cols as noDicitonaryDims by default
         if ((DataTypes.STRING.getName.equalsIgnoreCase(field.dataType.get)) ||
@@ -934,11 +934,11 @@ abstract class CarbonDDLSqlParser extends AbstractCarbonSparkSQLParser {
   }
 
   /**
-   * detect dimention data type
+   * detect dimension data type
    *
    * @param dimensionDatatype
    */
-  def isDetectAsDimentionDataType(dimensionDatatype: String): Boolean = {
+  def isDetectAsDimensionDataType(dimensionDatatype: String): Boolean = {
     val dimensionType = Array("string",
       "array",
       "struct",
diff --git a/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala b/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala
index 5f62362..185058c 100644
--- a/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala
+++ b/integration/spark-datasource/src/main/scala/org/apache/spark/sql/carbondata/execution/datasources/SparkCarbonFileFormat.scala
@@ -78,7 +78,8 @@ class SparkCarbonFileFormat extends FileFormat
 
   /**
    * If user does not provide schema while reading the data then spark calls this method to infer
-   * schema from the carbodata files. It reads the schema present in carbondata files and return it.
+   * schema from the carbondata files.
+   * It reads the schema present in carbondata files and return it.
    */
   override def inferSchema(sparkSession: SparkSession,
       options: Map[String, String],
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
index 450ead1..ff0b9e6 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
@@ -38,12 +38,12 @@ case class CarbonDictionaryCatalystDecoder(
   override def output: Seq[Attribute] = {
     child match {
       case l: LogicalRelation =>
-        // If the child is logical plan then firts update all dictionary attr with IntegerType
+        // If the child is logical plan then first update all dictionary attr with IntegerType
         val logicalOut =
           CarbonDictionaryDecoder.updateAttributes(child.output, relations, aliasMap)
         CarbonDictionaryDecoder.convertOutput(logicalOut, relations, profile, aliasMap)
       case Filter(cond, l: LogicalRelation) =>
-        // If the child is logical plan then firts update all dictionary attr with IntegerType
+        // If the child is logical plan then first update all dictionary attr with IntegerType
         val logicalOut =
           CarbonDictionaryDecoder.updateAttributes(child.output, relations, aliasMap)
         CarbonDictionaryDecoder.convertOutput(logicalOut, relations, profile, aliasMap)
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
index b4ef1f0..41494c5 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/management/CarbonLoadDataCommand.scala
@@ -1041,7 +1041,7 @@ case class CarbonLoadDataCommand(
   }
 
   /**
-   * Create the logical plan for update scenario. Here we should drop the segmentid column from the
+   * Create the logical plan for update scenario. Here we should drop the segmentId column from the
    * input rdd.
    */
   private def getLogicalQueryForUpdate(
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
index b84a7b0..c4415f8 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/optimizer/CarbonFilters.scala
@@ -532,7 +532,7 @@ object CarbonFilters {
           // read partitions directly from hive metastore using filters
           sparkSession.sessionState.catalog.listPartitionsByFilter(identifier, partitionFilters)
         } else {
-          // Read partitions alternatively by firts get all partitions then filter them
+          // Read partitions alternatively by first get all partitions then filter them
           sparkSession.sessionState.catalog.
             asInstanceOf[CarbonSessionCatalog].getPartitionsAlternate(
             partitionFilters,